Cache "snlm0e" in Bard

Improve error handling in ChatgptLogin
Fix async example in readme
Heiner Lohaus 2023-09-20 06:12:34 +02:00
parent a54291cb7c
commit 82bd6f9180
10 changed files with 80 additions and 74 deletions

View File

@@ -238,43 +238,42 @@ response = g4f.ChatCompletion.create(
 ##### Async Support:
 
-To enhance speed and overall performance, execute providers asynchronously. The total execution time will be determined by the duration of the slowest provider's execution.
+To enhance speed and overall performance, execute providers asynchronously.
+The total execution time will be determined by the duration of the slowest provider's execution.
 
 ```py
 import g4f, asyncio
 
-async def run_async():
-    _providers = [
-        g4f.Provider.AItianhu,
-        g4f.Provider.Acytoo,
-        g4f.Provider.Aichat,
-        g4f.Provider.Ails,
-        g4f.Provider.Aivvm,
-        g4f.Provider.ChatBase,
-        g4f.Provider.ChatgptAi,
-        g4f.Provider.ChatgptLogin,
-        g4f.Provider.CodeLinkAva,
-        g4f.Provider.DeepAi,
-        g4f.Provider.Opchatgpts,
-        g4f.Provider.Vercel,
-        g4f.Provider.Vitalentum,
-        g4f.Provider.Wewordle,
-        g4f.Provider.Ylokh,
-        g4f.Provider.You,
-        g4f.Provider.Yqcloud,
-    ]
-    responses = [
-        provider.create_async(
-            model=g4f.models.default,
-            messages=[{"role": "user", "content": "Hello"}],
-        )
-        for provider in _providers
-    ]
-    responses = await asyncio.gather(*responses)
-    for idx, provider in enumerate(_providers):
-        print(f"{provider.__name__}:", responses[idx])
+_providers = [
+    g4f.Provider.Aichat,
+    g4f.Provider.Aivvm,
+    g4f.Provider.ChatBase,
+    g4f.Provider.Bing,
+    g4f.Provider.CodeLinkAva,
+    g4f.Provider.DeepAi,
+    g4f.Provider.GptGo,
+    g4f.Provider.Wewordle,
+    g4f.Provider.You,
+    g4f.Provider.Yqcloud,
+]
 
-asyncio.run(run_async())
+async def run_provider(provider: g4f.Provider.AsyncProvider):
+    try:
+        response = await provider.create_async(
+            model=g4f.models.default.name,
+            messages=[{"role": "user", "content": "Hello"}],
+        )
+        print(f"{provider.__name__}:", response)
+    except Exception as e:
+        print(f"{provider.__name__}:", e)
+
+async def run_all():
+    calls = [
+        run_provider(provider) for provider in _providers
+    ]
+    await asyncio.gather(*calls)
+
+asyncio.run(run_all())
 ```
 
 ### interference openai-proxy api (use with openai python package)
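A quick way to sanity-check the sentence about total execution time (this snippet is illustrative and not part of the commit): `asyncio.gather` runs its awaitables concurrently, so wall time tracks the slowest task rather than the sum. `fake_provider` is a hypothetical stand-in for a provider call.

```py
import asyncio, time

async def fake_provider(delay: float) -> str:
    # stand-in for a provider request; sleeps instead of hitting a network API
    await asyncio.sleep(delay)
    return f"done after {delay}s"

async def demo():
    start = time.monotonic()
    results = await asyncio.gather(*(fake_provider(d) for d in (0.5, 1.0, 2.0)))
    print(results)
    print(f"elapsed: {time.monotonic() - start:.1f}s")  # ~2.0s (slowest), not 3.5s (sum)

asyncio.run(demo())
```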

View File

@@ -41,7 +41,7 @@ class Aivvm(AsyncGeneratorProvider):
         headers = {
             "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
             "Accept" : "*/*",
-            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Accept-Language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
             "Origin" : cls.url,
             "Referer" : cls.url + "/",
             "Sec-Fetch-Dest" : "empty",

View File

@@ -13,6 +13,7 @@ class Bard(AsyncProvider):
     url = "https://bard.google.com"
     needs_auth = True
     working = True
+    _snlm0e = None
 
     @classmethod
     async def create_async(
@@ -31,7 +32,6 @@ class Bard(AsyncProvider):
         headers = {
             'authority': 'bard.google.com',
             'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
-            'origin': 'https://bard.google.com',
             'referer': 'https://bard.google.com/',
             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
@@ -42,13 +42,14 @@ class Bard(AsyncProvider):
             cookies=cookies,
             headers=headers
         ) as session:
-            async with session.get(cls.url, proxy=proxy) as response:
-                text = await response.text()
+            if not cls._snlm0e:
+                async with session.get(cls.url, proxy=proxy) as response:
+                    text = await response.text()
 
-            match = re.search(r'SNlM0e\":\"(.*?)\"', text)
-            if not match:
-                raise RuntimeError("No snlm0e value.")
-            snlm0e = match.group(1)
+                match = re.search(r'SNlM0e\":\"(.*?)\"', text)
+                if not match:
+                    raise RuntimeError("No snlm0e value.")
+                cls._snlm0e = match.group(1)
 
             params = {
                 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
@@ -57,7 +58,7 @@ class Bard(AsyncProvider):
             }
             data = {
-                'at': snlm0e,
+                'at': cls._snlm0e,
                 'f.req': json.dumps([None, json.dumps([[prompt]])])
             }
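Reduced to a minimal, runnable sketch (the class and fetch step below are invented for illustration, not code from this commit), the caching pattern above stores the scraped value on the class, so the SNlM0e lookup happens once per process instead of once per request:

```py
import asyncio

class CachedTokenProvider:
    _token = None  # class-level cache, shared by every call in this process

    @classmethod
    async def get_token(cls) -> str:
        if not cls._token:
            cls._token = await cls._fetch_token()  # only runs on the first call
        return cls._token

    @staticmethod
    async def _fetch_token() -> str:
        await asyncio.sleep(0.1)  # stands in for the HTTP request and regex scrape
        return "snlm0e-value"

print(asyncio.run(CachedTokenProvider.get_token()))
```

One caveat: two coroutines that both see an empty cache will fetch twice; guarding the check with an `asyncio.Lock` would close that race if it ever matters here.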

View File

@@ -52,7 +52,14 @@ class ChatgptLogin(AsyncProvider):
             }
             async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
                 response.raise_for_status()
-                return (await response.json())["data"]
+                data = await response.json()
+                if "data" in data:
+                    return data["data"]
+                elif "msg" in data:
+                    raise RuntimeError(data["msg"])
+                else:
+                    raise RuntimeError(f"Response: {data}")
 
     @classmethod
     @property
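The new decode logic, restated as a stand-alone helper (the function name and the aiohttp usage are assumptions for illustration, not part of the commit): prefer the payload, surface the server's own error message when one exists, and fall back to dumping the raw body.

```py
import aiohttp

async def post_and_unwrap(session: aiohttp.ClientSession, url: str, data: dict) -> str:
    async with session.post(url, data=data) as response:
        response.raise_for_status()              # HTTP-level failures first
        body = await response.json()
        if "data" in body:                       # happy path
            return body["data"]
        if "msg" in body:                        # server-reported error
            raise RuntimeError(body["msg"])
        raise RuntimeError(f"Response: {body}")  # anything unexpected
```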

View File

@@ -40,11 +40,12 @@ class CodeLinkAva(AsyncGeneratorProvider):
             }
             async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
                 response.raise_for_status()
-                start = "data: "
                 async for line in response.content:
                     line = line.decode()
-                    if line.startswith("data: ") and not line.startswith("data: [DONE]"):
-                        line = json.loads(line[len(start):-1])
+                    if line.startswith("data: "):
+                        if line.startswith("data: [DONE]"):
+                            break
+                        line = json.loads(line[6:-1])
                         content = line["choices"][0]["delta"].get("content")
                         if content:
                             yield content
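This hunk and the matching Vitalentum hunk below make the same fix to the SSE loop: `data: [DONE]` now breaks out of the stream instead of merely being skipped, so the generator stops as soon as the server signals completion. The parsing rule, distilled into a hypothetical stand-alone helper:

```py
import json
from typing import Iterable, Iterator

def iter_sse_content(lines: Iterable[bytes]) -> Iterator[str]:
    for raw in lines:
        line = raw.decode()
        if not line.startswith("data: "):
            continue                    # skip keep-alives and other SSE fields
        if line.startswith("data: [DONE]"):
            break                       # server signalled end of stream
        chunk = json.loads(line[6:-1])  # drop the "data: " prefix and trailing newline
        content = chunk["choices"][0]["delta"].get("content")
        if content:
            yield content

# quick check with canned lines
events = [b'data: {"choices": [{"delta": {"content": "Hi"}}]}\n', b"data: [DONE]\n"]
print(list(iter_sse_content(events)))  # ['Hi']
```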

View File

@@ -23,7 +23,7 @@ class H2o(AsyncGeneratorProvider):
         **kwargs
     ) -> AsyncGenerator:
         model = model if model else cls.model
-        headers = {"Referer": "https://gpt-gm.h2o.ai/"}
+        headers = {"Referer": cls.url + "/"}
 
         async with ClientSession(
             headers=headers
@@ -36,14 +36,14 @@ class H2o(AsyncGeneratorProvider):
                 "searchEnabled": "true",
             }
             async with session.post(
-                "https://gpt-gm.h2o.ai/settings",
+                f"{cls.url}/settings",
                 proxy=proxy,
                 data=data
             ) as response:
                 response.raise_for_status()
 
             async with session.post(
-                "https://gpt-gm.h2o.ai/conversation",
+                f"{cls.url}/conversation",
                 proxy=proxy,
                 json={"model": model},
             ) as response:
@@ -71,7 +71,7 @@ class H2o(AsyncGeneratorProvider):
                 },
             }
             async with session.post(
-                f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
+                f"{cls.url}/conversation/{conversationId}",
                 proxy=proxy,
                 json=data
             ) as response:
@@ -83,6 +83,14 @@ class H2o(AsyncGeneratorProvider):
                         if not line["token"]["special"]:
                             yield line["token"]["text"]
 
+            async with session.delete(
+                f"{cls.url}/conversation/{conversationId}",
+                proxy=proxy,
+                json=data
+            ) as response:
+                response.raise_for_status()
+
     @classmethod
     @property
     def params(cls):
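The added `session.delete` tears down the conversation only when streaming finishes cleanly; an exception mid-stream skips it. If leaked conversations ever matter, the create/delete pair could live in an async context manager instead. A sketch, assuming an aiohttp-style session (none of this is from the commit):

```py
from contextlib import asynccontextmanager

@asynccontextmanager
async def conversation(session, base_url: str, model: str):
    async with session.post(f"{base_url}/conversation", json={"model": model}) as response:
        conversation_id = (await response.json())["conversationId"]
    try:
        yield conversation_id  # caller streams against this id
    finally:
        # runs on success and on error, unlike a trailing delete
        async with session.delete(f"{base_url}/conversation/{conversation_id}") as response:
            response.raise_for_status()
```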

View File

@@ -25,10 +25,10 @@ class HuggingChat(AsyncGeneratorProvider):
         **kwargs
     ) -> AsyncGenerator:
         model = model if model else cls.model
-        if not cookies:
-            cookies = get_cookies(".huggingface.co")
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"
+        if not cookies:
+            cookies = get_cookies(".huggingface.co")
 
         headers = {
             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
@@ -37,7 +37,7 @@ class HuggingChat(AsyncGeneratorProvider):
             cookies=cookies,
             headers=headers
         ) as session:
-            async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
+            async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
                 conversation_id = (await response.json())["conversationId"]
 
             send = {
@@ -62,7 +62,7 @@ class HuggingChat(AsyncGeneratorProvider):
                         "web_search_id": ""
                     }
                 }
-            async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
+            async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
                 if not stream:
                     data = await response.json()
                     if "error" in data:
@@ -76,8 +76,6 @@ class HuggingChat(AsyncGeneratorProvider):
                     first = True
                    async for line in response.content:
                         line = line.decode("utf-8")
-                        if not line:
-                            continue
                         if line.startswith(start):
                             line = json.loads(line[len(start):-1])
                             if "token" not in line:
@@ -89,7 +87,7 @@ class HuggingChat(AsyncGeneratorProvider):
                             else:
                                 yield line["token"]["text"]
 
-            async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
+            async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
                 response.raise_for_status()

View File

@@ -46,7 +46,9 @@ class Vitalentum(AsyncGeneratorProvider):
                 response.raise_for_status()
                 async for line in response.content:
                     line = line.decode()
-                    if line.startswith("data: ") and not line.startswith("data: [DONE]"):
+                    if line.startswith("data: "):
+                        if line.startswith("data: [DONE]"):
+                            break
                         line = json.loads(line[6:-1])
                         content = line["choices"][0]["delta"].get("content")
                         if content:

View File

@@ -14,7 +14,7 @@ from .Provider import (
     H2o
 )
 
-@dataclass
+@dataclass(unsafe_hash=True)
 class Model:
     name: str
     base_provider: str
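Why `unsafe_hash=True`: a plain `@dataclass` generates `__eq__` and therefore sets `__hash__` to `None`, so instances cannot be used in sets or as dict keys. `unsafe_hash=True` generates a field-based `__hash__` anyway ("unsafe" because instances remain mutable). A quick check, with illustrative field values:

```py
from dataclasses import dataclass

@dataclass(unsafe_hash=True)
class Model:
    name: str
    base_provider: str

seen = {Model("gpt-3.5-turbo", "g4f")}
print(Model("gpt-3.5-turbo", "g4f") in seen)  # True: hash and eq derive from the fields
```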

View File

@@ -1,6 +1,6 @@
 import sys
 from pathlib import Path
-from colorama import Fore
+from colorama import Fore, Style
 
 sys.path.append(str(Path(__file__).parent.parent))
@@ -8,10 +8,6 @@ from g4f import BaseProvider, models, Provider
 logging = False
 
-class Styles:
-    ENDC = "\033[0m"
-    BOLD = "\033[1m"
-    UNDERLINE = "\033[4m"
 
 def main():
     providers = get_providers()
@@ -29,11 +25,11 @@ def main():
     print()
 
     if failed_providers:
-        print(f"{Fore.RED + Styles.BOLD}Failed providers:{Styles.ENDC}")
+        print(f"{Fore.RED + Style.BRIGHT}Failed providers:{Style.RESET_ALL}")
         for _provider in failed_providers:
             print(f"{Fore.RED}{_provider.__name__}")
     else:
-        print(f"{Fore.GREEN + Styles.BOLD}All providers are working")
+        print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
 
 
 def get_providers() -> list[type[BaseProvider]]:
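For context on the `Styles` to `Style` swap above: colorama ships these constants, and `Style.RESET_ALL` clears color and brightness together, which the hand-rolled `ENDC` escape also did, minus portable Windows handling. A minimal usage sketch (not from this commit):

```py
from colorama import Fore, Style, init

init()  # translates ANSI escapes on Windows; close to a no-op elsewhere
print(Fore.RED + Style.BRIGHT + "Failed providers:" + Style.RESET_ALL)
print("back to normal styling")
```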
@@ -45,21 +41,15 @@ def get_providers() -> list[type[BaseProvider]]:
         "AsyncProvider",
         "AsyncGeneratorProvider"
     ]
-    provider_names = [
-        provider_name
+    return [
+        getattr(Provider, provider_name)
         for provider_name in provider_names
         if not provider_name.startswith("__") and provider_name not in ignore_names
     ]
-    return [getattr(Provider, provider_name) for provider_name in provider_names]
 
 
 def create_response(_provider: type[BaseProvider]) -> str:
-    if _provider.supports_gpt_35_turbo:
-        model = models.gpt_35_turbo.name
-    elif _provider.supports_gpt_4:
-        model = models.gpt_4.name
-    else:
-        model = models.default.name
+    model = models.gpt_35_turbo.name if _provider.supports_gpt_35_turbo else models.default.name
     response = _provider.create_completion(
         model=model,
         messages=[{"role": "user", "content": "Hello, who are you? Answer in detail much as possible."}],