mirror of https://github.com/xtekky/gpt4free.git
Add CodeLinkAva Provider, Improve test_async.py
This commit is contained in:
parent
1fc9ec91ea
commit
ddb6fa30d9
|
@ -0,0 +1,63 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
import json
|
||||
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
|
||||
|
||||
class CodeLinkAva(AsyncGeneratorProvider):
    """Async streaming provider for the CodeLink Ava chat API.

    Web app: https://ava-ai-ef611.web.app
    API endpoint: https://ava-alpha-api.codelink.io/api/chat
    """
    url                   = "https://ava-ai-ef611.web.app"
    supports_gpt_35_turbo = True
    working               = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield incremental completion text from the Ava SSE stream.

        Args:
            model: Accepted for interface compatibility; the request payload
                does not include it, so it is effectively ignored here.
            messages: Chat history as ``[{"role": ..., "content": ...}, ...]``.
            **kwargs: Extra fields merged into the JSON payload; may override
                the default ``temperature`` (0.6) and ``stream`` (True).

        Yields:
            str: Each non-empty ``choices[0].delta.content`` chunk.

        Raises:
            aiohttp.ClientResponseError: On a non-2xx HTTP status.
        """
        headers = {
            "User-Agent"       : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"           : "*/*",
            "Accept-language"  : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"           : cls.url,
            "Referer"          : cls.url + "/",
            "Sec-Fetch-Dest"   : "empty",
            "Sec-Fetch-Mode"   : "cors",
            "Sec-Fetch-Site"   : "same-origin",
        }
        async with ClientSession(
                headers=headers
            ) as session:
            data = {
                "messages": messages,
                "temperature": 0.6,
                "stream": True,
                **kwargs
            }
            async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
                response.raise_for_status()
                start = "data: "
                async for line in response.content:
                    line = line.decode()
                    # Only process SSE data lines; skip the terminal sentinel.
                    if line.startswith(start) and not line.startswith("data: [DONE]"):
                        # Do NOT slice off the last character: the original
                        # `line[len(start):-1]` assumed a trailing newline and
                        # truncated the JSON when the final line lacked one.
                        # json.loads tolerates trailing whitespace ("\n", "\r\n").
                        event = json.loads(line[len(start):])
                        content = event["choices"][0]["delta"].get("content")
                        if content:
                            yield content

    @classmethod
    @property
    def params(cls):
        """Return a human-readable summary of the supported parameters.

        NOTE(review): chaining ``@classmethod`` with ``@property`` is
        deprecated in Python 3.11 and removed in 3.13 — kept as-is because it
        is part of the provider interface; verify against the project's
        minimum Python version.
        """
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
|
|
@ -68,7 +68,7 @@ class HuggingChat(AsyncGeneratorProvider):
|
|||
if "error" in data:
|
||||
raise RuntimeError(data["error"])
|
||||
elif isinstance(data, list):
|
||||
yield data[0]["generated_text"]
|
||||
yield data[0]["generated_text"].strip()
|
||||
else:
|
||||
raise RuntimeError(f"Response: {data}")
|
||||
else:
|
||||
|
|
|
@ -34,7 +34,7 @@ class OpenaiChat(AsyncProvider):
|
|||
"https": proxy
|
||||
}
|
||||
if not access_token:
|
||||
access_token = await cls.get_access_token(cookies)
|
||||
access_token = await cls.get_access_token(cookies, proxies)
|
||||
headers = {
|
||||
"Accept": "text/event-stream",
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
|
|
|
@ -8,6 +8,7 @@ from .Bard import Bard
|
|||
from .Bing import Bing
|
||||
from .ChatgptAi import ChatgptAi
|
||||
from .ChatgptLogin import ChatgptLogin
|
||||
from .CodeLinkAva import CodeLinkAva
|
||||
from .DeepAi import DeepAi
|
||||
from .DfeHub import DfeHub
|
||||
from .EasyChat import EasyChat
|
||||
|
@ -44,6 +45,7 @@ __all__ = [
|
|||
'Bing',
|
||||
'ChatgptAi',
|
||||
'ChatgptLogin',
|
||||
'CodeLinkAva',
|
||||
'DeepAi',
|
||||
'DfeHub',
|
||||
'EasyChat',
|
||||
|
|
|
@ -7,31 +7,29 @@ sys.path.append(str(Path(__file__).parent.parent))
|
|||
import g4f
|
||||
from g4f.Provider import AsyncProvider
|
||||
from testing.test_providers import get_providers
|
||||
from testing.log_time import log_time_async
|
||||
from testing.log_time import log_time_async
|
||||
|
||||
async def create_async(provider: AsyncProvider):
|
||||
async def create_async(provider):
|
||||
model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
|
||||
try:
|
||||
response = await log_time_async(
|
||||
response = await log_time_async(
|
||||
provider.create_async,
|
||||
model=model,
|
||||
messages=[{"role": "user", "content": "Hello Assistant!"}]
|
||||
)
|
||||
assert type(response) is str
|
||||
assert len(response) > 0
|
||||
return response
|
||||
print(f"{provider.__name__}:", response)
|
||||
except Exception as e:
|
||||
return e
|
||||
return f"{provider.__name__}: {e.__class__.__name__}: {e}"
|
||||
|
||||
async def run_async():
|
||||
_providers: list[AsyncProvider] = [
|
||||
_provider
|
||||
responses: list = [
|
||||
create_async(_provider)
|
||||
for _provider in get_providers()
|
||||
if _provider.working and hasattr(_provider, "create_async")
|
||||
if _provider.working and issubclass(_provider, AsyncProvider)
|
||||
]
|
||||
responses = [create_async(_provider) for _provider in _providers]
|
||||
responses = await asyncio.gather(*responses)
|
||||
for idx, provider in enumerate(_providers):
|
||||
print(f"{provider.__name__}:", responses[idx])
|
||||
for error in responses:
|
||||
if error:
|
||||
print(error)
|
||||
|
||||
print("Total:", asyncio.run(log_time_async(run_async)))
|
Loading…
Reference in New Issue