mirror of
https://github.com/xtekky/gpt4free.git
synced 2024-09-17 10:29:29 -04:00
~| Merge pull request #889 from hlohaus/new
Add CodeLinkAva Provider, Improve test_async.py
This commit is contained in:
commit
edee7e72c0
62
g4f/Provider/ChatBase.py
Normal file
62
g4f/Provider/ChatBase.py
Normal file
@ -0,0 +1,62 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
|
||||
|
||||
class ChatBase(AsyncGeneratorProvider):
    """Provider backed by chatbase.co, streaming replies from its public chat endpoint."""
    url                   = "https://www.chatbase.co"
    supports_gpt_35_turbo = True
    supports_gpt_4        = True
    working               = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Stream the model's reply as decoded text chunks.

        Args:
            model: Model name; "gpt-4" selects the GPT-4 backed chat id,
                any other value falls back to the GPT-3.5 chat id.
            messages: Conversation history as role/content dicts.
            **kwargs: Ignored; accepted for provider-interface compatibility.

        Yields:
            Non-empty decoded chunks of the streamed response body.

        Raises:
            aiohttp.ClientResponseError: If the HTTP request fails.
        """
        if model == "gpt-4":
            chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
        else:
            # The original condition was `model == "gpt-3.5-turbo" or True`,
            # which is always true: every non-gpt-4 model uses this chat id.
            chat_id = "chatbase--1--pdf-p680fxvnm"
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "messages": messages,
                # Static value accepted by the endpoint at the time of writing.
                "captchaCode": "hadsa",
                "chatId": chat_id,
                "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
            }
            async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
                response.raise_for_status()
                async for stream in response.content.iter_any():
                    stream = stream.decode()
                    if stream:
                        yield stream

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the call parameters this provider supports."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
|
63
g4f/Provider/CodeLinkAva.py
Normal file
63
g4f/Provider/CodeLinkAva.py
Normal file
@ -0,0 +1,63 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
import json
|
||||
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
|
||||
|
||||
class CodeLinkAva(AsyncGeneratorProvider):
    """Provider for the CodeLink Ava chat API, streaming SSE content deltas as text."""
    url                   = "https://ava-ai-ef611.web.app"
    supports_gpt_35_turbo = True
    working               = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Stream the assistant's reply as text fragments.

        Args:
            model: Accepted for interface compatibility; not sent to the API.
            messages: Conversation history as role/content dicts.
            **kwargs: Extra fields merged into the request payload
                (e.g. a "temperature" override).

        Yields:
            Non-empty "content" delta strings from the SSE stream.

        Raises:
            aiohttp.ClientResponseError: If the HTTP request fails.
        """
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "messages": messages,
                "temperature": 0.6,
                "stream": True,
                **kwargs
            }
            async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
                response.raise_for_status()
                start = "data: "
                async for line in response.content:
                    line = line.decode()
                    if line.startswith(start) and not line.startswith("data: [DONE]"):
                        # Strip only the "data: " prefix; json.loads tolerates a
                        # trailing newline, whereas the previous `[:-1]` slice
                        # truncated the JSON when the last line had no newline.
                        line = json.loads(line[len(start):])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the call parameters this provider supports."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
|
@ -68,7 +68,7 @@ class HuggingChat(AsyncGeneratorProvider):
|
||||
if "error" in data:
|
||||
raise RuntimeError(data["error"])
|
||||
elif isinstance(data, list):
|
||||
yield data[0]["generated_text"]
|
||||
yield data[0]["generated_text"].strip()
|
||||
else:
|
||||
raise RuntimeError(f"Response: {data}")
|
||||
else:
|
||||
|
@ -34,7 +34,7 @@ class OpenaiChat(AsyncProvider):
|
||||
"https": proxy
|
||||
}
|
||||
if not access_token:
|
||||
access_token = await cls.get_access_token(cookies)
|
||||
access_token = await cls.get_access_token(cookies, proxies)
|
||||
headers = {
|
||||
"Accept": "text/event-stream",
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
|
@ -6,8 +6,10 @@ from .AiService import AiService
|
||||
from .AItianhu import AItianhu
|
||||
from .Bard import Bard
|
||||
from .Bing import Bing
|
||||
from .ChatBase import ChatBase
|
||||
from .ChatgptAi import ChatgptAi
|
||||
from .ChatgptLogin import ChatgptLogin
|
||||
from .CodeLinkAva import CodeLinkAva
|
||||
from .DeepAi import DeepAi
|
||||
from .DfeHub import DfeHub
|
||||
from .EasyChat import EasyChat
|
||||
@ -42,8 +44,10 @@ __all__ = [
|
||||
'AItianhu',
|
||||
'Bard',
|
||||
'Bing',
|
||||
'ChatBase',
|
||||
'ChatgptAi',
|
||||
'ChatgptLogin',
|
||||
'CodeLinkAva',
|
||||
'DeepAi',
|
||||
'DfeHub',
|
||||
'EasyChat',
|
||||
|
@ -7,31 +7,29 @@ sys.path.append(str(Path(__file__).parent.parent))
|
||||
import g4f
|
||||
from g4f.Provider import AsyncProvider
|
||||
from testing.test_providers import get_providers
|
||||
from testing.log_time import log_time_async
|
||||
from testing.log_time import log_time_async
|
||||
|
||||
async def create_async(provider: AsyncProvider):
|
||||
async def create_async(provider):
|
||||
model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
|
||||
try:
|
||||
response = await log_time_async(
|
||||
response = await log_time_async(
|
||||
provider.create_async,
|
||||
model=model,
|
||||
messages=[{"role": "user", "content": "Hello Assistant!"}]
|
||||
)
|
||||
assert type(response) is str
|
||||
assert len(response) > 0
|
||||
return response
|
||||
print(f"{provider.__name__}:", response)
|
||||
except Exception as e:
|
||||
return e
|
||||
return f"{provider.__name__}: {e.__class__.__name__}: {e}"
|
||||
|
||||
async def run_async():
|
||||
_providers: list[AsyncProvider] = [
|
||||
_provider
|
||||
responses: list = [
|
||||
create_async(_provider)
|
||||
for _provider in get_providers()
|
||||
if _provider.working and hasattr(_provider, "create_async")
|
||||
if _provider.working and issubclass(_provider, AsyncProvider)
|
||||
]
|
||||
responses = [create_async(_provider) for _provider in _providers]
|
||||
responses = await asyncio.gather(*responses)
|
||||
for idx, provider in enumerate(_providers):
|
||||
print(f"{provider.__name__}:", responses[idx])
|
||||
for error in responses:
|
||||
if error:
|
||||
print(error)
|
||||
|
||||
print("Total:", asyncio.run(log_time_async(run_async)))
|
Loading…
Reference in New Issue
Block a user