Merge pull request #962 from hlohaus/nnn

Update HuggingChat to new api
commit c0632c2741 by Tekky, 2023-10-02 12:20:18 +02:00, committed by GitHub
17 changed files with 366 additions and 190 deletions

g4f/Provider/AItianhu.py

@@ -1,24 +1,25 @@
 from __future__ import annotations

 import json

-from curl_cffi.requests import AsyncSession
-from .base_provider import AsyncProvider, format_prompt
+from ..typing import AsyncGenerator
+from ..requests import StreamSession
+from .base_provider import AsyncGeneratorProvider, format_prompt


-class AItianhu(AsyncProvider):
+class AItianhu(AsyncGeneratorProvider):
     url = "https://www.aitianhu.com"
     working = True
     supports_gpt_35_turbo = True

     @classmethod
-    async def create_async(
+    async def create_async_generator(
         cls,
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
         **kwargs
-    ) -> str:
+    ) -> AsyncGenerator:
         data = {
             "prompt": format_prompt(messages),
             "options": {},
@@ -27,12 +28,25 @@ class AItianhu(AsyncProvider):
             "top_p": 1,
             **kwargs
         }
-        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
-            response = await session.post(cls.url + "/api/chat-process", json=data)
-            response.raise_for_status()
-            line = response.text.splitlines()[-1]
-            line = json.loads(line)
-            return line["text"]
+        headers = {
+            "Authority": cls.url,
+            "Accept": "application/json, text/plain, */*",
+            "Origin": cls.url,
+            "Referer": f"{cls.url}/"
+        }
+        async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
+            async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
+                response.raise_for_status()
+                async for line in response.iter_lines():
+                    if b"platform's risk control" in line:
+                        raise RuntimeError("Platform's Risk Control")
+                    line = json.loads(line)
+                    if "detail" in line:
+                        content = line["detail"]["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+                    else:
+                        raise RuntimeError(f"Response: {line}")

     @classmethod
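
In practice this change means AItianhu callers now consume a token stream rather than one buffered string. A minimal consumption sketch; the classmethod signature comes from the diff above, while the wrapper script itself is illustrative:

import asyncio
from g4f.Provider import AItianhu

async def main():
    # Tokens arrive incrementally instead of as one buffered string.
    async for token in AItianhu.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello!"}],
    ):
        print(token, end="", flush=True)

asyncio.run(main())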

g4f/Provider/AItianhuSpace.py

@@ -2,7 +2,8 @@ from __future__ import annotations

 import random, json

-from g4f.requests import AsyncSession, StreamRequest
+from ..typing import AsyncGenerator
+from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, format_prompt

 domains = {
@@ -22,7 +23,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
         messages: list[dict[str, str]],
         stream: bool = True,
         **kwargs
-    ) -> str:
+    ) -> AsyncGenerator:
         if not model:
             model = "gpt-3.5-turbo"
         elif not model in domains:
@@ -31,12 +32,9 @@ class AItianhuSpace(AsyncGeneratorProvider):
         chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
         rand = ''.join(random.choice(chars) for _ in range(6))
         domain = domains[model]
-        url = f'https://{rand}{domain}/api/chat-process'
+        url = f'https://{rand}{domain}'

-        headers = {
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
-        }
-        async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
+        async with StreamSession(impersonate="chrome110", verify=False) as session:
             data = {
                 "prompt": format_prompt(messages),
                 "options": {},
@@ -45,10 +43,18 @@ class AItianhuSpace(AsyncGeneratorProvider):
                 "top_p": 1,
                 **kwargs
             }
-            async with StreamRequest(session, "POST", url, json=data) as response:
+            headers = {
+                "Authority": url,
+                "Accept": "application/json, text/plain, */*",
+                "Origin": url,
+                "Referer": f"{url}/"
+            }
+            async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
                 response.raise_for_status()
-                async for line in response.content:
-                    line = json.loads(line.rstrip())
+                async for line in response.iter_lines():
+                    if b"platform's risk control" in line:
+                        raise RuntimeError("Platform's Risk Control")
+                    line = json.loads(line)
                     if "detail" in line:
                         content = line["detail"]["choices"][0]["delta"].get("content")
                         if content:
@@ -56,7 +62,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
                     elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
                         raise RuntimeError("Rate limit for GPT 4 reached")
                     else:
-                        raise RuntimeError("Response: {line}")
+                        raise RuntimeError(f"Response: {line}")

     @classmethod

g4f/Provider/Aibn.py

@@ -4,7 +4,7 @@ import time
 import hashlib

 from ..typing import AsyncGenerator
-from g4f.requests import AsyncSession
+from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider

@@ -20,7 +20,7 @@ class Aibn(AsyncGeneratorProvider):
         messages: list[dict[str, str]],
         **kwargs
     ) -> AsyncGenerator:
-        async with AsyncSession(impersonate="chrome107") as session:
+        async with StreamSession(impersonate="chrome107") as session:
             timestamp = int(time.time())
             data = {
                 "messages": messages,
@@ -30,7 +30,7 @@ class Aibn(AsyncGeneratorProvider):
             }
             async with session.post(f"{cls.url}/api/generate", json=data) as response:
                 response.raise_for_status()
-                async for chunk in response.content.iter_any():
+                async for chunk in response.iter_content():
                     yield chunk.decode()

     @classmethod

g4f/Provider/Aivvm.py

@@ -1,8 +1,8 @@
 from __future__ import annotations

-import requests
-
-from .base_provider import BaseProvider
-from ..typing import CreateResult
+from ..requests import StreamSession
+from .base_provider import AsyncGeneratorProvider
+from ..typing import AsyncGenerator

 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -16,7 +16,7 @@ models = {
     'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
 }

-class Aivvm(BaseProvider):
+class Aivvm(AsyncGeneratorProvider):
     url = 'https://chat.aivvm.com'
     supports_stream = True
     working = True
@@ -24,31 +24,18 @@ class Aivvm(BaseProvider):
     supports_gpt_4 = True

     @classmethod
-    def create_completion(cls,
+    async def create_async_generator(
+        cls,
         model: str,
         messages: list[dict[str, str]],
         stream: bool,
         **kwargs
-    ) -> CreateResult:
+    ) -> AsyncGenerator:
         if not model:
             model = "gpt-3.5-turbo"
         elif model not in models:
             raise ValueError(f"Model is not supported: {model}")

-        headers = {
-            "accept"            : "*/*",
-            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
-            "content-type"      : "application/json",
-            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
-            "sec-ch-ua-mobile"  : "?0",
-            "sec-ch-ua-platform": "\"Bandóz\"",
-            "sec-fetch-dest"    : "empty",
-            "sec-fetch-mode"    : "cors",
-            "sec-fetch-site"    : "same-origin",
-            "Referer"           : "https://chat.aivvm.com/",
-            "Referrer-Policy"   : "same-origin",
-        }
-
         json_data = {
             "model"       : models[model],
             "messages"    : messages,
@@ -56,13 +43,11 @@ class Aivvm(BaseProvider):
             "prompt"      : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
             "temperature" : kwargs.get("temperature", 0.7)
         }
-
-        response = requests.post(
-            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
-        response.raise_for_status()
-
-        for chunk in response.iter_content(chunk_size=None):
-            yield chunk.decode('utf-8')
+        async with StreamSession(impersonate="chrome107") as session:
+            async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    yield chunk.decode()

     @classmethod
     @property

g4f/Provider/ChatForAi.py (new file, 62 lines)

@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import time, hashlib
+
+from ..typing import AsyncGenerator
+from ..requests import StreamSession
+from .base_provider import AsyncGeneratorProvider
+
+
+class ChatForAi(AsyncGeneratorProvider):
+    url = "https://chatforai.com"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        async with StreamSession(impersonate="chrome107") as session:
+            conversation_id = f"id_{int(time.time())}"
+            prompt = messages[-1]["content"]
+            timestamp = int(time.time())
+            data = {
+                "conversationId": conversation_id,
+                "conversationType": "chat_continuous",
+                "botId": "chat_continuous",
+                "globalSettings": {
+                    "baseUrl": "https://api.openai.com",
+                    "model": model if model else "gpt-3.5-turbo",
+                    "messageHistorySize": 5,
+                    "temperature": 0.7,
+                    "top_p": 1,
+                    **kwargs
+                },
+                "botSettings": {},
+                "prompt": prompt,
+                "messages": messages,
+                "sign": generate_signature(timestamp, conversation_id, prompt),
+                "timestamp": timestamp
+            }
+            async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    yield chunk.decode()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def generate_signature(timestamp, id, prompt):
+    data = f"{timestamp}:{id}:{prompt}:6B46K4pt"
+    return hashlib.sha256(data.encode()).hexdigest()
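
The request body is authenticated by the SHA-256 scheme at the bottom of the file: a hash over the timestamp, conversation id, prompt, and the hard-coded suffix 6B46K4pt. A worked example with made-up values:

import hashlib

# Hypothetical request values, for illustration only.
timestamp = 1696241218
conversation_id = f"id_{timestamp}"
prompt = "Hello!"

# Same scheme as generate_signature() above.
payload = f"{timestamp}:{conversation_id}:{prompt}:6B46K4pt"
sign = hashlib.sha256(payload.encode()).hexdigest()
print(sign)  # 64 hex chars, sent as the "sign" field of the request body

The FreeGpt provider added below uses the same idea, hashing "{timestamp}:{message}:{secret}" with an empty default secret.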

g4f/Provider/ChatgptDuo.py

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from g4f.requests import AsyncSession
+from curl_cffi.requests import AsyncSession
 from .base_provider import AsyncProvider, format_prompt

@@ -23,17 +23,17 @@ class ChatgptDuo(AsyncProvider):
                 "search": prompt,
                 "purpose": "ask",
             }
-            async with session.post(f"{cls.url}/", data=data) as response:
-                response.raise_for_status()
-                data = await response.json()
+            response = await session.post(f"{cls.url}/", data=data)
+            response.raise_for_status()
+            data = response.json()

-                cls._sources = [{
-                    "title": source["title"],
-                    "url": source["link"],
-                    "snippet": source["snippet"]
-                } for source in data["results"]]
+            cls._sources = [{
+                "title": source["title"],
+                "url": source["link"],
+                "snippet": source["snippet"]
+            } for source in data["results"]]

-                return data["answer"]
+            return data["answer"]

     @classmethod
     def get_sources(cls):

g4f/Provider/FreeGpt.py (new file, 54 lines)

@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+import time, hashlib, random
+
+from ..typing import AsyncGenerator
+from ..requests import StreamSession
+from .base_provider import AsyncGeneratorProvider
+
+domains = [
+    'https://k.aifree.site',
+    'https://p.aifree.site'
+]
+
+class FreeGpt(AsyncGeneratorProvider):
+    url = "https://freegpts1.aifree.site/"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        async with StreamSession(impersonate="chrome107") as session:
+            prompt = messages[-1]["content"]
+            timestamp = int(time.time())
+            data = {
+                "messages": messages,
+                "time": timestamp,
+                "pass": None,
+                "sign": generate_signature(timestamp, prompt)
+            }
+            url = random.choice(domains)
+            async with session.post(f"{url}/api/generate", json=data) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    yield chunk.decode()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def generate_signature(timestamp: int, message: str, secret: str = ""):
+    data = f"{timestamp}:{message}:{secret}"
+    return hashlib.sha256(data.encode()).hexdigest()

g4f/Provider/HuggingChat.py

@@ -1,6 +1,6 @@
 from __future__ import annotations

-import json
+import json, uuid

 from aiohttp import ClientSession

@@ -12,7 +12,7 @@ class HuggingChat(AsyncGeneratorProvider):
     url = "https://huggingface.co/chat"
     needs_auth = True
     working = True
-    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
+    model = "meta-llama/Llama-2-70b-chat-hf"

     @classmethod
     async def create_async_generator(
@@ -37,55 +37,25 @@ class HuggingChat(AsyncGeneratorProvider):
             cookies=cookies,
             headers=headers
         ) as session:
-            async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
+            async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response:
                 conversation_id = (await response.json())["conversationId"]

             send = {
+                "id": str(uuid.uuid4()),
                 "inputs": format_prompt(messages),
-                "parameters": {
-                    "temperature": 0.2,
-                    "truncate": 1000,
-                    "max_new_tokens": 1024,
-                    "stop": ["</s>"],
-                    "top_p": 0.95,
-                    "repetition_penalty": 1.2,
-                    "top_k": 50,
-                    "return_full_text": False,
-                    **kwargs
-                },
-                "stream": stream,
-                "options": {
-                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
-                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
-                    "is_retry": False,
-                    "use_cache": False,
-                    "web_search_id": ""
-                }
+                "is_retry": False,
+                "response_id": str(uuid.uuid4()),
+                "web_search": False
             }
-            async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
-                if not stream:
-                    data = await response.json()
-                    if "error" in data:
-                        raise RuntimeError(data["error"])
-                    elif isinstance(data, list):
-                        yield data[0]["generated_text"].strip()
-                    else:
-                        raise RuntimeError(f"Response: {data}")
-                else:
-                    start = "data:"
-                    first = True
-                    async for line in response.content:
-                        line = line.decode("utf-8")
-                        if line.startswith(start):
-                            line = json.loads(line[len(start):-1])
-                            if "token" not in line:
-                                raise RuntimeError(f"Response: {line}")
-                            if not line["token"]["special"]:
-                                if first:
-                                    yield line["token"]["text"].lstrip()
-                                    first = False
-                                else:
-                                    yield line["token"]["text"]
+            async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
+                async for line in response.content:
+                    line = json.loads(line[:-1])
+                    if "type" not in line:
+                        raise RuntimeError(f"Response: {line}")
+                    elif line["type"] == "stream":
+                        yield line["token"]
+                    elif line["type"] == "finalAnswer":
+                        break

             async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
                 response.raise_for_status()
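
This is the headline change of the PR: instead of text-generation-inference events behind a "data:" prefix, the new HuggingChat endpoint emits one JSON object per line with a "type" discriminator. A sketch of the parsing loop against illustrative sample payloads (only the "type" and "token" fields are taken from the handler above):

import json

# Illustrative sample of the new line-delimited event stream.
lines = [
    b'{"type": "stream", "token": "Hel"}\n',
    b'{"type": "stream", "token": "lo"}\n',
    b'{"type": "finalAnswer"}\n',
]
for raw in lines:
    event = json.loads(raw[:-1])  # drop the trailing newline byte, as the provider does
    if event["type"] == "stream":
        print(event["token"], end="")
    elif event["type"] == "finalAnswer":
        break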

g4f/Provider/PerplexityAi.py

@@ -5,12 +5,12 @@ import time
 import base64

 from curl_cffi.requests import AsyncSession
-from .base_provider import AsyncProvider, format_prompt
+from .base_provider import AsyncProvider, format_prompt, get_cookies


 class PerplexityAi(AsyncProvider):
     url = "https://www.perplexity.ai"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     _sources = []

@@ -23,18 +23,32 @@ class PerplexityAi(AsyncProvider):
         **kwargs
     ) -> str:
         url = cls.url + "/socket.io/?EIO=4&transport=polling"
-        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
+        headers = {
+            "Referer": f"{cls.url}/"
+        }
+        async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
             url_session = "https://www.perplexity.ai/api/auth/session"
             response = await session.get(url_session)
+            response.raise_for_status()
+
+            url_session = "https://www.perplexity.ai/api/auth/session"
+            response = await session.get(url_session)
+            response.raise_for_status()

             response = await session.get(url, params={"t": timestamp()})
             response.raise_for_status()
             sid = json.loads(response.text[1:])["sid"]

+            response = await session.get(url, params={"t": timestamp(), "sid": sid})
+            response.raise_for_status()
+
             data = '40{"jwt":"anonymous-ask-user"}'
             response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
             response.raise_for_status()

+            response = await session.get(url, params={"t": timestamp(), "sid": sid})
+            response.raise_for_status()
+
             data = "424" + json.dumps([
                 "perplexity_ask",
                 format_prompt(messages),

g4f/Provider/Wewordle.py

@@ -8,7 +8,7 @@ from .base_provider import AsyncProvider

 class Wewordle(AsyncProvider):
     url = "https://wewordle.org"
-    working = True
+    working = False
     supports_gpt_35_turbo = True

     @classmethod

g4f/Provider/Ylokh.py

@@ -1,8 +1,8 @@
 from __future__ import annotations

 import json
-from aiohttp import ClientSession

+from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
 from ..typing import AsyncGenerator

@@ -23,14 +23,8 @@ class Ylokh(AsyncGeneratorProvider):
     ) -> AsyncGenerator:
         model = model if model else "gpt-3.5-turbo"
         headers = {
-            "User-Agent"      : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
-            "Accept"          : "*/*",
-            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
             "Origin"          : cls.url,
             "Referer"         : cls.url + "/",
-            "Sec-Fetch-Dest"  : "empty",
-            "Sec-Fetch-Mode"  : "cors",
-            "Sec-Fetch-Site"  : "same-origin",
         }
         data = {
             "messages": messages,
@@ -43,18 +37,19 @@ class Ylokh(AsyncGeneratorProvider):
             "stream": stream,
             **kwargs
         }
-        async with ClientSession(
-            headers=headers
+        async with StreamSession(
+            headers=headers,
+            proxies={"https": proxy}
         ) as session:
-            async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response:
+            async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
                 response.raise_for_status()
                 if stream:
-                    async for line in response.content:
+                    async for line in response.iter_lines():
                         line = line.decode()
                         if line.startswith("data: "):
                             if line.startswith("data: [DONE]"):
                                 break
-                            line = json.loads(line[6:-1])
+                            line = json.loads(line[6:])
                             content = line["choices"][0]["delta"].get("content")
                             if content:
                                 yield content
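
One subtle fix here: the slice changes from line[6:-1] to line[6:] because aiohttp's content iterator and the new iter_lines() delimit lines differently. A small demonstration of the difference (the sample payload is illustrative):

import json

# aiohttp's response.content yields lines WITH the trailing "\n";
# StreamSession.iter_lines() yields them without it (see g4f/requests.py below),
# so keeping the old -1 would now cut the closing "}" off the JSON payload.
raw = 'data: {"choices": [{"delta": {"content": "Hi"}}]}'
payload = json.loads(raw[len("data: "):])
print(payload["choices"][0]["delta"].get("content"))  # -> Hi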

g4f/Provider/__init__.py

@@ -10,6 +10,7 @@ from .Aivvm import Aivvm
 from .Bard import Bard
 from .Bing import Bing
 from .ChatBase import ChatBase
+from .ChatForAi import ChatForAi
 from .ChatgptAi import ChatgptAi
 from .ChatgptDuo import ChatgptDuo
 from .ChatgptLogin import ChatgptLogin
@@ -18,6 +19,7 @@ from .DeepAi import DeepAi
 from .DfeHub import DfeHub
 from .EasyChat import EasyChat
 from .Forefront import Forefront
+from .FreeGpt import FreeGpt
 from .GetGpt import GetGpt
 from .GptGo import GptGo
 from .H2o import H2o
@@ -61,6 +63,7 @@ __all__ = [
     'Bard',
     'Bing',
     'ChatBase',
+    'ChatForAi',
     'ChatgptAi',
     'ChatgptDuo',
     'ChatgptLogin',
@@ -69,6 +72,7 @@ __all__ = [
     'DfeHub',
     'EasyChat',
     'Forefront',
+    'FreeGpt',
     'GetGpt',
     'GptGo',
     'H2o',

g4f/Provider/helper.py

@@ -1,28 +1,33 @@
 from __future__ import annotations

-import asyncio
-import sys
+import asyncio, sys

 from asyncio import AbstractEventLoop

 import browser_cookie3

-_cookies: dict[str, dict[str, str]] = {}
+# Change event loop policy on windows
+if sys.platform == 'win32':
+    if isinstance(
+        asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy
+    ):
+        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

-# Use own event_loop_policy with a selector event loop on windows.
-if sys.platform == 'win32':
-    _event_loop_policy = asyncio.WindowsSelectorEventLoopPolicy()
-else:
-    _event_loop_policy = asyncio.get_event_loop_policy()
+# Local Cookie Storage
+_cookies: dict[str, dict[str, str]] = {}

 # If event loop is already running, handle nested event loops
 # If "nest_asyncio" is installed, patch the event loop.
 def get_event_loop() -> AbstractEventLoop:
     try:
         asyncio.get_running_loop()
     except RuntimeError:
-        return _event_loop_policy.get_event_loop()
+        try:
+            return asyncio.get_event_loop()
+        except RuntimeError:
+            asyncio.set_event_loop(asyncio.new_event_loop())
+            return asyncio.get_event_loop()
     try:
-        event_loop = _event_loop_policy.get_event_loop()
+        event_loop = asyncio.get_event_loop()
         if not hasattr(event_loop.__class__, "_nest_patched"):
             import nest_asyncio
             nest_asyncio.apply(event_loop)
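
The private _event_loop_policy is gone: get_event_loop now falls back to plain asyncio and creates a loop on demand, while still patching an already-running loop via nest_asyncio when that package is available. A hedged usage sketch; the import path is assumed from this diff:

import asyncio
from g4f.Provider.helper import get_event_loop  # module path assumed from this diff

async def work():
    return "done"

# In a plain script a loop is fetched, or newly created if none is set.
loop = get_event_loop()
print(loop.run_until_complete(work()))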

g4f/models.py

@@ -8,16 +8,19 @@ from .Provider import (
     PerplexityAi,
     ChatgptDuo,
     ChatgptAi,
+    ChatForAi,
     ChatBase,
     AItianhu,
     Wewordle,
     Yqcloud,
     Myshell,
+    FreeGpt,
     Vercel,
     DeepAi,
     Aichat,
     Aivvm,
     GptGo,
+    Ylokh,
     Bard,
     Aibn,
     Bing,
@@ -42,7 +45,7 @@ default = Model(
         Yqcloud,  # Answers short questions in chinese
         ChatBase,  # Don't want to answer creatively
         ChatgptDuo,  # Include search results
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
     ])
 )

@@ -51,7 +54,7 @@ gpt_35_turbo = Model(
     name          = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
     ])
 )

@@ -59,7 +62,7 @@ gpt_4 = Model(
     name          = 'gpt-4',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        Myshell, AItianhuSpace,
+        Myshell, Ylokh,
     ])
 )
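
With the registry updated, the three new providers join RetryProvider failover for the default and gpt-3.5-turbo models, and Ylokh replaces AItianhuSpace for gpt-4. A minimal smoke test through g4f's top-level API, which this PR does not appear to touch (the call below is an assumption about that API, not part of the diff):

import g4f

# RetryProvider walks the best_provider list above until one call succeeds.
response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_turbo,
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response)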

g4f/requests.py

@@ -1,20 +1,24 @@
 from __future__ import annotations

-import json, sys
+import warnings, json, asyncio
 from functools import partialmethod
+from asyncio import Future, Queue
+from typing import AsyncGenerator

-from aiohttp import StreamReader
-from aiohttp.base_protocol import BaseProtocol
+from curl_cffi.requests import AsyncSession, Response

-from curl_cffi.requests import AsyncSession as BaseSession
-from curl_cffi.requests import Response
+import curl_cffi
+
+is_newer_0_5_8 = hasattr(AsyncSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl")
+is_newer_0_5_9 = hasattr(curl_cffi.AsyncCurl, "remove_handle")
+is_newer_0_5_10 = hasattr(AsyncSession, "release_curl")


 class StreamResponse:
-    def __init__(self, inner: Response, content: StreamReader, request):
+    def __init__(self, inner: Response, queue: Queue):
         self.inner = inner
-        self.content = content
-        self.request = request
+        self.queue = queue
+        self.request = inner.request
         self.status_code = inner.status_code
         self.reason = inner.reason
         self.ok = inner.ok
@@ -22,7 +26,7 @@ class StreamResponse:
         self.cookies = inner.cookies

     async def text(self) -> str:
-        content = await self.content.read()
+        content = await self.read()
         return content.decode()

     def raise_for_status(self):
@@ -30,56 +34,120 @@ class StreamResponse:
             raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}")

     async def json(self, **kwargs):
-        return json.loads(await self.content.read(), **kwargs)
+        return json.loads(await self.read(), **kwargs)
+
+    async def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None) -> AsyncGenerator[bytes]:
+        """
+        Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/
+        which is under the License: Apache 2.0
+        """
+        pending = None
+
+        async for chunk in self.iter_content(
+            chunk_size=chunk_size, decode_unicode=decode_unicode
+        ):
+            if pending is not None:
+                chunk = pending + chunk
+            if delimiter:
+                lines = chunk.split(delimiter)
+            else:
+                lines = chunk.splitlines()
+
+            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
+                pending = lines.pop()
+            else:
+                pending = None
+
+            for line in lines:
+                yield line
+
+        if pending is not None:
+            yield pending
+
+    async def iter_content(self, chunk_size=None, decode_unicode=False) -> AsyncGenerator[bytes]:
+        if chunk_size:
+            warnings.warn("chunk_size is ignored, there is no way to tell curl that.")
+        if decode_unicode:
+            raise NotImplementedError()
+        while True:
+            chunk = await self.queue.get()
+            if chunk is None:
+                return
+            yield chunk
+
+    async def read(self) -> bytes:
+        return b"".join([chunk async for chunk in self.iter_content()])


 class StreamRequest:
     def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
         self.session = session
-        self.loop = session.loop
-        self.content = StreamReader(
-            BaseProtocol(session.loop),
-            sys.maxsize,
-            loop=session.loop
-        )
+        self.loop = session.loop if session.loop else asyncio.get_running_loop()
+        self.queue = Queue()
         self.method = method
         self.url = url
         self.options = kwargs
+        self.handle = None

-    def on_content(self, data):
+    def _on_content(self, data):
         if not self.enter.done():
             self.enter.set_result(None)
-        self.content.feed_data(data)
+        self.queue.put_nowait(data)

-    def on_done(self, task):
-        self.content.feed_eof()
-        self.curl.clean_after_perform()
-        self.curl.reset()
-        self.session.push_curl(self.curl)
+    def _on_done(self, task: Future):
+        if not self.enter.done():
+            self.enter.set_result(None)
+        self.queue.put_nowait(None)
+
+        self.loop.call_soon(self.session.release_curl, self.curl)

-    async def __aenter__(self) -> StreamResponse:
+    async def fetch(self) -> StreamResponse:
+        if self.handle:
+            raise RuntimeError("Request already started")
         self.curl = await self.session.pop_curl()
         self.enter = self.loop.create_future()
-        request, _, header_buffer = self.session._set_curl_options(
-            self.curl,
-            self.method,
-            self.url,
-            content_callback=self.on_content,
-            **self.options
-        )
-        await self.session.acurl.add_handle(self.curl, False)
-        self.handle = self.session.acurl._curl2future[self.curl]
-        self.handle.add_done_callback(self.on_done)
+        if is_newer_0_5_10:
+            request, _, header_buffer, _, _ = self.session._set_curl_options(
+                self.curl,
+                self.method,
+                self.url,
+                content_callback=self._on_content,
+                **self.options
+            )
+        else:
+            request, _, header_buffer = self.session._set_curl_options(
+                self.curl,
+                self.method,
+                self.url,
+                content_callback=self._on_content,
+                **self.options
+            )
+        if is_newer_0_5_9:
+            self.handle = self.session.acurl.add_handle(self.curl)
+        else:
+            await self.session.acurl.add_handle(self.curl, False)
+            self.handle = self.session.acurl._curl2future[self.curl]
+        self.handle.add_done_callback(self._on_done)
+        # Wait for headers
         await self.enter
+        # Raise exceptions
+        if self.handle.done():
+            self.handle.result()
+        if is_newer_0_5_8:
+            response = self.session._parse_response(self.curl, _, header_buffer)
+            response.request = request
+        else:
+            response = self.session._parse_response(self.curl, request, _, header_buffer)
         return StreamResponse(
-            self.session._parse_response(self.curl, request, _, header_buffer),
-            self.content,
-            request
+            response,
+            self.queue
         )

-    async def __aexit__(self, exc_type, exc, tb):
-        pass
+    async def __aenter__(self) -> StreamResponse:
+        return await self.fetch()

+    async def __aexit__(self, *args):
+        self.session.release_curl(self.curl)

-class AsyncSession(BaseSession):
+
+class StreamSession(AsyncSession):
     def request(
         self,
         method: str,
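
Taken together, StreamSession keeps curl_cffi's browser impersonation but exposes an aiohttp-flavoured streaming interface, and the is_newer_* probes let one file span several curl_cffi releases whose private APIs differ. A sketch of the consumption pattern the providers above rely on (the helper function itself is illustrative):

from g4f.requests import StreamSession

async def stream_post(url: str, data: dict):
    # impersonate makes curl_cffi mimic a real browser's TLS fingerprint.
    async with StreamSession(impersonate="chrome107") as session:
        async with session.post(url, json=data) as response:
            response.raise_for_status()
            # iter_lines() re-chunks the raw body at newlines, requests-style.
            async for line in response.iter_lines():
                yield line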

testing/test_async.py

@@ -5,31 +5,26 @@ import asyncio
 sys.path.append(str(Path(__file__).parent.parent))

 import g4f
-from g4f.Provider import AsyncProvider
 from testing.test_providers import get_providers
 from testing.log_time import log_time_async

 async def create_async(provider):
-    model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
     try:
         response = await log_time_async(
             provider.create_async,
-            model=model,
-            messages=[{"role": "user", "content": "Hello Assistant!"}]
+            model=g4f.models.default.name,
+            messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}]
         )
         print(f"{provider.__name__}:", response)
     except Exception as e:
-        return f"{provider.__name__}: {e.__class__.__name__}: {e}"
+        print(f"{provider.__name__}: {e.__class__.__name__}: {e}")

 async def run_async():
     responses: list = [
-        create_async(_provider)
-        for _provider in get_providers()
-        if _provider.working and issubclass(_provider, AsyncProvider)
+        create_async(provider)
+        for provider in get_providers()
+        if provider.working
     ]
-    responses = await asyncio.gather(*responses)
-    for error in responses:
-        if error:
-            print(error)
+    await asyncio.gather(*responses)

 print("Total:", asyncio.run(log_time_async(run_async)))

testing/test_providers.py

@@ -36,6 +36,7 @@ def get_providers() -> list[type[BaseProvider]]:
     provider_names = dir(Provider)
     ignore_names = [
         "annotations",
+        "helper",
         "base_provider",
         "retry_provider",
         "BaseProvider",