Merge pull request #962 from hlohaus/nnn

Update HuggingChat to new api
Tekky 2023-10-02 12:20:18 +02:00 committed by GitHub
commit c0632c2741
17 changed files with 366 additions and 190 deletions

g4f/Provider/AItianhu.py

@@ -1,24 +1,25 @@
from __future__ import annotations
import json
from curl_cffi.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt
from ..typing import AsyncGenerator
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt
class AItianhu(AsyncProvider):
class AItianhu(AsyncGeneratorProvider):
url = "https://www.aitianhu.com"
working = True
supports_gpt_35_turbo = True
@classmethod
async def create_async(
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
**kwargs
) -> str:
) -> AsyncGenerator:
data = {
"prompt": format_prompt(messages),
"options": {},
@@ -27,12 +28,25 @@ class AItianhu(AsyncProvider):
"top_p": 1,
**kwargs
}
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
response = await session.post(cls.url + "/api/chat-process", json=data)
response.raise_for_status()
line = response.text.splitlines()[-1]
line = json.loads(line)
return line["text"]
headers = {
"Authority": cls.url,
"Accept": "application/json, text/plain, */*",
"Origin": cls.url,
"Referer": f"{cls.url}/"
}
async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
response.raise_for_status()
async for line in response.iter_lines():
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
yield content
else:
raise RuntimeError(f"Response: {line}")
@classmethod

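Both AItianhu providers parse the same stream format: /api/chat-process returns one JSON object per line, and the incremental text sits under detail.choices[0].delta. A hedged sketch of decoding a single line (field layout inferred from the parser above, sample value illustrative):

import json

sample = b'{"detail": {"choices": [{"delta": {"content": "Hello"}}]}}'
line = json.loads(sample)
content = line["detail"]["choices"][0]["delta"].get("content")  # -> "Hello"
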
g4f/Provider/AItianhuSpace.py

@@ -2,7 +2,8 @@ from __future__ import annotations
import random, json
from g4f.requests import AsyncSession, StreamRequest
from ..typing import AsyncGenerator
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt
domains = {
@@ -22,7 +23,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
messages: list[dict[str, str]],
stream: bool = True,
**kwargs
) -> str:
) -> AsyncGenerator:
if not model:
model = "gpt-3.5-turbo"
elif not model in domains:
@@ -31,12 +32,9 @@ class AItianhuSpace(AsyncGeneratorProvider):
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
domain = domains[model]
url = f'https://{rand}{domain}/api/chat-process'
url = f'https://{rand}{domain}'
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}
async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
async with StreamSession(impersonate="chrome110", verify=False) as session:
data = {
"prompt": format_prompt(messages),
"options": {},
@@ -45,10 +43,18 @@ class AItianhuSpace(AsyncGeneratorProvider):
"top_p": 1,
**kwargs
}
async with StreamRequest(session, "POST", url, json=data) as response:
headers = {
"Authority": url,
"Accept": "application/json, text/plain, */*",
"Origin": url,
"Referer": f"{url}/"
}
async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
response.raise_for_status()
async for line in response.content:
line = json.loads(line.rstrip())
async for line in response.iter_lines():
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
@@ -56,7 +62,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
raise RuntimeError("Rate limit for GPT 4 reached")
else:
raise RuntimeError("Response: {line}")
raise RuntimeError(f"Response: {line}")
@classmethod

g4f/Provider/Aibn.py

@@ -4,7 +4,7 @@ import time
import hashlib
from ..typing import AsyncGenerator
from g4f.requests import AsyncSession
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
@@ -20,7 +20,7 @@ class Aibn(AsyncGeneratorProvider):
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
async with AsyncSession(impersonate="chrome107") as session:
async with StreamSession(impersonate="chrome107") as session:
timestamp = int(time.time())
data = {
"messages": messages,
@@ -30,7 +30,7 @@ class Aibn(AsyncGeneratorProvider):
}
async with session.post(f"{cls.url}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
async for chunk in response.iter_content():
yield chunk.decode()
@classmethod

g4f/Provider/Aivvm.py

@@ -1,8 +1,8 @@
from __future__ import annotations
import requests
from .base_provider import BaseProvider
from ..typing import CreateResult
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
@@ -16,7 +16,7 @@ models = {
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}
class Aivvm(BaseProvider):
class Aivvm(AsyncGeneratorProvider):
url = 'https://chat.aivvm.com'
supports_stream = True
working = True
@@ -24,31 +24,18 @@ class Aivvm(BaseProvider):
supports_gpt_4 = True
@classmethod
def create_completion(cls,
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool,
**kwargs
) -> CreateResult:
) -> AsyncGenerator:
if not model:
model = "gpt-3.5-turbo"
elif model not in models:
raise ValueError(f"Model is not supported: {model}")
headers = {
"accept" : "*/*",
"accept-language" : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
"content-type" : "application/json",
"sec-ch-ua" : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": "\"Bandóz\"",
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"Referer" : "https://chat.aivvm.com/",
"Referrer-Policy" : "same-origin",
}
json_data = {
"model" : models[model],
"messages" : messages,
@@ -56,13 +43,11 @@ class Aivvm(BaseProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
response = requests.post(
"https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
response.raise_for_status()
for chunk in response.iter_content(chunk_size=None):
yield chunk.decode('utf-8')
async with StreamSession(impersonate="chrome107") as session:
async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
yield chunk.decode()
@classmethod
@property

g4f/Provider/ChatForAi.py (new file, 62 lines)

@@ -0,0 +1,62 @@
from __future__ import annotations
import time, hashlib
from ..typing import AsyncGenerator
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
class ChatForAi(AsyncGeneratorProvider):
url = "https://chatforai.com"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
async with StreamSession(impersonate="chrome107") as session:
conversation_id = f"id_{int(time.time())}"
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
"conversationId": conversation_id,
"conversationType": "chat_continuous",
"botId": "chat_continuous",
"globalSettings":{
"baseUrl": "https://api.openai.com",
"model": model if model else "gpt-3.5-turbo",
"messageHistorySize": 5,
"temperature": 0.7,
"top_p": 1,
**kwargs
},
"botSettings": {},
"prompt": prompt,
"messages": messages,
"sign": generate_signature(timestamp, conversation_id, prompt),
"timestamp": timestamp
}
async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
yield chunk.decode()
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp, id, prompt):
data = f"{timestamp}:{id}:{prompt}:6B46K4pt"
return hashlib.sha256(data.encode()).hexdigest()
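
The sign field is a SHA-256 digest over the timestamp, conversation id, prompt and a fixed secret, exactly as generate_signature above computes it; a worked example with illustrative values:

import hashlib, time

timestamp = int(time.time())
conversation_id = f"id_{timestamp}"
prompt = "Hello"
data = f"{timestamp}:{conversation_id}:{prompt}:6B46K4pt"
sign = hashlib.sha256(data.encode()).hexdigest()  # 64-char hex digest sent as the "sign" field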

g4f/Provider/ChatgptDuo.py

@@ -1,6 +1,6 @@
from __future__ import annotations
from g4f.requests import AsyncSession
from curl_cffi.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt
@@ -23,17 +23,17 @@ class ChatgptDuo(AsyncProvider):
"search": prompt,
"purpose": "ask",
}
async with session.post(f"{cls.url}/", data=data) as response:
response.raise_for_status()
data = await response.json()
response = await session.post(f"{cls.url}/", data=data)
response.raise_for_status()
data = response.json()
cls._sources = [{
"title": source["title"],
"url": source["link"],
"snippet": source["snippet"]
} for source in data["results"]]
cls._sources = [{
"title": source["title"],
"url": source["link"],
"snippet": source["snippet"]
} for source in data["results"]]
return data["answer"]
return data["answer"]
@classmethod
def get_sources(cls):

g4f/Provider/FreeGpt.py (new file, 54 lines)

@@ -0,0 +1,54 @@
from __future__ import annotations
import time, hashlib, random
from ..typing import AsyncGenerator
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
domains = [
'https://k.aifree.site',
'https://p.aifree.site'
]
class FreeGpt(AsyncGeneratorProvider):
url = "https://freegpts1.aifree.site/"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
async with StreamSession(impersonate="chrome107") as session:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
"messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, prompt)
}
url = random.choice(domains)
async with session.post(f"{url}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
yield chunk.decode()
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, secret: str = ""):
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()
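
FreeGpt signs requests the same way, but its digest covers only the timestamp and message plus an empty default secret, and the target domain is picked per request. An illustrative sketch:

import hashlib, random, time

timestamp = int(time.time())
sign = hashlib.sha256(f"{timestamp}:Hello:".encode()).hexdigest()
url = random.choice(['https://k.aifree.site', 'https://p.aifree.site']) + "/api/generate"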

g4f/Provider/HuggingChat.py

@@ -1,6 +1,6 @@
from __future__ import annotations
import json
import json, uuid
from aiohttp import ClientSession
@@ -12,7 +12,7 @@ class HuggingChat(AsyncGeneratorProvider):
url = "https://huggingface.co/chat"
needs_auth = True
working = True
model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
model = "meta-llama/Llama-2-70b-chat-hf"
@classmethod
async def create_async_generator(
@@ -37,55 +37,25 @@ class HuggingChat(AsyncGeneratorProvider):
cookies=cookies,
headers=headers
) as session:
async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response:
conversation_id = (await response.json())["conversationId"]
send = {
"id": str(uuid.uuid4()),
"inputs": format_prompt(messages),
"parameters": {
"temperature": 0.2,
"truncate": 1000,
"max_new_tokens": 1024,
"stop": ["</s>"],
"top_p": 0.95,
"repetition_penalty": 1.2,
"top_k": 50,
"return_full_text": False,
**kwargs
},
"stream": stream,
"options": {
"id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
"response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
"is_retry": False,
"use_cache": False,
"web_search_id": ""
}
"is_retry": False,
"response_id": str(uuid.uuid4()),
"web_search": False
}
async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
if not stream:
data = await response.json()
if "error" in data:
raise RuntimeError(data["error"])
elif isinstance(data, list):
yield data[0]["generated_text"].strip()
else:
raise RuntimeError(f"Response: {data}")
else:
start = "data:"
first = True
async for line in response.content:
line = line.decode("utf-8")
if line.startswith(start):
line = json.loads(line[len(start):-1])
if "token" not in line:
raise RuntimeError(f"Response: {line}")
if not line["token"]["special"]:
if first:
yield line["token"]["text"].lstrip()
first = False
else:
yield line["token"]["text"]
async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
async for line in response.content:
line = json.loads(line[:-1])
if "type" not in line:
raise RuntimeError(f"Response: {line}")
elif line["type"] == "stream":
yield line["token"]
elif line["type"] == "finalAnswer":
break
async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
response.raise_for_status()
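
The new HuggingChat endpoint streams newline-delimited JSON rather than SSE "data:" frames, which is why the parser now strips only the trailing newline. An illustrative decode of the two line types it handles (shapes inferred from the parser above, not from official docs):

import json

for raw in [b'{"type": "stream", "token": "Hel"}\n', b'{"type": "finalAnswer"}\n']:
    line = json.loads(raw[:-1])
    if line["type"] == "stream":
        print(line["token"], end="")  # incremental token
    elif line["type"] == "finalAnswer":
        break  # done; the conversation is then deleted server-side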

g4f/Provider/PerplexityAi.py

@@ -5,12 +5,12 @@ import time
import base64
from curl_cffi.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt
from .base_provider import AsyncProvider, format_prompt, get_cookies
class PerplexityAi(AsyncProvider):
url = "https://www.perplexity.ai"
working = True
working = False
supports_gpt_35_turbo = True
_sources = []
@@ -23,18 +23,32 @@ class PerplexityAi(AsyncProvider):
**kwargs
) -> str:
url = cls.url + "/socket.io/?EIO=4&transport=polling"
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
headers = {
"Referer": f"{cls.url}/"
}
async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
url_session = "https://www.perplexity.ai/api/auth/session"
response = await session.get(url_session)
response.raise_for_status()
url_session = "https://www.perplexity.ai/api/auth/session"
response = await session.get(url_session)
response.raise_for_status()
response = await session.get(url, params={"t": timestamp()})
response.raise_for_status()
sid = json.loads(response.text[1:])["sid"]
response = await session.get(url, params={"t": timestamp(), "sid": sid})
response.raise_for_status()
data = '40{"jwt":"anonymous-ask-user"}'
response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
response.raise_for_status()
response = await session.get(url, params={"t": timestamp(), "sid": sid})
response.raise_for_status()
data = "424" + json.dumps([
"perplexity_ask",
format_prompt(messages),

g4f/Provider/Wewordle.py

@@ -8,7 +8,7 @@ from .base_provider import AsyncProvider
class Wewordle(AsyncProvider):
url = "https://wewordle.org"
working = True
working = False
supports_gpt_35_turbo = True
@classmethod

g4f/Provider/Ylokh.py

@@ -1,8 +1,8 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
@@ -23,14 +23,8 @@ class Ylokh(AsyncGeneratorProvider):
) -> AsyncGenerator:
model = model if model else "gpt-3.5-turbo"
headers = {
"User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
"Accept" : "*/*",
"Accept-language" : "de,en-US;q=0.7,en;q=0.3",
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
}
data = {
"messages": messages,
@@ -43,18 +37,19 @@ class Ylokh(AsyncGeneratorProvider):
"stream": stream,
**kwargs
}
async with ClientSession(
headers=headers
async with StreamSession(
headers=headers,
proxies={"https": proxy}
) as session:
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response:
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
response.raise_for_status()
if stream:
async for line in response.content:
async for line in response.iter_lines():
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
line = json.loads(line[6:])
content = line["choices"][0]["delta"].get("content")
if content:
yield content

g4f/Provider/__init__.py

@@ -10,6 +10,7 @@ from .Aivvm import Aivvm
from .Bard import Bard
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
from .ChatgptAi import ChatgptAi
from .ChatgptDuo import ChatgptDuo
from .ChatgptLogin import ChatgptLogin
@@ -18,6 +19,7 @@ from .DeepAi import DeepAi
from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .FreeGpt import FreeGpt
from .GetGpt import GetGpt
from .GptGo import GptGo
from .H2o import H2o
@@ -61,6 +63,7 @@ __all__ = [
'Bard',
'Bing',
'ChatBase',
'ChatForAi',
'ChatgptAi',
'ChatgptDuo',
'ChatgptLogin',
@@ -69,6 +72,7 @@ __all__ = [
'DfeHub',
'EasyChat',
'Forefront',
'FreeGpt',
'GetGpt',
'GptGo',
'H2o',

g4f/Provider/helper.py

@@ -1,28 +1,33 @@
from __future__ import annotations
import asyncio
import sys
import asyncio, sys
from asyncio import AbstractEventLoop
import browser_cookie3
# Change event loop policy on windows
if sys.platform == 'win32':
if isinstance(
asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Local Cookie Storage
_cookies: dict[str, dict[str, str]] = {}
# Use own event_loop_policy with a selector event loop on windows.
if sys.platform == 'win32':
_event_loop_policy = asyncio.WindowsSelectorEventLoopPolicy()
else:
_event_loop_policy = asyncio.get_event_loop_policy()
# If event loop is already running, handle nested event loops
# If "nest_asyncio" is installed, patch the event loop.
def get_event_loop() -> AbstractEventLoop:
try:
asyncio.get_running_loop()
except RuntimeError:
return _event_loop_policy.get_event_loop()
try:
return asyncio.get_event_loop()
except RuntimeError:
asyncio.set_event_loop(asyncio.new_event_loop())
return asyncio.get_event_loop()
try:
event_loop = _event_loop_policy.get_event_loop()
event_loop = asyncio.get_event_loop()
if not hasattr(event_loop.__class__, "_nest_patched"):
import nest_asyncio
nest_asyncio.apply(event_loop)
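
In short, the helper now pins a selector event loop policy on Windows, reuses a running loop when one exists, and patches it via nest_asyncio so nested loops work. A hedged usage sketch (import path assumed from this file's location):

from g4f.Provider.helper import get_event_loop

async def main():
    return "ok"

loop = get_event_loop()
# with nest_asyncio installed, this also works inside an already-running loop (e.g. a notebook)
print(loop.run_until_complete(main()))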

g4f/models.py

@@ -8,16 +8,19 @@ from .Provider import (
PerplexityAi,
ChatgptDuo,
ChatgptAi,
ChatForAi,
ChatBase,
AItianhu,
Wewordle,
Yqcloud,
Myshell,
FreeGpt,
Vercel,
DeepAi,
Aichat,
Aivvm,
GptGo,
Ylokh,
Bard,
Aibn,
Bing,
@@ -42,7 +45,7 @@ default = Model(
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
ChatgptDuo, # Include search results
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn,
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
])
)
@@ -51,7 +54,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn,
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
])
)
@@ -59,7 +62,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
Myshell, AItianhuSpace,
Myshell, Ylokh,
])
)
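
With the registrations above, default and gpt-3.5-turbo now also retry across ChatForAi, FreeGpt and Ylokh, and gpt-4 swaps AItianhuSpace for Ylokh. A minimal usage sketch, assuming the public g4f API at this commit:

import g4f

response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_turbo,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)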

g4f/requests.py

@@ -1,20 +1,24 @@
from __future__ import annotations
import json, sys
import warnings, json, asyncio
from functools import partialmethod
from asyncio import Future, Queue
from typing import AsyncGenerator
from aiohttp import StreamReader
from aiohttp.base_protocol import BaseProtocol
from curl_cffi.requests import AsyncSession, Response
from curl_cffi.requests import AsyncSession as BaseSession
from curl_cffi.requests import Response
import curl_cffi
is_newer_0_5_8 = hasattr(AsyncSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl")
is_newer_0_5_9 = hasattr(curl_cffi.AsyncCurl, "remove_handle")
is_newer_0_5_10 = hasattr(AsyncSession, "release_curl")
class StreamResponse:
def __init__(self, inner: Response, content: StreamReader, request):
def __init__(self, inner: Response, queue: Queue):
self.inner = inner
self.content = content
self.request = request
self.queue = queue
self.request = inner.request
self.status_code = inner.status_code
self.reason = inner.reason
self.ok = inner.ok
@@ -22,7 +26,7 @@ class StreamResponse:
self.cookies = inner.cookies
async def text(self) -> str:
content = await self.content.read()
content = await self.read()
return content.decode()
def raise_for_status(self):
@@ -30,56 +34,120 @@ class StreamResponse:
raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}")
async def json(self, **kwargs):
return json.loads(await self.content.read(), **kwargs)
return json.loads(await self.read(), **kwargs)
async def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None) -> AsyncGenerator[bytes]:
"""
Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/
which is under the License: Apache 2.0
"""
pending = None
async for chunk in self.iter_content(
chunk_size=chunk_size, decode_unicode=decode_unicode
):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
async def iter_content(self, chunk_size=None, decode_unicode=False) -> AsyncGenerator[bytes]:
if chunk_size:
warnings.warn("chunk_size is ignored, there is no way to tell curl that.")
if decode_unicode:
raise NotImplementedError()
while True:
chunk = await self.queue.get()
if chunk is None:
return
yield chunk
async def read(self) -> bytes:
return b"".join([chunk async for chunk in self.iter_content()])
class StreamRequest:
def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
self.session = session
self.loop = session.loop
self.content = StreamReader(
BaseProtocol(session.loop),
sys.maxsize,
loop=session.loop
)
self.loop = session.loop if session.loop else asyncio.get_running_loop()
self.queue = Queue()
self.method = method
self.url = url
self.options = kwargs
self.handle = None
def on_content(self, data):
def _on_content(self, data):
if not self.enter.done():
self.enter.set_result(None)
self.content.feed_data(data)
self.queue.put_nowait(data)
def on_done(self, task):
self.content.feed_eof()
self.curl.clean_after_perform()
self.curl.reset()
self.session.push_curl(self.curl)
def _on_done(self, task: Future):
if not self.enter.done():
self.enter.set_result(None)
self.queue.put_nowait(None)
async def __aenter__(self) -> StreamResponse:
self.loop.call_soon(self.session.release_curl, self.curl)
async def fetch(self) -> StreamResponse:
if self.handle:
raise RuntimeError("Request already started")
self.curl = await self.session.pop_curl()
self.enter = self.loop.create_future()
request, _, header_buffer = self.session._set_curl_options(
self.curl,
self.method,
self.url,
content_callback=self.on_content,
**self.options
)
await self.session.acurl.add_handle(self.curl, False)
self.handle = self.session.acurl._curl2future[self.curl]
self.handle.add_done_callback(self.on_done)
if is_newer_0_5_10:
request, _, header_buffer, _, _ = self.session._set_curl_options(
self.curl,
self.method,
self.url,
content_callback=self._on_content,
**self.options
)
else:
request, _, header_buffer = self.session._set_curl_options(
self.curl,
self.method,
self.url,
content_callback=self._on_content,
**self.options
)
if is_newer_0_5_9:
self.handle = self.session.acurl.add_handle(self.curl)
else:
await self.session.acurl.add_handle(self.curl, False)
self.handle = self.session.acurl._curl2future[self.curl]
self.handle.add_done_callback(self._on_done)
# Wait for headers
await self.enter
# Raise exceptions
if self.handle.done():
self.handle.result()
if is_newer_0_5_8:
response = self.session._parse_response(self.curl, _, header_buffer)
response.request = request
else:
response = self.session._parse_response(self.curl, request, _, header_buffer)
return StreamResponse(
self.session._parse_response(self.curl, request, _, header_buffer),
self.content,
request
response,
self.queue
)
async def __aexit__(self, exc_type, exc, tb):
pass
async def __aenter__(self) -> StreamResponse:
return await self.fetch()
class AsyncSession(BaseSession):
async def __aexit__(self, *args):
self.session.release_curl(self.curl)
class StreamSession(AsyncSession):
def request(
self,
method: str,

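StreamSession routes each HTTP verb through StreamRequest (its request method is defined with partialmethod and is truncated above), so providers get requests-style streaming over curl_cffi. A minimal sketch, assuming curl_cffi is installed; the URL is a placeholder:

from g4f.requests import StreamSession

async def demo():
    async with StreamSession(impersonate="chrome107") as session:
        async with session.post("https://example.com/api", json={"prompt": "Hi"}) as response:
            response.raise_for_status()
            async for line in response.iter_lines():
                print(line.decode())

# run with: asyncio.run(demo())
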
testing/test_async.py

@@ -5,31 +5,26 @@ import asyncio
sys.path.append(str(Path(__file__).parent.parent))
import g4f
from g4f.Provider import AsyncProvider
from testing.test_providers import get_providers
from testing.log_time import log_time_async
async def create_async(provider):
model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
try:
response = await log_time_async(
provider.create_async,
model=model,
messages=[{"role": "user", "content": "Hello Assistant!"}]
model=g4f.models.default.name,
messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}]
)
print(f"{provider.__name__}:", response)
except Exception as e:
return f"{provider.__name__}: {e.__class__.__name__}: {e}"
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
async def run_async():
responses: list = [
create_async(_provider)
for _provider in get_providers()
if _provider.working and issubclass(_provider, AsyncProvider)
create_async(provider)
for provider in get_providers()
if provider.working
]
responses = await asyncio.gather(*responses)
for error in responses:
if error:
print(error)
await asyncio.gather(*responses)
print("Total:", asyncio.run(log_time_async(run_async)))

testing/test_providers.py

@@ -36,6 +36,7 @@ def get_providers() -> list[type[BaseProvider]]:
provider_names = dir(Provider)
ignore_names = [
"annotations",
"helper",
"base_provider",
"retry_provider",
"BaseProvider",