From 98895e5b09ea5e3f19b2263ddca028c7b296abb2 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 1 Oct 2023 06:38:11 +0200 Subject: [PATCH 1/7] Update HuggingChat to new api Impersonate Aivvm Provider Add ChatForAi and FreeGpt Provider Update AItianhuSpace Provider Improve StreamRequest Support Update get_event_loop Helper --- g4f/Provider/AItianhuSpace.py | 21 +++++++----- g4f/Provider/Aivvm.py | 39 +++++++--------------- g4f/Provider/ChatForAi.py | 62 +++++++++++++++++++++++++++++++++++ g4f/Provider/FreeGpt.py | 54 ++++++++++++++++++++++++++++++ g4f/Provider/HuggingChat.py | 62 +++++++++-------------------------- g4f/Provider/PerplexityAi.py | 22 +++++++++++-- g4f/Provider/__init__.py | 4 +++ g4f/Provider/helper.py | 10 ++---- g4f/requests.py | 32 +++++++++++++----- 9 files changed, 206 insertions(+), 100 deletions(-) create mode 100644 g4f/Provider/ChatForAi.py create mode 100644 g4f/Provider/FreeGpt.py diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py index 8beb3355..eb072db5 100644 --- a/g4f/Provider/AItianhuSpace.py +++ b/g4f/Provider/AItianhuSpace.py @@ -2,7 +2,7 @@ from __future__ import annotations import random, json -from g4f.requests import AsyncSession, StreamRequest +from g4f.requests import AsyncSession from .base_provider import AsyncGeneratorProvider, format_prompt domains = { @@ -31,12 +31,9 @@ class AItianhuSpace(AsyncGeneratorProvider): chars = 'abcdefghijklmnopqrstuvwxyz0123456789' rand = ''.join(random.choice(chars) for _ in range(6)) domain = domains[model] - url = f'https://{rand}{domain}/api/chat-process' + url = f'https://{rand}{domain}' - headers = { - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", - } - async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session: + async with AsyncSession(impersonate="chrome110", verify=False) as session: data = { "prompt": format_prompt(messages), 
"options": {}, @@ -45,9 +42,17 @@ class AItianhuSpace(AsyncGeneratorProvider): "top_p": 1, **kwargs } - async with StreamRequest(session, "POST", url, json=data) as response: + headers = { + "Authority": url, + "Accept": "application/json, text/plain, */*", + "Origin": url, + "Referer": f"{url}/" + } + async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response: response.raise_for_status() async for line in response.content: + if b"platform's risk control" in line: + raise RuntimeError("Platform's Risk Control") line = json.loads(line.rstrip()) if "detail" in line: content = line["detail"]["choices"][0]["delta"].get("content") @@ -56,7 +61,7 @@ class AItianhuSpace(AsyncGeneratorProvider): elif "message" in line and "AI-4接口非常昂贵" in line["message"]: raise RuntimeError("Rate limit for GPT 4 reached") else: - raise RuntimeError("Response: {line}") + raise RuntimeError(f"Response: {line}") @classmethod diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py index 1ba6d6f1..02b6c2b4 100644 --- a/g4f/Provider/Aivvm.py +++ b/g4f/Provider/Aivvm.py @@ -1,8 +1,8 @@ from __future__ import annotations -import requests -from .base_provider import BaseProvider -from ..typing import CreateResult +from ..requests import AsyncSession +from .base_provider import AsyncGeneratorProvider +from ..typing import AsyncGenerator # to recreate this easily, send a post request to https://chat.aivvm.com/api/models models = { @@ -16,7 +16,7 @@ models = { 'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'}, } -class Aivvm(BaseProvider): +class Aivvm(AsyncGeneratorProvider): url = 'https://chat.aivvm.com' supports_stream = True working = True @@ -24,31 +24,18 @@ class Aivvm(BaseProvider): supports_gpt_4 = True @classmethod - def create_completion(cls, + async def create_async_generator( + cls, model: str, messages: list[dict[str, str]], stream: bool, **kwargs - ) -> CreateResult: + ) -> AsyncGenerator: if not model: model = "gpt-3.5-turbo" elif 
model not in models: raise ValueError(f"Model is not supported: {model}") - headers = { - "accept" : "*/*", - "accept-language" : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7", - "content-type" : "application/json", - "sec-ch-ua" : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"", - "sec-ch-ua-mobile" : "?0", - "sec-ch-ua-platform": "\"Bandóz\"", - "sec-fetch-dest" : "empty", - "sec-fetch-mode" : "cors", - "sec-fetch-site" : "same-origin", - "Referer" : "https://chat.aivvm.com/", - "Referrer-Policy" : "same-origin", - } - json_data = { "model" : models[model], "messages" : messages, @@ -56,13 +43,11 @@ class Aivvm(BaseProvider): "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."), "temperature" : kwargs.get("temperature", 0.7) } - - response = requests.post( - "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True) - response.raise_for_status() - - for chunk in response.iter_content(chunk_size=None): - yield chunk.decode('utf-8') + async with AsyncSession(impersonate="chrome107") as session: + async with session.post(f"{cls.url}/api/chat", json=json_data) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + yield chunk.decode('utf-8') @classmethod @property diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py new file mode 100644 index 00000000..efb5478e --- /dev/null +++ b/g4f/Provider/ChatForAi.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import time, hashlib + +from ..typing import AsyncGenerator +from g4f.requests import AsyncSession +from .base_provider import AsyncGeneratorProvider + + +class ChatForAi(AsyncGeneratorProvider): + url = "https://chatforai.com" + supports_gpt_35_turbo = True + working = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> 
AsyncGenerator: + async with AsyncSession(impersonate="chrome107") as session: + conversation_id = f"id_{int(time.time())}" + prompt = messages[-1]["content"] + timestamp = int(time.time()) + data = { + "conversationId": conversation_id, + "conversationType": "chat_continuous", + "botId": "chat_continuous", + "globalSettings":{ + "baseUrl": "https://api.openai.com", + "model": model if model else "gpt-3.5-turbo", + "messageHistorySize": 5, + "temperature": 0.7, + "top_p": 1, + **kwargs + }, + "botSettings": {}, + "prompt": prompt, + "messages": messages, + "sign": generate_signature(timestamp, conversation_id, prompt), + "timestamp": timestamp + } + async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + yield chunk.decode() + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + +def generate_signature(timestamp, id, prompt): + data = f"{timestamp}:{id}:{prompt}:6B46K4pt" + return hashlib.sha256(data.encode()).hexdigest() diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py new file mode 100644 index 00000000..534b69a5 --- /dev/null +++ b/g4f/Provider/FreeGpt.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import time, hashlib, random + +from ..typing import AsyncGenerator +from g4f.requests import AsyncSession +from .base_provider import AsyncGeneratorProvider + +domains = [ + 'https://k.aifree.site', + 'https://p.aifree.site' +] + +class FreeGpt(AsyncGeneratorProvider): + url = "https://freegpts1.aifree.site/" + supports_gpt_35_turbo = True + working = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: list[dict[str, str]], + **kwargs + ) -> AsyncGenerator: + async with 
AsyncSession(impersonate="chrome107") as session: + prompt = messages[-1]["content"] + timestamp = int(time.time()) + data = { + "messages": messages, + "time": timestamp, + "pass": None, + "sign": generate_signature(timestamp, prompt) + } + url = random.choice(domains) + async with session.post(f"{url}/api/generate", json=data) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + yield chunk.decode() + + @classmethod + @property + def params(cls): + params = [ + ("model", "str"), + ("messages", "list[dict[str, str]]"), + ("stream", "bool"), + ] + param = ", ".join([": ".join(p) for p in params]) + return f"g4f.provider.{cls.__name__} supports: ({param})" + +def generate_signature(timestamp: int, message: str, secret: str = ""): + data = f"{timestamp}:{message}:{secret}" + return hashlib.sha256(data.encode()).hexdigest() \ No newline at end of file diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index b2cf9793..f1cec775 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -1,6 +1,6 @@ from __future__ import annotations -import json +import json, uuid from aiohttp import ClientSession @@ -12,7 +12,7 @@ class HuggingChat(AsyncGeneratorProvider): url = "https://huggingface.co/chat" needs_auth = True working = True - model = "OpenAssistant/oasst-sft-6-llama-30b-xor" + model = "meta-llama/Llama-2-70b-chat-hf" @classmethod async def create_async_generator( @@ -37,55 +37,25 @@ class HuggingChat(AsyncGeneratorProvider): cookies=cookies, headers=headers ) as session: - async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response: + async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response: conversation_id = (await response.json())["conversationId"] send = { + "id": str(uuid.uuid4()), "inputs": format_prompt(messages), - "parameters": { - "temperature": 0.2, - "truncate": 1000, - "max_new_tokens": 1024, - 
"stop": [""], - "top_p": 0.95, - "repetition_penalty": 1.2, - "top_k": 50, - "return_full_text": False, - **kwargs - }, - "stream": stream, - "options": { - "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37", - "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a", - "is_retry": False, - "use_cache": False, - "web_search_id": "" - } + "is_retry": False, + "response_id": str(uuid.uuid4()), + "web_search": False } - async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response: - if not stream: - data = await response.json() - if "error" in data: - raise RuntimeError(data["error"]) - elif isinstance(data, list): - yield data[0]["generated_text"].strip() - else: - raise RuntimeError(f"Response: {data}") - else: - start = "data:" - first = True - async for line in response.content: - line = line.decode("utf-8") - if line.startswith(start): - line = json.loads(line[len(start):-1]) - if "token" not in line: - raise RuntimeError(f"Response: {line}") - if not line["token"]["special"]: - if first: - yield line["token"]["text"].lstrip() - first = False - else: - yield line["token"]["text"] + async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response: + async for line in response.content: + line = json.loads(line[:-1]) + if "type" not in line: + raise RuntimeError(f"Response: {line}") + elif line["type"] == "stream": + yield line["token"] + elif line["type"] == "finalAnswer": + break async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response: response.raise_for_status() diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py index fc0fd48c..6e95fd51 100644 --- a/g4f/Provider/PerplexityAi.py +++ b/g4f/Provider/PerplexityAi.py @@ -4,8 +4,9 @@ import json import time import base64 from curl_cffi.requests import AsyncSession +from fake_useragent import UserAgent -from .base_provider import AsyncProvider, format_prompt +from .base_provider import 
AsyncProvider, format_prompt, get_cookies class PerplexityAi(AsyncProvider): @@ -23,18 +24,35 @@ class PerplexityAi(AsyncProvider): **kwargs ) -> str: url = cls.url + "/socket.io/?EIO=4&transport=polling" - async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session: + headers = { + "User-Agent": UserAgent().random, + "Referer": f"{cls.url}/" + } + async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session: url_session = "https://www.perplexity.ai/api/auth/session" response = await session.get(url_session) + response.raise_for_status() + + url_session = "https://www.perplexity.ai/api/auth/session" + response = await session.get(url_session) + response.raise_for_status() response = await session.get(url, params={"t": timestamp()}) response.raise_for_status() sid = json.loads(response.text[1:])["sid"] + response = await session.get(url, params={"t": timestamp(), "sid": sid}) + response.raise_for_status() + + print(session.cookies) + data = '40{"jwt":"anonymous-ask-user"}' response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data) response.raise_for_status() + response = await session.get(url, params={"t": timestamp(), "sid": sid}) + response.raise_for_status() + data = "424" + json.dumps([ "perplexity_ask", format_prompt(messages), diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 59c91dd5..b2f0f729 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -10,6 +10,7 @@ from .Aivvm import Aivvm from .Bard import Bard from .Bing import Bing from .ChatBase import ChatBase +from .ChatForAi import ChatForAi from .ChatgptAi import ChatgptAi from .ChatgptDuo import ChatgptDuo from .ChatgptLogin import ChatgptLogin @@ -18,6 +19,7 @@ from .DeepAi import DeepAi from .DfeHub import DfeHub from .EasyChat import EasyChat from .Forefront import Forefront +from .FreeGpt import FreeGpt from .GetGpt import GetGpt from .GptGo import GptGo from .H2o 
import H2o @@ -61,6 +63,7 @@ __all__ = [ 'Bard', 'Bing', 'ChatBase', + 'ChatForAi', 'ChatgptAi', 'ChatgptDuo', 'ChatgptLogin', @@ -69,6 +72,7 @@ __all__ = [ 'DfeHub', 'EasyChat', 'Forefront', + 'FreeGpt', 'GetGpt', 'GptGo', 'H2o', diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py index e14ae65e..2c00a488 100644 --- a/g4f/Provider/helper.py +++ b/g4f/Provider/helper.py @@ -1,18 +1,12 @@ from __future__ import annotations import asyncio -import sys from asyncio import AbstractEventLoop import browser_cookie3 _cookies: dict[str, dict[str, str]] = {} -# Use own event_loop_policy with a selector event loop on windows. -if sys.platform == 'win32': - _event_loop_policy = asyncio.WindowsSelectorEventLoopPolicy() -else: - _event_loop_policy = asyncio.get_event_loop_policy() # If event loop is already running, handle nested event loops # If "nest_asyncio" is installed, patch the event loop. @@ -20,9 +14,9 @@ def get_event_loop() -> AbstractEventLoop: try: asyncio.get_running_loop() except RuntimeError: - return _event_loop_policy.get_event_loop() + return asyncio.get_event_loop() try: - event_loop = _event_loop_policy.get_event_loop() + event_loop = asyncio.get_event_loop() if not hasattr(event_loop.__class__, "_nest_patched"): import nest_asyncio nest_asyncio.apply(event_loop) diff --git a/g4f/requests.py b/g4f/requests.py index 736442e3..f6f2383b 100644 --- a/g4f/requests.py +++ b/g4f/requests.py @@ -1,6 +1,6 @@ from __future__ import annotations -import json, sys +import json, sys, asyncio from functools import partialmethod from aiohttp import StreamReader @@ -8,6 +8,9 @@ from aiohttp.base_protocol import BaseProtocol from curl_cffi.requests import AsyncSession as BaseSession from curl_cffi.requests import Response +from curl_cffi import AsyncCurl + +is_newer_0_5_9 = hasattr(AsyncCurl, "remove_handle") class StreamResponse: @@ -35,7 +38,7 @@ class StreamResponse: class StreamRequest: def __init__(self, session: AsyncSession, method: str, url: str, **kwargs): 
self.session = session - self.loop = session.loop + self.loop = session.loop if session.loop else asyncio.get_running_loop() self.content = StreamReader( BaseProtocol(session.loop), sys.maxsize, @@ -51,10 +54,9 @@ class StreamRequest: self.content.feed_data(data) def on_done(self, task): + if not self.enter.done(): + self.enter.set_result(None) self.content.feed_eof() - self.curl.clean_after_perform() - self.curl.reset() - self.session.push_curl(self.curl) async def __aenter__(self) -> StreamResponse: self.curl = await self.session.pop_curl() @@ -66,18 +68,30 @@ class StreamRequest: content_callback=self.on_content, **self.options ) - await self.session.acurl.add_handle(self.curl, False) - self.handle = self.session.acurl._curl2future[self.curl] + if is_newer_0_5_9: + self.handle = self.session.acurl.add_handle(self.curl) + else: + await self.session.acurl.add_handle(self.curl, False) + self.handle = self.session.acurl._curl2future[self.curl] self.handle.add_done_callback(self.on_done) await self.enter + if is_newer_0_5_9: + response = self.session._parse_response(self.curl, _, header_buffer) + response.request = request + else: + response = self.session._parse_response(self.curl, request, _, header_buffer) return StreamResponse( - self.session._parse_response(self.curl, request, _, header_buffer), + response, self.content, request ) async def __aexit__(self, exc_type, exc, tb): - pass + if not self.handle.done(): + self.session.acurl.set_result(self.curl) + self.curl.clean_after_perform() + self.curl.reset() + self.session.push_curl(self.curl) class AsyncSession(BaseSession): def request( From 261fac86dcf278310d035f89f06e7f187e4ebf9c Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 1 Oct 2023 06:41:00 +0200 Subject: [PATCH 2/7] Disable PerplexityAi Provider --- g4f/Provider/Aivvm.py | 2 +- g4f/Provider/PerplexityAi.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py index 02b6c2b4..5cd91546 
100644 --- a/g4f/Provider/Aivvm.py +++ b/g4f/Provider/Aivvm.py @@ -47,7 +47,7 @@ class Aivvm(AsyncGeneratorProvider): async with session.post(f"{cls.url}/api/chat", json=json_data) as response: response.raise_for_status() async for chunk in response.content.iter_any(): - yield chunk.decode('utf-8') + yield chunk.decode() @classmethod @property diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py index 6e95fd51..612e2d98 100644 --- a/g4f/Provider/PerplexityAi.py +++ b/g4f/Provider/PerplexityAi.py @@ -11,7 +11,7 @@ from .base_provider import AsyncProvider, format_prompt, get_cookies class PerplexityAi(AsyncProvider): url = "https://www.perplexity.ai" - working = True + working = False supports_gpt_35_turbo = True _sources = [] From bb481a03ab1a64e47731eb28e461e1dc1d655383 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 1 Oct 2023 10:36:09 +0200 Subject: [PATCH 3/7] Disable Wewordle Provider Update Ylokh Provider Improve StreamRequest --- g4f/Provider/Wewordle.py | 2 +- g4f/Provider/Ylokh.py | 10 ++-------- g4f/requests.py | 10 +++++++--- testing/test_providers.py | 1 + 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/g4f/Provider/Wewordle.py b/g4f/Provider/Wewordle.py index a7bdc722..26d040c3 100644 --- a/g4f/Provider/Wewordle.py +++ b/g4f/Provider/Wewordle.py @@ -8,7 +8,7 @@ from .base_provider import AsyncProvider class Wewordle(AsyncProvider): url = "https://wewordle.org" - working = True + working = False supports_gpt_35_turbo = True @classmethod diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py index c7b92089..2187eb78 100644 --- a/g4f/Provider/Ylokh.py +++ b/g4f/Provider/Ylokh.py @@ -1,8 +1,8 @@ from __future__ import annotations import json -from aiohttp import ClientSession +from ..requests import AsyncSession from .base_provider import AsyncGeneratorProvider from ..typing import AsyncGenerator @@ -23,14 +23,8 @@ class Ylokh(AsyncGeneratorProvider): ) -> AsyncGenerator: model = model if model else 
"gpt-3.5-turbo" headers = { - "User-Agent" : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0", - "Accept" : "*/*", - "Accept-language" : "de,en-US;q=0.7,en;q=0.3", "Origin" : cls.url, "Referer" : cls.url + "/", - "Sec-Fetch-Dest" : "empty", - "Sec-Fetch-Mode" : "cors", - "Sec-Fetch-Site" : "same-origin", } data = { "messages": messages, @@ -43,7 +37,7 @@ class Ylokh(AsyncGeneratorProvider): "stream": stream, **kwargs } - async with ClientSession( + async with AsyncSession( headers=headers ) as session: async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response: diff --git a/g4f/requests.py b/g4f/requests.py index f6f2383b..6f4b587c 100644 --- a/g4f/requests.py +++ b/g4f/requests.py @@ -11,7 +11,7 @@ from curl_cffi.requests import Response from curl_cffi import AsyncCurl is_newer_0_5_9 = hasattr(AsyncCurl, "remove_handle") - +is_newer_0_5_8 = hasattr(BaseSession, "_set_cookies") class StreamResponse: def __init__(self, inner: Response, content: StreamReader, request): @@ -46,6 +46,10 @@ class StreamRequest: ) self.method = method self.url = url + if "proxy" in kwargs: + proxy = kwargs.pop("proxy") + if proxy: + kwargs["proxies"] = {"http": proxy, "https": proxy} self.options = kwargs def on_content(self, data): @@ -75,7 +79,7 @@ class StreamRequest: self.handle = self.session.acurl._curl2future[self.curl] self.handle.add_done_callback(self.on_done) await self.enter - if is_newer_0_5_9: + if is_newer_0_5_8: response = self.session._parse_response(self.curl, _, header_buffer) response.request = request else: @@ -91,7 +95,7 @@ class StreamRequest: self.session.acurl.set_result(self.curl) self.curl.clean_after_perform() self.curl.reset() - self.session.push_curl(self.curl) + self.session.push_curl(self.curl) class AsyncSession(BaseSession): def request( diff --git a/testing/test_providers.py b/testing/test_providers.py index cd82fe7c..73b75f5d 100644 --- a/testing/test_providers.py +++ 
b/testing/test_providers.py @@ -36,6 +36,7 @@ def get_providers() -> list[type[BaseProvider]]: provider_names = dir(Provider) ignore_names = [ "annotations", + "helper", "base_provider", "retry_provider", "BaseProvider", From 7b9ad21de81cd5129b047d8de3ce8d9e5a53ea9a Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Sun, 1 Oct 2023 20:29:57 +0200 Subject: [PATCH 4/7] Change event loop policy on windows Support more versions from curl_cffi --- g4f/Provider/helper.py | 11 +++++++++-- g4f/requests.py | 31 +++++++++++++++++++++---------- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py index 2c00a488..544c5a76 100644 --- a/g4f/Provider/helper.py +++ b/g4f/Provider/helper.py @@ -1,13 +1,20 @@ from __future__ import annotations -import asyncio +import asyncio, sys from asyncio import AbstractEventLoop import browser_cookie3 +# Change event loop policy on windows +if sys.platform == 'win32': + if isinstance( + asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy + ): + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + +# Local Cookie Storage _cookies: dict[str, dict[str, str]] = {} - # If event loop is already running, handle nested event loops # If "nest_asyncio" is installed, patch the event loop. 
def get_event_loop() -> AbstractEventLoop: diff --git a/g4f/requests.py b/g4f/requests.py index 6f4b587c..367bafa0 100644 --- a/g4f/requests.py +++ b/g4f/requests.py @@ -8,10 +8,12 @@ from aiohttp.base_protocol import BaseProtocol from curl_cffi.requests import AsyncSession as BaseSession from curl_cffi.requests import Response -from curl_cffi import AsyncCurl -is_newer_0_5_9 = hasattr(AsyncCurl, "remove_handle") -is_newer_0_5_8 = hasattr(BaseSession, "_set_cookies") +import curl_cffi + +is_newer_0_5_8 = hasattr(BaseSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl") +is_newer_0_5_9 = hasattr(curl_cffi.AsyncCurl, "remove_handle") +is_newer_0_5_10 = hasattr(BaseSession, "release_curl") class StreamResponse: def __init__(self, inner: Response, content: StreamReader, request): @@ -65,13 +67,22 @@ class StreamRequest: async def __aenter__(self) -> StreamResponse: self.curl = await self.session.pop_curl() self.enter = self.loop.create_future() - request, _, header_buffer = self.session._set_curl_options( - self.curl, - self.method, - self.url, - content_callback=self.on_content, - **self.options - ) + if is_newer_0_5_10: + request, _, header_buffer, _, _ = self.session._set_curl_options( + self.curl, + self.method, + self.url, + content_callback=self.on_content, + **self.options + ) + else: + request, _, header_buffer = self.session._set_curl_options( + self.curl, + self.method, + self.url, + content_callback=self.on_content, + **self.options + ) if is_newer_0_5_9: self.handle = self.session.acurl.add_handle(self.curl) else: From eb0e2c6a93c3f21937457d13220ce2b7fca1f04a Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 2 Oct 2023 02:04:22 +0200 Subject: [PATCH 5/7] +Curl +Async +Stream Requests Update Model List --- g4f/Provider/AItianhuSpace.py | 8 +-- g4f/Provider/Aibn.py | 6 +- g4f/Provider/Aivvm.py | 6 +- g4f/Provider/ChatForAi.py | 6 +- g4f/Provider/ChatgptDuo.py | 20 +++--- g4f/Provider/FreeGpt.py | 6 +- 
g4f/Provider/Ylokh.py | 13 ++-- g4f/models.py | 9 ++- g4f/requests.py | 119 ++++++++++++++++++++++------------ 9 files changed, 118 insertions(+), 75 deletions(-) diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py index eb072db5..8805b1c0 100644 --- a/g4f/Provider/AItianhuSpace.py +++ b/g4f/Provider/AItianhuSpace.py @@ -2,7 +2,7 @@ from __future__ import annotations import random, json -from g4f.requests import AsyncSession +from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider, format_prompt domains = { @@ -33,7 +33,7 @@ class AItianhuSpace(AsyncGeneratorProvider): domain = domains[model] url = f'https://{rand}{domain}' - async with AsyncSession(impersonate="chrome110", verify=False) as session: + async with StreamSession(impersonate="chrome110", verify=False) as session: data = { "prompt": format_prompt(messages), "options": {}, @@ -50,10 +50,10 @@ class AItianhuSpace(AsyncGeneratorProvider): } async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response: response.raise_for_status() - async for line in response.content: + async for line in response.iter_lines(): if b"platform's risk control" in line: raise RuntimeError("Platform's Risk Control") - line = json.loads(line.rstrip()) + line = json.loads(line) if "detail" in line: content = line["detail"]["choices"][0]["delta"].get("content") if content: diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/Aibn.py index 1ef928be..fe278f84 100644 --- a/g4f/Provider/Aibn.py +++ b/g4f/Provider/Aibn.py @@ -4,7 +4,7 @@ import time import hashlib from ..typing import AsyncGenerator -from g4f.requests import AsyncSession +from ..requests import StreamRequest from .base_provider import AsyncGeneratorProvider @@ -20,7 +20,7 @@ class Aibn(AsyncGeneratorProvider): messages: list[dict[str, str]], **kwargs ) -> AsyncGenerator: - async with AsyncSession(impersonate="chrome107") as session: + async with StreamRequest(impersonate="chrome107") as 
session: timestamp = int(time.time()) data = { "messages": messages, @@ -30,7 +30,7 @@ class Aibn(AsyncGeneratorProvider): } async with session.post(f"{cls.url}/api/generate", json=data) as response: response.raise_for_status() - async for chunk in response.content.iter_any(): + async for chunk in response.iter_content(): yield chunk.decode() @classmethod diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py index 5cd91546..c4ec677c 100644 --- a/g4f/Provider/Aivvm.py +++ b/g4f/Provider/Aivvm.py @@ -1,6 +1,6 @@ from __future__ import annotations -from ..requests import AsyncSession +from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider from ..typing import AsyncGenerator @@ -43,10 +43,10 @@ class Aivvm(AsyncGeneratorProvider): "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."), "temperature" : kwargs.get("temperature", 0.7) } - async with AsyncSession(impersonate="chrome107") as session: + async with StreamSession(impersonate="chrome107") as session: async with session.post(f"{cls.url}/api/chat", json=json_data) as response: response.raise_for_status() - async for chunk in response.content.iter_any(): + async for chunk in response.iter_content(): yield chunk.decode() @classmethod diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py index efb5478e..779799cf 100644 --- a/g4f/Provider/ChatForAi.py +++ b/g4f/Provider/ChatForAi.py @@ -3,7 +3,7 @@ from __future__ import annotations import time, hashlib from ..typing import AsyncGenerator -from g4f.requests import AsyncSession +from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider @@ -19,7 +19,7 @@ class ChatForAi(AsyncGeneratorProvider): messages: list[dict[str, str]], **kwargs ) -> AsyncGenerator: - async with AsyncSession(impersonate="chrome107") as session: + async with StreamSession(impersonate="chrome107") as session: 
conversation_id = f"id_{int(time.time())}" prompt = messages[-1]["content"] timestamp = int(time.time()) @@ -43,7 +43,7 @@ class ChatForAi(AsyncGeneratorProvider): } async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response: response.raise_for_status() - async for chunk in response.content.iter_any(): + async for chunk in response.iter_content(): yield chunk.decode() @classmethod diff --git a/g4f/Provider/ChatgptDuo.py b/g4f/Provider/ChatgptDuo.py index 07f4c16c..abed8a3c 100644 --- a/g4f/Provider/ChatgptDuo.py +++ b/g4f/Provider/ChatgptDuo.py @@ -1,6 +1,6 @@ from __future__ import annotations -from g4f.requests import AsyncSession +from curl_cffi.requests import AsyncSession from .base_provider import AsyncProvider, format_prompt @@ -23,17 +23,17 @@ class ChatgptDuo(AsyncProvider): "search": prompt, "purpose": "ask", } - async with session.post(f"{cls.url}/", data=data) as response: - response.raise_for_status() - data = await response.json() + response = await session.post(f"{cls.url}/", data=data) + response.raise_for_status() + data = response.json() - cls._sources = [{ - "title": source["title"], - "url": source["link"], - "snippet": source["snippet"] - } for source in data["results"]] + cls._sources = [{ + "title": source["title"], + "url": source["link"], + "snippet": source["snippet"] + } for source in data["results"]] - return data["answer"] + return data["answer"] @classmethod def get_sources(cls): diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py index 534b69a5..092e1bb6 100644 --- a/g4f/Provider/FreeGpt.py +++ b/g4f/Provider/FreeGpt.py @@ -3,7 +3,7 @@ from __future__ import annotations import time, hashlib, random from ..typing import AsyncGenerator -from g4f.requests import AsyncSession +from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider domains = [ @@ -23,7 +23,7 @@ class FreeGpt(AsyncGeneratorProvider): messages: list[dict[str, str]], **kwargs ) -> AsyncGenerator: - async 
with AsyncSession(impersonate="chrome107") as session: + async with StreamSession(impersonate="chrome107") as session: prompt = messages[-1]["content"] timestamp = int(time.time()) data = { @@ -35,7 +35,7 @@ class FreeGpt(AsyncGeneratorProvider): url = random.choice(domains) async with session.post(f"{url}/api/generate", json=data) as response: response.raise_for_status() - async for chunk in response.content.iter_any(): + async for chunk in response.iter_content(): yield chunk.decode() @classmethod diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py index 2187eb78..3c8b32dd 100644 --- a/g4f/Provider/Ylokh.py +++ b/g4f/Provider/Ylokh.py @@ -2,7 +2,7 @@ from __future__ import annotations import json -from ..requests import AsyncSession +from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider from ..typing import AsyncGenerator @@ -37,18 +37,19 @@ class Ylokh(AsyncGeneratorProvider): "stream": stream, **kwargs } - async with AsyncSession( - headers=headers + async with StreamSession( + headers=headers, + proxies={"https": proxy} ) as session: - async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response: + async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response: response.raise_for_status() if stream: - async for line in response.content: + async for line in response.iter_lines(): line = line.decode() if line.startswith("data: "): if line.startswith("data: [DONE]"): break - line = json.loads(line[6:-1]) + line = json.loads(line[6:]) content = line["choices"][0]["delta"].get("content") if content: yield content diff --git a/g4f/models.py b/g4f/models.py index cca9e850..6b27645a 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -8,16 +8,19 @@ from .Provider import ( PerplexityAi, ChatgptDuo, ChatgptAi, + ChatForAi, ChatBase, AItianhu, Wewordle, Yqcloud, Myshell, + FreeGpt, Vercel, DeepAi, Aichat, Aivvm, GptGo, + Ylokh, Bard, Aibn, Bing, @@ -42,7 
+45,7 @@ default = Model( Yqcloud, # Answers short questions in chinese ChatBase, # Don't want to answer creatively ChatgptDuo, # Include search results - DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, + DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh, ]) ) @@ -51,7 +54,7 @@ gpt_35_turbo = Model( name = 'gpt-3.5-turbo', base_provider = 'openai', best_provider = RetryProvider([ - DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, + DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh, ]) ) @@ -59,7 +62,7 @@ gpt_4 = Model( name = 'gpt-4', base_provider = 'openai', best_provider = RetryProvider([ - Myshell, AItianhuSpace, + Myshell, Ylokh, ]) ) diff --git a/g4f/requests.py b/g4f/requests.py index 367bafa0..78acb9de 100644 --- a/g4f/requests.py +++ b/g4f/requests.py @@ -1,25 +1,24 @@ from __future__ import annotations -import json, sys, asyncio +import warnings, json, asyncio + from functools import partialmethod +from asyncio import Future, Queue +from typing import AsyncGenerator -from aiohttp import StreamReader -from aiohttp.base_protocol import BaseProtocol - -from curl_cffi.requests import AsyncSession as BaseSession -from curl_cffi.requests import Response +from curl_cffi.requests import AsyncSession, Response import curl_cffi -is_newer_0_5_8 = hasattr(BaseSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl") +is_newer_0_5_8 = hasattr(AsyncSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl") is_newer_0_5_9 = hasattr(curl_cffi.AsyncCurl, "remove_handle") -is_newer_0_5_10 = hasattr(BaseSession, "release_curl") +is_newer_0_5_10 = hasattr(AsyncSession, "release_curl") class StreamResponse: - def __init__(self, inner: Response, content: StreamReader, request): + def 
__init__(self, inner: Response, queue: Queue): self.inner = inner - self.content = content - self.request = request + self.queue = queue + self.request = inner.request self.status_code = inner.status_code self.reason = inner.reason self.ok = inner.ok @@ -27,7 +26,7 @@ class StreamResponse: self.cookies = inner.cookies async def text(self) -> str: - content = await self.content.read() + content = await self.read() return content.decode() def raise_for_status(self): @@ -35,36 +34,74 @@ class StreamResponse: raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}") async def json(self, **kwargs): - return json.loads(await self.content.read(), **kwargs) + return json.loads(await self.read(), **kwargs) + + async def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None) -> AsyncGenerator[bytes]: + """ + Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/ + which is under the License: Apache 2.0 + """ + pending = None + + async for chunk in self.iter_content( + chunk_size=chunk_size, decode_unicode=decode_unicode + ): + if pending is not None: + chunk = pending + chunk + if delimiter: + lines = chunk.split(delimiter) + else: + lines = chunk.splitlines() + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + pending = lines.pop() + else: + pending = None + + for line in lines: + yield line + + if pending is not None: + yield pending + + async def iter_content(self, chunk_size=None, decode_unicode=False) -> AsyncGenerator[bytes]: + if chunk_size: + warnings.warn("chunk_size is ignored, there is no way to tell curl that.") + if decode_unicode: + raise NotImplementedError() + while True: + chunk = await self.queue.get() + if chunk is None: + return + yield chunk + + async def read(self) -> bytes: + return b"".join([chunk async for chunk in self.iter_content()]) class StreamRequest: def __init__(self, session: AsyncSession, method: str, url: str, **kwargs): self.session = session self.loop = session.loop if session.loop else
asyncio.get_running_loop() - self.content = StreamReader( - BaseProtocol(session.loop), - sys.maxsize, - loop=session.loop - ) + self.queue = Queue() self.method = method self.url = url - if "proxy" in kwargs: - proxy = kwargs.pop("proxy") - if proxy: - kwargs["proxies"] = {"http": proxy, "https": proxy} self.options = kwargs + self.handle = None - def on_content(self, data): + def _on_content(self, data): if not self.enter.done(): self.enter.set_result(None) - self.content.feed_data(data) + self.queue.put_nowait(data) - def on_done(self, task): + def _on_done(self, task: Future): if not self.enter.done(): self.enter.set_result(None) - self.content.feed_eof() + self.queue.put_nowait(None) - async def __aenter__(self) -> StreamResponse: + self.loop.call_soon(self.session.release_curl, self.curl) + + async def fetch(self) -> StreamResponse: + if self.handle: + raise RuntimeError("Request already started") self.curl = await self.session.pop_curl() self.enter = self.loop.create_future() if is_newer_0_5_10: @@ -72,7 +109,7 @@ class StreamRequest: self.curl, self.method, self.url, - content_callback=self.on_content, + content_callback=self._on_content, **self.options ) else: @@ -80,7 +117,7 @@ class StreamRequest: self.curl, self.method, self.url, - content_callback=self.on_content, + content_callback=self._on_content, **self.options ) if is_newer_0_5_9: @@ -88,8 +125,12 @@ class StreamRequest: else: await self.session.acurl.add_handle(self.curl, False) self.handle = self.session.acurl._curl2future[self.curl] - self.handle.add_done_callback(self.on_done) + self.handle.add_done_callback(self._on_done) + # Wait for headers await self.enter + # Raise exceptions + if self.handle.done(): + self.handle.result() if is_newer_0_5_8: response = self.session._parse_response(self.curl, _, header_buffer) response.request = request @@ -97,18 +138,16 @@ class StreamRequest: response = self.session._parse_response(self.curl, request, _, header_buffer) return StreamResponse( response, - 
self.content, - request + self.queue ) - - async def __aexit__(self, exc_type, exc, tb): - if not self.handle.done(): - self.session.acurl.set_result(self.curl) - self.curl.clean_after_perform() - self.curl.reset() - self.session.push_curl(self.curl) + + async def __aenter__(self) -> StreamResponse: + return await self.fetch() -class AsyncSession(BaseSession): + async def __aexit__(self, *args): + self.session.release_curl(self.curl) + +class StreamSession(AsyncSession): def request( self, method: str, From 2dbeb5460827d7e3792b4e6f5871463a430fa525 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 2 Oct 2023 02:06:33 +0200 Subject: [PATCH 6/7] Remove fake_useragent module --- g4f/Provider/PerplexityAi.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py index 612e2d98..c7e58ad3 100644 --- a/g4f/Provider/PerplexityAi.py +++ b/g4f/Provider/PerplexityAi.py @@ -4,7 +4,6 @@ import json import time import base64 from curl_cffi.requests import AsyncSession -from fake_useragent import UserAgent from .base_provider import AsyncProvider, format_prompt, get_cookies @@ -25,7 +24,6 @@ class PerplexityAi(AsyncProvider): ) -> str: url = cls.url + "/socket.io/?EIO=4&transport=polling" headers = { - "User-Agent": UserAgent().random, "Referer": f"{cls.url}/" } async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session: @@ -44,8 +42,6 @@ class PerplexityAi(AsyncProvider): response = await session.get(url, params={"t": timestamp(), "sid": sid}) response.raise_for_status() - print(session.cookies) - data = '40{"jwt":"anonymous-ask-user"}' response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data) response.raise_for_status() From d116f043227f789d6582bc12f69ea4ee7a9330ea Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 2 Oct 2023 06:47:07 +0200 Subject: [PATCH 7/7] Fix: There is no current event loop in thread --- g4f/Provider/AItianhu.py | 36 
++++++++++++++++++++++++----------- g4f/Provider/AItianhuSpace.py | 3 ++- g4f/Provider/Aibn.py | 4 ++-- g4f/Provider/helper.py | 6 +++++- testing/test_async.py | 19 +++++++----------- 5 files changed, 41 insertions(+), 27 deletions(-) diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py index 0f01e536..42631d7e 100644 --- a/g4f/Provider/AItianhu.py +++ b/g4f/Provider/AItianhu.py @@ -1,24 +1,25 @@ from __future__ import annotations import json -from curl_cffi.requests import AsyncSession -from .base_provider import AsyncProvider, format_prompt +from ..typing import AsyncGenerator +from ..requests import StreamSession +from .base_provider import AsyncGeneratorProvider, format_prompt -class AItianhu(AsyncProvider): +class AItianhu(AsyncGeneratorProvider): url = "https://www.aitianhu.com" working = True supports_gpt_35_turbo = True @classmethod - async def create_async( + async def create_async_generator( cls, model: str, messages: list[dict[str, str]], proxy: str = None, **kwargs - ) -> str: + ) -> AsyncGenerator: data = { "prompt": format_prompt(messages), "options": {}, @@ -27,12 +28,25 @@ class AItianhu(AsyncProvider): "top_p": 1, **kwargs } - async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: - response = await session.post(cls.url + "/api/chat-process", json=data) - response.raise_for_status() - line = response.text.splitlines()[-1] - line = json.loads(line) - return line["text"] + headers = { + "Authority": cls.url, + "Accept": "application/json, text/plain, */*", + "Origin": cls.url, + "Referer": f"{cls.url}/" + } + async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: + async with session.post(f"{cls.url}/api/chat-process", json=data) as response: + response.raise_for_status() + async for line in response.iter_lines(): + if b"platform's risk control" in line: + raise RuntimeError("Platform's Risk Control") + line = json.loads(line) + 
if "detail" in line: + content = line["detail"]["choices"][0]["delta"].get("content") + if content: + yield content + else: + raise RuntimeError(f"Response: {line}") @classmethod diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py index 8805b1c0..a6bf9a58 100644 --- a/g4f/Provider/AItianhuSpace.py +++ b/g4f/Provider/AItianhuSpace.py @@ -2,6 +2,7 @@ from __future__ import annotations import random, json +from ..typing import AsyncGenerator from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider, format_prompt @@ -22,7 +23,7 @@ class AItianhuSpace(AsyncGeneratorProvider): messages: list[dict[str, str]], stream: bool = True, **kwargs - ) -> str: + ) -> AsyncGenerator: if not model: model = "gpt-3.5-turbo" elif not model in domains: diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/Aibn.py index fe278f84..df56189b 100644 --- a/g4f/Provider/Aibn.py +++ b/g4f/Provider/Aibn.py @@ -4,7 +4,7 @@ import time import hashlib from ..typing import AsyncGenerator -from ..requests import StreamRequest +from ..requests import StreamSession from .base_provider import AsyncGeneratorProvider @@ -20,7 +20,7 @@ class Aibn(AsyncGeneratorProvider): messages: list[dict[str, str]], **kwargs ) -> AsyncGenerator: - async with StreamRequest(impersonate="chrome107") as session: + async with StreamSession(impersonate="chrome107") as session: timestamp = int(time.time()) data = { "messages": messages, diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py index 544c5a76..234cdaa1 100644 --- a/g4f/Provider/helper.py +++ b/g4f/Provider/helper.py @@ -21,7 +21,11 @@ def get_event_loop() -> AbstractEventLoop: try: asyncio.get_running_loop() except RuntimeError: - return asyncio.get_event_loop() + try: + return asyncio.get_event_loop() + except RuntimeError: + asyncio.set_event_loop(asyncio.new_event_loop()) + return asyncio.get_event_loop() try: event_loop = asyncio.get_event_loop() if not hasattr(event_loop.__class__, "_nest_patched"): diff 
--git a/testing/test_async.py b/testing/test_async.py index bef2c75f..76b109b1 100644 --- a/testing/test_async.py +++ b/testing/test_async.py @@ -5,31 +5,26 @@ import asyncio sys.path.append(str(Path(__file__).parent.parent)) import g4f -from g4f.Provider import AsyncProvider from testing.test_providers import get_providers from testing.log_time import log_time_async async def create_async(provider): - model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name try: response = await log_time_async( provider.create_async, - model=model, - messages=[{"role": "user", "content": "Hello Assistant!"}] + model=g4f.models.default.name, + messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}] ) print(f"{provider.__name__}:", response) except Exception as e: - return f"{provider.__name__}: {e.__class__.__name__}: {e}" + print(f"{provider.__name__}: {e.__class__.__name__}: {e}") async def run_async(): responses: list = [ - create_async(_provider) - for _provider in get_providers() - if _provider.working and issubclass(_provider, AsyncProvider) + create_async(provider) + for provider in get_providers() + if provider.working ] - responses = await asyncio.gather(*responses) - for error in responses: - if error: - print(error) + await asyncio.gather(*responses) print("Total:", asyncio.run(log_time_async(run_async))) \ No newline at end of file