Fix: There is no current event loop in thread

This commit is contained in:
Heiner Lohaus 2023-10-02 06:47:07 +02:00
parent 2dbeb54608
commit d116f04322
5 changed files with 41 additions and 27 deletions

View File

@@ -1,24 +1,25 @@
from __future__ import annotations from __future__ import annotations
import json import json
from curl_cffi.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt from ..typing import AsyncGenerator
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt
class AItianhu(AsyncProvider): class AItianhu(AsyncGeneratorProvider):
url = "https://www.aitianhu.com" url = "https://www.aitianhu.com"
working = True working = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
@classmethod @classmethod
async def create_async( async def create_async_generator(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
**kwargs **kwargs
) -> str: ) -> AsyncGenerator:
data = { data = {
"prompt": format_prompt(messages), "prompt": format_prompt(messages),
"options": {}, "options": {},
@@ -27,12 +28,25 @@ class AItianhu(AsyncProvider):
"top_p": 1, "top_p": 1,
**kwargs **kwargs
} }
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: headers = {
response = await session.post(cls.url + "/api/chat-process", json=data) "Authority": cls.url,
response.raise_for_status() "Accept": "application/json, text/plain, */*",
line = response.text.splitlines()[-1] "Origin": cls.url,
line = json.loads(line) "Referer": f"{cls.url}/"
return line["text"] }
async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
response.raise_for_status()
async for line in response.iter_lines():
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
yield content
else:
raise RuntimeError(f"Response: {line}")
@classmethod @classmethod

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
import random, json import random, json
from ..typing import AsyncGenerator
from ..requests import StreamSession from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -22,7 +23,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
messages: list[dict[str, str]], messages: list[dict[str, str]],
stream: bool = True, stream: bool = True,
**kwargs **kwargs
) -> str: ) -> AsyncGenerator:
if not model: if not model:
model = "gpt-3.5-turbo" model = "gpt-3.5-turbo"
elif not model in domains: elif not model in domains:

View File

@@ -4,7 +4,7 @@ import time
import hashlib import hashlib
from ..typing import AsyncGenerator from ..typing import AsyncGenerator
from ..requests import StreamRequest from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider from .base_provider import AsyncGeneratorProvider
@@ -20,7 +20,7 @@ class Aibn(AsyncGeneratorProvider):
messages: list[dict[str, str]], messages: list[dict[str, str]],
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
async with StreamRequest(impersonate="chrome107") as session: async with StreamSession(impersonate="chrome107") as session:
timestamp = int(time.time()) timestamp = int(time.time())
data = { data = {
"messages": messages, "messages": messages,

View File

@@ -21,7 +21,11 @@ def get_event_loop() -> AbstractEventLoop:
try: try:
asyncio.get_running_loop() asyncio.get_running_loop()
except RuntimeError: except RuntimeError:
return asyncio.get_event_loop() try:
return asyncio.get_event_loop()
except RuntimeError:
asyncio.set_event_loop(asyncio.new_event_loop())
return asyncio.get_event_loop()
try: try:
event_loop = asyncio.get_event_loop() event_loop = asyncio.get_event_loop()
if not hasattr(event_loop.__class__, "_nest_patched"): if not hasattr(event_loop.__class__, "_nest_patched"):

View File

@@ -5,31 +5,26 @@ import asyncio
sys.path.append(str(Path(__file__).parent.parent)) sys.path.append(str(Path(__file__).parent.parent))
import g4f import g4f
from g4f.Provider import AsyncProvider
from testing.test_providers import get_providers from testing.test_providers import get_providers
from testing.log_time import log_time_async from testing.log_time import log_time_async
async def create_async(provider): async def create_async(provider):
model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
try: try:
response = await log_time_async( response = await log_time_async(
provider.create_async, provider.create_async,
model=model, model=g4f.models.default.name,
messages=[{"role": "user", "content": "Hello Assistant!"}] messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}]
) )
print(f"{provider.__name__}:", response) print(f"{provider.__name__}:", response)
except Exception as e: except Exception as e:
return f"{provider.__name__}: {e.__class__.__name__}: {e}" print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
async def run_async(): async def run_async():
responses: list = [ responses: list = [
create_async(_provider) create_async(provider)
for _provider in get_providers() for provider in get_providers()
if _provider.working and issubclass(_provider, AsyncProvider) if provider.working
] ]
responses = await asyncio.gather(*responses) await asyncio.gather(*responses)
for error in responses:
if error:
print(error)
print("Total:", asyncio.run(log_time_async(run_async))) print("Total:", asyncio.run(log_time_async(run_async)))