Merge pull request #941 from hlohaus/myshell

AItianhuSpace provider with GPT-4 added
Tekky 2023-09-26 00:20:27 +01:00 committed by GitHub
commit fd5d28cf7b
7 changed files with 206 additions and 20 deletions

g4f/Provider/AItianhuSpace.py Normal file

@@ -0,0 +1,73 @@
from __future__ import annotations
import random, json
from typing import AsyncGenerator
from g4f.requests import AsyncSession, StreamRequest
from .base_provider import AsyncGeneratorProvider, format_prompt
domains = {
"gpt-3.5-turbo": ".aitianhu.space",
"gpt-4": ".aitianhu.website",
}
class AItianhuSpace(AsyncGeneratorProvider):
url = "https://chat3.aiyunos.top/"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool = True,
**kwargs
    ) -> AsyncGenerator[str, None]:
if not model:
model = "gpt-3.5-turbo"
        elif model not in domains:
            raise ValueError(f"Model is not supported: {model}")
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
domain = domains[model]
url = f'https://{rand}{domain}/api/chat-process'
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}
async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
data = {
"prompt": format_prompt(messages),
"options": {},
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature": 0.8,
"top_p": 1,
**kwargs
}
async with StreamRequest(session, "POST", url, json=data) as response:
response.raise_for_status()
async for line in response.content:
line = json.loads(line.rstrip())
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
yield content
elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
raise RuntimeError("Rate limit for GPT 4 reached")
else:
raise RuntimeError("Response: {line}")
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("temperature", "float"),
("top_p", "int"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"

g4f/Provider/Aivvm.py

@@ -60,9 +60,10 @@ class Aivvm(BaseProvider):
response = requests.post(
"https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
response.raise_for_status()
-        for line in response.iter_content(chunk_size=1048):
-            yield line.decode('utf-8')
+        for chunk in response.iter_content(chunk_size=None):
+            yield chunk.decode('utf-8')
@classmethod
@property
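The move from a fixed chunk_size to chunk_size=None plausibly addresses decode errors: a fixed byte window can split a multi-byte UTF-8 character across two chunks. A standalone illustration (not from the commit):

data = "héllo".encode("utf-8")  # b'h\xc3\xa9llo' - the "é" occupies two bytes
try:
    data[:2].decode("utf-8")    # a 2-byte window cuts "é" in half
except UnicodeDecodeError as err:
    print("fixed-size split failed:", err)
print(data.decode("utf-8"))     # decoding the complete payload works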

g4f/Provider/Vercel.py

@@ -18,7 +18,13 @@ class Vercel(BaseProvider):
def create_completion(
model: str,
messages: list[dict[str, str]],
-        stream: bool, **kwargs ) -> CreateResult:
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
elif model not in model_info:
raise ValueError(f"Model are not supported: {model}")
headers = {
'authority' : 'sdk.vercel.ai',
@@ -26,7 +32,7 @@ class Vercel(BaseProvider):
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
-        'custom-encoding' : AntiBotToken(),
+        'custom-encoding' : get_anti_bot_token(),
'origin' : 'https://sdk.vercel.ai',
'pragma' : 'no-cache',
'referer' : 'https://sdk.vercel.ai/',
@@ -48,22 +54,20 @@ class Vercel(BaseProvider):
'playgroundId': str(uuid.uuid4()),
'chatIndex' : 0} | model_info[model]['default_params']
-        server_error = True
-        retries      = 0
         max_retries = kwargs.get('max_retries', 20)
-        while server_error and not retries > max_retries:
+        for i in range(max_retries):
             response = requests.post('https://sdk.vercel.ai/api/generate',
                 headers=headers, json=json_data, stream=True)
+            try:
+                response.raise_for_status()
+            except requests.HTTPError:
+                continue
+            for token in response.iter_content(chunk_size=8):
+                yield token.decode()
+            break
-            for token in response.iter_content(chunk_size=2046):
-                if token != b'Internal Server Error':
-                    server_error = False
-                    yield (token.decode())
-            retries += 1
-def AntiBotToken() -> str:
+def get_anti_bot_token() -> str:
headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
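A hedged sketch of calling the reworked provider; the positional signature follows the hunk above, and max_retries is the keyword the new loop reads from kwargs:

from g4f.Provider import Vercel

messages = [{"role": "user", "content": "Ping"}]
# The rewritten loop retries up to max_retries times on HTTP errors,
# then streams decoded tokens and stops after the first good response.
for token in Vercel.create_completion("gpt-3.5-turbo", messages, True, max_retries=5):
    print(token, end="", flush=True)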

g4f/Provider/__init__.py

@@ -4,6 +4,7 @@ from .Aichat import Aichat
from .Ails import Ails
from .AiService import AiService
from .AItianhu import AItianhu
+from .AItianhuSpace import AItianhuSpace
from .Aivvm import Aivvm
from .Bard import Bard
from .Bing import Bing
@@ -52,6 +53,7 @@ __all__ = [
'Ails',
'AiService',
'AItianhu',
+    'AItianhuSpace',
'Aivvm',
'Bard',
'Bing',

g4f/Provider/base_provider.py

@@ -1,7 +1,9 @@
from __future__ import annotations
import asyncio
-from asyncio import SelectorEventLoop
+import functools
+from asyncio import SelectorEventLoop, AbstractEventLoop
+from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod
import browser_cookie3
@@ -27,6 +29,31 @@ class BaseProvider(ABC):
) -> CreateResult:
raise NotImplementedError()
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        *,
+        loop: AbstractEventLoop = None,
+        executor: ThreadPoolExecutor = None,
+        **kwargs
+    ) -> str:
+        if not loop:
+            loop = asyncio.get_event_loop()
+        partial_func = functools.partial(
+            cls.create_completion,
+            model,
+            messages,
+            False,
+            **kwargs
+        )
+        response = await loop.run_in_executor(
+            executor,
+            partial_func
+        )
+        return "".join(response)
@classmethod
@property
@@ -127,7 +154,7 @@ def create_event_loop() -> SelectorEventLoop:
except RuntimeError:
return SelectorEventLoop()
raise RuntimeError(
'Use "create_async" instead of "create" function in a async loop.')
'Use "create_async" instead of "create" function in a running event loop.')
_cookies = {}
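A short usage sketch for the new create_async helper; Aichat stands in here for any synchronous BaseProvider subclass and is only an assumption:

import asyncio
from g4f.Provider import Aichat

async def main():
    # create_async runs the blocking create_completion in an executor
    # thread and joins the streamed chunks into a single string.
    answer = await Aichat.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(answer)

asyncio.run(main())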

g4f/models.py

@@ -17,6 +17,7 @@ from .Provider import (
Wewordle,
Yqcloud,
AItianhu,
+    AItianhuSpace,
Aichat,
Myshell,
)
@@ -38,7 +39,7 @@ default = Model(
Wewordle, # Responds with markdown
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell,
])
)
@@ -47,7 +48,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell,
])
)
@@ -55,7 +56,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
-        Aivvm, Myshell
+        Aivvm, Myshell, AItianhuSpace,
])
)
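With AItianhuSpace in these retry lists, a plain g4f call can now fall back to it. A sketch against the top-level API, assuming the usual ChatCompletion entry point:

import g4f

# "gpt-4" resolves to the gpt_4 Model above; RetryProvider falls back
# across Aivvm, Myshell and AItianhuSpace until one of them succeeds.
for chunk in g4f.ChatCompletion.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="", flush=True)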

g4f/requests.py Normal file

@@ -0,0 +1,78 @@
from __future__ import annotations
import json, sys
from aiohttp import StreamReader
from aiohttp.base_protocol import BaseProtocol
from curl_cffi.requests import AsyncSession
from curl_cffi.requests.cookies import Request
from curl_cffi.requests.cookies import Response
class StreamResponse:
    """Wraps the curl_cffi response and exposes the streamed body through an aiohttp StreamReader."""
def __init__(self, inner: Response, content: StreamReader, request: Request):
self.inner = inner
self.content = content
self.request = request
self.status_code = inner.status_code
self.reason = inner.reason
self.ok = inner.ok
async def text(self) -> str:
content = await self.content.read()
return content.decode()
def raise_for_status(self):
if not self.ok:
raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}")
async def json(self, **kwargs):
return json.loads(await self.content.read(), **kwargs)
class StreamRequest:
    """Async context manager that performs a curl_cffi request and feeds the body into a StreamReader as it arrives."""
def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
self.session = session
self.loop = session.loop
self.content = StreamReader(
BaseProtocol(session.loop),
sys.maxsize,
loop=session.loop
)
self.method = method
self.url = url
self.options = kwargs
def on_content(self, data):
if not self.enter.done():
self.enter.set_result(None)
self.content.feed_data(data)
def on_done(self, task):
self.content.feed_eof()
async def __aenter__(self) -> StreamResponse:
self.curl = await self.session.pop_curl()
self.enter = self.session.loop.create_future()
request, _, header_buffer = self.session._set_curl_options(
self.curl,
self.method,
self.url,
content_callback=self.on_content,
**self.options
)
handle = self.session.acurl.add_handle(self.curl)
self.handle = self.session.loop.create_task(handle)
self.handle.add_done_callback(self.on_done)
await self.enter
return StreamResponse(
self.session._parse_response(self.curl, request, _, header_buffer),
self.content,
request
)
async def __aexit__(self, exc_type, exc, tb):
await self.handle
self.curl.clean_after_perform()
self.curl.reset()
self.session.push_curl(self.curl)
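Finally, a self-contained smoke test for the streaming wrapper above; not part of the commit, and httpbin.org is only a stand-in endpoint:

import asyncio
from g4f.requests import AsyncSession, StreamRequest

async def main():
    async with AsyncSession(impersonate="chrome107") as session:
        async with StreamRequest(session, "GET", "https://httpbin.org/stream/3") as response:
            response.raise_for_status()
            # response.content is the aiohttp StreamReader fed by on_content
            async for line in response.content:
                print(line.rstrip().decode())

asyncio.run(main())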