mirror of https://github.com/xtekky/gpt4free.git
Added AItianhuSpace provider with GPT-4 support
Reduced chunk size for better text completion
parent 348670fe35
commit 72c3ff7a25
@@ -18,7 +18,13 @@ class Vercel(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs ) -> CreateResult:
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        elif model not in model_info:
+            raise ValueError(f"Model is not supported: {model}")
 
         headers = {
             'authority' : 'sdk.vercel.ai',
@@ -26,7 +32,7 @@ class Vercel(BaseProvider):
             'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
             'cache-control'   : 'no-cache',
             'content-type'    : 'application/json',
-            'custom-encoding' : AntiBotToken(),
+            'custom-encoding' : get_anti_bot_token(),
             'origin'          : 'https://sdk.vercel.ai',
             'pragma'          : 'no-cache',
             'referer'         : 'https://sdk.vercel.ai/',
@@ -48,22 +54,20 @@ class Vercel(BaseProvider):
             'playgroundId': str(uuid.uuid4()),
             'chatIndex'   : 0} | model_info[model]['default_params']
 
-        server_error = True
-        retries = 0
         max_retries = kwargs.get('max_retries', 20)
-
-        while server_error and not retries > max_retries:
+        for i in range(max_retries):
             response = requests.post('https://sdk.vercel.ai/api/generate',
                 headers=headers, json=json_data, stream=True)
+            try:
+                response.raise_for_status()
+            except:
+                continue
+            for token in response.iter_content(chunk_size=8):
+                yield token.decode()
+            break
 
-            for token in response.iter_content(chunk_size=2046):
-                if token != b'Internal Server Error':
-                    server_error = False
-                    yield (token.decode())
-
-            retries += 1
 
-    def AntiBotToken() -> str:
+    def get_anti_bot_token() -> str:
         headers = {
             'authority' : 'sdk.vercel.ai',
             'accept'    : '*/*',
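The rewritten loop drops the server_error flag in favor of retry-then-break: each attempt is validated with raise_for_status(), a failed attempt falls through to the next iteration, and the first successful response is streamed in 8-byte chunks (down from 2046) so partial tokens reach the caller sooner. A minimal standalone sketch of the same pattern; the URL, payload, and function name are placeholders, not the provider's real ones:

import requests

def stream_with_retries(url: str, json_data: dict, max_retries: int = 20):
    # Retry until one request succeeds, then stream that response and stop.
    for _ in range(max_retries):
        response = requests.post(url, json=json_data, stream=True)
        try:
            response.raise_for_status()  # non-2xx status: try again
        except requests.HTTPError:
            continue
        # A small chunk_size lowers per-token latency; note that decode()
        # can fail when an 8-byte chunk splits a multi-byte UTF-8 character,
        # so this sketch ignores undecodable bytes.
        for chunk in response.iter_content(chunk_size=8):
            yield chunk.decode(errors="ignore")
        break  # streamed successfully, no further retries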
@ -1,7 +1,9 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from asyncio import SelectorEventLoop
|
||||
import functools
|
||||
from asyncio import SelectorEventLoop, AbstractEventLoop
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
import browser_cookie3
|
||||
|
@@ -27,6 +29,31 @@ class BaseProvider(ABC):
     ) -> CreateResult:
         raise NotImplementedError()
 
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        *,
+        loop: AbstractEventLoop = None,
+        executor: ThreadPoolExecutor = None,
+        **kwargs
+    ) -> str:
+        if not loop:
+            loop = asyncio.get_event_loop()
+
+        partial_func = functools.partial(
+            cls.create_completion,
+            model,
+            messages,
+            False,
+            **kwargs
+        )
+        response = await loop.run_in_executor(
+            executor,
+            partial_func
+        )
+        return "".join(response)
+
     @classmethod
     @property
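create_async gives every synchronous provider an awaitable entry point: it binds the positional arguments with functools.partial (stream=False), hands the blocking create_completion call to a thread pool via run_in_executor, and joins the yielded chunks into one string. A minimal usage sketch, assuming the provider classes are exposed as g4f.Provider.*:

import asyncio
import g4f

async def main():
    # Runs the blocking create_completion in the default executor
    # and returns the joined chunks as a single string.
    answer = await g4f.Provider.Vercel.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(answer)

asyncio.run(main())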
@@ -127,7 +154,7 @@ def create_event_loop() -> SelectorEventLoop:
     except RuntimeError:
         return SelectorEventLoop()
     raise RuntimeError(
-        'Use "create_async" instead of "create" function in a async loop.')
+        'Use "create_async" instead of "create" function in a running event loop.')
 
 
 _cookies = {}
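The reworded message matches what the guard actually detects: asyncio.get_running_loop() raises RuntimeError only when no loop is running, in which case a private SelectorEventLoop can be created; if a loop is already running, the blocking create path must not be used. Condensed, the guard reads:

import asyncio
from asyncio import SelectorEventLoop

def create_event_loop() -> SelectorEventLoop:
    try:
        asyncio.get_running_loop()   # raises RuntimeError when no loop runs
    except RuntimeError:
        return SelectorEventLoop()   # no loop: safe to create a private one
    raise RuntimeError(
        'Use "create_async" instead of "create" function in a running event loop.')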
@@ -17,6 +17,7 @@ from .Provider import (
     Wewordle,
     Yqcloud,
     AItianhu,
+    AItianhuSpace,
     Aichat,
     Myshell,
 )
@@ -38,7 +39,7 @@ default = Model(
         Wewordle, # Responds with markdown
         Yqcloud, # Answers short questions in chinese
         ChatBase, # Don't want to answer creatively
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell,
     ])
 )
@@ -47,7 +48,7 @@ gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell,
     ])
 )
@@ -55,7 +56,7 @@ gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        Aivvm, Myshell
+        Aivvm, Myshell, AItianhuSpace,
     ])
 )
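With AItianhuSpace registered, the gpt-4 RetryProvider now has three candidates to fall through. A usage sketch, assuming the package's top-level ChatCompletion API (g4f.ChatCompletion.create):

import g4f

# RetryProvider tries each listed provider until one returns a response;
# gpt_4 now falls back across Aivvm, Myshell, and AItianhuSpace.
response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_4,
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response)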