Merge pull request #1241 from hlohaus/pi

Tekky · 2023-11-13 09:57:07 +00:00 · committed by GitHub · commit ed008dcfe6
5 changed files with 162 additions and 5 deletions

@@ -7,6 +7,8 @@ sys.path.append(str(Path(__file__).parent.parent.parent))

import g4f

g4f.debug.logging = True

def read_code(text):
    if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
        return match.group("code")

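For reference, read_code extracts the first fenced code block from a model response. A quick sketch of its behavior (the example strings are illustrative, not from this diff):

    # Illustrative only: exercising read_code on a typical model response.
    response = "Here is the fix:\n```python\nprint('hello')\n```"
    assert read_code(response) == "print('hello')"

    # No fenced block means the function falls through and returns None.
    assert read_code("plain text, no code") is None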
g4f/Provider/Berlin.py (new file, 78 lines)

@@ -0,0 +1,78 @@
from __future__ import annotations

import secrets
import uuid
import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class Berlin(AsyncGeneratorProvider):
    url = "https://ai.berlin4h.top"
    working = True
    supports_gpt_35_turbo = True
    _token = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "ai.berlin4h.top",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            if not cls._token:
                # Log in once with the site's shared demo account and cache the
                # token on the class; the account name is Chinese for
                # "free use of the GPT-3.5 model".
                data = {
                    "account": '免费使用GPT3.5模型@163.com',
                    "password": '659e945c2d004686bad1a75b708c962f'
                }
                async with session.post(f"{cls.url}/api/login", json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    cls._token = (await response.json())["data"]["token"]
            headers = {
                "token": cls._token
            }
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "parentMessageId": str(uuid.uuid4()),
                "options": {
                    "model": model,
                    "temperature": 0,
                    "presence_penalty": 0,
                    "frequency_penalty": 0,
                    "max_tokens": 1888,
                    **kwargs
                },
            }
            async with session.post(f"{cls.url}/api/chat/completions", json=data, proxy=proxy, headers=headers) as response:
                response.raise_for_status()
                # The endpoint streams one JSON object per line.
                async for chunk in response.content:
                    if chunk.strip():
                        try:
                            yield json.loads(chunk)["content"]
                        except (json.JSONDecodeError, KeyError) as e:
                            raise RuntimeError(f"Response: {chunk.decode()}") from e

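A minimal way to smoke-test the new provider is through g4f's usual entry point; this sketch assumes the standard g4f.ChatCompletion.create call used elsewhere in the project, which is not part of this diff:

    # Sketch, not part of the diff: drive Berlin through the normal g4f API.
    import g4f
    from g4f.Provider import Berlin

    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
        provider=Berlin,
        stream=True,  # the provider is an async generator, so chunks stream in
    )
    for chunk in response:
        print(chunk, end="")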
g4f/Provider/Koala.py (new file, 71 lines)

@@ -0,0 +1,71 @@
from __future__ import annotations

import random
import string
import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider


class Koala(AsyncGeneratorProvider):
    url = "https://koala.sh"
    supports_gpt_35_turbo = True
    supports_message_history = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/chat",
            "Content-Type": "application/json",
            "Flag-Real-Time-Data": "false",
            "Visitor-ID": random_string(),  # fresh pseudo-visitor on every call
            "Origin": cls.url,
            "Alt-Used": "koala.sh",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            # The API takes the latest input plus parallel user/assistant histories.
            data = {
                "input": messages[-1]["content"],
                "inputHistory": [
                    message["content"]
                    for message in messages
                    if message["role"] == "user"
                ],
                "outputHistory": [
                    message["content"]
                    for message in messages
                    if message["role"] == "assistant"
                ],
                "model": model,
            }
            async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Server-sent events: payload lines are prefixed with "data: ".
                async for chunk in response.content:
                    if chunk.startswith(b"data: "):
                        yield json.loads(chunk[6:])


def random_string(length: int = 20) -> str:
    return ''.join(random.choice(
        string.ascii_letters + string.digits
    ) for _ in range(length))

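Worth noting how Koala flattens the conversation: the latest message becomes "input", while user and assistant turns are split into parallel history arrays. A small reproduction of the payload built above:

    # Illustration: the request body Koala builds for a short conversation.
    messages = [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello! How can I help?"},
        {"role": "user", "content": "Tell me a joke"},
    ]
    data = {
        "input": messages[-1]["content"],
        "inputHistory": [m["content"] for m in messages if m["role"] == "user"],
        "outputHistory": [m["content"] for m in messages if m["role"] == "assistant"],
        "model": "gpt-3.5-turbo",
    }
    # input         -> "Tell me a joke"
    # inputHistory  -> ["Hi", "Tell me a joke"]   (the latest input repeats here)
    # outputHistory -> ["Hello! How can I help?"]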
g4f/Provider/__init__.py

@@ -6,6 +6,7 @@ from .Aichat import Aichat
from .Ails import Ails
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
+from .Berlin import Berlin
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi

@@ -26,6 +27,7 @@ from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
from .Hashnode import Hashnode
+from .Koala import Koala
from .Liaobots import Liaobots
from .Llama2 import Llama2
from .MyShell import MyShell

@@ -59,6 +61,7 @@ class ProviderUtils:
        'AsyncProvider': AsyncProvider,
        'Bard': Bard,
        'BaseProvider': BaseProvider,
+        'Berlin': Berlin,
        'Bing': Bing,
        'ChatBase': ChatBase,
        'ChatForAi': ChatForAi,

@@ -89,6 +92,7 @@ class ProviderUtils:
        'H2o': H2o,
        'HuggingChat': HuggingChat,
        'Komo': Komo,
+        'Koala': Koala,
        'Liaobots': Liaobots,
        'Llama2': Llama2,
        'Lockchat': Lockchat,

@@ -135,6 +139,7 @@ __all__ = [
    'AItianhuSpace',
    'Aivvm',
    'Bard',
+    'Berlin',
    'Bing',
    'ChatBase',
    'ChatForAi',

@@ -162,6 +167,7 @@ __all__ = [
    'Hashnode',
    'H2o',
    'HuggingChat',
+    'Koala',
    'Liaobots',
    'Llama2',
    'Lockchat',

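With these registrations, both providers become reachable by name. A lookup sketch, assuming the dict shown in this hunk is bound to ProviderUtils.convert (the attribute name sits outside the visible context):

    # Assumes the mapping above is exposed as ProviderUtils.convert.
    from g4f.Provider import ProviderUtils

    provider = ProviderUtils.convert["Berlin"]
    print(provider.url)  # https://ai.berlin4h.top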
g4f/models.py

@@ -5,7 +5,6 @@ from .Provider import BaseProvider, RetryProvider
from .Provider import (
    GptForLove,
    ChatgptAi,
-    GptChatly,
    DeepInfra,
    ChatgptX,
    ChatBase,

@@ -13,10 +12,12 @@ from .Provider import (
    FakeGpt,
    FreeGpt,
    NoowAi,
+    Berlin,
    Llama2,
    Vercel,
    Aichat,
    GPTalk,
+    Koala,
    AiAsk,
    GptGo,
    Phind,

@@ -51,10 +52,9 @@ gpt_35_long = Model(
    name = 'gpt-3.5-turbo',
    base_provider = 'openai',
    best_provider = RetryProvider([
-        AiAsk, Aichat, FreeGpt, You,
-        GptChatly, GptForLove,
-        NoowAi, GeekGpt, Phind,
-        FakeGpt
+        FreeGpt, You,
+        GeekGpt, FakeGpt,
+        Berlin, Koala
    ])
)
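The net effect of the gpt_35_long change is that Berlin and Koala join the fallback pool while the dropped providers leave it; RetryProvider walks its list until one provider returns a response. A rough sketch of that pattern (illustrative, not g4f's exact implementation):

    # Illustrative retry pattern, not g4f's actual RetryProvider code.
    import random

    def create_with_retry(providers: list, model: str, messages: list, shuffle: bool = True):
        if shuffle:
            providers = random.sample(providers, len(providers))  # spread load
        last_exc = None
        for provider in providers:
            try:
                # provider.create_completion is a stand-in for the real call
                return provider.create_completion(model, messages)
            except Exception as exc:
                last_exc = exc  # remember the failure and move on
        raise last_exc or RuntimeError("all providers failed")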