Add new providers: Aivvm, Vitalentum and Ylokh

This commit is contained in:
Heiner Lohaus 2023-09-12 00:47:03 +02:00
parent a392f4df78
commit d6382a2145
7 changed files with 251 additions and 25 deletions

78
g4f/Provider/Aivvm.py Normal file
View File

@ -0,0 +1,78 @@
from __future__ import annotations
from aiohttp import ClientSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
# Supported model ids mapped to the metadata payload the Aivvm API expects.
models = {
    model_id: {"id": model_id, "name": display_name}
    for model_id, display_name in [
        ("gpt-4", "GPT-4"),
        ("gpt-3.5-turbo", "GPT-3.5"),
        ("gpt-3.5-turbo-16k", "GPT-3.5-16k"),
    ]
}
class Aivvm(AsyncGeneratorProvider):
    """Async streaming provider for chat.aivvm.com."""
    url                   = "https://chat.aivvm.com"
    working               = True
    supports_gpt_35_turbo = True
    supports_gpt_4        = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Stream the assistant reply as decoded text chunks.

        Args:
            model: Model id; falls back to "gpt-3.5-turbo" when empty.
            messages: Conversation history as role/content dicts.
            proxy: Optional proxy URL for the HTTP request.
            **kwargs: Extra fields merged into the request payload
                (e.g. temperature), overriding the defaults.

        Raises:
            ValueError: If `model` is not a key of `models`.
            aiohttp.ClientResponseError: On a non-2xx HTTP response.
        """
        model = model if model else "gpt-3.5-turbo"
        if model not in models:
            # Fixed grammar of the error message ("are" -> "is").
            raise ValueError(f"Model is not supported: {model}")
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "temperature": 1,
                "key": "",
                "messages": messages,
                # The API expects the full metadata object, not just the id.
                "model": models[model],
                "prompt": "",
                **kwargs
            }
            async with session.post(cls.url + "/api/chat", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Chunks are plain UTF-8 text, yielded as they arrive.
                async for stream in response.content.iter_any():
                    yield stream.decode()

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported call parameters."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            # Added "proxy": the generator accepts it but it was missing here,
            # unlike the equivalent listing in the Ylokh provider.
            ("proxy", "str"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

View File

@ -21,8 +21,10 @@ class ChatBase(AsyncGeneratorProvider):
) -> AsyncGenerator:
if model == "gpt-4":
chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
elif model == "gpt-3.5-turbo" or True:
elif model == "gpt-3.5-turbo" or not model:
chat_id = "chatbase--1--pdf-p680fxvnm"
else:
raise ValueError(f"Model are not supported: {model}")
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
@ -45,9 +47,7 @@ class ChatBase(AsyncGeneratorProvider):
async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
stream = stream.decode()
if stream:
yield stream
yield stream.decode()
@classmethod

View File

@ -55,7 +55,7 @@ class Vercel(AsyncProvider):
def _create_payload(model: str, messages: list[dict[str, str]]) -> dict[str, Any]:
if model not in model_info:
raise RuntimeError(f'Model "{model}" are not supported')
raise ValueError(f'Model are not supported: {model}')
default_params = model_info[model]["default_params"]
return {
"messages": messages,

View File

@ -0,0 +1,66 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
class Vitalentum(AsyncGeneratorProvider):
    """Async streaming provider for app.vitalentum.io (SSE-style responses)."""
    url                   = "https://app.vitalentum.io"
    working               = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Stream the assistant reply, yielding content deltas as they arrive."""
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "text/event-stream",
            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        # The API takes the whole history as a JSON string under "conversation",
        # with roles mapped to "human"/"bot" speakers.
        history = [
            {
                "speaker": "human" if message["role"] == "user" else "bot",
                "text": message["content"],
            }
            for message in messages
        ]
        payload = {
            "conversation": json.dumps({"history": history}),
            "temperature": 0.7,
            **kwargs
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(cls.url + "/api/converse-edge", json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for raw_line in response.content:
                    decoded = raw_line.decode()
                    # Only handle "data: {...}" events; skip the terminal [DONE] marker.
                    if decoded.startswith("data: ") and not decoded.startswith("data: [DONE]"):
                        event = json.loads(decoded[6:-1])
                        delta = event["choices"][0]["delta"].get("content")
                        if delta:
                            yield delta

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported call parameters."""
        supported = (
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        )
        described = ", ".join(f"{name}: {type_name}" for name, type_name in supported)
        return f"g4f.provider.{cls.__name__} supports: ({described})"

76
g4f/Provider/Ylokh.py Normal file
View File

@ -0,0 +1,76 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
class Ylokh(AsyncGeneratorProvider):
    """Async provider for chat.ylokh.xyz (OpenAI-compatible completions endpoint)."""
    url                   = "https://chat.ylokh.xyz"
    working               = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Yield the assistant reply: content deltas when streaming, otherwise
        the full message in one chunk."""
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "User-Agent"      : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
            "Accept"          : "*/*",
            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        payload = {
            "messages": messages,
            "model": model,
            "temperature": 1,
            "presence_penalty": 0,
            "top_p": 1,
            "frequency_penalty": 0,
            "allow_fallback": True,
            "stream": stream,
            **kwargs
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(
                "https://chatapi.ylokh.xyz/v1/chat/completions",
                json=payload,
                proxy=proxy,
            ) as response:
                response.raise_for_status()
                if not stream:
                    # Non-streaming: one JSON body with the complete message.
                    completion = await response.json()
                    yield completion["choices"][0]["message"].get("content")
                    return
                async for raw_line in response.content:
                    decoded = raw_line.decode()
                    # Only handle "data: {...}" events; skip the terminal [DONE] marker.
                    if decoded.startswith("data: ") and not decoded.startswith("data: [DONE]"):
                        event = json.loads(decoded[6:-1])
                        delta = event["choices"][0]["delta"].get("content")
                        if delta:
                            yield delta

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported call parameters."""
        supported = (
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("temperature", "float"),
        )
        described = ", ".join(f"{name}: {type_name}" for name, type_name in supported)
        return f"g4f.provider.{cls.__name__} supports: ({described})"

View File

@ -2,39 +2,39 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
from curl_cffi.requests import AsyncSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
from .base_provider import AsyncGeneratorProvider, format_prompt
class You(AsyncGeneratorProvider):
url = "https://you.com"
working = True
supports_gpt_35_turbo = True
supports_stream = True
supports_stream = False
@staticmethod
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
cookies: dict = None,
proxy: str = None,
**kwargs,
) -> AsyncGenerator:
if not cookies:
cookies = get_cookies("you.com")
headers = {
"Accept": "text/event-stream",
"Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0"
}
async with ClientSession(headers=headers, cookies=cookies) as session:
async with session.get(
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
headers = {
"Accept": "text/event-stream",
"Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
}
response = await session.get(
"https://you.com/api/streamingSearch",
params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
) as response:
start = 'data: {"youChatToken": '
async for line in response.content:
line = line.decode('utf-8')
if line.startswith(start):
yield json.loads(line[len(start): -2])
headers=headers
)
response.raise_for_status()
start = 'data: {"youChatToken": '
for line in response.text.splitlines():
if line.startswith(start):
yield json.loads(line[len(start): -1])

View File

@ -4,6 +4,7 @@ from .Aichat import Aichat
from .Ails import Ails
from .AiService import AiService
from .AItianhu import AItianhu
from .Aivvm import Aivvm
from .Bard import Bard
from .Bing import Bing
from .ChatBase import ChatBase
@ -25,7 +26,9 @@ from .OpenAssistant import OpenAssistant
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
from .Vitalentum import Vitalentum
from .Wewordle import Wewordle
from .Ylokh import Ylokh
from .You import You
from .Yqcloud import Yqcloud
from .Equing import Equing
@ -42,6 +45,7 @@ __all__ = [
'Ails',
'AiService',
'AItianhu',
'Aivvm',
'Bard',
'Bing',
'ChatBase',
@ -63,7 +67,9 @@ __all__ = [
'OpenAssistant',
'Theb',
'Vercel',
'Vitalentum',
'Wewordle',
'Ylokh',
'You',
'Yqcloud',
'Equing',