Merge pull request #873

Move httpx requirement, Fix Ails and ChatgptAi Provider, Improve scripts
Tekky 2023-09-07 18:45:04 +01:00 committed by GitHub
commit 7ca1a59d95
21 changed files with 501 additions and 574 deletions

View File

@@ -1,43 +1,62 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, http
import requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt
class AItianhu(BaseProvider):
url = "https://www.aitianhu.com/"
working = False
class AItianhu(AsyncGeneratorProvider):
url = "https://www.aitianhu.com"
working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
base += "\nassistant: "
proxy: str = None,
**kwargs
) -> AsyncGenerator:
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Content-Type": "application/json",
"Origin": cls.url,
"Connection": "keep-alive",
"Referer": cls.url + "/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
data: dict[str, Any] = {
"prompt": base,
"options": {},
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
"temperature": kwargs.get("temperature", 0.8),
"top_p": kwargs.get("top_p", 1),
}
url = "https://www.aitianhu.com/api/chat-process"
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
lines = response.text.strip().split("\n")
res = json.loads(lines[-1])
yield res["text"]
async with ClientSession(
headers=headers,
version=http.HttpVersion10
) as session:
data = {
"prompt": format_prompt(messages),
"options": {},
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature": 0.8,
"top_p": 1,
**kwargs
}
async with session.post(
cls.url + "/api/chat-process",
proxy=proxy,
json=data,
ssl=False,
) as response:
response.raise_for_status()
async for line in response.content:
line = json.loads(line.decode('utf-8'))
token = line["detail"]["choices"][0]["delta"].get("content")
if token:
yield token
@classmethod
@property
@@ -46,6 +65,7 @@ class AItianhu(BaseProvider):
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
("temperature", "float"),
("top_p", "int"),
]
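The rewrite above swaps the blocking requests.post call for an aiohttp async generator that parses each response line as JSON and yields only the delta tokens. A minimal sketch of that pattern; the url and payload here are placeholders, not part of the commit:

import json
from aiohttp import ClientSession

# Sketch: stream a JSON-lines chat endpoint and yield delta tokens as they arrive.
async def stream_tokens(url: str, payload: dict, proxy: str = None):
    async with ClientSession() as session:
        async with session.post(url, json=payload, proxy=proxy, ssl=False) as response:
            response.raise_for_status()
            async for raw_line in response.content:
                line = json.loads(raw_line.decode("utf-8"))  # one JSON document per line
                token = line["detail"]["choices"][0]["delta"].get("content")
                if token:
                    yield token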

View File

@@ -1,32 +1,37 @@
from __future__ import annotations
import time
from aiohttp import ClientSession
import requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
class Acytoo(BaseProvider):
url = 'https://chat.acytoo.com/'
class Acytoo(AsyncGeneratorProvider):
url = 'https://chat.acytoo.com'
working = True
supports_gpt_35_turbo = True
@classmethod
def create_completion(
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
proxy: str = None,
**kwargs
) -> AsyncGenerator:
response = requests.post(f'{cls.url}api/completions',
headers=_create_header(), json=_create_payload(messages, kwargs.get('temperature', 0.5)))
response.raise_for_status()
response.encoding = 'utf-8'
yield response.text
async with ClientSession(
headers=_create_header()
) as session:
async with session.post(
cls.url + '/api/completions',
proxy=proxy,
json=_create_payload(messages, **kwargs)
) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
def _create_header():
@@ -36,15 +41,11 @@ def _create_header():
}
def _create_payload(messages: list[dict[str, str]], temperature):
payload_messages = [
message | {'createdAt': int(time.time()) * 1000} for message in messages
]
def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs):
return {
'key' : '',
'model' : 'gpt-3.5-turbo',
'messages' : payload_messages,
'messages' : messages,
'temperature' : temperature,
'password' : ''
}
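Acytoo now streams with response.content.iter_any(), which yields body bytes as soon as they arrive rather than waiting for newline boundaries. A minimal sketch of that chunk-streaming loop, again with placeholder url and payload:

from aiohttp import ClientSession

# Sketch: decode and yield each body chunk as soon as aiohttp receives it.
async def stream_chunks(url: str, payload: dict):
    async with ClientSession() as session:
        async with session.post(url, json=payload) as response:
            response.raise_for_status()
            async for chunk in response.content.iter_any():
                if chunk:
                    yield chunk.decode()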

View File

@@ -1,25 +1,22 @@
from __future__ import annotations
import requests
from aiohttp import ClientSession
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from .base_provider import AsyncProvider, format_prompt
class Aichat(BaseProvider):
class Aichat(AsyncProvider):
url = "https://chat-gpt.org/chat"
working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
async def create_async(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
chat = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
chat += "\nassistant: "
proxy: str = None,
**kwargs
) -> str:
headers = {
"authority": "chat-gpt.org",
"accept": "*/*",
@@ -35,21 +32,23 @@ class Aichat(BaseProvider):
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}
json_data = {
"message": base,
"temperature": kwargs.get('temperature', 0.5),
"presence_penalty": 0,
"top_p": kwargs.get('top_p', 1),
"frequency_penalty": 0,
}
response = requests.post(
"https://chat-gpt.org/api/text",
headers=headers,
json=json_data,
)
response.raise_for_status()
if not response.json()['response']:
raise Exception("Error Response: " + response.json())
yield response.json()["message"]
async with ClientSession(
headers=headers
) as session:
json_data = {
"message": format_prompt(messages),
"temperature": kwargs.get('temperature', 0.5),
"presence_penalty": 0,
"top_p": kwargs.get('top_p', 1),
"frequency_penalty": 0,
}
async with session.post(
"https://chat-gpt.org/api/text",
proxy=proxy,
json=json_data
) as response:
response.raise_for_status()
result = await response.json()
if not result['response']:
raise Exception(f"Error Response: {result}")
return result["message"]

View File

@@ -1,36 +1,36 @@
from __future__ import annotations
import hashlib
import json
import time
import uuid
import json
from datetime import datetime
from aiohttp import ClientSession
import requests
from ..typing import SHA256, Any, CreateResult
from .base_provider import BaseProvider
from ..typing import SHA256, AsyncGenerator
from .base_provider import AsyncGeneratorProvider
class Ails(BaseProvider):
class Ails(AsyncGeneratorProvider):
url: str = "https://ai.ls"
working = True
supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
async def create_async_generator(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
stream: bool,
proxy: str = None,
**kwargs
) -> AsyncGenerator:
headers = {
"authority": "api.caipacity.com",
"accept": "*/*",
"accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"authorization": "Bearer free",
"client-id": str(uuid.uuid4()),
"client-v": _get_client_v(),
"client-v": "0.1.278",
"content-type": "application/json",
"origin": "https://ai.ls",
"referer": "https://ai.ls/",
@@ -41,42 +41,39 @@ class Ails(BaseProvider):
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"from-url": "https://ai.ls/?chat=1"
}
timestamp = _format_timestamp(int(time.time() * 1000))
sig = {
"d": datetime.now().strftime("%Y-%m-%d"),
"t": timestamp,
"s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
}
json_data = json.dumps(
separators=(",", ":"),
obj={
async with ClientSession(
headers=headers
) as session:
timestamp = _format_timestamp(int(time.time() * 1000))
json_data = {
"model": "gpt-3.5-turbo",
"temperature": kwargs.get("temperature", 0.6),
"stream": True,
"messages": messages,
"d": datetime.now().strftime("%Y-%m-%d"),
"t": timestamp,
"s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
}
| sig,
)
async with session.post(
"https://api.caipacity.com/v1/chat/completions",
proxy=proxy,
json=json_data
) as response:
response.raise_for_status()
start = "data: "
async for line in response.content:
line = line.decode('utf-8')
if line.startswith(start) and line != "data: [DONE]":
line = line[len(start):-1]
line = json.loads(line)
token = line["choices"][0]["delta"].get("content")
if token:
if "ai.ls" in token or "ai.ci" in token:
raise Exception("Response Error: " + token)
yield token
response = requests.post(
"https://api.caipacity.com/v1/chat/completions",
headers=headers,
data=json_data,
stream=True,
)
response.raise_for_status()
for token in response.iter_lines():
if b"content" in token:
completion_chunk = json.loads(token.decode().replace("data: ", ""))
token = completion_chunk["choices"][0]["delta"].get("content")
if "ai.ls" in token.lower() or "ai.ci" in token.lower():
raise Exception("Response Error: " + token)
if token != None:
yield token
@classmethod
@property
@@ -106,14 +103,4 @@ def _format_timestamp(timestamp: int) -> str:
e = timestamp
n = e % 10
r = n + 1 if n % 2 == 0 else n
return str(e - n + r)
def _get_client_v():
response = requests.get("https://ai.ls/?chat=1")
response.raise_for_status()
js_path = response.text.split('crossorigin href="')[1].split('"')[0]
response = requests.get("https://ai.ls" + js_path)
response.raise_for_status()
return response.text.split('G4="')[1].split('"')[0]
return str(e - n + r)
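For reference, the retained _format_timestamp helper forces the last digit of a millisecond timestamp to be odd before it is hashed into the request signature. Two worked cases:

def _format_timestamp(timestamp: int) -> str:
    e = timestamp
    n = e % 10                          # last digit
    r = n + 1 if n % 2 == 0 else n      # bump even digits to the next odd value
    return str(e - n + r)

assert _format_timestamp(1694102400008) == "1694102400009"  # even digit bumped
assert _format_timestamp(1694102400007) == "1694102400007"  # odd digit kept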

View File

@@ -1,23 +1,17 @@
from __future__ import annotations
import asyncio
import random
import json
import os
import random
import aiohttp
from aiohttp import ClientSession
from ..typing import Any, AsyncGenerator, CreateResult, Union
from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, get_cookies
class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat"
needs_auth = True
working = True
supports_gpt_4 = True
supports_stream = True
@staticmethod
def create_async_generator(
@@ -34,18 +28,16 @@ class Bing(AsyncGeneratorProvider):
prompt = messages[-1]["content"]
context = create_context(messages[:-1])
if cookies and "SRCHD" in cookies:
#TODO: Will implement proper cookie retrieval later and use a try-except mechanism in 'stream_generate' instead of defaulting the cookie value like this
cookies_dict = {
'SRCHD' : cookies["SRCHD"],
if not cookies or "SRCHD" not in cookies:
cookies = {
'SRCHD' : 'AF=NOFORM',
'PPLState' : '1',
'KievRPSSecAuth': '',
'SUID' : '',
'SRCHUSR' : '',
'SRCHHPGUSR' : '',
}
return stream_generate(prompt, context, cookies_dict)
return stream_generate(prompt, context, cookies)
def create_context(messages: list[dict[str, str]]):
context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
@@ -236,7 +228,7 @@ async def stream_generate(
cookies: dict=None
):
async with ClientSession(
timeout=aiohttp.ClientTimeout(total=900),
timeout=ClientTimeout(total=900),
cookies=cookies,
headers=Defaults.headers,
) as session:
@@ -288,16 +280,4 @@ async def stream_generate(
final = True
break
finally:
await delete_conversation(session, conversation)
def run(generator: AsyncGenerator[Union[Any, str], Any]):
loop = asyncio.get_event_loop()
gen = generator.__aiter__()
while True:
try:
yield loop.run_until_complete(gen.__anext__())
except StopAsyncIteration:
break
await delete_conversation(session, conversation)

View File

@@ -1,32 +1,28 @@
from __future__ import annotations
import re
import html
import json
from aiohttp import ClientSession
import requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
class ChatgptAi(BaseProvider):
url: str = "https://chatgpt.ai/gpt-4/"
working = True
supports_gpt_4 = True
class ChatgptAi(AsyncGeneratorProvider):
url: str = "https://chatgpt.ai/"
working = True
supports_gpt_35_turbo = True
_system_data = None
@staticmethod
def create_completion(
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
chat = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
chat += "\nassistant: "
response = requests.get("https://chatgpt.ai/")
nonce, post_id, _, bot_id = re.findall(
r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width',
response.text)[0]
proxy: str = None,
**kwargs
) -> AsyncGenerator:
headers = {
"authority" : "chatgpt.ai",
"accept" : "*/*",
@@ -34,7 +30,7 @@ class ChatgptAi(BaseProvider):
"cache-control" : "no-cache",
"origin" : "https://chatgpt.ai",
"pragma" : "no-cache",
"referer" : "https://chatgpt.ai/gpt-4/",
"referer" : cls.url,
"sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform" : '"Windows"',
@@ -43,17 +39,37 @@ class ChatgptAi(BaseProvider):
"sec-fetch-site" : "same-origin",
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}
data = {
"_wpnonce" : nonce,
"post_id" : post_id,
"url" : "https://chatgpt.ai/gpt-4",
"action" : "wpaicg_chat_shortcode_message",
"message" : chat,
"bot_id" : bot_id,
}
async with ClientSession(
headers=headers
) as session:
if not cls._system_data:
async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status()
match = re.findall(r"data-system='([^']+)'", await response.text())
if not match:
raise RuntimeError("No system data")
cls._system_data = json.loads(html.unescape(match[0]))
response = requests.post(
"https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data)
response.raise_for_status()
yield response.json()["data"]
data = {
"botId": cls._system_data["botId"],
"clientId": "",
"contextId": cls._system_data["contextId"],
"id": cls._system_data["id"],
"messages": messages[:-1],
"newMessage": messages[-1]["content"],
"session": cls._system_data["sessionId"],
"stream": True
}
async with session.post(
"https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit",
proxy=proxy,
json=data
) as response:
response.raise_for_status()
start = "data: "
async for line in response.content:
line = line.decode('utf-8')
if line.startswith(start):
line = json.loads(line[len(start):-1])
if line["type"] == "live":
yield line["data"]
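The new ChatgptAi flow fetches the page once, pulls the bot configuration out of the data-system attribute, and caches the parsed dict on the class. A minimal sketch of that scrape-and-cache step, assuming the attribute holds HTML-escaped JSON as in the diff above:

import html, json, re
from aiohttp import ClientSession

# Sketch: fetch the page and parse the JSON embedded in data-system='...'.
async def fetch_system_data(url: str, proxy: str = None) -> dict:
    async with ClientSession() as session:
        async with session.get(url, proxy=proxy) as response:
            response.raise_for_status()
            match = re.findall(r"data-system='([^']+)'", await response.text())
            if not match:
                raise RuntimeError("No system data")
            return json.loads(html.unescape(match[0]))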

View File

@@ -1,70 +1,58 @@
from __future__ import annotations
import base64
import os
import re
import os, re
from aiohttp import ClientSession
import requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from .base_provider import AsyncProvider, format_prompt
class ChatgptLogin(BaseProvider):
class ChatgptLogin(AsyncProvider):
url = "https://opchatgpts.net"
supports_gpt_35_turbo = True
working = True
_nonce = None
@staticmethod
def create_completion(
@classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
**kwargs
) -> str:
headers = {
"authority" : "chatgptlogin.ac",
"accept" : "*/*",
"accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"content-type" : "application/json",
"origin" : "https://opchatgpts.net",
"referer" : "https://opchatgpts.net/chatgpt-free-use/",
"sec-ch-ua" : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform" : '"Windows"',
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"x-wp-nonce" : _get_nonce(),
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin" : "https://opchatgpts.net",
"Alt-Used" : "opchatgpts.net",
"Referer" : "https://opchatgpts.net/chatgpt-free-use/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
}
conversation = _transform(messages)
json_data = {
"env" : "chatbot",
"session" : "N/A",
"prompt" : "Converse as if you were an AI assistant. Be friendly, creative.",
"context" : "Converse as if you were an AI assistant. Be friendly, creative.",
"messages" : conversation,
"newMessage" : messages[-1]["content"],
"userName" : '<div class="mwai-name-text">User:</div>',
"aiName" : '<div class="mwai-name-text">AI:</div>',
"model" : "gpt-3.5-turbo",
"temperature" : kwargs.get("temperature", 0.8),
"maxTokens" : 1024,
"maxResults" : 1,
"apiKey" : "",
"service" : "openai",
"embeddingsIndex": "",
"stop" : "",
"clientId" : os.urandom(6).hex()
}
response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
headers=headers, json=json_data)
response.raise_for_status()
yield response.json()["reply"]
async with ClientSession(
headers=headers
) as session:
if not cls._nonce:
async with session.get(
"https://opchatgpts.net/chatgpt-free-use/",
params={"id": os.urandom(6).hex()},
) as response:
result = re.search(r'data-nonce="(.*?)"', await response.text())
if not result:
raise RuntimeError("No nonce value")
cls._nonce = result.group(1)
data = {
"_wpnonce": cls._nonce,
"post_id": 28,
"url": "https://opchatgpts.net/chatgpt-free-use",
"action": "wpaicg_chat_shortcode_message",
"message": format_prompt(messages),
"bot_id": 0
}
async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
response.raise_for_status()
return (await response.json())["data"]
@classmethod
@property
@@ -76,55 +64,4 @@ class ChatgptLogin(BaseProvider):
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
def _get_nonce() -> str:
res = requests.get("https://opchatgpts.net/chatgpt-free-use/",
headers = {
"Referer" : "https://opchatgpts.net/chatgpt-free-use/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"})
result = re.search(
r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">',
res.text)
if result is None:
return ""
src = result.group(1)
decoded_string = base64.b64decode(src.split(",")[-1]).decode("utf-8")
result = re.search(r"let restNonce = '(.*?)';", decoded_string)
return "" if result is None else result.group(1)
def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
return [
{
"id" : os.urandom(6).hex(),
"role" : message["role"],
"content": message["content"],
"who" : "AI: " if message["role"] == "assistant" else "User: ",
"html" : _html_encode(message["content"]),
}
for message in messages
]
def _html_encode(string: str) -> str:
table = {
'"' : "&quot;",
"'" : "&#39;",
"&" : "&amp;",
">" : "&gt;",
"<" : "&lt;",
"\n": "<br>",
"\t": "&nbsp;&nbsp;&nbsp;&nbsp;",
" " : "&nbsp;",
}
for key in table:
string = string.replace(key, table[key])
return string
return f"g4f.provider.{cls.__name__} supports: ({param})"
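ChatgptLogin replaces the old base64 nonce extraction with a simpler scrape of the data-nonce attribute, cached so the page is fetched only once. A minimal sketch of that handshake, using a module-level cache instead of the class attribute from the diff:

import os
import re
from aiohttp import ClientSession

_nonce = None

# Sketch: fetch the chat page with a cache-busting id and pull out data-nonce.
async def get_nonce(page_url: str) -> str:
    global _nonce
    if _nonce is None:
        async with ClientSession() as session:
            async with session.get(page_url, params={"id": os.urandom(6).hex()}) as response:
                result = re.search(r'data-nonce="(.*?)"', await response.text())
                if not result:
                    raise RuntimeError("No nonce value")
                _nonce = result.group(1)
    return _nonce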

View File

@@ -1,25 +1,25 @@
from __future__ import annotations
import json
import js2py
import requests
from aiohttp import ClientSession
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
class DeepAi(BaseProvider):
class DeepAi(AsyncGeneratorProvider):
url: str = "https://deepai.org"
working = True
supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
async def create_async_generator(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
proxy: str = None,
**kwargs
) -> AsyncGenerator:
token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
@@ -53,10 +53,11 @@ f = function () {
"api-key": api_key,
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
}
response = requests.post("https://api.deepai.org/make_me_a_pizza",
headers=headers, data=payload, stream=True)
for chunk in response.iter_content(chunk_size=None):
response.raise_for_status()
yield chunk.decode()
async with ClientSession(
headers=headers
) as session:
async with session.post("https://api.deepai.org/make_me_a_pizza", proxy=proxy, data=payload) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()

View File

@@ -12,8 +12,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
class H2o(AsyncGeneratorProvider):
url = "https://gpt-gm.h2o.ai"
working = True
supports_stream = True
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
@classmethod
async def create_async_generator(

View File

@@ -24,9 +24,9 @@ class HuggingChat(AsyncGeneratorProvider):
cookies: dict = None,
**kwargs
) -> AsyncGenerator:
model = model if model else cls.model
if not cookies:
cookies = get_cookies(".huggingface.co")
model = model if model else cls.model
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
@@ -62,36 +62,32 @@ class HuggingChat(AsyncGeneratorProvider):
"web_search_id": ""
}
}
start = "data:"
first = True
async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
async for line in response.content:
line = line.decode("utf-8")
if not line:
continue
if not stream:
try:
data = json.loads(line)
except json.decoder.JSONDecodeError:
raise RuntimeError(f"No json: {line}")
if "error" in data:
raise RuntimeError(data["error"])
elif isinstance(data, list):
yield data[0]["generated_text"]
else:
raise RuntimeError(f"Response: {line}")
elif line.startswith(start):
line = json.loads(line[len(start):-1])
if not stream:
data = await response.json()
if "error" in data:
raise RuntimeError(data["error"])
elif isinstance(data, list):
yield data[0]["generated_text"]
else:
raise RuntimeError(f"Response: {data}")
else:
start = "data:"
first = True
async for line in response.content:
line = line.decode("utf-8")
if not line:
continue
if "token" not in line:
raise RuntimeError(f"Response: {line}")
if not line["token"]["special"]:
if first:
yield line["token"]["text"].lstrip()
first = False
else:
yield line["token"]["text"]
if line.startswith(start):
line = json.loads(line[len(start):-1])
if "token" not in line:
raise RuntimeError(f"Response: {line}")
if not line["token"]["special"]:
if first:
yield line["token"]["text"].lstrip()
first = False
else:
yield line["token"]["text"]
async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
response.raise_for_status()
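The streaming branch above is a server-sent-events loop: only lines starting with "data:" carry tokens, the trailing newline is sliced off before json.loads, and the first token is left-stripped so the reply has no leading space. A minimal sketch of that loop, assuming content is an async iterable of raw byte lines:

import json

# Sketch: parse SSE token lines and emit their text, trimming the first token.
async def parse_sse(content):
    start = "data:"
    first = True
    async for raw_line in content:
        line = raw_line.decode("utf-8")
        if line.startswith(start):
            event = json.loads(line[len(start):-1])  # drop prefix and trailing newline
            if "token" not in event:
                raise RuntimeError(f"Response: {event}")
            if not event["token"]["special"]:
                text = event["token"]["text"]
                yield text.lstrip() if first else text
                first = False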

View File

@@ -32,7 +32,6 @@ models = {
class Liaobots(AsyncGeneratorProvider):
url = "https://liaobots.com"
working = True
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
_auth_code = None
@@ -46,24 +45,24 @@ class Liaobots(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncGenerator:
model = model if model in models else "gpt-3.5-turbo"
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
headers = {
"authority": "liaobots.com",
"content-type": "application/json",
"origin": "https://liaobots.com",
"referer": "https://liaobots.com/",
"origin": cls.url,
"referer": cls.url + "/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
}
async with ClientSession(
headers=headers
) as session:
model = model if model in models else "gpt-3.5-turbo"
auth_code = auth if isinstance(auth, str) else cls._auth_code
if not auth_code:
async with session.post("https://liaobots.com/api/user", proxy=proxy, json={"authcode": ""}) as response:
async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response:
response.raise_for_status()
auth_code = cls._auth_code = json.loads((await response.text()))["authCode"]
auth_code = cls._auth_code = json.loads(await response.text())["authCode"]
data = {
"conversationId": str(uuid.uuid4()),
"model": models[model],
@@ -71,10 +70,11 @@ class Liaobots(AsyncGeneratorProvider):
"key": "",
"prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
}
async with session.post("https://liaobots.com/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
response.raise_for_status()
async for line in response.content:
yield line.decode("utf-8")
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
@classmethod

View File

@@ -1,60 +1,8 @@
from __future__ import annotations
import requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from .ChatgptLogin import ChatgptLogin
class Opchatgpts(BaseProvider):
url = "https://opchatgpts.net"
working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
temperature = kwargs.get("temperature", 0.8)
max_tokens = kwargs.get("max_tokens", 1024)
system_prompt = kwargs.get(
"system_prompt",
"Converse as if you were an AI assistant. Be friendly, creative.")
payload = _create_payload(
messages = messages,
temperature = temperature,
max_tokens = max_tokens,
system_prompt = system_prompt)
response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload)
response.raise_for_status()
yield response.json()["reply"]
def _create_payload(
messages: list[dict[str, str]],
temperature: float,
max_tokens: int, system_prompt: str) -> dict:
return {
"env" : "chatbot",
"session" : "N/A",
"prompt" : "\n",
"context" : system_prompt,
"messages" : messages,
"newMessage" : messages[::-1][0]["content"],
"userName" : '<div class="mwai-name-text">User:</div>',
"aiName" : '<div class="mwai-name-text">AI:</div>',
"model" : "gpt-3.5-turbo",
"temperature" : temperature,
"maxTokens" : max_tokens,
"maxResults" : 1,
"apiKey" : "",
"service" : "openai",
"embeddingsIndex" : "",
"stop" : "",
}
class Opchatgpts(ChatgptLogin):
url = "https://opchatgpts.net"
working = True

View File

@@ -1,67 +1,82 @@
from __future__ import annotations
has_module = True
try:
from revChatGPT.V1 import AsyncChatbot
except ImportError:
has_module = False
from curl_cffi.requests import AsyncSession
import uuid
import json
from httpx import AsyncClient
from .base_provider import AsyncProvider, get_cookies, format_prompt
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
class OpenaiChat(AsyncGeneratorProvider):
class OpenaiChat(AsyncProvider):
url = "https://chat.openai.com"
needs_auth = True
working = has_module
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
_access_token = None
@classmethod
async def create_async_generator(
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
access_token: str = _access_token,
access_token: str = None,
cookies: dict = None,
**kwargs: dict
) -> AsyncGenerator:
config = {"access_token": access_token, "model": model}
proxies = None
if proxy:
if "://" not in proxy:
proxy = f"http://{proxy}"
config["proxy"] = proxy
bot = AsyncChatbot(
config=config
)
proxies = {
"http": proxy,
"https": proxy
}
if not access_token:
cookies = cookies if cookies else get_cookies("chat.openai.com")
cls._access_token = await get_access_token(bot.session, cookies)
bot.set_access_token(cls._access_token)
access_token = await cls.get_access_token(cookies)
headers = {
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
}
async with AsyncSession(proxies=proxies, headers=headers, impersonate="chrome107") as session:
messages = [
{
"id": str(uuid.uuid4()),
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [format_prompt(messages)]},
},
]
data = {
"action": "next",
"messages": messages,
"conversation_id": None,
"parent_message_id": str(uuid.uuid4()),
"model": "text-davinci-002-render-sha",
"history_and_training_disabled": True,
}
response = await session.post("https://chat.openai.com/backend-api/conversation", json=data)
response.raise_for_status()
last_message = None
for line in response.content.decode().splitlines():
if line.startswith("data: "):
line = line[6:]
if line != "[DONE]":
line = json.loads(line)
if "message" in line:
last_message = line["message"]["content"]["parts"][0]
return last_message
returned = None
async for message in bot.ask(format_prompt(messages)):
message = message["message"]
if returned:
if message.startswith(returned):
new = message[len(returned):]
if new:
yield new
else:
yield message
returned = message
await bot.delete_conversation(bot.conversation_id)
@classmethod
async def get_access_token(cls, cookies: dict = None, proxies: dict = None):
if not cls._access_token:
cookies = cookies if cookies else get_cookies("chat.openai.com")
async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session:
response = await session.get("https://chat.openai.com/api/auth/session")
response.raise_for_status()
cls._access_token = response.json()["accessToken"]
return cls._access_token
@classmethod
@@ -72,15 +87,8 @@ class OpenaiChat(AsyncGeneratorProvider):
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
("access_token", "str"),
("cookies", "dict[str, str]")
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
async def get_access_token(session: AsyncClient, cookies: dict):
response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
response.raise_for_status()
try:
return response.json()["accessToken"]
except json.decoder.JSONDecodeError:
raise RuntimeError(f"Response: {response.text}")
return f"g4f.provider.{cls.__name__} supports: ({param})"
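OpenaiChat now memoizes the access token at class level: the session endpoint is queried once with browser cookies and the accessToken is reused for later calls. A minimal sketch of that cache, using a module-level variable instead of the class attribute:

from curl_cffi.requests import AsyncSession

_access_token = None

# Sketch: fetch and cache the ChatGPT web access token via the session endpoint.
async def get_access_token(cookies: dict) -> str:
    global _access_token
    if not _access_token:
        async with AsyncSession(cookies=cookies, impersonate="chrome107") as session:
            response = await session.get("https://chat.openai.com/api/auth/session")
            response.raise_for_status()
            _access_token = response.json()["accessToken"]
    return _access_token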

View File

@@ -1,63 +1,72 @@
from __future__ import annotations
import base64
import json
import uuid
import base64, json, uuid, quickjs, random
from curl_cffi.requests import AsyncSession
import quickjs
from curl_cffi import requests
from ..typing import Any, CreateResult, TypedDict
from .base_provider import BaseProvider
from ..typing import Any, TypedDict
from .base_provider import AsyncProvider
class Vercel(BaseProvider):
url = "https://play.vercel.ai"
class Vercel(AsyncProvider):
url = "https://sdk.vercel.ai"
working = True
supports_gpt_35_turbo = True
model = "replicate:replicate/llama-2-70b-chat"
@staticmethod
def create_completion(
@classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
proxy: str = None,
**kwargs
) -> str:
if model in ["gpt-3.5-turbo", "gpt-4"]:
model = "openai:" + model
yield _chat(model_id=model, messages=messages)
model = model if model else cls.model
proxies = None
if proxy:
if "://" not in proxy:
proxy = "http://" + proxy
proxies = {"http": proxy, "https": proxy}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.{rand1}.{rand2} Safari/537.36".format(
rand1=random.randint(0,9999),
rand2=random.randint(0,9999)
),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.5",
"TE": "trailers",
}
async with AsyncSession(headers=headers, proxies=proxies, impersonate="chrome107") as session:
response = await session.get(cls.url + "/openai.jpeg")
response.raise_for_status()
custom_encoding = _get_custom_encoding(response.text)
headers = {
"Content-Type": "application/json",
"Custom-Encoding": custom_encoding,
}
data = _create_payload(model, messages)
response = await session.post(cls.url + "/api/generate", json=data, headers=headers)
response.raise_for_status()
return response.text
def _chat(model_id: str, messages: list[dict[str, str]]) -> str:
session = requests.Session(impersonate="chrome107")
url = "https://sdk.vercel.ai/api/generate"
header = _create_header(session)
payload = _create_payload(model_id, messages)
response = session.post(url=url, headers=header, json=payload)
response.raise_for_status()
return response.text
def _create_payload(model_id: str, messages: list[dict[str, str]]) -> dict[str, Any]:
default_params = model_info[model_id]["default_params"]
def _create_payload(model: str, messages: list[dict[str, str]]) -> dict[str, Any]:
if model not in model_info:
raise RuntimeError(f'Model "{model}" is not supported')
default_params = model_info[model]["default_params"]
return {
"messages": messages,
"playgroundId": str(uuid.uuid4()),
"chatIndex": 0,
"model": model_id} | default_params
def _create_header(session: requests.Session):
custom_encoding = _get_custom_encoding(session)
return {"custom-encoding": custom_encoding}
"model": model
} | default_params
# based on https://github.com/ading2210/vercel-llm-api
def _get_custom_encoding(session: requests.Session):
url = "https://sdk.vercel.ai/openai.jpeg"
response = session.get(url=url)
data = json.loads(base64.b64decode(response.text, validate=True))
def _get_custom_encoding(text: str) -> str:
data = json.loads(base64.b64decode(text, validate=True))
script = """
String.prototype.fontcolor = function() {{
return `<font>${{this}}</font>`
@@ -67,7 +76,6 @@ def _get_custom_encoding(session: requests.Session):
""".format(
script=data["c"], key=data["a"]
)
context = quickjs.Context() # type: ignore
token_data = json.loads(context.eval(script).json()) # type: ignore
token_data[2] = "mark"
@@ -136,6 +144,15 @@ model_info: dict[str, ModelInfo] = {
"repetitionPenalty": 1,
},
},
"replicate:replicate/llama-2-70b-chat": {
"id": "replicate:replicate/llama-2-70b-chat",
"default_params": {
"temperature": 0.75,
"maxTokens": 1000,
"topP": 1,
"repetitionPenalty": 1,
},
},
"huggingface:bigscience/bloom": {
"id": "huggingface:bigscience/bloom",
"default_params": {

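For reference, the anti-bot step in the Vercel rewrite: /openai.jpeg returns base64-encoded JSON holding a script ("c") and a key ("a"); the script is evaluated in a quickjs sandbox to produce the token data that is later packed into the Custom-Encoding header. A minimal sketch of the evaluation step only; the final header packing falls outside the hunk shown above:

import base64
import json
import quickjs

# Sketch: decode the challenge and run its embedded script in a quickjs sandbox.
def eval_challenge(challenge_text: str, script_template: str) -> list:
    data = json.loads(base64.b64decode(challenge_text, validate=True))
    script = script_template.format(script=data["c"], key=data["a"])
    context = quickjs.Context()
    return json.loads(context.eval(script).json())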
View File

@@ -1,47 +1,36 @@
from __future__ import annotations
import json
import random
import string
import time
import random, string, time
from aiohttp import ClientSession
import requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from .base_provider import AsyncProvider
class Wewordle(BaseProvider):
url = "https://wewordle.org/"
class Wewordle(AsyncProvider):
url = "https://wewordle.org"
working = True
supports_gpt_35_turbo = True
@classmethod
def create_completion(
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
proxy: str = None,
**kwargs
) -> str:
# randomize user id and app id
_user_id = "".join(
random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
_app_id = "".join(
random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
# make current date with format utc
_request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
headers = {
"accept" : "*/*",
"pragma" : "no-cache",
"Content-Type" : "application/json",
"Connection" : "keep-alive"
# user agent android client
# 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
}
data: dict[str, Any] = {
_user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
_app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
_request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
data = {
"user" : _user_id,
"messages" : messages,
"subscriber": {
@@ -65,10 +54,12 @@ class Wewordle(BaseProvider):
}
}
response = requests.post(f"{cls.url}gptapi/v1/android/turbo",
headers=headers, data=json.dumps(data))
response.raise_for_status()
_json = response.json()
if "message" in _json:
yield _json["message"]["content"]
async with ClientSession(
headers=headers
) as session:
async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
response.raise_for_status()
content = (await response.json())["message"]["content"]
if content:
return content

View File

@@ -2,28 +2,31 @@ from __future__ import annotations
from aiohttp import ClientSession
from .base_provider import AsyncProvider, format_prompt
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncProvider):
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
working = True
supports_gpt_35_turbo = True
@staticmethod
async def create_async(
async def create_async_generator(
model: str,
messages: list[dict[str, str]],
proxy: str = None,
**kwargs,
) -> str:
) -> AsyncGenerator:
async with ClientSession(
headers=_create_header()
) as session:
payload = _create_payload(messages)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
response.raise_for_status()
return await response.text()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
def _create_header():
@@ -40,6 +43,6 @@ def _create_payload(messages: list[dict[str, str]]):
"network": True,
"system": "",
"withoutContext": False,
"stream": False,
"stream": True,
"userId": "#/chat/1693025544336"
}

View File

@@ -42,10 +42,11 @@ _cookies = {}
def get_cookies(cookie_domain: str) -> dict:
if cookie_domain not in _cookies:
_cookies[cookie_domain] = {}
for cookie in browser_cookie3.load(cookie_domain):
_cookies[cookie_domain][cookie.name] = cookie.value
try:
for cookie in browser_cookie3.load(cookie_domain):
_cookies[cookie_domain][cookie.name] = cookie.value
except:
pass
return _cookies[cookie_domain]
@@ -79,6 +80,8 @@ class AsyncProvider(BaseProvider):
class AsyncGeneratorProvider(AsyncProvider):
supports_stream = True
@classmethod
def create_completion(
cls,

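get_cookies is hardened above because browser_cookie3 raises when no supported browser profile can be read; the lookup is wrapped so callers get an empty dict instead of an exception. A minimal standalone sketch of that helper:

import browser_cookie3

_cookies: dict = {}

# Sketch: load cookies for a domain once, tolerating missing browser profiles.
def get_cookies(cookie_domain: str) -> dict:
    if cookie_domain not in _cookies:
        _cookies[cookie_domain] = {}
        try:
            for cookie in browser_cookie3.load(cookie_domain):
                _cookies[cookie_domain][cookie.name] = cookie.value
        except Exception:
            pass
    return _cookies[cookie_domain]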
View File

@@ -9,4 +9,3 @@ js2py
quickjs
flask
flask-cors
httpx

testing/test_async.py Normal file
View File

@@ -0,0 +1,37 @@
import sys
from pathlib import Path
import asyncio
sys.path.append(str(Path(__file__).parent.parent))
import g4f
from g4f.Provider import AsyncProvider
from testing.test_providers import get_providers
from testing.log_time import log_time_async
async def create_async(provider: AsyncProvider):
model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
try:
response = await log_time_async(
provider.create_async,
model=model,
messages=[{"role": "user", "content": "Hello Assistant!"}]
)
assert type(response) is str
assert len(response) > 0
return response
except Exception as e:
return e
async def run_async():
_providers: list[AsyncProvider] = [
_provider
for _provider in get_providers()
if _provider.working and hasattr(_provider, "create_async")
]
responses = [create_async(_provider) for _provider in _providers]
responses = await asyncio.gather(*responses)
for idx, provider in enumerate(_providers):
print(f"{provider.__name__}:", responses[idx])
print("Total:", asyncio.run(log_time_async(run_async)))

View File

@@ -8,6 +8,11 @@ from g4f import BaseProvider, models, Provider
logging = False
class Styles:
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def main():
providers = get_providers()
failed_providers = []
@@ -24,39 +29,40 @@ def main():
print()
if failed_providers:
print(f"{Fore.RED}Failed providers:\n")
print(f"{Fore.RED + Styles.BOLD}Failed providers:{Styles.ENDC}")
for _provider in failed_providers:
print(f"{Fore.RED}{_provider.__name__}")
else:
print(f"{Fore.GREEN}All providers are working")
print(f"{Fore.GREEN + Styles.BOLD}All providers are working")
def get_providers() -> list[type[BaseProvider]]:
provider_names = dir(Provider)
ignore_names = [
"annotations",
"base_provider",
"BaseProvider"
"BaseProvider",
"AsyncProvider",
"AsyncGeneratorProvider"
]
provider_names = [
provider_name
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
return [getattr(Provider, provider_name) for provider_name in sorted(provider_names)]
return [getattr(Provider, provider_name) for provider_name in provider_names]
def create_response(_provider: type[BaseProvider]) -> str:
if _provider.supports_gpt_35_turbo:
model = models.gpt_35_turbo.name
elif _provider.supports_gpt_4:
model = models.gpt_4
elif hasattr(_provider, "model"):
model = _provider.model
model = models.gpt_4.name
else:
model = None
model = models.default.name
response = _provider.create_completion(
model=model,
messages=[{"role": "user", "content": "Hello"}],
messages=[{"role": "user", "content": "Hello, who are you? Answer in as much detail as possible."}],
stream=False,
)
return "".join(response)

View File

@@ -5,10 +5,9 @@ from urllib.parse import urlparse
sys.path.append(str(Path(__file__).parent.parent))
from g4f import models, Provider
from g4f.Provider.base_provider import BaseProvider, AsyncProvider
from testing.test_providers import test
from g4f import models
from g4f.Provider.base_provider import AsyncProvider
from testing.test_providers import test, get_providers
def print_imports():
print("##### Providers:")
@@ -68,26 +67,6 @@ def print_providers():
)
print("\n".join(lines))
def get_provider_names() -> list[str]:
provider_names = dir(Provider)
ignore_names = [
"base_provider",
"BaseProvider",
"AsyncProvider",
"AsyncGeneratorProvider"
]
return [
provider_name
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
def get_providers() -> list[type[BaseProvider]]:
return [getattr(Provider, provider_name) for provider_name in get_provider_names()]
def print_models():
base_provider_names = {
"cohere": "Cohere",