Merge pull request #1264 from hlohaus/any

Improve providers
Tekky 2023-11-18 02:40:09 +00:00 committed by GitHub
commit ca3eaaffee
6 changed files with 231 additions and 96 deletions

AItianhuSpace provider:

@@ -11,6 +11,7 @@ from .. import debug
 class AItianhuSpace(BaseProvider):
     url = "https://chat3.aiyunos.top/"
     working = True
+    supports_stream = True
     supports_gpt_35_turbo = True
     _domains = ["aitianhu.com", "aitianhu1.top"]
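With supports_stream set, the provider can now be asked for a chunked response. A minimal usage sketch, assuming the g4f.ChatCompletion interface of this release; the model name and prompt are placeholders:

import g4f
from g4f.Provider import AItianhuSpace

# stream=True makes the call yield text chunks as they arrive.
for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",                            # placeholder model name
    messages=[{"role": "user", "content": "Say hello"}],
    provider=AItianhuSpace,
    stream=True,
):
    print(chunk, end="", flush=True)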

PerplexityAi provider:

@@ -22,24 +22,24 @@ class PerplexityAi(BaseProvider):
         timeout: int = 120,
         browser: WebDriver = None,
         copilot: bool = False,
-        headless: bool = True,
         **kwargs
     ) -> CreateResult:
-        driver = browser if browser else get_browser("", headless, proxy)
+        driver = browser if browser else get_browser("", False, proxy)
         from selenium.webdriver.common.by import By
         from selenium.webdriver.support.ui import WebDriverWait
         from selenium.webdriver.support import expected_conditions as EC
+        from selenium.webdriver.common.keys import Keys

         prompt = format_prompt(messages)
         driver.get(f"{cls.url}/")
         wait = WebDriverWait(driver, timeout)

-        # Page loaded?
+        # Is page loaded?
         wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']")))

-        # Add WebSocket hook
+        # Register WebSocket hook
         script = """
 window._message = window._last_message = "";
 window._message_finished = false;

@@ -57,8 +57,9 @@ WebSocket.prototype.send = function(...args) {
                 content = JSON.parse(content);
             }
             window._message = content["answer"];
-            window._message_finished = data[0] == "query_answered";
-            window._web_results = content["web_results"];
+            if (!window._message_finished) {
+                window._message_finished = data[0] == "query_answered";
+            }
         }
     }
 });

@@ -70,20 +71,19 @@ WebSocket.prototype.send = function(...args) {
         if copilot:
             try:
-                # Check account
+                # Check for account
                 driver.find_element(By.CSS_SELECTOR, "img[alt='User avatar']")
                 # Enable copilot
                 driver.find_element(By.CSS_SELECTOR, "button[data-testid='copilot-toggle']").click()
             except:
-                raise RuntimeError("For copilot you needs a account")
+                raise RuntimeError("You need an account for copilot")

-        # Enter question
+        # Submit prompt
         driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(prompt)
-        # Submit question
-        driver.find_element(By.CSS_SELECTOR, "button.bg-super svg[data-icon='arrow-right']").click()
+        driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(Keys.ENTER)

         try:
-            # Yield response
+            # Stream response
             script = """
 if(window._message && window._message != window._last_message) {
     try {
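The injected hook stores the partial answer in window._message and a completion flag in window._message_finished, which the provider then polls from Python. A minimal restatement of that polling pattern, not the provider's exact loop, assuming a Selenium driver that already has the hook injected (the helper name is illustrative):

import time
from selenium.webdriver.remote.webdriver import WebDriver

def stream_hooked_message(driver: WebDriver, poll: float = 0.1):
    # Poll the globals written by the injected WebSocket hook and yield new text.
    last = ""
    while True:
        message = driver.execute_script("return window._message;") or ""
        if message != last:
            yield message[len(last):]          # only the newly received suffix
            last = message
        if driver.execute_script("return window._message_finished;"):
            return
        time.sleep(poll)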

Bard provider:

@@ -32,7 +32,7 @@ class Bard(BaseProvider):
         try:
             driver.get(f"{cls.url}/chat")
-            wait = WebDriverWait(driver, 10)
+            wait = WebDriverWait(driver, 10 if headless else 240)
             wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
         except:
             # Reopen browser for login

@@ -61,14 +61,13 @@ XMLHttpRequest.prototype.open = function(method, url) {
 """
         driver.execute_script(script)

-        # Input and submit prompt
+        # Submit prompt
         driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
         driver.find_element(By.CSS_SELECTOR, "button.send-button").click()

         # Yield response
-        script = "return window._message;"
         while True:
-            chunk = driver.execute_script(script)
+            chunk = driver.execute_script("return window._message;")
             if chunk:
                 yield chunk
                 return
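The longer wait matters when headless is off: with a visible browser the provider now waits up to 240 seconds, which leaves time to finish the Google login by hand before the chat editor is polled. A hedged usage sketch, assuming g4f forwards the headless keyword to the provider; the model name is a placeholder:

import g4f
from g4f.Provider import Bard

# headless=False opens a visible browser; log in to Google there if prompted.
response = g4f.ChatCompletion.create(
    model="palm",                                     # placeholder model name
    messages=[{"role": "user", "content": "Hello"}],
    provider=Bard,
    headless=False,
)
print(response)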

Theb provider:

@@ -1,101 +1,158 @@
 from __future__ import annotations

-import json
-import random
-import requests
+import time

-from ...typing import Any, CreateResult, Messages
+from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
-from ..helper import format_prompt
+from ..helper import WebDriver, format_prompt, get_browser
+
+models = {
+    "theb-ai": "TheB.AI",
+    "theb-ai-free": "TheB.AI Free",
+    "gpt-3.5-turbo": "GPT-3.5 Turbo (New)",
+    "gpt-3.5-turbo-16k": "GPT-3.5-16K",
+    "gpt-4-turbo": "GPT-4 Turbo",
+    "gpt-4": "GPT-4",
+    "gpt-4-32k": "GPT-4 32K",
+    "claude-2": "Claude 2",
+    "claude-instant-1": "Claude Instant 1.2",
+    "palm-2": "PaLM 2",
+    "palm-2-32k": "PaLM 2 32K",
+    "palm-2-codey": "Codey",
+    "palm-2-codey-32k": "Codey 32K",
+    "vicuna-13b-v1.5": "Vicuna v1.5 13B",
+    "llama-2-7b-chat": "Llama 2 7B",
+    "llama-2-13b-chat": "Llama 2 13B",
+    "llama-2-70b-chat": "Llama 2 70B",
+    "code-llama-7b": "Code Llama 7B",
+    "code-llama-13b": "Code Llama 13B",
+    "code-llama-34b": "Code Llama 34B",
+    "qwen-7b-chat": "Qwen 7B"
+}

 class Theb(BaseProvider):
-    url = "https://theb.ai"
+    url = "https://beta.theb.ai"
     working = True
-    supports_stream = True
     supports_gpt_35_turbo = True
-    needs_auth = True
+    supports_gpt_4 = True
+    supports_stream = True

-    @staticmethod
+    @classmethod
     def create_completion(
+        cls,
         model: str,
         messages: Messages,
         stream: bool,
         proxy: str = None,
+        browser: WebDriver = None,
+        headless: bool = True,
         **kwargs
     ) -> CreateResult:
-        auth = kwargs.get("auth", {
-            "bearer_token":"free",
-            "org_id":"theb",
-        })
-        bearer_token = auth["bearer_token"]
-        org_id = auth["org_id"]
-        headers = {
-            'authority': 'beta.theb.ai',
-            'accept': 'text/event-stream',
-            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
-            'authorization': f'Bearer {bearer_token}',
-            'content-type': 'application/json',
-            'origin': 'https://beta.theb.ai',
-            'referer': 'https://beta.theb.ai/home',
-            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
-            'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
-        }
-        req_rand = random.randint(100000000, 9999999999)
-        json_data: dict[str, Any] = {
-            "text" : format_prompt(messages),
-            "category" : "04f58f64a4aa4191a957b47290fee864",
-            "model" : "ee8d4f29cb7047f78cbe84313ed6ace8",
-            "model_params": {
-                "system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
-                "temperature" : kwargs.get("temperature", 1),
-                "top_p" : kwargs.get("top_p", 1),
-                "frequency_penalty" : kwargs.get("frequency_penalty", 0),
-                "presence_penalty" : kwargs.get("presence_penalty", 0),
-                "long_term_memory" : "auto"
-            }
-        }
-        response = requests.post(
-            f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
-            headers=headers,
-            json=json_data,
-            stream=True,
-            proxies={"https": proxy}
-        )
-        response.raise_for_status()
-        content = ""
-        next_content = ""
-        for chunk in response.iter_lines():
-            if b"content" in chunk:
-                next_content = content
-                data = json.loads(chunk.decode().split("data: ")[1])
-                content = data["content"]
-                yield content.replace(next_content, "")
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("auth", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("presence_penalty", "int"),
-            ("frequency_penalty", "int"),
-            ("top_p", "int")
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+        if model in models:
+            model = models[model]
+        prompt = format_prompt(messages)
+        driver = browser if browser else get_browser(None, headless, proxy)
+
+        from selenium.webdriver.common.by import By
+        from selenium.webdriver.support.ui import WebDriverWait
+        from selenium.webdriver.support import expected_conditions as EC
+        from selenium.webdriver.common.keys import Keys
+
+        try:
+            driver.get(f"{cls.url}/home")
+            wait = WebDriverWait(driver, 10 if headless else 240)
+            wait.until(EC.visibility_of_element_located((By.TAG_NAME, "body")))
+            time.sleep(0.1)
+            try:
+                driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+                driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+            except:
+                pass
+            if model:
+                # Load model panel
+                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#SelectModel svg")))
+                time.sleep(0.1)
+                driver.find_element(By.CSS_SELECTOR, "#SelectModel svg").click()
+                try:
+                    driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+                    driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+                except:
+                    pass
+                # Select model
+                selector = f"div.flex-col div.items-center span[title='{model}']"
+                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
+                span = driver.find_element(By.CSS_SELECTOR, selector)
+                container = span.find_element(By.XPATH, "//div/../..")
+                button = container.find_element(By.CSS_SELECTOR, "button.btn-blue.btn-small.border")
+                button.click()
+
+            # Register fetch hook
+            script = """
+window._fetch = window.fetch;
+window.fetch = (url, options) => {
+    // Call parent fetch method
+    const result = window._fetch(url, options);
+    if (!url.startsWith("/api/conversation")) {
+        return result;
+    }
+    // Load response reader
+    result.then((response) => {
+        if (!response.body.locked) {
+            window._reader = response.body.getReader();
+        }
+    });
+    // Return dummy response
+    return new Promise((resolve, reject) => {
+        resolve(new Response(new ReadableStream()))
+    });
+}
+window._last_message = "";
+"""
+            driver.execute_script(script)
+
+            # Submit prompt
+            wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
+            driver.find_element(By.ID, "textareaAutosize").send_keys(prompt)
+            driver.find_element(By.ID, "textareaAutosize").send_keys(Keys.ENTER)
+
+            # Read response with reader
+            script = """
+if(window._reader) {
+    chunk = await window._reader.read();
+    if (chunk['done']) {
+        return null;
+    }
+    text = (new TextDecoder()).decode(chunk['value']);
+    message = '';
+    text.split('\\r\\n').forEach((line, index) => {
+        if (line.startsWith('data: ')) {
+            try {
+                line = JSON.parse(line.substring('data: '.length));
+                message = line["args"]["content"];
+            } catch(e) { }
+        }
+    });
+    if (message) {
+        try {
+            return message.substring(window._last_message.length);
+        } finally {
+            window._last_message = message;
+        }
+    }
+}
+return '';
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
+        finally:
+            if not browser:
+                driver.close()
+                time.sleep(0.1)
+                driver.quit()
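The second injected script drains the hooked fetch reader: it decodes each chunk, walks the server-sent-event lines, keeps the latest args.content value, and returns only the text that has not been yielded yet. A small Python restatement of that parsing step for clarity, with a made-up sample chunk and assuming the same "data: {...}" payload shape as the script above:

import json

def latest_content(sse_chunk: bytes) -> str:
    # Walk the SSE lines and keep the most recent "args" -> "content" payload.
    message = ""
    for line in sse_chunk.decode().split("\r\n"):
        if line.startswith("data: "):
            try:
                message = json.loads(line[len("data: "):])["args"]["content"]
            except (json.JSONDecodeError, KeyError, TypeError):
                pass
    return message

sample = b'data: {"args": {"content": "Hel"}}\r\ndata: {"args": {"content": "Hello"}}\r\n'
print(latest_content(sample))  # prints "Hello", the latest accumulated text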

ThebApi provider (new file, all lines added):

@@ -0,0 +1,77 @@
from __future__ import annotations

import requests

from ...typing import Any, CreateResult, Messages
from ..base_provider import BaseProvider

models = {
    "theb-ai": "TheB.AI",
    "gpt-3.5-turbo": "GPT-3.5",
    "gpt-3.5-turbo-16k": "GPT-3.5-16K",
    "gpt-4-turbo": "GPT-4 Turbo",
    "gpt-4": "GPT-4",
    "gpt-4-32k": "GPT-4 32K",
    "claude-2": "Claude 2",
    "claude-1": "Claude",
    "claude-1-100k": "Claude 100K",
    "claude-instant-1": "Claude Instant",
    "claude-instant-1-100k": "Claude Instant 100K",
    "palm-2": "PaLM 2",
    "palm-2-codey": "Codey",
    "vicuna-13b-v1.5": "Vicuna v1.5 13B",
    "llama-2-7b-chat": "Llama 2 7B",
    "llama-2-13b-chat": "Llama 2 13B",
    "llama-2-70b-chat": "Llama 2 70B",
    "code-llama-7b": "Code Llama 7B",
    "code-llama-13b": "Code Llama 13B",
    "code-llama-34b": "Code Llama 34B",
    "qwen-7b-chat": "Qwen 7B"
}

class ThebApi(BaseProvider):
    url = "https://theb.ai"
    working = True
    needs_auth = True

    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        auth: str,
        proxy: str = None,
        **kwargs
    ) -> CreateResult:
        if model and model not in models:
            raise ValueError(f"Model is not supported: {model}")
        headers = {
            'accept': 'application/json',
            'authorization': f'Bearer {auth}',
            'content-type': 'application/json',
        }
        # response = requests.get("https://api.baizhi.ai/v1/models", headers=headers).json()["data"]
        # models = dict([(m["id"], m["name"]) for m in response])
        # print(json.dumps(models, indent=4))
        data: dict[str, Any] = {
            "model": model if model else "gpt-3.5-turbo",
            "messages": messages,
            "stream": False,
            "model_params": {
                "system_prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."),
                "temperature": 1,
                "top_p": 1,
                **kwargs
            }
        }
        response = requests.post(
            "https://api.theb.ai/v1/chat/completions",
            headers=headers,
            json=data,
            proxies={"https": proxy}
        )
        try:
            response.raise_for_status()
            yield response.json()["choices"][0]["message"]["content"]
        except:
            raise RuntimeError(f"Response: {next(response.iter_lines()).decode()}")
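A hedged usage sketch for the new API-backed provider, assuming ThebApi is re-exported from g4f.Provider like the other providers in this package and that g4f forwards extra keyword arguments such as auth to it; the bearer token and prompt are placeholders:

import g4f
from g4f.Provider import ThebApi

# "auth" is passed through to the provider as the bearer token.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=ThebApi,
    auth="your-theb-api-key",                         # placeholder token
)
print(response)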

Provider imports:

@ -1,6 +1,7 @@
from .Bard import Bard from .Bard import Bard
from .Raycast import Raycast from .Raycast import Raycast
from .Theb import Theb from .Theb import Theb
from .ThebApi import ThebApi
from .HuggingChat import HuggingChat from .HuggingChat import HuggingChat
from .OpenaiChat import OpenaiChat from .OpenaiChat import OpenaiChat
from .OpenAssistant import OpenAssistant from .OpenAssistant import OpenAssistant