Merge pull request #1260 from hlohaus/any

Update Phind and PerplexityAi - GPT-4 Providers
This commit is contained in:
Tekky 2023-11-16 20:48:26 +00:00 committed by GitHub
commit 8393b2bd56
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 517 additions and 475 deletions

View File

@ -0,0 +1,53 @@
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
class ChatAnywhere(AsyncGeneratorProvider):
    """Async provider for chatanywhere.cn.

    Posts the whole message history to /v1/chat/gpt/ and yields the
    streamed response body decoded as text chunks.
    """
    url = "https://chatanywhere.cn"
    supports_gpt_35_turbo = True
    supports_message_history = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        temperature: float = 0.5,
        **kwargs
    ) -> AsyncResult:
        """Yield response text chunks for *messages*.

        :param model: not referenced by this provider
        :param messages: full conversation history, sent as-is in "list"
        :param proxy: optional proxy url for the POST request
        :param temperature: sampling temperature forwarded to the endpoint
        """
        # Browser-like headers; the endpoint accepts an empty Authorization.
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Authorization": "",
            "Connection": "keep-alive",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "list": messages,
                # Fixed conversation/model ids copied from the web client —
                # presumably arbitrary; TODO confirm against the site.
                "id": "s1_qYuOLXjI3rEpc7WHfQ",
                "title": messages[-1]["content"],
                "prompt": "",
                "temperature": temperature,
                "models": "61490748",
                "continuous": True
            }
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Stream raw bytes as they arrive; skip empty keep-alive chunks.
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()

View File

@ -78,7 +78,7 @@ class Liaobots(AsyncGeneratorProvider):
"model": models[model],
"messages": messages,
"key": "",
"prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully."),
}
async with session.post(
"https://liaobots.work/api/chat",

View File

@ -1,91 +1,95 @@
from __future__ import annotations
import time, random, json
import time, json
from ..requests import StreamSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
from .helper import WebDriver, format_prompt, get_browser
class MyShell(BaseProvider):
    """Provider for app.myshell.ai, driven through a real browser (selenium).

    The request is issued by injecting a ``fetch`` call into the page; the
    response is then pulled chunk-by-chunk from the page's stream reader.
    Reconstructed from an interleaved diff; behavior follows the NEW version
    of this commit.
    """
    url = "https://app.myshell.ai/chat"
    working = True
    supports_gpt_35_turbo = True
    supports_stream = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        timeout: int = 120,
        browser: WebDriver = None,
        hidden_display: bool = True,
        **kwargs
    ) -> CreateResult:
        """Yield the bot's reply incrementally.

        :param proxy: optional proxy url passed to the launched browser
        :param timeout: seconds to wait for the page to load
        :param browser: reuse an existing WebDriver instead of launching one
        :param hidden_display: run the browser on a hidden virtual display
        """
        if browser:
            driver = browser
        else:
            if hidden_display:
                driver, display = get_browser("", True, proxy)
            else:
                driver = get_browser("", False, proxy)
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as EC
        driver.get(cls.url)
        try:
            # Wait for page load
            WebDriverWait(driver, timeout).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "body:not(.no-js)"))
            )
            # Send the message from inside the page and keep the stream reader.
            script = """
response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
    "headers": {
        "accept": "application/json",
        "content-type": "application/json",
        "myshell-service-name": "organics-api",
        "visitor-id": localStorage.getItem("mix_visitorId")
    },
    "body": '{body}',
    "method": "POST"
})
window.reader = response.body.getReader();
"""
            data = {
                "botId": "4738",
                "conversation_scenario": 3,
                "message": format_prompt(messages),
                "messageType": 1
            }
            driver.execute_script(script.replace("{body}", json.dumps(data)))
            # Read one decoded SSE chunk per call; returns null when done,
            # '' when nothing new arrived yet.
            script = """
chunk = await window.reader.read();
if (chunk['done']) return null;
text = (new TextDecoder ()).decode(chunk['value']);
content = '';
text.split('\\n').forEach((line, index) => {
    if (line.startsWith('data: ')) {
        try {
            const data = JSON.parse(line.substring('data: '.length));
            if ('content' in data) {
                content += data['content'];
            }
        } catch(e) {}
    }
});
return content;
"""
            while True:
                # BUG FIX: original line ended with a stray ":" after the call
                # (syntax error).
                chunk = driver.execute_script(script)
                if chunk:
                    yield chunk
                elif chunk != "":
                    # null => stream finished
                    break
                else:
                    # '' => stream idle; avoid a hot busy-wait (matches the
                    # Phind/PerplexityAi providers from the same commit).
                    time.sleep(0.1)
        finally:
            driver.close()
            if not browser:
                time.sleep(0.1)
                driver.quit()
                if hidden_display:
                    display.stop()

View File

@ -0,0 +1,120 @@
from __future__ import annotations
import time
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
from .helper import WebDriver, format_prompt, get_browser
class PerplexityAi(BaseProvider):
    """Provider for www.perplexity.ai, driven through a real browser (selenium).

    Hooks ``WebSocket.prototype.send`` inside the page to capture answer
    frames, then polls the accumulated message text and yields the deltas.
    """
    url = "https://www.perplexity.ai"
    working = True
    supports_gpt_35_turbo = True
    supports_stream = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        timeout: int = 120,
        browser: WebDriver = None,
        copilot: bool = False,
        hidden_display: bool = True,
        **kwargs
    ) -> CreateResult:
        """Yield the answer text incrementally.

        :param proxy: optional proxy url passed to the launched browser
        :param timeout: seconds to wait for page elements
        :param browser: reuse an existing WebDriver instead of launching one
        :param copilot: enable copilot mode (requires a logged-in account)
        :param hidden_display: run the browser on a hidden virtual display
        :raises RuntimeError: if copilot is requested without a logged-in account
        """
        if browser:
            driver = browser
        else:
            if hidden_display:
                driver, display = get_browser("", True, proxy)
            else:
                driver = get_browser("", False, proxy)
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as EC
        prompt = format_prompt(messages)
        driver.get(f"{cls.url}/")
        wait = WebDriverWait(driver, timeout)
        # Page loaded?
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']")))
        # Add WebSocket hook.
        # BUG FIX: the guard previously tested window.socket_onmessage, which
        # is never assigned (only window._socket_onmessage is), so a fresh
        # listener was attached on every send. Test the flag actually set.
        script = """
window._message = window._last_message = "";
window._message_finished = false;
const _socket_send = WebSocket.prototype.send;
WebSocket.prototype.send = function(...args) {
    if (!window._socket_onmessage) {
        window._socket_onmessage = this;
        this.addEventListener("message", (event) => {
            if (event.data.startsWith("42")) {
                let data = JSON.parse(event.data.substring(2));
                if (data[0] =="query_progress" || data[0] == "query_answered") {
                    let content = JSON.parse(data[1]["text"]);
                    if (data[1]["mode"] == "copilot") {
                        content = content[content.length-1]["content"]["answer"];
                        content = JSON.parse(content);
                    }
                    window._message = content["answer"];
                    window._message_finished = data[0] == "query_answered";
                    window._web_results = content["web_results"];
                }
            }
        });
    }
    return _socket_send.call(this, ...args);
};
"""
        driver.execute_script(script)
        if copilot:
            try:
                # Check account
                driver.find_element(By.CSS_SELECTOR, "img[alt='User avatar']")
                # Enable copilot
                driver.find_element(By.CSS_SELECTOR, "button[data-testid='copilot-toggle']").click()
            except Exception as e:
                # Narrowed from a bare except; fixed the error message grammar.
                raise RuntimeError("For copilot you need an account") from e
        # Enter question
        driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(prompt)
        # Submit question
        driver.find_element(By.CSS_SELECTOR, "button.bg-super svg[data-icon='arrow-right']").click()
        try:
            # Yield response deltas: '' => nothing new, null => finished.
            script = """
if(window._message && window._message != window._last_message) {
    try {
        return window._message.substring(window._last_message.length);
    } finally {
        window._last_message = window._message;
    }
} else if(window._message_finished) {
    return null;
} else {
    return '';
}
"""
            while True:
                chunk = driver.execute_script(script)
                if chunk:
                    yield chunk
                elif chunk != "":
                    break
                else:
                    time.sleep(0.1)
        finally:
            driver.close()
            if not browser:
                time.sleep(0.1)
                driver.quit()
                if hidden_display:
                    display.stop()

View File

@ -1,83 +1,119 @@
from __future__ import annotations
import random, string
from datetime import datetime
import time
from urllib.parse import quote
from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
from .helper import WebDriver, format_prompt, get_browser
class Phind(BaseProvider):
    """Provider for www.phind.com, driven through a real browser (selenium).

    Submits the prompt via the site's search URL, optionally switches the
    answer model to GPT-4 / creative mode through the settings dropdown, and
    streams the answer by hooking the page's ``fetch``.
    Reconstructed from an interleaved diff; behavior follows the NEW version
    of this commit.
    """
    url = "https://www.phind.com"
    working = True
    supports_gpt_4 = True
    supports_stream = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        timeout: int = 120,
        browser: WebDriver = None,
        creative_mode: bool = None,
        hidden_display: bool = True,
        **kwargs
    ) -> CreateResult:
        """Yield the answer text incrementally.

        :param model: models starting with "gpt-4" enable the GPT-4 toggle
        :param creative_mode: enable creative mode; None (default) also enables it
        :param browser: reuse an existing WebDriver instead of launching one
        :param hidden_display: run the browser on a hidden virtual display
        """
        if browser:
            driver = browser
        else:
            if hidden_display:
                driver, display = get_browser("", True, proxy)
            else:
                driver = get_browser("", False, proxy)
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as EC
        prompt = quote(format_prompt(messages))
        driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
        # Need to change settings
        if model.startswith("gpt-4") or creative_mode:
            wait = WebDriverWait(driver, timeout)
            # Open settings dropdown
            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
            driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
            # Wait for dropdown toggle
            wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
            # Enable GPT-4
            if model.startswith("gpt-4"):
                driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
            # Enable creative mode (identity comparison instead of == None)
            if creative_mode or creative_mode is None:
                driver.find_element(By.ID, "Creative Mode").click()
            # Submit changes
            driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
            # Wait for page reload
            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".search-container")))
        try:
            # Add fetch hook so we can read the /api/infer/answer stream.
            script = """
window._fetch = window.fetch;
window.fetch = (url, options) => {
    // Call parent fetch method
    const result = window._fetch(url, options);
    if (url != "/api/infer/answer") return result;
    // Load response reader
    result.then((response) => {
        if (!response.body.locked) {
            window.reader = response.body.getReader();
        }
    });
    // Return dummy response
    return new Promise((resolve, reject) => {
        resolve(new Response(new ReadableStream()))
    });
}
"""
            driver.execute_script(script)
            # Read one decoded chunk per call: null => done, '' => not ready.
            script = """
if(window.reader) {
    chunk = await window.reader.read();
    if (chunk['done']) return null;
    text = (new TextDecoder()).decode(chunk['value']);
    content = '';
    text.split('\\r\\n').forEach((line, index) => {
        if (line.startsWith('data: ')) {
            line = line.substring('data: '.length);
            if (!line.startsWith('<PHIND_METADATA>')) {
                if (line) content += line;
                else content += '\\n';
            }
        }
    });
    return content.replace('\\n\\n', '\\n');
} else {
    return ''
}
"""
            while True:
                chunk = driver.execute_script(script)
                if chunk:
                    yield chunk
                elif chunk != "":
                    break
                else:
                    time.sleep(0.1)
        finally:
            driver.close()
            if not browser:
                time.sleep(0.1)
                driver.quit()
                if hidden_display:
                    display.stop()

View File

@ -1,10 +1,12 @@
from __future__ import annotations
from __future__ import annotations
from .AiAsk import AiAsk
from .Aichat import Aichat
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
from .Berlin import Berlin
from .Bing import Bing
from .ChatAnywhere import ChatAnywhere
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
from .Chatgpt4Online import Chatgpt4Online
@ -28,6 +30,7 @@ from .Llama2 import Llama2
from .MyShell import MyShell
from .NoowAi import NoowAi
from .Opchatgpts import Opchatgpts
from .PerplexityAi import PerplexityAi
from .Phind import Phind
from .Vercel import Vercel
from .Ylokh import Ylokh
@ -41,150 +44,23 @@ from .deprecated import *
from .needs_auth import *
from .unfinished import *
import sys

# Registry is derived by introspection instead of the old hand-maintained
# dict/list, which had drifted (duplicate 'Aivvm' entries, parallel
# 'Myshell'/'MyShell' keys).
# Every name imported into this module so far:
__modules__: list = [
    getattr(sys.modules[__name__], provider) for provider in dir()
    if not provider.startswith("__")
]
# Keep only classes that are actual providers.
__providers__: list[type[BaseProvider]] = [
    provider for provider in __modules__
    if isinstance(provider, type)
    and issubclass(provider, BaseProvider)
]
# Public API: provider class names.
__all__: list[str] = [
    provider.__name__ for provider in __providers__
]
# Name -> class lookup table.
__map__: dict[str, BaseProvider] = {
    provider.__name__: provider for provider in __providers__
}

class ProviderUtils:
    # Kept for backward compatibility with callers of ProviderUtils.convert.
    convert: dict[str, BaseProvider] = __map__

View File

@ -3,13 +3,44 @@ from __future__ import annotations
import sys
import asyncio
import webbrowser
from os import path
from asyncio import AbstractEventLoop
from platformdirs import user_config_dir
from browser_cookie3 import (
chrome,
chromium,
opera,
opera_gx,
brave,
edge,
vivaldi,
firefox,
BrowserCookieError
)
try:
from selenium.webdriver.remote.webdriver import WebDriver
except ImportError:
class WebDriver():
pass
try:
from undetected_chromedriver import Chrome, ChromeOptions
except ImportError:
class Chrome():
def __init__():
raise RuntimeError('Please install the "undetected_chromedriver" package')
class ChromeOptions():
def add_argument():
pass
try:
from pyvirtualdisplay import Display
except ImportError:
class Display():
def start():
pass
def stop():
pass
from ..typing import Dict, Messages
from browser_cookie3 import chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox, BrowserCookieError
from ..typing import Dict, Messages, Union, Tuple
from .. import debug
# Change event loop policy on windows
@ -106,10 +137,26 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str:
return f"{formatted}\nAssistant:"
def get_browser(
    user_data_dir: str = None,
    hidden_display: bool = False,
    proxy: str = None,
    options: ChromeOptions = None
) -> Union[Chrome, Tuple[Chrome, Display]]:
    """Launch an undetected Chrome browser.

    :param user_data_dir: Chrome profile directory; defaults to the "g4f"
        user config dir
    :param hidden_display: start a hidden virtual display first; the display
        is returned alongside the browser so the caller can stop it
    :param proxy: optional proxy url, applied via --proxy-server
    :param options: pre-built ChromeOptions to extend
    :return: the browser, or ``(browser, display)`` when hidden_display is set
    """
    if user_data_dir is None:  # identity comparison instead of == None
        user_data_dir = user_config_dir("g4f")
    if hidden_display:
        display = Display(visible=0, size=(1920, 1080))
        display.start()
    if proxy:
        if not options:
            options = ChromeOptions()
        options.add_argument(f'--proxy-server={proxy}')
    browser = Chrome(user_data_dir=user_data_dir, options=options)
    if hidden_display:
        return browser, display
    return browser

View File

@ -1,90 +1,91 @@
from __future__ import annotations
import json
import random
import re
import time
from aiohttp import ClientSession
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
from ..helper import WebDriver, format_prompt, get_browser
from ...typing import Messages
from ..base_provider import AsyncProvider
from ..helper import format_prompt, get_cookies
class Bard(BaseProvider):
    """Provider for bard.google.com, driven through a logged-in browser.

    Requires an authenticated Google session; hooks XMLHttpRequest inside the
    page to capture the generated answer. Reconstructed from an interleaved
    diff; behavior follows the NEW version of this commit.
    """
    url = "https://bard.google.com"
    working = True
    needs_auth = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        browser: WebDriver = None,
        hidden_display: bool = True,
        **kwargs
    ) -> CreateResult:
        """Yield the complete answer once it is available.

        :param browser: reuse an existing WebDriver instead of launching one
        :param hidden_display: run the browser on a hidden virtual display
        :raises RuntimeError: when the prompt textarea is missing and the
            browser cannot be reopened for a login
        """
        prompt = format_prompt(messages)
        if browser:
            driver = browser
        else:
            if hidden_display:
                driver, display = get_browser(None, True, proxy)
            else:
                driver = get_browser(None, False, proxy)
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import WebDriverWait
        from selenium.webdriver.support import expected_conditions as EC
        try:
            driver.get(f"{cls.url}/chat")
            wait = WebDriverWait(driver, 10)
            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
        except Exception:  # narrowed from a bare except
            # Reopen browser for login
            if not browser:
                driver.quit()
                # New browser should be visible
                if hidden_display:
                    display.stop()
                driver = get_browser(None, False, proxy)
                driver.get(f"{cls.url}/chat")
                wait = WebDriverWait(driver, 240)
                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
            else:
                raise RuntimeError("Prompt textarea not found. You may not be logged in.")
        try:
            # Add hook in XMLHttpRequest to capture the streamed answer.
            script = """
const _http_request_open = XMLHttpRequest.prototype.open;
window._message = "";
XMLHttpRequest.prototype.open = function(method, url) {
    if (url.includes("/assistant.lamda.BardFrontendService/StreamGenerate")) {
        this.addEventListener("load", (event) => {
            window._message = JSON.parse(JSON.parse(this.responseText.split("\\n")[3])[0][2])[4][0][1][0];
        });
    }
    return _http_request_open.call(this, method, url);
}
"""
            driver.execute_script(script)
            # Input and submit prompt
            driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
            driver.find_element(By.CSS_SELECTOR, "button.send-button").click()
            # Poll until the hook has captured the answer, then yield it whole.
            script = "return window._message;"
            while True:
                chunk = driver.execute_script(script)
                if chunk:
                    yield chunk
                    return
                time.sleep(0.1)
        finally:
            driver.close()
            if not browser:
                time.sleep(0.1)
                driver.quit()
                if hidden_display:
                    display.stop()

View File

@ -1,100 +0,0 @@
from __future__ import annotations
import json
import time
import base64
from curl_cffi.requests import AsyncSession
from ..base_provider import AsyncProvider, format_prompt, get_cookies
class PerplexityAi(AsyncProvider):
    """Deprecated provider for www.perplexity.ai using its socket.io polling API."""
    url = "https://www.perplexity.ai"
    supports_gpt_35_turbo = True
    # Web sources captured from the last answer; see get_sources().
    _sources = []

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:
        """Return the full answer text for *messages*.

        :param model: not referenced by this provider
        :param proxy: optional https proxy for all requests
        """
        url = f"{cls.url}/socket.io/?EIO=4&transport=polling"
        headers = {
            "Referer": f"{cls.url}/"
        }
        async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
            # Establish an anonymous auth session.
            # BUG FIX: the original issued this identical GET twice in a row;
            # once is sufficient.
            url_session = "https://www.perplexity.ai/api/auth/session"
            response = await session.get(url_session)
            response.raise_for_status()
            # socket.io handshake: obtain the session id (sid) ...
            response = await session.get(url, params={"t": timestamp()})
            response.raise_for_status()
            sid = json.loads(response.text[1:])["sid"]
            response = await session.get(url, params={"t": timestamp(), "sid": sid})
            response.raise_for_status()
            # ... authenticate as anonymous user ...
            data = '40{"jwt":"anonymous-ask-user"}'
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()
            response = await session.get(url, params={"t": timestamp(), "sid": sid})
            response.raise_for_status()
            # ... and send the question.
            data = "424" + json.dumps([
                "perplexity_ask",
                format_prompt(messages),
                {
                    "version":"2.1",
                    "source":"default",
                    "language":"en",
                    "timezone": time.tzname[0],
                    "search_focus":"internet",
                    "mode":"concise"
                }
            ])
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()
            # Poll until the answer frame ("434...") arrives.
            while True:
                response = await session.get(url, params={"t": timestamp(), "sid": sid})
                response.raise_for_status()
                for line in response.text.splitlines():
                    if line.startswith("434"):
                        result = json.loads(json.loads(line[3:])[0]["text"])
                        cls._sources = [{
                            "title": source["name"],
                            "url": source["url"],
                            "snippet": source["snippet"]
                        } for source in result["web_results"]]
                        return result["answer"]

    @classmethod
    def get_sources(cls):
        """Return the web sources captured from the last answer."""
        return cls._sources

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported parameters."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
def timestamp() -> str:
    """Encode the seconds elapsed since the 2014-08-11 epoch offset as
    URL-safe base64 (always 8 characters for a 4-byte payload)."""
    seconds = int(time.time() - 1407782612)
    payload = seconds.to_bytes(4, 'big')
    return base64.urlsafe_b64encode(payload).decode()

View File

@ -1,5 +1,4 @@
from .MikuChat import MikuChat
from .PerplexityAi import PerplexityAi
from .Komo import Komo
from .TalkAi import TalkAi
from .ChatAiGpt import ChatAiGpt

View File

@ -3,6 +3,8 @@ from dataclasses import dataclass
from .typing import Union
from .Provider import BaseProvider, RetryProvider
from .Provider import (
Chatgpt4Online,
ChatAnywhere,
GptForLove,
ChatgptAi,
DeepInfra,
@ -11,14 +13,12 @@ from .Provider import (
GeekGpt,
FakeGpt,
FreeGpt,
NoowAi,
Berlin,
Llama2,
Vercel,
GPTalk,
Phind,
Koala,
GptGo,
Phind,
Bard,
Bing,
You,
@ -39,20 +39,24 @@ default = Model(
name = "",
base_provider = "",
best_provider = RetryProvider([
Bing, # Not fully GPT 3 or 4
Bing,
ChatgptAi, GptGo, GeekGpt,
Phind, You
You,
Chatgpt4Online,
ChatAnywhere,
])
)
# GPT-3.5 too, but all providers supports long responses and a custom timeouts
# GPT-3.5 too, but all providers supports long requests and responses
gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
FreeGpt, You,
GeekGpt, FakeGpt,
Berlin, Koala
Berlin, Koala,
Chatgpt4Online,
ChatAnywhere,
])
)
@ -62,7 +66,9 @@ gpt_35_turbo = Model(
base_provider = 'openai',
best_provider=RetryProvider([
ChatgptX, GptGo, You,
NoowAi, GPTalk, GptForLove, Phind, ChatBase
GptForLove, ChatBase,
Chatgpt4Online,
ChatAnywhere,
])
)
@ -70,7 +76,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
Bing, GeekGpt, Phind
Bing, Phind
])
)