mirror of https://github.com/xtekky/gpt4free.git

Add webview support to OpenaiChat

parent 13f1275ca3
commit 993c9498c4
@@ -303,7 +303,7 @@ def create_message(
         struct['arguments'][0]['previousMessages'] = [{
             "author": "user",
             "description": context,
-            "contextType": "WebPage",
+            "contextType": "ClientApp",
             "messageType": "Context",
             "messageId": "discover-web--page-ping-mriduna-----"
         }]
@@ -404,6 +404,8 @@ async def stream_generate(
                 image_client = BingCreateImages(cookies, proxy)
                 image_response = await image_client.create_async(prompt)
             except Exception as e:
+                if debug.logging:
+                    print(f"Bing: Failed to create images: {e}")
                 response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
             do_read = False
             if response_txt.startswith(returned_text):
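Note on the hunk above: if Bing image generation fails, the error is now traced (when logging is on) and a plain Image Creator link is appended instead. A minimal, self-contained sketch of that try/log/fall-back pattern; the "client" object and its create_async method stand in for BingCreateImages and are hypothetical here:

    # Hedged sketch of the fallback pattern above; "client" is illustrative.
    from urllib import parse

    async def image_or_link(client, prompt: str, logging: bool = True) -> str:
        try:
            return str(await client.create_async(prompt))
        except Exception as e:
            if logging:
                print(f"Bing: Failed to create images: {e}")
            # Fall back to a link the user can open in a browser.
            return f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"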
@@ -14,6 +14,12 @@ try:
 except ImportError:
     has_arkose_generator = False
 
+try:
+    import webview
+    has_webview = True
+except ImportError:
+    has_webview = False
+
 try:
     from selenium.webdriver.common.by import By
     from selenium.webdriver.support.ui import WebDriverWait
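This is the heart of the commit: pywebview becomes an optional dependency, gated by a module-level flag exactly like the existing has_arkose_generator. A self-contained sketch of the pattern (the login_strategies helper is illustrative, not part of the provider):

    # Optional-dependency pattern: probe the import once at module load,
    # then branch on the flag at call sites instead of retrying the import.
    try:
        import webview  # pywebview; optional at install time
        has_webview = True
    except ImportError:
        has_webview = False

    def login_strategies() -> list:
        strategies = ["api_key", "cookies"]
        if has_webview:
            strategies.append("webview")
        return strategies

    print(login_strategies())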
@@ -25,10 +31,10 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import get_cookies
 from ...webdriver import get_browser
 from ...typing import AsyncResult, Messages, Cookies, ImageType, Union, AsyncIterator
-from ...requests import get_args_from_browser
+from ...requests import get_args_from_browser, raise_for_status
 from ...requests.aiohttp import StreamSession
 from ...image import to_image, to_bytes, ImageResponse, ImageRequest
-from ...errors import MissingRequirementsError, MissingAuthError
+from ...errors import MissingRequirementsError, MissingAuthError, ProviderNotWorkingError
 from ... import debug
 
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -134,7 +140,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             }
             # Post the image data to the service and get the image data
             async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
-                response.raise_for_status()
+                cls._update_request_args()
+                await raise_for_status(response)
                 image_data = {
                     **data,
                     **await response.json(),
@@ -152,14 +159,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     "x-ms-blob-type": "BlockBlob"
                 }
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
             # Post the file ID to the service and get the download URL
             async with session.post(
                 f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
                 json={},
                 headers=headers
             ) as response:
-                response.raise_for_status()
+                cls._update_request_args(session)
+                await raise_for_status(response)
                 image_data["download_url"] = (await response.json())["download_url"]
             return ImageRequest(image_data)
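A recurring change in this file: aiohttp's per-response response.raise_for_status() is replaced by the project's own raise_for_status helper (imported from ...requests above), which raises typed errors instead of a bare client exception. A minimal sketch of the idea, assuming an aiohttp-style response with .status, .ok, and .text(); the full helper, including Cloudflare detection, appears in the last file of this diff:

    # Sketch: a central status check that maps HTTP codes to typed errors.
    class RateLimitError(Exception): ...
    class ResponseStatusError(Exception): ...

    async def raise_for_status(response, message: str = None):
        if response.status in (429, 402):
            raise RateLimitError(f"Response {response.status}: Rate limit reached")
        if not response.ok:
            message = await response.text() if message is None else message
            raise ResponseStatusError(f"Response {response.status}: {message}")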
@@ -178,7 +186,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if not cls.default_model:
             async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
                 cls._update_request_args(session)
-                response.raise_for_status()
+                await raise_for_status(response)
                 data = await response.json()
                 if "categories" in data:
                     cls.default_model = data["categories"][-1]["default_model"]
@@ -261,7 +269,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
             try:
                 async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
-                    response.raise_for_status()
+                    cls._update_request_args(session)
+                    await raise_for_status(response)
                     download_url = (await response.json())["download_url"]
                     return ImageResponse(download_url, prompt)
             except Exception as e:
@@ -288,6 +297,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             json={"is_visible": False},
             headers=headers
         ) as response:
+            cls._update_request_args(session)
             ...
 
     @classmethod
@@ -337,31 +347,32 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if parent_id is None:
             parent_id = str(uuid.uuid4())
 
-        # Read api_key from arguments
-        api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
-
         async with StreamSession(
             proxies={"https": proxy},
             impersonate="chrome",
             timeout=timeout
         ) as session:
-            if cls._headers is None:
+            # Read api_key and cookies from cache / browser config
+            api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
+            if cls._headers is None or cls._expires is None or time.time() > cls._expires:
+                if api_key is None:
                     # Read api_key from cookies
                     cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
                     api_key = cookies["access_token"] if "access_token" in cookies else api_key
-                cls._create_request_args(cookies)
+                if api_key is None:
+                    try:
+                        await cls.webview_access_token() if has_webview else None
+                    except Exception as e:
+                        if debug.logging:
+                            print(f"Use webview failed: {e}")
             else:
                 api_key = cls._api_key if api_key is None else api_key
-            # Read api_key with session cookies
-            #if api_key is None and cookies:
-            #    api_key = await cls.fetch_access_token(session, cls._headers)
-            # Load default model
-            if cls.default_model is None and api_key is not None:
+            if api_key is not None:
+                cls._create_request_args(cookies)
+                cls._set_api_key(api_key)
+
+            if cls.default_model is None and cls._headers is not None:
                 try:
                     if not model:
-                        cls._set_api_key(api_key)
                         cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
                     else:
                         cls.default_model = cls.get_model(model)
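The rewritten block above establishes a resolution order for credentials: an explicit access_token kwarg, then browser cookies, then the new interactive webview login, with cached headers reused until cls._expires. A condensed, hedged sketch of that ladder; the class and stub method here are illustrative, not the provider's real API:

    # Hedged sketch of the credential ladder above (names are stubs).
    import asyncio, time

    class AuthCache:
        _api_key = None
        _headers = None
        _expires = None

        async def webview_access_token(self):
            return None  # stand-in for the pywebview login added by this commit

        async def resolve(self, api_key=None, cookies=None, has_webview=False):
            cache_valid = self._headers is not None and self._expires and time.time() < self._expires
            if not cache_valid:
                if api_key is None and cookies:
                    api_key = cookies.get("access_token")        # browser cookies
                if api_key is None and has_webview:
                    api_key = await self.webview_access_token()  # interactive login
            elif api_key is None:
                api_key = self._api_key                          # reuse cached key
            return api_key

    print(asyncio.run(AuthCache().resolve(cookies={"access_token": "tok"})))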
@@ -369,8 +380,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     if debug.logging:
                         print("OpenaiChat: Load default_model failed")
                         print(f"{e.__class__.__name__}: {e}")
-            # Browse api_key and default model
-            if api_key is None or cls.default_model is None:
+            if cls.default_model is None:
                 login_url = os.environ.get("G4F_LOGIN_URL")
                 if login_url:
                     yield f"Please login: [ChatGPT]({login_url})\n\n"
@@ -379,20 +389,21 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 except MissingRequirementsError:
                     raise MissingAuthError(f'Missing "access_token". Add a "api_key" please')
+                cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
-            else:
-                cls._set_api_key(api_key)
 
             async with session.post(
                 f"{cls.url}/backend-api/sentinel/chat-requirements",
                 json={"conversation_mode_kind": "primary_assistant"},
                 headers=cls._headers
             ) as response:
-                response.raise_for_status()
+                cls._update_request_args(session)
+                await raise_for_status(response)
                 data = await response.json()
+                blob = data["arkose"]["dx"]
                 need_arkose = data["arkose"]["required"]
                 chat_token = data["token"]
 
             if need_arkose and not has_arkose_generator:
-                raise MissingRequirementsError('Install "py-arkose-generator" package')
+                raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
 
             try:
@@ -407,6 +418,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             while fields.finish_reason is None:
                 conversation_id = conversation_id if fields.conversation_id is None else fields.conversation_id
                 parent_id = parent_id if fields.message_id is None else fields.message_id
+                websocket_request_id = str(uuid.uuid4())
                 data = {
                     "action": action,
                     "conversation_mode": {"kind": "primary_assistant"},
@@ -416,25 +428,29 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     "parent_message_id": parent_id,
                     "model": model,
                     "history_and_training_disabled": history_disabled and not auto_continue,
+                    "websocket_request_id": websocket_request_id
                 }
                 if action != "continue":
                     messages = messages if conversation_id is None else [messages[-1]]
                     data["messages"] = cls.create_messages(messages, image_request)
+                headers = {
+                    "Accept": "text/event-stream",
+                    "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
+                    **cls._headers
+                }
+                if need_arkose:
+                    raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
+                    headers["OpenAI-Sentinel-Arkose-Token"] = await cls.get_arkose_token(session, cls._headers, blob)
+                    headers["OpenAI-Sentinel-Chat-Requirements-Token"] = chat_token
 
                 async with session.post(
                     f"{cls.url}/backend-api/conversation",
                     json=data,
-                    headers={
-                        "Accept": "text/event-stream",
-                        **({"OpenAI-Sentinel-Arkose-Token": await cls.get_arkose_token(session)} if need_arkose else {}),
-                        "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
-                        **cls._headers
-                    }
+                    headers=headers
                 ) as response:
                     cls._update_request_args(session)
-                    if not response.ok:
-                        raise RuntimeError(f"Response {response.status}: {await response.text()}")
-                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
+                    await raise_for_status(response)
+                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields, websocket_request_id):
                         if response_fields:
                             response_fields = False
                             yield fields
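Two things happen in the hunk above: the request headers are now built up-front in a headers dict (instead of inline in session.post), so optional entries can be attached conditionally, and the commit deliberately hard-fails with ProviderNotWorkingError when Arkose is required, leaving the token-attachment lines unreachable for now. A sketch of the conditional-headers idea; names mirror the diff, the function itself is illustrative:

    # Sketch: build headers up-front so optional entries attach conditionally.
    def build_headers(base: dict, chat_token: str, arkose_token: str = None) -> dict:
        headers = {"Accept": "text/event-stream", **base}
        headers["OpenAI-Sentinel-Chat-Requirements-Token"] = chat_token
        if arkose_token is not None:
            headers["OpenAI-Sentinel-Arkose-Token"] = arkose_token
        return headers

    print(build_headers({"User-Agent": "Mozilla/5.0"}, "chat-token"))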
@@ -447,21 +463,35 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 await cls.delete_conversation(session, cls._headers, fields.conversation_id)
 
     @staticmethod
-    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str) -> AsyncIterator:
+    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str, is_curl: bool) -> AsyncIterator:
         while True:
-            message = await ws.receive_json()
+            if is_curl:
+                message = json.loads(ws.recv()[0])
+            else:
+                message = await ws.receive_json()
             if message["conversation_id"] == conversation_id:
                 yield base64.b64decode(message["body"])
 
     @classmethod
-    async def iter_messages_chunk(cls, messages: AsyncIterator, session: StreamSession, fields: ResponseFields) -> AsyncIterator:
+    async def iter_messages_chunk(
+        cls,
+        messages: AsyncIterator,
+        session: StreamSession,
+        fields: ResponseFields
+    ) -> AsyncIterator:
         last_message: int = 0
         async for message in messages:
             if message.startswith(b'{"wss_url":'):
                 message = json.loads(message)
-                async with session.ws_connect(message["wss_url"]) as ws:
-                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws, message["conversation_id"]), session, fields):
+                ws = await session.ws_connect(message["wss_url"])
+                try:
+                    async for chunk in cls.iter_messages_chunk(
+                        cls.iter_messages_ws(ws, message["conversation_id"], hasattr(ws, "recv")),
+                        session, fields
+                    ):
                         yield chunk
+                finally:
+                    await ws.aclose()
                 break
             async for chunk in cls.iter_messages_line(session, message, fields):
                 if fields.finish_reason is not None:
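iter_messages_ws now serves two websocket client flavors: curl_cffi sockets expose a synchronous recv(), while aiohttp's ClientWebSocketResponse has an async receive_json(). The call site picks the branch with hasattr(ws, "recv"), so no isinstance check on either library is needed. A hedged sketch of that duck-typed reader, mirroring the diff:

    # Sketch of the dual-client reader above; the ws object is whichever
    # socket the session produced, selected by duck typing at the call site.
    import base64, json

    async def iter_ws_bodies(ws, conversation_id: str, is_curl: bool):
        while True:
            if is_curl:
                message = json.loads(ws.recv()[0])    # curl_cffi-style sync recv
            else:
                message = await ws.receive_json()     # aiohttp-style async receive
            if message["conversation_id"] == conversation_id:
                yield base64.b64decode(message["body"])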
@@ -513,6 +543,43 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 if "finish_details" in line["message"]["metadata"]:
                     fields.finish_reason = line["message"]["metadata"]["finish_details"]["type"]
 
+    @classmethod
+    async def webview_access_token(cls) -> str:
+        window = webview.create_window("OpenAI Chat", cls.url)
+        await asyncio.sleep(3)
+        prompt_input = None
+        while not prompt_input:
+            try:
+                await asyncio.sleep(1)
+                prompt_input = window.dom.get_element("#prompt-textarea")
+            except:
+                ...
+        window.evaluate_js("""
+            this._fetch = this.fetch;
+            this.fetch = async (url, options) => {
+                const response = await this._fetch(url, options);
+                if (url == "https://chat.openai.com/backend-api/conversation") {
+                    this._headers = options.headers;
+                    return response;
+                }
+                return response;
+            };
+        """)
+        window.evaluate_js("""
+            document.querySelector('.from-token-main-surface-secondary').click();
+        """)
+        headers = None
+        while headers is None:
+            headers = window.evaluate_js("this._headers")
+            await asyncio.sleep(1)
+        headers["User-Agent"] = window.evaluate_js("window.navigator.userAgent")
+        cookies = [list(*cookie.items()) for cookie in window.get_cookies()]
+        window.destroy()
+        cls._cookies = dict([(name, cookie.value) for name, cookie in cookies])
+        cls._headers = headers
+        cls._expires = int(time.time()) + 60 * 60 * 4
+        cls._update_cookie_header()
+
     @classmethod
     def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> None:
         """
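The new webview_access_token method drives a real browser window via pywebview: it opens chat.openai.com, monkey-patches window.fetch from JavaScript to capture the headers the page sends to /backend-api/conversation (including the bearer token), then copies those headers and cookies into the class cache for four hours. A much-reduced sketch of the pywebview mechanics; create_window, start, and evaluate_js are real pywebview APIs, the surrounding flow is illustrative:

    # Reduced sketch: read a page-side JS value with pywebview.
    import webview

    def capture_user_agent(window):
        # evaluate_js runs in the page context and returns the JS value.
        ua = window.evaluate_js("window.navigator.userAgent")
        print("User-Agent:", ua)
        window.destroy()

    if __name__ == "__main__":
        window = webview.create_window("OpenAI Chat", "https://chat.openai.com")
        webview.start(capture_user_agent, window)  # blocks until windows close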
@@ -542,10 +609,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             cls._update_cookie_header()
             cls._set_api_key(access_token)
         finally:
-                driver.close()
+            driver.close()
 
     @classmethod
-    async def get_arkose_token(cls, session: StreamSession) -> str:
+    async def get_arkose_token(cls, session: StreamSession, headers: dict, blob: str) -> str:
         """
         Obtain an Arkose token for the session.
@@ -559,16 +626,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             RuntimeError: If unable to retrieve the token.
         """
         config = {
-            "pkey": "3D86FBBA-9D22-402A-B512-3420086BA6CC",
+            "pkey": "35536E1E-65B4-4D96-9D97-6ADB7EFF8147",
             "surl": "https://tcr9i.chat.openai.com",
-            "headers": {
-                "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
-            },
+            "headers": headers,
             "site": cls.url,
+            "data": {"blob": blob}
         }
         args_for_request = get_values_for_request(config)
         async with session.post(**args_for_request) as response:
-            response.raise_for_status()
+            await raise_for_status(response)
             decoded_json = await response.json()
             if "token" in decoded_json:
                 return decoded_json["token"]
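get_arkose_token now threads the live session headers and the "dx" blob from the chat-requirements response into the Arkose config, and switches to a different public key. A small sketch of the resulting config shape; the builder function is illustrative, the field values come from the hunk above:

    # Sketch: the Arkose config now carries live headers and the "dx" blob.
    def build_arkose_config(site: str, headers: dict, blob: str) -> dict:
        return {
            "pkey": "35536E1E-65B4-4D96-9D97-6ADB7EFF8147",  # key from the hunk
            "surl": "https://tcr9i.chat.openai.com",
            "headers": headers,      # real session headers, not a hardcoded UA
            "site": site,
            "data": {"blob": blob},  # new: per-conversation challenge blob
        }

    print(build_arkose_config("https://chat.openai.com", {"User-Agent": "UA"}, "blob=="))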
@@ -591,7 +657,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
 
     @classmethod
     def _create_request_args(cls, cookies: Union[Cookies, None]):
-        cls._headers = {}
+        cls._headers = {
+            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
+        }
         cls._cookies = {} if cookies is None else cookies
         cls._update_cookie_header()

@@ -161,10 +161,7 @@
                         <option value="gemini-pro">gemini-pro</option>
                         <option value="">----</option>
                     </select>
-                </div>
-                <div class="field">
-                    <select name="model2" id="model2" class="hidden">
-                    </select>
+                    <select name="model2" id="model2" class="hidden">
+                    </select>
                 </div>
                 <div class="field">
                     <select name="jailbreak" id="jailbreak" style="display: none;">

@@ -16,9 +16,12 @@ const providerSelect = document.getElementById("provider");
 const modelSelect = document.getElementById("model");
+const modelProvider = document.getElementById("model2");
 const systemPrompt = document.getElementById("systemPrompt")
+const jailbreak = document.getElementById("jailbreak");
 
 let prompt_lock = false;
 
+const options = ["switch", "model", "model2", "jailbreak", "patch", "provider", "history"];
+
 hljs.addPlugin(new CopyButtonPlugin());
 
 messageInput.addEventListener("blur", () => {
@@ -207,9 +210,7 @@ const ask_gpt = async () => {
 
     window.scrollTo(0, 0);
     window.controller = new AbortController();
-
-    jailbreak = document.getElementById("jailbreak");
-    window.text = '';
+    window.text = "";
 
     stop_generating.classList.remove(`stop_generating-hidden`);
 
@@ -246,7 +247,7 @@ const ask_gpt = async () => {
                 id: window.token,
                 conversation_id: window.conversation_id,
                 model: get_selected_model(),
-                jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
+                jailbreak: jailbreak?.options[jailbreak.selectedIndex].value,
                 web_search: document.getElementById(`switch`).checked,
                 provider: providerSelect.options[providerSelect.selectedIndex].value,
                 patch_provider: document.getElementById('patch')?.checked,
@@ -670,8 +671,6 @@ sidebar_button.addEventListener("click", (event) => {
     window.scrollTo(0, 0);
 });
 
-const options = ["switch", "model", "model2", "jailbreak", "patch", "provider", "history"];
-
 const register_settings_localstorage = async () => {
     options.forEach((id) => {
         element = document.getElementById(id);

@@ -81,22 +81,25 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120):
         impersonate="chrome"
     )
 
+def is_cloudflare(text: str):
+    return '<div id="cf-please-wait">' in text or "<title>Just a moment...</title>" in text
+
 async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
     if response.status in (429, 402):
         raise RateLimitError(f"Response {response.status}: Rate limit reached")
     message = await response.text() if not response.ok and message is None else message
-    if response.status == 403 and "<title>Just a moment...</title>" in message:
+    if response.status == 403 and is_cloudflare(message):
         raise ResponseStatusError(f"Response {response.status}: Cloudflare detected")
     elif not response.ok:
         raise ResponseStatusError(f"Response {response.status}: {message}")
 
 def raise_for_status(response: Union[StreamResponse, ClientResponse, Response, RequestsResponse], message: str = None):
-    if isinstance(response, StreamSession) or isinstance(response, ClientResponse):
+    if hasattr(response, "status"):
         return raise_for_status_async(response, message)
 
     if response.status_code in (429, 402):
         raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
-    elif response.status_code == 403 and "<title>Just a moment...</title>" in response.text:
+    elif response.status_code == 403 and is_cloudflare(response.text):
         raise ResponseStatusError(f"Response {response.status_code}: Cloudflare detected")
     elif not response.ok:
         raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")
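The Cloudflare check is factored into is_cloudflare, and the sync/async dispatch switches from isinstance to hasattr(response, "status"), so any response object carrying a .status attribute takes the async path while requests-style objects (with .status_code) take the sync one. A small usage sketch with a stub response, assuming raise_for_status from the hunk above is in scope:

    # Usage sketch for the helper above (assumes raise_for_status is in scope).
    class StubResponse:
        status_code = 403          # no .status attribute -> sync branch
        ok = False
        text = "<title>Just a moment...</title>"

    try:
        raise_for_status(StubResponse())
    except Exception as e:
        print(type(e).__name__, "->", e)  # ResponseStatusError -> Cloudflare detected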