Merge pull request #1691 from hlohaus/retry

Add model preselection in gui

@@ -12,7 +12,7 @@ from aiohttp import ClientSession, ClientTimeout, BaseConnector, WSMsgType
from ..typing import AsyncResult, Messages, ImageType, Cookies
from ..image import ImageRequest
from ..errors import ResponseStatusError
from .base_provider import AsyncGeneratorProvider
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector, get_random_hex
from .bing.upload_image import upload_image
from .bing.conversation import Conversation, create_conversation, delete_conversation
@@ -26,8 +26,9 @@ class Tones:
    creative = "Creative"
    balanced = "Balanced"
    precise = "Precise"
    copilot = "Balanced"

class Bing(AsyncGeneratorProvider):
class Bing(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Bing provider for generating responses using the Bing API.
    """
@@ -35,18 +36,22 @@ class Bing(AsyncGeneratorProvider):
    working = True
    supports_message_history = True
    supports_gpt_4 = True
    default_model = "balanced"
    models = [key for key in Tones.__dict__ if not key.startswith("__")]

    @staticmethod
    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 900,
        cookies: Cookies = None,
        connector: BaseConnector = None,
        tone: str = Tones.balanced,
        tone: str = None,
        image: ImageType = None,
        web_search: bool = False,
        context: str = None,
        **kwargs
    ) -> AsyncResult:
        """

@@ -62,13 +67,12 @@ class Bing(AsyncGeneratorProvider):
        :param web_search: Flag to enable or disable web search.
        :return: An asynchronous result object.
        """
        if len(messages) < 2:
            prompt = messages[0]["content"]
            context = None
        else:
            prompt = messages[-1]["content"]
            context = create_context(messages[:-1])

        prompt = messages[-1]["content"]
        if context is None:
            context = create_context(messages[:-1]) if len(messages) > 1 else None
        if tone is None:
            tone = tone if model.startswith("gpt-4") else model
        tone = cls.get_model("" if tone is None else tone.lower())
        gpt4_turbo = True if model.startswith("gpt-4-turbo") else False

        return stream_generate(
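With `ProviderModelMixin` in place, the Bing tone can now be preselected through the regular `model` argument — this is the "model preselection" the PR title refers to. A minimal usage sketch, assuming the top-level `g4f` API (not part of this diff):

```python
import g4f

# "creative", "balanced", "precise" and "copilot" select a tone directly;
# "gpt-4*" model names fall through to the tone/default_model resolution above.
response = g4f.ChatCompletion.create(
    model="creative",
    provider=g4f.Provider.Bing,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```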
@@ -86,7 +90,9 @@ def create_context(messages: Messages) -> str:
    :return: A string representing the context created from the messages.
    """
    return "".join(
        f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}"
        f"[{message['role']}]" + ("(#message)"
        if message['role'] != "system"
        else "(#additional_instructions)") + f"\n{message['content']}"
        for message in messages
    ) + "\n\n"

@@ -122,7 +128,7 @@ class Defaults:
        "ActionRequest","Chat",
        "ConfirmationCard", "Context",
        "InternalSearchQuery", #"InternalSearchResult",
        "Disengaged", #"InternalLoaderMessage",
        #"Disengaged", "InternalLoaderMessage",
        "Progress", "RenderCardRequest",
        "RenderContentRequest", "AdsQuery",
        "SemanticSerp", "GenerateContentQuery",
@@ -131,53 +137,93 @@ class Defaults:
    ]

    sliceIds = {
        "Balanced": [
        "balanced": [
            "supllmnfe","archnewtf",
            "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl",
            "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t",
            "bingfc", "0225unsticky1", "0228scss0",
            "defquerycf", "defcontrol", "3022tphpv"
        ],
        "Creative": [
        "creative": [
            "bgstream", "fltltst2c",
            "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
            "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
            "bingfccf", "0225unsticky1", "0228scss0",
            "3022tpvs0"
        ],
        "Precise": [
        "precise": [
            "bgstream", "fltltst2c",
            "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
            "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
            "bingfccf", "0225unsticky1", "0228scss0",
            "defquerycf", "3022tpvs0"
        ],
        "copilot": []
    }

    optionsSets = {
        "Balanced": [
            "nlu_direct_response_filter", "deepleo",
            "disable_emoji_spoken_text", "responsible_ai_policy_235",
            "enablemm", "dv3sugg", "autosave",
            "iyxapbing", "iycapbing",
            "galileo", "saharagenconv5", "gldcl1p",
            "gpt4tmncnp"
        ],
        "Creative": [
        "balanced": {
            "default": [
                "nlu_direct_response_filter", "deepleo",
                "disable_emoji_spoken_text", "responsible_ai_policy_235",
                "enablemm", "dv3sugg", "autosave",
                "iyxapbing", "iycapbing",
                "galileo", "saharagenconv5", "gldcl1p",
                "gpt4tmncnp"
            ],
            "nosearch": [
                "nlu_direct_response_filter", "deepleo",
                "disable_emoji_spoken_text", "responsible_ai_policy_235",
                "enablemm", "dv3sugg", "autosave",
                "iyxapbing", "iycapbing",
                "galileo", "sunoupsell", "base64filter", "uprv4p1upd",
                "hourthrot", "noctprf", "gndlogcf", "nosearchall"
            ]
        },
        "creative": {
            "default": [
                "nlu_direct_response_filter", "deepleo",
                "disable_emoji_spoken_text", "responsible_ai_policy_235",
                "enablemm", "dv3sugg",
                "iyxapbing", "iycapbing",
                "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
                "gpt4tmncnp"
            ],
            "nosearch": [
                "nlu_direct_response_filter", "deepleo",
                "disable_emoji_spoken_text", "responsible_ai_policy_235",
                "enablemm", "dv3sugg", "autosave",
                "iyxapbing", "iycapbing",
                "h3imaginative", "sunoupsell", "base64filter", "uprv4p1upd",
                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
                "clgalileo", "nocache", "up4rp14bstcst"
            ]
        },
        "precise": {
            "default": [
                "nlu_direct_response_filter", "deepleo",
                "disable_emoji_spoken_text", "responsible_ai_policy_235",
                "enablemm", "dv3sugg",
                "iyxapbing", "iycapbing",
                "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
                "clgalileo", "gencontentv3"
            ],
            "nosearch": [
                "nlu_direct_response_filter", "deepleo",
                "disable_emoji_spoken_text", "responsible_ai_policy_235",
                "enablemm", "dv3sugg", "autosave",
                "iyxapbing", "iycapbing",
                "h3precise", "sunoupsell", "base64filter", "uprv4p1upd",
                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
                "clgalileo", "nocache", "up4rp14bstcst"
            ]
        },
        "copilot": [
            "nlu_direct_response_filter", "deepleo",
            "disable_emoji_spoken_text", "responsible_ai_policy_235",
            "enablemm", "dv3sugg",
            "iyxapbing", "iycapbing",
            "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
            "gpt4tmncnp"
        ],
        "Precise": [
            "nlu_direct_response_filter", "deepleo",
            "disable_emoji_spoken_text", "responsible_ai_policy_235",
            "enablemm", "dv3sugg",
            "iyxapbing", "iycapbing",
            "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
            "clgalileo", "gencontentv3"
            "h3precise", "clgalileo", "gencontentv3", "prjupy"
        ],
    }
@@ -232,7 +278,8 @@ def create_message(
    context: str = None,
    image_request: ImageRequest = None,
    web_search: bool = False,
    gpt4_turbo: bool = False
    gpt4_turbo: bool = False,
    new_conversation: bool = True
) -> str:
    """
    Creates a message for the Bing API with specified parameters.

@@ -247,7 +294,12 @@ def create_message(
    :return: A formatted string message for the Bing API.
    """

    options_sets = []
    options_sets = Defaults.optionsSets[tone]
    if not web_search and "nosearch" in options_sets:
        options_sets = options_sets["nosearch"]
    elif "default" in options_sets:
        options_sets = options_sets["default"]
    options_sets = options_sets.copy()
    if gpt4_turbo:
        options_sets.append("dlgpt4t")
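Note the dual shape handled here: `Defaults.optionsSets[tone]` is a flat list for `"copilot"` but a `{"default": [...], "nosearch": [...]}` mapping for the other tones. A toy illustration of the selection logic (not the provider's code):

```python
def select_options(entry, web_search: bool) -> list:
    # entry: flat list, or {"default": [...], "nosearch": [...]} mapping
    if not web_search and "nosearch" in entry:
        entry = entry["nosearch"]
    elif "default" in entry:
        entry = entry["default"]
    return list(entry)  # copy, so per-request flags can be appended safely

assert select_options(["a"], web_search=True) == ["a"]
assert select_options({"default": ["a"], "nosearch": ["b"]}, web_search=False) == ["b"]
```

For a flat list, the `in` checks are membership tests that simply fail, so both shapes fall through to the copy.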
@@ -255,16 +307,16 @@ def create_message(
    struct = {
        "arguments":[{
            "source": "cib",
            "optionsSets": [*Defaults.optionsSets[tone], *options_sets],
            "optionsSets": options_sets,
            "allowedMessageTypes": Defaults.allowedMessageTypes,
            "sliceIds": Defaults.sliceIds[tone],
            "verbosity": "verbose",
            "scenario": "SERP",
            "scenario": "CopilotMicrosoftCom", # "SERP",
            "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
            "traceId": get_random_hex(40),
            "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
            "gptId": "copilot",
            "isStartOfSession": True,
            "isStartOfSession": new_conversation,
            "requestId": request_id,
            "message":{
                **Defaults.location,

@@ -277,8 +329,7 @@ def create_message(
                "requestId": request_id,
                "messageId": request_id
            },
            "tone": tone,
            "extraExtensionParameters": {"gpt-creator-persona": {"personaId": "copilot"}},
            "tone": getattr(Tones, tone),
            "spokenTextMode": "None",
            "conversationId": conversation.conversationId,
            "participant": {"id": conversation.clientId}

@@ -298,7 +349,7 @@ def create_message(
        struct['arguments'][0]['previousMessages'] = [{
            "author": "user",
            "description": context,
            "contextType": "WebPage",
            "contextType": "ClientApp",
            "messageType": "Context",
            "messageId": "discover-web--page-ping-mriduna-----"
        }]

@@ -317,8 +368,9 @@ async def stream_generate(
    gpt4_turbo: bool = False,
    timeout: int = 900,
    conversation: Conversation = None,
    return_conversation: bool = False,
    raise_apology: bool = False,
    max_retries: int = 5,
    max_retries: int = None,
    sleep_retry: int = 15,
    **kwargs
):

@@ -336,13 +388,20 @@ async def stream_generate(
    :return: An asynchronous generator yielding responses.
    """
    headers = create_headers(cookies)
    new_conversation = conversation is None
    max_retries = (5 if new_conversation else 0) if max_retries is None else max_retries
    async with ClientSession(
        timeout=ClientTimeout(total=timeout), connector=connector
    ) as session:
        while conversation is None:
        first = True
        while first or conversation is None:
            first = False
            do_read = True
            try:
                conversation = await create_conversation(session, headers)
                if conversation is None:
                    conversation = await create_conversation(session, headers, tone)
                    if return_conversation:
                        yield conversation
            except ResponseStatusError as e:
                max_retries -= 1
                if max_retries < 1:

@@ -353,8 +412,10 @@ async def stream_generate(
                await asyncio.sleep(sleep_retry)
                continue

            image_request = await upload_image(session, image, tone, headers) if image else None
            image_request = await upload_image(session, image, getattr(Tones, tone), headers) if image else None
            async with session.ws_connect(
                'wss://s.copilot.microsoft.com/sydney/ChatHub'
                if tone == "copilot" else
                'wss://sydney.bing.com/sydney/ChatHub',
                autoping=False,
                params={'sec_access_token': conversation.conversationSignature},

@@ -363,7 +424,12 @@ async def stream_generate(
                await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                await wss.send_str(format_message({"type": 6}))
                await wss.receive(timeout=timeout)
                await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
                await wss.send_str(create_message(
                    conversation, prompt, tone,
                    context if new_conversation else None,
                    image_request, web_search, gpt4_turbo,
                    new_conversation
                ))
                response_txt = ''
                returned_text = ''
                message_id = None

@@ -399,14 +465,15 @@ async def stream_generate(
                image_client = BingCreateImages(cookies, proxy)
                image_response = await image_client.create_async(prompt)
            except Exception as e:
                response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
                do_read = False
                if debug.logging:
                    print(f"Bing: Failed to create images: {e}")
                image_response = f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
            if response_txt.startswith(returned_text):
                new = response_txt[len(returned_text):]
                if new != "\n":
                if new not in ("", "\n"):
                    yield new
                    returned_text = response_txt
            if image_response:
            if image_response is not None:
                yield image_response
        elif response.get('type') == 2:
            result = response['item']['result']
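The retry default now depends on whether a conversation is being created: fresh sessions retry up to five times, reused conversations fail fast. A hedged usage sketch of the new `return_conversation`/`conversation` pair (only these arguments shown; the rest follows the signature above):

```python
conversation = None
async for chunk in stream_generate("Hello", tone="copilot", return_conversation=True):
    if isinstance(chunk, Conversation):
        conversation = chunk  # keep it so the chat can be continued later
    else:
        print(chunk, end="")

# Reusing the conversation skips create_conversation(), and max_retries
# defaults to 0, so a failing session raises immediately.
async for chunk in stream_generate("And then?", tone="copilot", conversation=conversation):
    print(chunk, end="")
```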
@@ -4,14 +4,18 @@ import re
import json
import base64
import uuid
from asyncio import get_running_loop
from aiohttp import ClientSession, FormData, BaseConnector, CookieJar
try:
    from ..requests.curl_cffi import FormData
    has_curl_cffi = True
except ImportError:
    has_curl_cffi = False

from ..typing import AsyncResult, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_connector
from .helper import format_prompt
from ..image import to_bytes, ImageResponse
from ..requests import WebDriver, raise_for_status, get_args_from_browser
from ..requests import StreamSession, raise_for_status
from ..errors import MissingRequirementsError

class You(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://you.com"

@@ -33,8 +37,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
    model_aliases = {
        "claude-v2": "claude-2"
    }
    _args: dict = None
    _cookie_jar: CookieJar = None
    _cookies = None
    _cookies_used = 0

@@ -45,19 +47,12 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
        messages: Messages,
        image: ImageType = None,
        image_name: str = None,
        connector: BaseConnector = None,
        webdriver: WebDriver = None,
        proxy: str = None,
        chat_mode: str = "default",
        **kwargs,
    ) -> AsyncResult:
        if cls._args is None:
            cls._args = get_args_from_browser(cls.url, webdriver, proxy)
            cls._cookie_jar = CookieJar(loop=get_running_loop())
        else:
            if "cookies" in cls._args:
                del cls._args["cookies"]
            cls._cookie_jar._loop = get_running_loop()
        if not has_curl_cffi:
            raise MissingRequirementsError('Install "curl_cffi" package')
        if image is not None:
            chat_mode = "agent"
        elif not model or model == cls.default_model:

@@ -67,10 +62,9 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
        else:
            chat_mode = "custom"
            model = cls.get_model(model)
        async with ClientSession(
            connector=get_connector(connector, proxy),
            cookie_jar=cls._cookie_jar,
            **cls._args
        async with StreamSession(
            proxy=proxy,
            impersonate="chrome"
        ) as session:
            cookies = await cls.get_cookies(session) if chat_mode != "default" else None
            upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""

@@ -82,8 +76,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
            #     and idx < len(questions)
            # ]
            headers = {
                "accept": "text/event-stream",
                "referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
                "Accept": "text/event-stream",
                "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
            }
            data = {
                "userFiles": upload,

@@ -106,12 +100,12 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                cookies=cookies
            ) as response:
                await raise_for_status(response)
                async for line in response.content:
                async for line in response.iter_lines():
                    if line.startswith(b'event: '):
                        event = line[7:-1].decode()
                        event = line[7:].decode()
                    elif line.startswith(b'data: '):
                        if event in ["youChatUpdate", "youChatToken"]:
                            data = json.loads(line[6:-1])
                            data = json.loads(line[6:])
                        if event == "youChatToken" and event in data:
                            yield data[event]
                        elif event == "youChatUpdate" and "t" in data:

@@ -122,7 +116,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                            yield data["t"]

    @classmethod
    async def upload_file(cls, client: ClientSession, cookies: Cookies, file: bytes, filename: str = None) -> dict:
    async def upload_file(cls, client: StreamSession, cookies: Cookies, file: bytes, filename: str = None) -> dict:
        async with client.get(
            f"{cls.url}/api/get_nonce",
            cookies=cookies,

@@ -146,7 +140,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
        return result

    @classmethod
    async def get_cookies(cls, client: ClientSession) -> Cookies:
    async def get_cookies(cls, client: StreamSession) -> Cookies:
        if not cls._cookies or cls._cookies_used >= 5:
            cls._cookies = await cls.create_cookies(client)
            cls._cookies_used = 0

@@ -173,7 +167,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
        return f"Basic {auth}"

    @classmethod
    async def create_cookies(cls, client: ClientSession) -> Cookies:
    async def create_cookies(cls, client: StreamSession) -> Cookies:
        user_uuid = str(uuid.uuid4())
        async with client.post(
            "https://web.stytch.com/sdk/v1/passwords",
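Switching from aiohttp's `response.content` to curl_cffi's `iter_lines()` also changes the framing: lines no longer carry a trailing newline, which is why the `[:-1]` slices disappear. A minimal sketch of the event-stream parsing this relies on, assuming any async iterator of raw SSE byte lines:

```python
import json

async def parse_you_stream(lines):
    # lines: async iterator yielding b"event: ..." and b"data: ..." SSE lines
    event = None
    async for line in lines:
        if line.startswith(b"event: "):
            event = line[7:].decode()
        elif line.startswith(b"data: ") and event in ("youChatUpdate", "youChatToken"):
            data = json.loads(line[6:])
            if event == "youChatToken" and event in data:
                yield data[event]
            elif event == "youChatUpdate" and "t" in data:
                yield data["t"]
```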
@@ -20,7 +20,7 @@ class Conversation:
        self.clientId = clientId
        self.conversationSignature = conversationSignature

async def create_conversation(session: ClientSession, headers: dict) -> Conversation:
async def create_conversation(session: ClientSession, headers: dict, tone: str) -> Conversation:
    """
    Create a new conversation asynchronously.

@@ -31,7 +31,10 @@ async def create_conversation(session: ClientSession, headers: dict) -> Conversa
    Returns:
        Conversation: An instance representing the created conversation.
    """
    url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
    if tone == "copilot":
        url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1634.3-nodesign2"
    else:
        url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
    async with session.get(url, headers=headers) as response:
        await raise_for_status(response, "Failed to create conversation")
        data = await response.json()
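The conversation endpoint is now tone-dependent. A hedged call sketch (session and headers set up as elsewhere in this module):

```python
# "copilot" routes to copilot.microsoft.com; every other tone stays on bing.com.
conversation = await create_conversation(session, headers, tone="copilot")
print(conversation.conversationId, conversation.clientId)
```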
@@ -14,6 +14,12 @@ try:
except ImportError:
    has_arkose_generator = False

try:
    import webview
    has_webview = True
except ImportError:
    has_webview = False

try:
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait

@@ -25,10 +31,10 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_cookies
from ...webdriver import get_browser
from ...typing import AsyncResult, Messages, Cookies, ImageType, Union, AsyncIterator
from ...requests import get_args_from_browser
from ...requests import get_args_from_browser, raise_for_status
from ...requests.aiohttp import StreamSession
from ...image import to_image, to_bytes, ImageResponse, ImageRequest
from ...errors import MissingRequirementsError, MissingAuthError
from ...errors import MissingRequirementsError, MissingAuthError, ProviderNotWorkingError
from ... import debug

class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):

@@ -134,7 +140,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
        }
        # Post the image data to the service and get the image data
        async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
            response.raise_for_status()
            cls._update_request_args()
            await raise_for_status(response)
            image_data = {
                **data,
                **await response.json(),

@@ -152,14 +159,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                "x-ms-blob-type": "BlockBlob"
            }
        ) as response:
            response.raise_for_status()
            await raise_for_status(response)
        # Post the file ID to the service and get the download URL
        async with session.post(
            f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
            json={},
            headers=headers
        ) as response:
            response.raise_for_status()
            cls._update_request_args(session)
            await raise_for_status(response)
            image_data["download_url"] = (await response.json())["download_url"]
        return ImageRequest(image_data)

@@ -178,7 +186,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
        if not cls.default_model:
            async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
                cls._update_request_args(session)
                response.raise_for_status()
                await raise_for_status(response)
                data = await response.json()
                if "categories" in data:
                    cls.default_model = data["categories"][-1]["default_model"]

@@ -261,7 +269,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
            file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
            try:
                async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
                    response.raise_for_status()
                    cls._update_request_args(session)
                    await raise_for_status(response)
                    download_url = (await response.json())["download_url"]
                    return ImageResponse(download_url, prompt)
            except Exception as e:

@@ -288,6 +297,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
            json={"is_visible": False},
            headers=headers
        ) as response:
            cls._update_request_args(session)
            ...

    @classmethod

@@ -337,31 +347,32 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
        if parent_id is None:
            parent_id = str(uuid.uuid4())

        # Read api_key from arguments
        api_key = kwargs["access_token"] if "access_token" in kwargs else api_key

        async with StreamSession(
            proxies={"https": proxy},
            impersonate="chrome",
            timeout=timeout
        ) as session:
            # Read api_key and cookies from cache / browser config
            api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
            if cls._headers is None or cls._expires is None or time.time() > cls._expires:
                if api_key is None:
                    # Read api_key from cookies
                    if cls._headers is None:
                        cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
                        api_key = cookies["access_token"] if "access_token" in cookies else api_key
                        cls._create_request_args(cookies)
                if api_key is None:
                    try:
                        await cls.webview_access_token() if has_webview else None
                    except Exception as e:
                        if debug.logging:
                            print(f"Use webview failed: {e}")
            else:
                api_key = cls._api_key if api_key is None else api_key
            # Read api_key with session cookies
            #if api_key is None and cookies:
            #    api_key = await cls.fetch_access_token(session, cls._headers)
            # Load default model
            if cls.default_model is None and api_key is not None:

            if api_key is not None:
                cls._create_request_args(cookies)
                cls._set_api_key(api_key)

            if cls.default_model is None and cls._headers is not None:
                try:
                    if not model:
                        cls._set_api_key(api_key)
                        cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
                    else:
                        cls.default_model = cls.get_model(model)

@@ -369,8 +380,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                    if debug.logging:
                        print("OpenaiChat: Load default_model failed")
                        print(f"{e.__class__.__name__}: {e}")
            # Browse api_key and default model
            if api_key is None or cls.default_model is None:
            if cls.default_model is None:
                login_url = os.environ.get("G4F_LOGIN_URL")
                if login_url:
                    yield f"Please login: [ChatGPT]({login_url})\n\n"

@@ -379,20 +389,21 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                except MissingRequirementsError:
                    raise MissingAuthError(f'Missing "access_token". Add a "api_key" please')
                cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
            else:
                cls._set_api_key(api_key)

            async with session.post(
                f"{cls.url}/backend-api/sentinel/chat-requirements",
                json={"conversation_mode_kind": "primary_assistant"},
                headers=cls._headers
            ) as response:
                response.raise_for_status()
                cls._update_request_args(session)
                await raise_for_status(response)
                data = await response.json()
                blob = data["arkose"]["dx"]
                need_arkose = data["arkose"]["required"]
                chat_token = data["token"]

            if need_arkose and not has_arkose_generator:
                raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
                raise MissingRequirementsError('Install "py-arkose-generator" package')

            try:

@@ -407,6 +418,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
            while fields.finish_reason is None:
                conversation_id = conversation_id if fields.conversation_id is None else fields.conversation_id
                parent_id = parent_id if fields.message_id is None else fields.message_id
                websocket_request_id = str(uuid.uuid4())
                data = {
                    "action": action,
                    "conversation_mode": {"kind": "primary_assistant"},

@@ -416,25 +428,29 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                    "parent_message_id": parent_id,
                    "model": model,
                    "history_and_training_disabled": history_disabled and not auto_continue,
                    "websocket_request_id": websocket_request_id
                }
                if action != "continue":
                    messages = messages if conversation_id is None else [messages[-1]]
                    data["messages"] = cls.create_messages(messages, image_request)
                    data["messages"] = cls.create_messages(messages, image_request)
                headers = {
                    "Accept": "text/event-stream",
                    "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
                    **cls._headers
                }
                if need_arkose:
                    raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
                    headers["OpenAI-Sentinel-Arkose-Token"] = await cls.get_arkose_token(session, cls._headers, blob)
                    headers["OpenAI-Sentinel-Chat-Requirements-Token"] = chat_token

                async with session.post(
                    f"{cls.url}/backend-api/conversation",
                    json=data,
                    headers={
                        "Accept": "text/event-stream",
                        **({"OpenAI-Sentinel-Arkose-Token": await cls.get_arkose_token(session)} if need_arkose else {}),
                        "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
                        **cls._headers
                    }
                    headers=headers
                ) as response:
                    cls._update_request_args(session)
                    if not response.ok:
                        raise RuntimeError(f"Response {response.status}: {await response.text()}")
                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
                    await raise_for_status(response)
                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields, websocket_request_id):
                        if response_fields:
                            response_fields = False
                            yield fields

@@ -447,21 +463,35 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                await cls.delete_conversation(session, cls._headers, fields.conversation_id)

    @staticmethod
    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str) -> AsyncIterator:
    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str, is_curl: bool) -> AsyncIterator:
        while True:
            message = await ws.receive_json()
            if is_curl:
                message = json.loads(ws.recv()[0])
            else:
                message = await ws.receive_json()
            if message["conversation_id"] == conversation_id:
                yield base64.b64decode(message["body"])

    @classmethod
    async def iter_messages_chunk(cls, messages: AsyncIterator, session: StreamSession, fields: ResponseFields) -> AsyncIterator:
    async def iter_messages_chunk(
        cls,
        messages: AsyncIterator,
        session: StreamSession,
        fields: ResponseFields
    ) -> AsyncIterator:
        last_message: int = 0
        async for message in messages:
            if message.startswith(b'{"wss_url":'):
                message = json.loads(message)
                async with session.ws_connect(message["wss_url"]) as ws:
                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws, message["conversation_id"]), session, fields):
                ws = await session.ws_connect(message["wss_url"])
                try:
                    async for chunk in cls.iter_messages_chunk(
                        cls.iter_messages_ws(ws, message["conversation_id"], hasattr(ws, "recv")),
                        session, fields
                    ):
                        yield chunk
                finally:
                    await ws.aclose()
                break
            async for chunk in cls.iter_messages_line(session, message, fields):
                if fields.finish_reason is not None:

@@ -513,6 +543,43 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
            if "finish_details" in line["message"]["metadata"]:
                fields.finish_reason = line["message"]["metadata"]["finish_details"]["type"]

    @classmethod
    async def webview_access_token(cls) -> str:
        window = webview.create_window("OpenAI Chat", cls.url)
        await asyncio.sleep(3)
        prompt_input = None
        while not prompt_input:
            try:
                await asyncio.sleep(1)
                prompt_input = window.dom.get_element("#prompt-textarea")
            except:
                ...
        window.evaluate_js("""
this._fetch = this.fetch;
this.fetch = async (url, options) => {
    const response = await this._fetch(url, options);
    if (url == "https://chat.openai.com/backend-api/conversation") {
        this._headers = options.headers;
        return response;
    }
    return response;
};
""")
        window.evaluate_js("""
document.querySelector('.from-token-main-surface-secondary').click();
""")
        headers = None
        while headers is None:
            headers = window.evaluate_js("this._headers")
            await asyncio.sleep(1)
        headers["User-Agent"] = window.evaluate_js("this.navigator.userAgent")
        cookies = [list(*cookie.items()) for cookie in window.get_cookies()]
        window.destroy()
        cls._cookies = dict([(name, cookie.value) for name, cookie in cookies])
        cls._headers = headers
        cls._expires = int(time.time()) + 60 * 60 * 4
        cls._update_cookie_header()

    @classmethod
    def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> None:
        """

@@ -542,10 +609,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
            cls._update_cookie_header()
            cls._set_api_key(access_token)
        finally:
            driver.close()
        driver.close()

    @classmethod
    async def get_arkose_token(cls, session: StreamSession) -> str:
    async def get_arkose_token(cls, session: StreamSession, headers: dict, blob: str) -> str:
        """
        Obtain an Arkose token for the session.

@@ -559,16 +626,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
            RuntimeError: If unable to retrieve the token.
        """
        config = {
            "pkey": "3D86FBBA-9D22-402A-B512-3420086BA6CC",
            "pkey": "35536E1E-65B4-4D96-9D97-6ADB7EFF8147",
            "surl": "https://tcr9i.chat.openai.com",
            "headers": {
                "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
            },
            "headers": headers,
            "site": cls.url,
            "data": {"blob": blob}
        }
        args_for_request = get_values_for_request(config)
        async with session.post(**args_for_request) as response:
            response.raise_for_status()
            await raise_for_status(response)
            decoded_json = await response.json()
            if "token" in decoded_json:
                return decoded_json["token"]

@@ -591,7 +657,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):

    @classmethod
    def _create_request_args(cls, cookies: Union[Cookies, None]):
        cls._headers = {}
        cls._headers = {
            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
        }
        cls._cookies = {} if cookies is None else cookies
        cls._update_cookie_header()
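A recurring change in this file is replacing the synchronous `response.raise_for_status()` with an awaited `raise_for_status(response)` from `...requests`, so the response body can be read and surfaced in the error. A rough sketch of what such a helper does (names and exception type here are assumptions, not the library's exact code):

```python
async def raise_for_status(response, message: str = None):
    # Read the body before raising so the caller sees the server's error text,
    # which a plain response.raise_for_status() would discard.
    if not response.ok:
        text = await response.text()
        raise RuntimeError(f"{message or 'Response'} {response.status}: {text}")
```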
@@ -69,11 +69,14 @@ def get_model_and_provider(model : Union[Model, str],
    if isinstance(model, Model):
        model = model.name

    if ignored and isinstance(provider, BaseRetryProvider):
        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]

    if not ignore_working and not provider.working:
        raise ProviderNotWorkingError(f'{provider.__name__} is not working')

    if not ignore_working and isinstance(provider, BaseRetryProvider):
        provider.providers = [p for p in provider.providers if p.working]

    if ignored and isinstance(provider, BaseRetryProvider):
        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]

    if not ignore_stream and not provider.supports_stream and stream:
        raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
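The reordering above means a retry provider's children are first filtered for `working`, and only then reduced by the caller's `ignored` list. A hedged usage sketch (assumes the top-level API forwards `ignored` to this function):

```python
import g4f

# Non-working backends are dropped first; "Bing" is then skipped explicitly.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
    ignored=["Bing"],
)
```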
@@ -3,4 +3,5 @@ from .providers.types import ProviderType
logging: bool = False
version_check: bool = True
last_provider: ProviderType = None
last_model: str = None
last_model: str = None
version: str = None
@@ -1,5 +1,5 @@
<!DOCTYPE html>
<html lang="en">
<html lang="en" data-framework="javascript">

<head>
    <meta charset="UTF-8">

@@ -10,14 +10,14 @@
    <meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg">
    <meta property="og:description" content="A conversational AI system that listens, learns, and challenges">
    <meta property="og:url" content="https://g4f.ai">
    <link rel="stylesheet" href="/assets/css/style.css">
    <link rel="apple-touch-icon" sizes="180x180" href="/assets/img/apple-touch-icon.png">
    <link rel="icon" type="image/png" sizes="32x32" href="/assets/img/favicon-32x32.png">
    <link rel="icon" type="image/png" sizes="16x16" href="/assets/img/favicon-16x16.png">
    <link rel="manifest" href="/assets/img/site.webmanifest">
    <script src="/assets/js/icons.js"></script>
    <script src="/assets/js/highlightjs-copy.min.js"></script>
    <script src="/assets/js/chat.v1.js" defer></script>
    <link rel="stylesheet" href="/static/css/style.css">
    <link rel="apple-touch-icon" sizes="180x180" href="/static/img/apple-touch-icon.png">
    <link rel="icon" type="image/png" sizes="32x32" href="/static/img/favicon-32x32.png">
    <link rel="icon" type="image/png" sizes="16x16" href="/static/img/favicon-16x16.png">
    <link rel="manifest" href="/static/img/site.webmanifest">
    <script src="/static/js/icons.js"></script>
    <script src="/static/js/highlightjs-copy.min.js"></script>
    <script src="/static/js/chat.v1.js" defer></script>
    <script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
    <link rel="stylesheet"
        href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">

@@ -38,8 +38,8 @@
    </script>
    <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
    <script>
        const user_image = '<img src="/assets/img/user.png" alt="your avatar">';
        const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">';
        const user_image = '<img src="/static/img/user.png" alt="your avatar">';
        const gpt_image = '<img src="/static/img/gpt.png" alt="your avatar">';
    </script>
    <style>
        .hljs {

@@ -74,8 +74,8 @@
            background: #8b3dff;
        }
    </style>
    <script src="/assets/js/highlight.min.js"></script>
    <script>window.conversation_id = `{{chat_id}}`</script>
    <script src="/static/js/highlight.min.js"></script>
    <script>window.conversation_id = "{{chat_id}}"</script>
    <title>g4f - gui</title>
</head>

@@ -94,11 +94,10 @@
    <i class="fa-regular fa-trash"></i>
    <span>Clear Conversations</span>
</button>
<div class="info">
    <i class="fa-brands fa-telegram"></i>
    <span class="convo-title">tele ~ <a href="https://t.me/g4f_official">@g4f_official</a>
    </span>
</div>
<button onclick="save_storage()">
    <i class="fa-solid fa-download"></i>
    <a href="" onclick="return false;">Export Conversations</a>
</button>
<div class="info">
    <i class="fa-brands fa-github"></i>
    <span class="convo-title">github ~ <a href="https://github.com/xtekky/gpt4free">@gpt4free</a>

@@ -161,6 +160,7 @@
    <option value="gemini-pro">gemini-pro</option>
    <option value="">----</option>
</select>
<select name="model2" id="model2" class="hidden"></select>
</div>
<div class="field">
<select name="jailbreak" id="jailbreak" style="display: none;">
@@ -15,7 +15,7 @@
    margin: auto;
    display: flex;
    flex-direction: column;
    gap: 16px;
    gap: var(--inner-gap);
    max-width: 200px;
    padding: var(--section-gap);
    overflow: none;

@@ -106,6 +106,10 @@ body {
    border: 1px solid var(--blur-border);
}

.hidden {
    display: none;
}

.conversations {
    max-width: 260px;
    padding: var(--section-gap);

@@ -179,7 +183,8 @@ body {
.conversations {
    display: flex;
    flex-direction: column;
    gap: 16px;
    gap: var(--inner-gap);
    padding: var(--inner-gap);
}

.conversations .title {

@@ -569,7 +574,7 @@ label[for="camera"] {
    height: fit-content;
    display: flex;
    align-items: center;
    gap: 16px;
    gap: var(--inner-gap);
}

.field .about {

@@ -653,10 +658,15 @@ select {
    font-size: 14px;
}

.bottom_buttons button a {
    color: var(--colour-3);
    font-weight: 500;
}

.conversations .top {
    display: flex;
    flex-direction: column;
    gap: 16px;
    gap: var(--inner-gap);
    overflow: auto;
}
[7 binary image files; widths, heights, and sizes unchanged (8.7 KiB, 17 KiB, 7.8 KiB, 499 B, 1.0 KiB, 2.8 KiB, 17 KiB) — consistent with the assets/ to static/ path move above]
@@ -1,5 +1,4 @@
const colorThemes = document.querySelectorAll('[name="theme"]');
const markdown = window.markdownit();
const message_box = document.getElementById(`messages`);
const messageInput = document.getElementById(`message-input`);
const box_conversations = document.querySelector(`.top`);

@@ -12,12 +11,15 @@ const imageInput = document.getElementById("image");
const cameraInput = document.getElementById("camera");
const fileInput = document.getElementById("file");
const inputCount = document.getElementById("input-count")
const providerSelect = document.getElementById("provider");
const modelSelect = document.getElementById("model");
const modelProvider = document.getElementById("model2");
const systemPrompt = document.getElementById("systemPrompt")
const jailbreak = document.getElementById("jailbreak");

let prompt_lock = false;

hljs.addPlugin(new CopyButtonPlugin());
const options = ["switch", "model", "model2", "jailbreak", "patch", "provider", "history"];

messageInput.addEventListener("blur", () => {
    window.scrollTo(0, 0);

@@ -34,15 +36,17 @@ appStorage = window.localStorage || {
    length: 0
}

const markdown = window.markdownit();
const markdown_render = (content) => {
    return markdown.render(content
        .replaceAll(/<!--.+-->/gm, "")
        .replaceAll(/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, "")
        .replaceAll(/<img data-prompt="[^>]+">/gm, "")
    )
        .replaceAll("<a href=", '<a target="_blank" href=')
        .replaceAll('<code>', '<code class="language-plaintext">')
}

hljs.addPlugin(new CopyButtonPlugin());
let typesetPromise = Promise.resolve();
const highlight = (container) => {
    container.querySelectorAll('code:not(.hljs)').forEach((el) => {

@@ -90,48 +94,48 @@ const handle_ask = async () => {
    window.scrollTo(0, 0);

    message = messageInput.value
    if (message.length > 0) {
        messageInput.value = "";
        prompt_lock = true;
        count_input()
        await add_conversation(window.conversation_id, message);
        if ("text" in fileInput.dataset) {
            message += '\n```' + fileInput.dataset.type + '\n';
            message += fileInput.dataset.text;
            message += '\n```'
        }
        let message_index = await add_message(window.conversation_id, "user", message);
        window.token = message_id();

        if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
        const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
        if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
        else delete imageInput.dataset.src

        model = modelSelect.options[modelSelect.selectedIndex].value
        message_box.innerHTML += `
            <div class="message" data-index="${message_index}">
                <div class="user">
                    ${user_image}
                    <i class="fa-solid fa-xmark"></i>
                    <i class="fa-regular fa-phone-arrow-up-right"></i>
                </div>
                <div class="content" id="user_${token}">
                    <div class="content_inner">
                        ${markdown_render(message)}
                        ${imageInput.dataset.src
                            ? '<img src="' + imageInput.dataset.src + '" alt="Image upload">'
                            : ''
                        }
                    </div>
                    <div class="count">${count_words_and_tokens(message, model)}</div>
                </div>
            </div>
        `;
        await register_remove_message();
        highlight(message_box);
        await ask_gpt();
    if (message.length <= 0) {
        return;
    }
    messageInput.value = "";
    prompt_lock = true;
    count_input()
    await add_conversation(window.conversation_id, message);

    if ("text" in fileInput.dataset) {
        message += '\n```' + fileInput.dataset.type + '\n';
        message += fileInput.dataset.text;
        message += '\n```'
    }
    let message_index = await add_message(window.conversation_id, "user", message);
    window.token = message_id();

    if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
    const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
    if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
    else delete imageInput.dataset.src

    message_box.innerHTML += `
        <div class="message" data-index="${message_index}">
            <div class="user">
                ${user_image}
                <i class="fa-solid fa-xmark"></i>
                <i class="fa-regular fa-phone-arrow-up-right"></i>
            </div>
            <div class="content" id="user_${token}">
                <div class="content_inner">
                    ${markdown_render(message)}
                    ${imageInput.dataset.src
                        ? '<img src="' + imageInput.dataset.src + '" alt="Image upload">'
                        : ''
                    }
                </div>
                <div class="count">${count_words_and_tokens(message, get_selected_model())}</div>
            </div>
        </div>
    `;
    highlight(message_box);
    await ask_gpt();
};

const remove_cancel_button = async () => {

@@ -143,7 +147,7 @@ const remove_cancel_button = async () => {
    }, 300);
};

const prepare_messages = (messages, filter_last_message = true) => {
const prepare_messages = (messages, filter_last_message=true) => {
    // Removes none user messages at end
    if (filter_last_message) {
        let last_message;

@@ -193,20 +197,54 @@ const prepare_messages = (messages, filter_last_message = true) => {
    return new_messages;
}

async function add_message_chunk(message) {
    if (message.type == "conversation") {
        console.info("Conversation used:", message.conversation)
    } else if (message.type == "provider") {
        window.provider_result = message.provider;
        window.content.querySelector('.provider').innerHTML = `
            <a href="${message.provider.url}" target="_blank">
                ${message.provider.name}
            </a>
            ${message.provider.model ? ' with ' + message.provider.model : ''}
        `
    } else if (message.type == "message") {
        console.error(message.message)
        return;
    } else if (message.type == "error") {
        console.error(message.error);
        window.content_inner.innerHTML += `<p><strong>An error occurred:</strong> ${message.error}</p>`;
} else if (message.type == "content") {
|
||||
window.text += message.content;
|
||||
html = markdown_render(window.text);
|
||||
let lastElement, lastIndex = null;
|
||||
for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
|
||||
const index = html.lastIndexOf(element)
|
||||
if (index - element.length > lastIndex) {
|
||||
lastElement = element;
|
||||
lastIndex = index;
|
||||
}
|
||||
}
|
||||
if (lastIndex) {
|
||||
html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
|
||||
}
|
||||
window.content_inner.innerHTML = html;
|
||||
window.content_count.innerText = count_words_and_tokens(text, window.provider_result?.model);
|
||||
highlight(window.content_inner);
|
||||
}
|
||||
|
||||
window.scrollTo(0, 0);
|
||||
if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
|
||||
message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
|
||||
}
|
||||
}
|
||||
|
||||
const ask_gpt = async () => {
|
||||
regenerate.classList.add(`regenerate-hidden`);
|
||||
messages = await get_messages(window.conversation_id);
|
||||
total_messages = messages.length;
|
||||
|
||||
messages = prepare_messages(messages);
|
||||
|
||||
window.scrollTo(0, 0);
|
||||
window.controller = new AbortController();
|
||||
|
||||
jailbreak = document.getElementById("jailbreak");
|
||||
provider = document.getElementById("provider");
|
||||
window.text = '';
|
||||
|
||||
stop_generating.classList.remove(`stop_generating-hidden`);
|
||||
|
||||
message_box.scrollTop = message_box.scrollHeight;
|
||||
|
@ -229,103 +267,31 @@ const ask_gpt = async () => {
|
|||
</div>
|
||||
</div>
|
||||
`;
|
||||
content = document.getElementById(`gpt_${window.token}`);
|
||||
content_inner = content.querySelector('.content_inner');
|
||||
content_count = content.querySelector('.count');
|
||||
|
||||
window.controller = new AbortController();
|
||||
window.text = "";
|
||||
window.error = null;
|
||||
window.provider_result = null;
|
||||
|
||||
window.content = document.getElementById(`gpt_${window.token}`);
|
||||
window.content_inner = content.querySelector('.content_inner');
|
||||
window.content_count = content.querySelector('.count');
|
||||
|
||||
message_box.scrollTop = message_box.scrollHeight;
|
||||
window.scrollTo(0, 0);
|
||||
|
||||
error = provider_result = null;
|
||||
try {
|
||||
let body = JSON.stringify({
|
||||
const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput;
|
||||
const file = input && input.files.length > 0 ? input.files[0] : null;
|
||||
await api("conversation", {
|
||||
id: window.token,
|
||||
conversation_id: window.conversation_id,
|
||||
model: modelSelect.options[modelSelect.selectedIndex].value,
|
||||
jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
|
||||
web_search: document.getElementById(`switch`).checked,
|
||||
provider: provider.options[provider.selectedIndex].value,
|
||||
patch_provider: document.getElementById('patch')?.checked,
|
||||
model: get_selected_model(),
|
||||
jailbreak: jailbreak?.options[jailbreak.selectedIndex].value,
|
||||
web_search: document.getElementById("switch").checked,
|
||||
provider: providerSelect.options[providerSelect.selectedIndex].value,
|
||||
patch_provider: document.getElementById("patch")?.checked,
|
||||
messages: messages
|
||||
});
|
||||
const headers = {
|
||||
accept: 'text/event-stream'
|
||||
}
|
||||
const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
|
||||
if (input && input.files.length > 0) {
|
||||
const formData = new FormData();
|
||||
formData.append('image', input.files[0]);
|
||||
formData.append('json', body);
|
||||
body = formData;
|
||||
} else {
|
||||
headers['content-type'] = 'application/json';
|
||||
}
|
||||
|
||||
const response = await fetch(`/backend-api/v2/conversation`, {
|
||||
method: 'POST',
|
||||
signal: window.controller.signal,
|
||||
headers: headers,
|
||||
body: body
|
||||
});
|
||||
const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
|
||||
let buffer = ""
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
for (const line of value.split("\n")) {
|
||||
if (!line) {
|
||||
continue;
|
||||
}
|
||||
let message;
|
||||
try {
|
||||
message = JSON.parse(buffer + line);
|
||||
buffer = "";
|
||||
} catch {
|
||||
buffer += line
|
||||
continue;
|
||||
}
|
||||
if (message.type == "content") {
|
||||
text += message.content;
|
||||
} else if (message.type == "provider") {
|
||||
provider_result = message.provider
|
||||
content.querySelector('.provider').innerHTML = `
|
||||
<a href="${provider_result.url}" target="_blank">
|
||||
${provider_result.name}
|
||||
</a>
|
||||
${provider_result.model ? ' with ' + provider_result.model : ''}
|
||||
`
|
||||
} else if (message.type == "error") {
|
||||
error = message.error;
|
||||
} else if (messag.type == "message") {
|
||||
console.error(messag.message)
|
||||
}
|
||||
}
|
||||
if (error) {
|
||||
console.error(error);
|
||||
content_inner.innerHTML += `<p><strong>An error occured:</strong> ${error}</p>`;
|
||||
} else {
|
||||
html = markdown_render(text);
|
||||
let lastElement, lastIndex = null;
|
||||
for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
|
||||
const index = html.lastIndexOf(element)
|
||||
if (index - element.length > lastIndex) {
|
||||
lastElement = element;
|
||||
lastIndex = index;
|
||||
}
|
||||
}
|
||||
if (lastIndex) {
|
||||
html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
|
||||
}
|
||||
content_inner.innerHTML = html;
|
||||
content_count.innerText = count_words_and_tokens(text, provider_result?.model);
|
||||
highlight(content_inner);
|
||||
}
|
||||
|
||||
window.scrollTo(0, 0);
|
||||
if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
|
||||
message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
|
||||
}
|
||||
}
|
||||
}, file);
|
||||
if (!error) {
|
||||
html = markdown_render(text);
|
||||
content_inner.innerHTML = html;
|
||||
|
@ -350,7 +316,7 @@ const ask_gpt = async () => {
        await add_message(window.conversation_id, "assistant", text, provider_result);
        await load_conversation(window.conversation_id);
    } else {
        let cursorDiv = document.getElementById(`cursor`);
        let cursorDiv = document.getElementById("cursor");
        if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
    }
    window.scrollTo(0, 0);

@ -439,7 +405,7 @@ const new_conversation = async () => {
    say_hello();
};

const load_conversation = async (conversation_id, scroll = true) => {
const load_conversation = async (conversation_id, scroll=true) => {
    let conversation = await get_conversation(conversation_id);
    let messages = conversation?.items || [];

@ -454,7 +420,6 @@ const load_conversation = async (conversation_id, scroll = true) => {
        last_model = item.provider?.model;
        let next_i = parseInt(i) + 1;
        let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);

        let provider_link = item.provider?.name ? `<a href="${item.provider.url}" target="_blank">${item.provider.name}</a>` : "";
        let provider = provider_link ? `
            <div class="provider">

@ -491,7 +456,6 @@ const load_conversation = async (conversation_id, scroll = true) => {
    }

    message_box.innerHTML = elements;

    register_remove_message();
    highlight(message_box);

@ -543,7 +507,9 @@ async function add_conversation(conversation_id, content) {
}

async function save_system_message() {
    if (!window.conversation_id) return;
    if (!window.conversation_id) {
        return;
    }
    const conversation = await get_conversation(window.conversation_id);
    conversation.system = systemPrompt?.value;
    await save_conversation(window.conversation_id, conversation);

@ -580,7 +546,6 @@ const remove_message = async (conversation_id, index) => {

const add_message = async (conversation_id, role, content, provider) => {
    const conversation = await get_conversation(conversation_id);

    conversation.items.push({
        role: role,
        content: content,

@ -662,15 +627,14 @@ sidebar_button.addEventListener("click", (event) => {
        sidebar.classList.add("shown");
        sidebar_button.classList.add("rotated");
    }

    window.scrollTo(0, 0);
});

const register_settings_localstorage = async () => {
    for (id of ["switch", "model", "jailbreak", "patch", "provider", "history"]) {
const register_settings_storage = async () => {
    options.forEach((id) => {
        element = document.getElementById(id);
        if (!element) {
            continue;
            return;
        }
        element.addEventListener('change', async (event) => {
            switch (event.target.type) {

@ -684,14 +648,14 @@ const register_settings_localstorage = async () => {
                    console.warn("Unresolved element type");
            }
        });
    }
    });
}

const load_settings_localstorage = async () => {
    for (id of ["switch", "model", "jailbreak", "patch", "provider", "history"]) {
const load_settings_storage = async () => {
    options.forEach((id) => {
        element = document.getElementById(id);
        if (!element || !(value = appStorage.getItem(element.id))) {
            continue;
        if (!element || !(value = appStorage.getItem(id))) {
            return;
        }
        if (value) {
            switch (element.type) {

@ -705,7 +669,7 @@ const load_settings_localstorage = async () => {
                    console.warn("Unresolved element type");
            }
        }
    }
    });
}

const say_hello = async () => {

@ -780,13 +744,16 @@ function count_words_and_tokens(text, model) {
}

let countFocus = messageInput;
let timeoutId;
const count_input = async () => {
    if (countFocus.value) {
        model = modelSelect.options[modelSelect.selectedIndex].value;
        inputCount.innerText = count_words_and_tokens(countFocus.value, model);
    } else {
        inputCount.innerHTML = "&nbsp;"
    }
    if (timeoutId) clearTimeout(timeoutId);
    timeoutId = setTimeout(() => {
        if (countFocus.value) {
            inputCount.innerText = count_words_and_tokens(countFocus.value, get_selected_model());
        } else {
            inputCount.innerHTML = "&nbsp;"
        }
    }, 100);
};
messageInput.addEventListener("keyup", count_input);
systemPrompt.addEventListener("keyup", count_input);

@ -799,9 +766,21 @@ systemPrompt.addEventListener("blur", function() {
    count_input();
});

window.onload = async () => {
    setTheme();
window.addEventListener('load', async function() {
    await on_load();
    if (window.conversation_id == "{{chat_id}}") {
        window.conversation_id = uuid();
    } else {
        await on_api();
    }
});

window.addEventListener('pywebviewready', async function() {
    await on_api();
});

async function on_load() {
    setTheme();
    count_input();

    if (/\/chat\/.+/.test(window.location.href)) {

@ -809,9 +788,10 @@ window.onload = async () => {
    } else {
        say_hello()
    }

    load_conversations();
}

async function on_api() {
    messageInput.addEventListener("keydown", async (evt) => {
        if (prompt_lock) return;

@ -824,46 +804,17 @@ window.onload = async () => {
            messageInput.style.height = messageInput.scrollHeight + "px";
        }
    });

    sendButton.addEventListener(`click`, async () => {
        console.log("clicked send");
        if (prompt_lock) return;
        await handle_ask();
    });

    messageInput.focus();

    register_settings_localstorage();
};
    register_settings_storage();

(async () => {
    response = await fetch('/backend-api/v2/models')
    models = await response.json()

    for (model of models) {
        let option = document.createElement('option');
        option.value = option.text = model;
        modelSelect.appendChild(option);
    }

    response = await fetch('/backend-api/v2/providers')
    providers = await response.json()
    select = document.getElementById('provider');

    for (provider of providers) {
        let option = document.createElement('option');
        option.value = option.text = provider;
        select.appendChild(option);
    }

    await load_settings_localstorage()
})();

(async () => {
    response = await fetch('/backend-api/v2/version')
    versions = await response.json()

    document.title = 'g4f - gui - ' + versions["version"];
    versions = await api("version");
    document.title = 'g4f - ' + versions["version"];
    let text = "version ~ "
    if (versions["version"] != versions["latest_version"]) {
        let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];

@ -873,7 +824,24 @@ window.onload = async () => {
        text += versions["version"];
    }
    document.getElementById("version_text").innerHTML = text
})()

    models = await api("models");
    models.forEach((model) => {
        let option = document.createElement("option");
        option.value = option.text = model;
        modelSelect.appendChild(option);
    });

    providers = await api("providers")
    providers.forEach((provider) => {
        let option = document.createElement("option");
        option.value = option.text = provider;
        providerSelect.appendChild(option);
    })

    await load_provider_models(appStorage.getItem("provider"));
    load_settings_storage()
}

for (const el of [imageInput, cameraInput]) {
    el.addEventListener('click', async () => {

@ -889,6 +857,7 @@ fileInput.addEventListener('click', async (event) => {
    fileInput.value = '';
    delete fileInput.dataset.text;
});

fileInput.addEventListener('change', async (event) => {
    if (fileInput.files.length) {
        type = fileInput.files[0].type;

@ -903,8 +872,21 @@ fileInput.addEventListener('change', async (event) => {
        }
        fileInput.dataset.type = type
        const reader = new FileReader();
        reader.addEventListener('load', (event) => {
        reader.addEventListener('load', async (event) => {
            fileInput.dataset.text = event.target.result;
            if (type == "json") {
                const data = JSON.parse(fileInput.dataset.text);
                if ("g4f" in data.options) {
                    Object.keys(data).forEach(key => {
                        if (key != "options" && !localStorage.getItem(key)) {
                            appStorage.setItem(key, JSON.stringify(data[key]));
                        }
                    });
                    delete fileInput.dataset.text;
                    await load_conversations();
                    fileInput.value = "";
                }
            }
        });
        reader.readAsText(fileInput.files[0]);
    } else {

@ -914,4 +896,126 @@ fileInput.addEventListener('change', async (event) => {

systemPrompt?.addEventListener("blur", async () => {
    await save_system_message();
});
});
function get_selected_model() {
    if (modelProvider.selectedIndex >= 0) {
        return modelProvider.options[modelProvider.selectedIndex].value;
    } else if (modelSelect.selectedIndex >= 0) {
        return modelSelect.options[modelSelect.selectedIndex].value;
    }
}

async function api(ressource, args=null, file=null) {
    if (window?.pywebview) {
        if (args) {
            if (ressource == "models") {
                ressource = "provider_models";
            }
            return pywebview.api["get_" + ressource](args);
        }
        return pywebview.api["get_" + ressource]();
    }
    if (ressource == "models" && args) {
        ressource = `${ressource}/${args}`;
    }
    const url = `/backend-api/v2/${ressource}`;
    if (ressource == "conversation") {
        let body = JSON.stringify(args);
        const headers = {
            accept: 'text/event-stream'
        }
        if (file) {
            const formData = new FormData();
            formData.append('file', file);
            formData.append('json', body);
            body = formData;
        } else {
            headers['content-type'] = 'application/json';
        }
        response = await fetch(url, {
            method: 'POST',
            signal: window.controller.signal,
            headers: headers,
            body: body
        });
        return read_response(response);
    }
    response = await fetch(url);
    return await response.json();
}

async function read_response(response) {
    const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
    let buffer = ""
    while (true) {
        const { value, done } = await reader.read();
        if (done) {
            break;
        }
        for (const line of value.split("\n")) {
            if (!line) {
                continue;
            }
            try {
                add_message_chunk(JSON.parse(buffer + line))
                buffer = "";
            } catch {
                buffer += line
            }
        }
    }
}
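
`read_response` consumes the newline-delimited JSON stream from `/backend-api/v2/conversation`, buffering partial lines until they parse as a complete chunk. A minimal Python sketch of the same consumption logic, assuming a locally running GUI server (host, port, and payload here are illustrative, not part of this PR):

```python
import json
import requests

# Hypothetical local endpoint; adjust host and port to your running g4f GUI.
url = "http://localhost:8080/backend-api/v2/conversation"
payload = {"model": "", "provider": "Bing", "messages": [{"role": "user", "content": "Hi"}]}

with requests.post(url, json=payload, headers={"accept": "text/event-stream"}, stream=True) as response:
    buffer = ""
    for line in response.iter_lines(decode_unicode=True):
        if not line:
            continue
        try:
            # Same trick as read_response(): keep a buffer for partial JSON lines.
            chunk = json.loads(buffer + line)
            buffer = ""
        except json.JSONDecodeError:
            buffer += line
            continue
        if chunk["type"] == "content":
            print(chunk["content"], end="")
```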

async function load_provider_models(providerIndex=null) {
    if (!providerIndex) {
        providerIndex = providerSelect.selectedIndex;
    }
    const provider = providerSelect.options[providerIndex].value;
    if (!provider) {
        return;
    }
    const models = await api('models', provider);
    modelProvider.innerHTML = '';
    if (models.length > 0) {
        modelSelect.classList.add("hidden");
        modelProvider.classList.remove("hidden");
        models.forEach((model) => {
            let option = document.createElement('option');
            option.value = option.text = model.model;
            option.selected = model.default;
            modelProvider.appendChild(option);
        });
    } else {
        modelProvider.classList.add("hidden");
        modelSelect.classList.remove("hidden");
    }
};
providerSelect.addEventListener("change", () => load_provider_models());

function save_storage() {
    let filename = new Date().toLocaleString()
    filename += ".json"
    let data = {"options": {"g4f": ""}};
    for (let i = 0; i < appStorage.length; i++){
        let key = appStorage.key(i);
        let item = appStorage.getItem(key);
        if (key.startsWith("conversation:")) {
            data[key] = JSON.parse(item);
        } else {
            data["options"][key] = item;
        }
    }
    data = JSON.stringify(data, null, 4);
    const blob = new Blob([data], {type: 'text/csv'});
    if(window.navigator.msSaveOrOpenBlob) {
        window.navigator.msSaveBlob(blob, filename);
    } else{
        const elem = window.document.createElement('a');
        elem.href = window.URL.createObjectURL(blob);
        elem.download = filename;
        document.body.appendChild(elem);
        elem.click();
        document.body.removeChild(elem);
    }
}
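
`save_storage` writes one JSON document: conversations keep their parsed objects under their `conversation:<id>` keys, everything else is collected as plain strings under `options`, and the `g4f` marker is what the import path in the file `change` handler above checks before restoring. A sketch of the exported shape (keys and values illustrative):

```json
{
    "options": {
        "g4f": "",
        "provider": "Bing",
        "model": "gpt-4"
    },
    "conversation:1234-abcd": {
        "id": "1234-abcd",
        "title": "Example chat",
        "items": [{"role": "user", "content": "Hi"}]
    }
}
```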

@ -0,0 +1,185 @@
import logging
import json
from typing import Iterator

try:
    import webview
except ImportError:
    ...

from g4f import version, models
from g4f import get_last_provider, ChatCompletion
from g4f.errors import VersionNotFoundError
from g4f.Provider import ProviderType, __providers__, __map__
from g4f.providers.base_provider import ProviderModelMixin
from g4f.Provider.bing.create_images import patch_provider
from g4f.Provider.Bing import Conversation

conversations: dict[str, Conversation] = {}

class Api():

    def get_models(self) -> list[str]:
        """
        Return a list of all models.

        Fetches and returns a list of all available models in the system.

        Returns:
            List[str]: A list of model names.
        """
        return models._all_models

    def get_provider_models(self, provider: str) -> list[dict]:
        if provider in __map__:
            provider: ProviderType = __map__[provider]
            if issubclass(provider, ProviderModelMixin):
                return [{"model": model, "default": model == provider.default_model} for model in provider.get_models()]
            elif provider.supports_gpt_35_turbo or provider.supports_gpt_4:
                return [
                    *([{"model": "gpt-4", "default": not provider.supports_gpt_4}] if provider.supports_gpt_4 else []),
                    *([{"model": "gpt-3.5-turbo", "default": not provider.supports_gpt_4}] if provider.supports_gpt_35_turbo else [])
                ]
            else:
                return []

    def get_providers(self) -> list[str]:
        """
        Return a list of all working providers.
        """
        return [provider.__name__ for provider in __providers__ if provider.working]

    def get_version(self):
        """
        Returns the current and latest version of the application.

        Returns:
            dict: A dictionary containing the current and latest version.
        """
        try:
            current_version = version.utils.current_version
        except VersionNotFoundError:
            current_version = None
        return {
            "version": current_version,
            "latest_version": version.utils.latest_version,
        }

    def generate_title(self):
        """
        Generates and returns a title based on the request data.

        Returns:
            dict: A dictionary with the generated title.
        """
        return {'title': ''}

    def get_conversation(self, options: dict, **kwargs) -> Iterator:
        window = webview.active_window()
        for message in self._create_response_stream(
            self._prepare_conversation_kwargs(options, kwargs),
            options.get("conversation_id")
        ):
            window.evaluate_js(f"this.add_message_chunk({json.dumps(message)})")

    def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
        """
        Prepares arguments for chat completion based on the request data.

        Reads the request and prepares the necessary arguments for handling
        a chat completion request.

        Returns:
            dict: Arguments prepared for chat completion.
        """
        provider = json_data.get('provider', None)
        if "image" in kwargs and provider is None:
            provider = "Bing"
        if provider == 'OpenaiChat':
            kwargs['auto_continue'] = True

        messages = json_data['messages']
        if json_data.get('web_search'):
            if provider == "Bing":
                kwargs['web_search'] = True
            else:
                from .internet import get_search_message
                messages[-1]["content"] = get_search_message(messages[-1]["content"])

        conversation_id = json_data.get("conversation_id")
        if conversation_id and conversation_id in conversations:
            kwargs["conversation"] = conversations[conversation_id]

        model = json_data.get('model')
        model = model if model else models.default
        patch = patch_provider if json_data.get('patch_provider') else None

        return {
            "model": model,
            "provider": provider,
            "messages": messages,
            "stream": True,
            "ignore_stream": True,
            "patch_provider": patch,
            "return_conversation": True,
            **kwargs
        }

    def _create_response_stream(self, kwargs, conversation_id: str) -> Iterator:
        """
        Creates and returns a streaming response for the conversation.

        Args:
            kwargs (dict): Arguments for creating the chat completion.

        Yields:
            str: JSON formatted response chunks for the stream.

        Raises:
            Exception: If an error occurs during the streaming process.
        """
        try:
            first = True
            for chunk in ChatCompletion.create(**kwargs):
                if first:
                    first = False
                    yield self._format_json("provider", get_last_provider(True))
                if isinstance(chunk, Conversation):
                    conversations[conversation_id] = chunk
                    yield self._format_json("conversation", conversation_id)
                elif isinstance(chunk, Exception):
                    logging.exception(chunk)
                    yield self._format_json("message", get_error_message(chunk))
                else:
                    yield self._format_json("content", chunk)
        except Exception as e:
            logging.exception(e)
            yield self._format_json('error', get_error_message(e))

    def _format_json(self, response_type: str, content):
        """
        Formats and returns a JSON response.

        Args:
            response_type (str): The type of the response.
            content: The content to be included in the response.

        Returns:
            str: A JSON formatted string.
        """
        return {
            'type': response_type,
            response_type: content
        }

def get_error_message(exception: Exception) -> str:
    """
    Generates a formatted error message from an exception.

    Args:
        exception (Exception): The exception to format.

    Returns:
        str: A formatted error message string.
    """
    return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"

@ -1,3 +1,9 @@
import sys, os
from flask import Flask

app = Flask(__name__, template_folder='./../client/html')
if getattr(sys, 'frozen', False):
    template_folder = os.path.join(sys._MEIPASS, "client")
else:
    template_folder = "../client"

app = Flask(__name__, template_folder=template_folder, static_folder=f"{template_folder}/static")

@ -1,15 +1,9 @@
import logging
import json
from flask import request, Flask
from typing import Generator
from g4f import version, models
from g4f import get_last_provider, ChatCompletion
from g4f.image import is_allowed_extension, to_image
from g4f.errors import VersionNotFoundError
from g4f.Provider import __providers__
from g4f.Provider.bing.create_images import patch_provider
from .api import Api

class Backend_Api:
class Backend_Api(Api):
    """
    Handles various endpoints in a Flask application for backend operations.

@ -33,6 +27,10 @@ class Backend_Api:
                'function': self.get_models,
                'methods': ['GET']
            },
            '/backend-api/v2/models/<provider>': {
                'function': self.get_provider_models,
                'methods': ['GET']
            },
            '/backend-api/v2/providers': {
                'function': self.get_providers,
                'methods': ['GET']

@ -54,7 +52,7 @@ class Backend_Api:
                'methods': ['POST']
            }
        }
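
The new `/backend-api/v2/models/<provider>` route registered above is what the frontend's `api("models", provider)` call hits outside a webview. A quick way to exercise it, assuming a GUI server running locally on its default port (port and provider name are assumptions):

```python
import requests

# Start a local server first, e.g. via the g4f GUI entry point.
response = requests.get("http://localhost:8080/backend-api/v2/models/Bing")
print(response.json())  # e.g. [{'model': 'balanced', 'default': True}, ...]
```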

    def handle_error(self):
        """
        Initialize the backend API with the given Flask application.

@ -64,49 +62,7 @@ class Backend_Api:
        """
        print(request.json)
        return 'ok', 200

    def get_models(self):
        """
        Return a list of all models.

        Fetches and returns a list of all available models in the system.

        Returns:
            List[str]: A list of model names.
        """
        return models._all_models

    def get_providers(self):
        """
        Return a list of all working providers.
        """
        return [provider.__name__ for provider in __providers__ if provider.working]

    def get_version(self):
        """
        Returns the current and latest version of the application.

        Returns:
            dict: A dictionary containing the current and latest version.
        """
        try:
            current_version = version.utils.current_version
        except VersionNotFoundError:
            current_version = None
        return {
            "version": current_version,
            "latest_version": version.utils.latest_version,
        }

    def generate_title(self):
        """
        Generates and returns a title based on the request data.

        Returns:
            dict: A dictionary with the generated title.
        """
        return {'title': ''}

    def handle_conversation(self):
        """
        Handles conversation requests and streams responses back.

@ -114,26 +70,10 @@ class Backend_Api:
        Returns:
            Response: A Flask response object for streaming.
        """
        kwargs = self._prepare_conversation_kwargs()

        return self.app.response_class(
            self._create_response_stream(kwargs),
            mimetype='text/event-stream'
        )

    def _prepare_conversation_kwargs(self):
        """
        Prepares arguments for chat completion based on the request data.

        Reads the request and prepares the necessary arguments for handling
        a chat completion request.

        Returns:
            dict: Arguments prepared for chat completion.
        """

        kwargs = {}
        if "image" in request.files:
            file = request.files['image']
        if "file" in request.files:
            file = request.files['file']
            if file.filename != '' and is_allowed_extension(file.filename):
                kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
                kwargs['image_name'] = file.filename

@ -141,66 +81,20 @@ class Backend_Api:
            json_data = json.loads(request.form['json'])
        else:
            json_data = request.json

        provider = json_data.get('provider', '').replace('g4f.Provider.', '')
        provider = provider if provider and provider != "Auto" else None

        if "image" in kwargs and not provider:
            provider = "Bing"
        if provider == 'OpenaiChat':
            kwargs['auto_continue'] = True
        kwargs = self._prepare_conversation_kwargs(json_data, kwargs)

        messages = json_data['messages']
        if json_data.get('web_search'):
            if provider == "Bing":
                kwargs['web_search'] = True
            else:
                # ResourceWarning: unclosed event loop
                from .internet import get_search_message
                messages[-1]["content"] = get_search_message(messages[-1]["content"])
        return self.app.response_class(
            self._create_response_stream(kwargs, json_data.get("conversation_id")),
            mimetype='text/event-stream'
        )

        model = json_data.get('model')
        model = model if model else models.default
        patch = patch_provider if json_data.get('patch_provider') else None
    def get_provider_models(self, provider: str):
        models = super().get_provider_models(provider)
        if models is None:
            return 404, "Provider not found"
        return models

        return {
            "model": model,
            "provider": provider,
            "messages": messages,
            "stream": True,
            "ignore_stream": True,
            "patch_provider": patch,
            **kwargs
        }

    def _create_response_stream(self, kwargs) -> Generator[str, None, None]:
        """
        Creates and returns a streaming response for the conversation.

        Args:
            kwargs (dict): Arguments for creating the chat completion.

        Yields:
            str: JSON formatted response chunks for the stream.

        Raises:
            Exception: If an error occurs during the streaming process.
        """
        try:
            first = True
            for chunk in ChatCompletion.create(**kwargs):
                if first:
                    first = False
                    yield self._format_json('provider', get_last_provider(True))
                if isinstance(chunk, Exception):
                    logging.exception(chunk)
                    yield self._format_json('message', get_error_message(chunk))
                else:
                    yield self._format_json('content', str(chunk))
        except Exception as e:
            logging.exception(e)
            yield self._format_json('error', get_error_message(e))

    def _format_json(self, response_type: str, content) -> str:
        """
        Formats and returns a JSON response.

@ -212,19 +106,4 @@ class Backend_Api:
        Returns:
            str: A JSON formatted string.
        """
        return json.dumps({
            'type': response_type,
            response_type: content
        }) + "\n"

def get_error_message(exception: Exception) -> str:
    """
    Generates a formatted error message from an exception.

    Args:
        exception (Exception): The exception to format.

    Returns:
        str: A formatted error message string.
    """
    return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"
        return json.dumps(super()._format_json(response_type, content)) + "\n"
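
Taken together, `_create_response_stream` plus the Flask `_format_json` override frame every chunk as one JSON object per line, so the browser receives a typed stream: a `provider` chunk first, then `content` chunks, plus an optional `conversation` chunk when the provider returns one, and `message` or `error` chunks on failure. The stream looks roughly like this (values illustrative):

```json
{"type": "provider", "provider": {"name": "Bing", "url": "https://bing.com/chat"}}
{"type": "conversation", "conversation": "1234-abcd"}
{"type": "content", "content": "Hello"}
{"type": "content", "content": ", world"}
```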

@ -1,6 +1,5 @@
from flask import render_template, send_file, redirect
from time import time
from os import urandom
import uuid
from flask import render_template, redirect

class Website:
    def __init__(self, app) -> None:

@ -18,23 +17,12 @@ class Website:
            'function': self._chat,
            'methods': ['GET', 'POST']
        },
        '/assets/<folder>/<file>': {
            'function': self._assets,
            'methods': ['GET', 'POST']
        }
    }

    def _chat(self, conversation_id):
        if '-' not in conversation_id:
            return redirect('/chat')
        return render_template('index.html', chat_id = conversation_id)
        return render_template('index.html', chat_id=conversation_id)

    def _index(self):
        return render_template('index.html', chat_id = f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}')

    def _assets(self, folder: str, file: str):
        try:
            return send_file(f"./../client/{folder}/{file}", as_attachment=False)
        except:
            return "File not found", 404
        return render_template('index.html', chat_id=str(uuid.uuid4()))

@ -1,24 +1,37 @@
import webview
from functools import partial
from platformdirs import user_config_dir
try:
    from platformdirs import user_config_dir
    has_platformdirs = True
except ImportError:
    has_platformdirs = False

from g4f.gui import run_gui
from g4f.gui.run import gui_parser
from g4f.gui.server.api import Api
import g4f.version
import g4f.debug

def run_webview(host: str = "0.0.0.0", port: int = 8080, debug: bool = True):
    webview.create_window(f"g4f - {g4f.version.utils.current_version}", f"http://{host}:{port}/")
    if debug:
        g4f.debug.logging = True
def run_webview(
    debug: bool = False,
    storage_path: str = None
):
    webview.create_window(
        f"g4f - {g4f.version.utils.current_version}",
        "client/index.html",
        text_select=True,
        js_api=Api(),
    )
    if has_platformdirs and storage_path is None:
        storage_path = user_config_dir("g4f-webview")
    webview.start(
        partial(run_gui, host, port),
        private_mode=False,
        storage_path=user_config_dir("g4f-webview"),
        debug=debug
        storage_path=storage_path,
        debug=debug,
        ssl=True
    )

if __name__ == "__main__":
    parser = gui_parser()
    args = parser.parse_args()
    run_webview(args.host, args.port, args.debug)
    if args.debug:
        g4f.debug.logging = True
    run_webview(args.debug)
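
With `js_api=Api()`, pywebview exposes every public `Api` method to the page as `window.pywebview.api.<name>`, which is why the frontend `api()` helper can call `pywebview.api["get_" + ressource]` instead of `fetch` once it detects a webview. A minimal sketch of that bridge pattern (window title, method name, and HTML are illustrative, not from this PR):

```python
import webview

class Bridge:
    # Any public method becomes window.pywebview.api.<method> in the page,
    # returning a Promise on the JavaScript side.
    def get_greeting(self) -> str:
        return "hello from Python"

# The page could run: pywebview.api.get_greeting().then(console.log)
window = webview.create_window("bridge demo", html="<html><body></body></html>", js_api=Bridge())
webview.start()
```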

@ -0,0 +1,45 @@
# -*- mode: python ; coding: utf-8 -*-


block_cipher = None


a = Analysis(
    ['webview.py'],
    pathex=[],
    binaries=[],
    datas=[],
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    a.binaries,
    Tree('client', prefix='client'),
    a.zipfiles,
    a.datas,
    [],
    name='webview',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=False,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)

@ -70,7 +70,14 @@ class AbstractProvider(BaseProvider):
            loop.run_in_executor(executor, create_func),
            timeout=kwargs.get("timeout")
        )

    @classmethod
    def get_parameters(cls) -> dict:
        return signature(
            cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
            cls.create_async if issubclass(cls, AsyncProvider) else
            cls.create_completion
        ).parameters

    @classmethod
    @property
    def params(cls) -> str:

@ -83,17 +90,12 @@ class AbstractProvider(BaseProvider):
        Returns:
            str: A string listing the supported parameters.
        """
        sig = signature(
            cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
            cls.create_async if issubclass(cls, AsyncProvider) else
            cls.create_completion
        )

        def get_type_name(annotation: type) -> str:
            return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)

        args = ""
        for name, param in sig.parameters.items():
        for name, param in cls.get_parameters().items():
            if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
                continue
            args += f"\n {name}"
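
`params` renders that parameter list as a readable signature string, now driven by the shared `get_parameters()` helper instead of a local `signature()` call. It is typically inspected directly on a provider class:

```python
import g4f

# Class-level property listing the kwargs a provider accepts,
# e.g. model, messages, proxy, timeout.
print(g4f.Provider.Bing.params)
```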

@ -1,22 +1,48 @@
from __future__ import annotations

from typing import Union
from aiohttp import ClientResponse
from requests import Response as RequestsResponse

try:
    from curl_cffi.requests import Session, Response
    from .curl_cffi import StreamResponse, StreamSession
    from .curl_cffi import StreamResponse, StreamSession, FormData
    has_curl_cffi = True
except ImportError:
    from typing import Type as Session, Type as Response
    from .aiohttp import StreamResponse, StreamSession
    from .aiohttp import StreamResponse, StreamSession, FormData
    has_curl_cffi = False
try:
    import webview
    import asyncio
    has_webview = True
except ImportError:
    has_webview = False

from .raise_for_status import raise_for_status
from ..webdriver import WebDriver, WebDriverSession
from ..webdriver import bypass_cloudflare, get_driver_cookies
from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
from .defaults import DEFAULT_HEADERS
from ..errors import MissingRequirementsError
from .defaults import DEFAULT_HEADERS, WEBVIEW_HAEDERS

async def get_args_from_webview(url: str) -> dict:
    if not has_webview:
        raise MissingRequirementsError('Install "webview" package')
    window = webview.create_window("", url, hidden=True)
    await asyncio.sleep(2)
    body = None
    while body is None:
        try:
            await asyncio.sleep(1)
            body = window.dom.get_element("body:not(.no-js)")
        except:
            ...
    headers = {
        **WEBVIEW_HAEDERS,
        "User-Agent": window.evaluate_js("this.navigator.userAgent"),
        "Accept-Language": window.evaluate_js("this.navigator.language"),
        "Referer": window.real_url
    }
    cookies = [list(*cookie.items()) for cookie in window.get_cookies()]
    cookies = dict([(name, cookie.value) for name, cookie in cookies])
    window.destroy()
    return {"headers": headers, "cookies": cookies}

def get_args_from_browser(
    url: str,

@ -79,24 +105,4 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str =
        proxies={"https": proxy, "http": proxy},
        timeout=timeout,
        impersonate="chrome"
    )

async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
    if response.status in (429, 402):
        raise RateLimitError(f"Response {response.status}: Rate limit reached")
    message = await response.text() if not response.ok and message is None else message
    if response.status == 403 and "<title>Just a moment...</title>" in message:
        raise ResponseStatusError(f"Response {response.status}: Cloudflare detected")
    elif not response.ok:
        raise ResponseStatusError(f"Response {response.status}: {message}")

def raise_for_status(response: Union[StreamResponse, ClientResponse, Response, RequestsResponse], message: str = None):
    if isinstance(response, StreamSession) or isinstance(response, ClientResponse):
        return raise_for_status_async(response, message)

    if response.status_code in (429, 402):
        raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
    elif response.status_code == 403 and "<title>Just a moment...</title>" in response.text:
        raise ResponseStatusError(f"Response {response.status_code}: Cloudflare detected")
    elif not response.ok:
        raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")
    )

@ -1,6 +1,6 @@
from __future__ import annotations

from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector
from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector, FormData
from typing import AsyncIterator, Any, Optional

from .defaults import DEFAULT_HEADERS

@ -1,6 +1,6 @@
from __future__ import annotations

from curl_cffi.requests import AsyncSession, Response
from curl_cffi.requests import AsyncSession, Response, CurlMime
from typing import AsyncGenerator, Any
from functools import partialmethod
import json

@ -65,6 +65,8 @@ class StreamSession(AsyncSession):
    def request(
        self, method: str, url: str, **kwargs
    ) -> StreamResponse:
        """Create and return a StreamResponse object for the given HTTP request."""
        if isinstance(kwargs.get("data"), CurlMime):
            kwargs["multipart"] = kwargs.pop("data")
        return StreamResponse(super().request(method, url, stream=True, **kwargs))

@ -75,3 +77,7 @@ class StreamSession(AsyncSession):
    put = partialmethod(request, "PUT")
    patch = partialmethod(request, "PATCH")
    delete = partialmethod(request, "DELETE")

class FormData(CurlMime):
    def add_field(self, name, data=None, content_type: str = None, filename: str = None) -> None:
        self.addpart(name, content_type=content_type, filename=filename, data=data)
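
The `FormData` shim gives the curl_cffi backend the same `add_field` interface as aiohttp's `FormData`, so callers can build multipart bodies without knowing which HTTP backend was imported. A small usage sketch (URL and field values are illustrative, and this assumes `FormData` is used via `g4f.requests` as the import hunk above shows):

```python
import asyncio
from g4f.requests import FormData, StreamSession

async def upload_example():
    # Works whether FormData is aiohttp's class or the CurlMime shim above;
    # StreamSession.request() moves a CurlMime body into the multipart kwarg.
    form = FormData()
    form.add_field("json", '{"hello": "world"}', content_type="application/json")
    async with StreamSession() as session:
        async with session.post("https://httpbin.org/post", data=form) as response:
            print(response.status)

asyncio.run(upload_example())
```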

@ -16,4 +16,14 @@ DEFAULT_HEADERS = {
    "referer": "",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "en-US",
}
WEBVIEW_HAEDERS = {
    "Accept": "*/*",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "",
    "Referer": "",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "User-Agent": "",
}

@ -0,0 +1,34 @@
from __future__ import annotations

from typing import Union
from aiohttp import ClientResponse
from requests import Response as RequestsResponse

from ..errors import ResponseStatusError, RateLimitError
from . import Response, StreamResponse

class CloudflareError(ResponseStatusError):
    ...

def is_cloudflare(text: str) -> bool:
    return '<div id="cf-please-wait">' in text or "<title>Just a moment...</title>" in text

async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
    if response.status in (429, 402):
        raise RateLimitError(f"Response {response.status}: Rate limit reached")
    message = await response.text() if not response.ok and message is None else message
    if response.status == 403 and is_cloudflare(message):
        raise CloudflareError(f"Response {response.status}: Cloudflare detected")
    elif not response.ok:
        raise ResponseStatusError(f"Response {response.status}: {message}")

def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, RequestsResponse], message: str = None):
    if hasattr(response, "status"):
        return raise_for_status_async(response, message)

    if response.status_code in (429, 402):
        raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
    elif response.status_code == 403 and is_cloudflare(response.text):
        raise CloudflareError(f"Response {response.status_code}: Cloudflare detected")
    elif not response.ok:
        raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")
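
The dispatch is duck-typed: aiohttp and `StreamResponse` objects carry `.status` and need an awaited text read, while requests and curl_cffi responses carry `.status_code` and are handled synchronously. A minimal sketch of the synchronous path (URL illustrative):

```python
import requests
from g4f.requests.raise_for_status import raise_for_status, CloudflareError

response = requests.get("https://example.com")
try:
    # Raises RateLimitError on 429/402, CloudflareError on a 403 challenge
    # page, and ResponseStatusError on any other non-ok status.
    raise_for_status(response)
except CloudflareError as error:
    print(error)
```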

@ -6,6 +6,7 @@ from functools import cached_property
from importlib.metadata import version as get_package_version, PackageNotFoundError
from subprocess import check_output, CalledProcessError, PIPE
from .errors import VersionNotFoundError
from . import debug

PACKAGE_NAME = "g4f"
GITHUB_REPOSITORY = "xtekky/gpt4free"

@ -64,6 +65,9 @@ class VersionUtils:
            VersionNotFoundError: If the version cannot be determined from the package manager,
            Docker environment, or git repository.
        """
        if debug.version:
            return debug.version

        # Read from package manager
        try:
            return get_package_version(PACKAGE_NAME)

@ -15,7 +15,6 @@ fastapi
uvicorn
flask
py-arkose-generator
async-property
undetected-chromedriver>=3.5.5
brotli
beautifulsoup4