Merge pull request #1691 from hlohaus/retry

Add model preselection in gui
H Lohaus 2024-03-16 18:22:26 +01:00 committed by GitHub
commit fb2061da48
34 changed files with 994 additions and 567 deletions

View File

@@ -12,7 +12,7 @@ from aiohttp import ClientSession, ClientTimeout, BaseConnector, WSMsgType
 from ..typing import AsyncResult, Messages, ImageType, Cookies
 from ..image import ImageRequest
 from ..errors import ResponseStatusError
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector, get_random_hex
 from .bing.upload_image import upload_image
 from .bing.conversation import Conversation, create_conversation, delete_conversation
@@ -26,8 +26,9 @@ class Tones:
     creative = "Creative"
     balanced = "Balanced"
     precise = "Precise"
+    copilot = "Balanced"

-class Bing(AsyncGeneratorProvider):
+class Bing(AsyncGeneratorProvider, ProviderModelMixin):
     """
     Bing provider for generating responses using the Bing API.
     """
@@ -35,18 +36,22 @@ class Bing(AsyncGeneratorProvider):
     working = True
     supports_message_history = True
     supports_gpt_4 = True
+    default_model = "balanced"
+    models = [key for key in Tones.__dict__ if not key.startswith("__")]

-    @staticmethod
+    @classmethod
     def create_async_generator(
+        cls,
         model: str,
         messages: Messages,
         proxy: str = None,
         timeout: int = 900,
         cookies: Cookies = None,
         connector: BaseConnector = None,
-        tone: str = Tones.balanced,
+        tone: str = None,
         image: ImageType = None,
         web_search: bool = False,
+        context: str = None,
         **kwargs
     ) -> AsyncResult:
         """
@@ -62,13 +67,12 @@ class Bing(AsyncGeneratorProvider):
         :param web_search: Flag to enable or disable web search.
         :return: An asynchronous result object.
         """
-        if len(messages) < 2:
-            prompt = messages[0]["content"]
-            context = None
-        else:
-            prompt = messages[-1]["content"]
-            context = create_context(messages[:-1])
+        prompt = messages[-1]["content"]
+        if context is None:
+            context = create_context(messages[:-1]) if len(messages) > 1 else None
+        if tone is None:
+            tone = tone if model.startswith("gpt-4") else model
+        tone = cls.get_model("" if tone is None else tone.lower())
         gpt4_turbo = True if model.startswith("gpt-4-turbo") else False

         return stream_generate(
@@ -86,7 +90,9 @@ def create_context(messages: Messages) -> str:
     :return: A string representing the context created from the messages.
     """
     return "".join(
-        f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}"
+        f"[{message['role']}]" + ("(#message)"
+        if message['role'] != "system"
+        else "(#additional_instructions)") + f"\n{message['content']}"
         for message in messages
     ) + "\n\n"
@@ -122,7 +128,7 @@ class Defaults:
         "ActionRequest","Chat",
         "ConfirmationCard", "Context",
         "InternalSearchQuery", #"InternalSearchResult",
-        "Disengaged", #"InternalLoaderMessage",
+        #"Disengaged", "InternalLoaderMessage",
         "Progress", "RenderCardRequest",
         "RenderContentRequest", "AdsQuery",
         "SemanticSerp", "GenerateContentQuery",
@@ -131,53 +137,93 @@ class Defaults:
     ]

     sliceIds = {
-        "Balanced": [
+        "balanced": [
             "supllmnfe","archnewtf",
             "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl",
             "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t",
             "bingfc", "0225unsticky1", "0228scss0",
             "defquerycf", "defcontrol", "3022tphpv"
         ],
-        "Creative": [
+        "creative": [
             "bgstream", "fltltst2c",
             "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
             "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
             "bingfccf", "0225unsticky1", "0228scss0",
             "3022tpvs0"
         ],
-        "Precise": [
+        "precise": [
             "bgstream", "fltltst2c",
             "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
             "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
             "bingfccf", "0225unsticky1", "0228scss0",
             "defquerycf", "3022tpvs0"
         ],
+        "copilot": []
     }

     optionsSets = {
-        "Balanced": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg", "autosave",
-            "iyxapbing", "iycapbing",
-            "galileo", "saharagenconv5", "gldcl1p",
-            "gpt4tmncnp"
-        ],
-        "Creative": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg",
-            "iyxapbing", "iycapbing",
-            "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
-            "gpt4tmncnp"
-        ],
-        "Precise": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg",
-            "iyxapbing", "iycapbing",
-            "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
-            "clgalileo", "gencontentv3"
-        ],
+        "balanced": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "galileo", "saharagenconv5", "gldcl1p",
+                "gpt4tmncnp"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "galileo", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall"
+            ]
+        },
+        "creative": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg",
+                "iyxapbing", "iycapbing",
+                "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
+                "gpt4tmncnp"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "h3imaginative", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
+                "clgalileo", "nocache", "up4rp14bstcst"
+            ]
+        },
+        "precise": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg",
+                "iyxapbing", "iycapbing",
+                "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
+                "clgalileo", "gencontentv3"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "h3precise", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
+                "clgalileo", "nocache", "up4rp14bstcst"
+            ]
+        },
+        "copilot": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg",
+            "iyxapbing", "iycapbing",
+            "h3precise", "clgalileo", "gencontentv3", "prjupy"
+        ],
     }
@@ -232,7 +278,8 @@ def create_message(
     context: str = None,
     image_request: ImageRequest = None,
     web_search: bool = False,
-    gpt4_turbo: bool = False
+    gpt4_turbo: bool = False,
+    new_conversation: bool = True
 ) -> str:
     """
     Creates a message for the Bing API with specified parameters.
@@ -247,7 +294,12 @@ def create_message(
     :return: A formatted string message for the Bing API.
     """
-    options_sets = []
+    options_sets = Defaults.optionsSets[tone]
+    if not web_search and "nosearch" in options_sets:
+        options_sets = options_sets["nosearch"]
+    elif "default" in options_sets:
+        options_sets = options_sets["default"]
+    options_sets = options_sets.copy()
     if gpt4_turbo:
         options_sets.append("dlgpt4t")
@@ -255,16 +307,16 @@ def create_message(
     struct = {
         "arguments":[{
             "source": "cib",
-            "optionsSets": [*Defaults.optionsSets[tone], *options_sets],
+            "optionsSets": options_sets,
             "allowedMessageTypes": Defaults.allowedMessageTypes,
             "sliceIds": Defaults.sliceIds[tone],
             "verbosity": "verbose",
-            "scenario": "SERP",
+            "scenario": "CopilotMicrosoftCom", # "SERP",
             "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
             "traceId": get_random_hex(40),
             "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
             "gptId": "copilot",
-            "isStartOfSession": True,
+            "isStartOfSession": new_conversation,
             "requestId": request_id,
             "message":{
                 **Defaults.location,
@@ -277,8 +329,7 @@ def create_message(
                 "requestId": request_id,
                 "messageId": request_id
             },
-            "tone": tone,
-            "extraExtensionParameters": {"gpt-creator-persona": {"personaId": "copilot"}},
+            "tone": getattr(Tones, tone),
             "spokenTextMode": "None",
             "conversationId": conversation.conversationId,
             "participant": {"id": conversation.clientId}
@@ -298,7 +349,7 @@ def create_message(
         struct['arguments'][0]['previousMessages'] = [{
             "author": "user",
             "description": context,
-            "contextType": "WebPage",
+            "contextType": "ClientApp",
             "messageType": "Context",
             "messageId": "discover-web--page-ping-mriduna-----"
         }]
@@ -317,8 +368,9 @@ async def stream_generate(
     gpt4_turbo: bool = False,
     timeout: int = 900,
     conversation: Conversation = None,
+    return_conversation: bool = False,
     raise_apology: bool = False,
-    max_retries: int = 5,
+    max_retries: int = None,
     sleep_retry: int = 15,
     **kwargs
 ):
@@ -336,13 +388,20 @@ async def stream_generate(
     :return: An asynchronous generator yielding responses.
     """
     headers = create_headers(cookies)
+    new_conversation = conversation is None
+    max_retries = (5 if new_conversation else 0) if max_retries is None else max_retries
     async with ClientSession(
         timeout=ClientTimeout(total=timeout), connector=connector
     ) as session:
-        while conversation is None:
+        first = True
+        while first or conversation is None:
+            first = False
             do_read = True
             try:
-                conversation = await create_conversation(session, headers)
+                if conversation is None:
+                    conversation = await create_conversation(session, headers, tone)
+                    if return_conversation:
+                        yield conversation
             except ResponseStatusError as e:
                 max_retries -= 1
                 if max_retries < 1:
@@ -353,8 +412,10 @@ async def stream_generate(
                 await asyncio.sleep(sleep_retry)
                 continue

-            image_request = await upload_image(session, image, tone, headers) if image else None
+            image_request = await upload_image(session, image, getattr(Tones, tone), headers) if image else None
             async with session.ws_connect(
+                'wss://s.copilot.microsoft.com/sydney/ChatHub'
+                if tone == "copilot" else
                 'wss://sydney.bing.com/sydney/ChatHub',
                 autoping=False,
                 params={'sec_access_token': conversation.conversationSignature},
@@ -363,7 +424,12 @@ async def stream_generate(
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                 await wss.send_str(format_message({"type": 6}))
                 await wss.receive(timeout=timeout)
-                await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
+                await wss.send_str(create_message(
+                    conversation, prompt, tone,
+                    context if new_conversation else None,
+                    image_request, web_search, gpt4_turbo,
+                    new_conversation
+                ))

                 response_txt = ''
                 returned_text = ''
                 message_id = None
@@ -399,14 +465,15 @@ async def stream_generate(
                             image_client = BingCreateImages(cookies, proxy)
                             image_response = await image_client.create_async(prompt)
                         except Exception as e:
-                            response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
-                            do_read = False
+                            if debug.logging:
+                                print(f"Bing: Failed to create images: {e}")
+                            image_response = f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
                     if response_txt.startswith(returned_text):
                         new = response_txt[len(returned_text):]
-                        if new != "\n":
+                        if new not in ("", "\n"):
                             yield new
                             returned_text = response_txt
-                    if image_response:
+                    if image_response is not None:
                         yield image_response
                 elif response.get('type') == 2:
                     result = response['item']['result']
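
With ProviderModelMixin, the Bing provider now exposes its tones as selectable models ("creative", "balanced", "precise", "copilot") through default_model and the models list, and resolves them with get_model(). A minimal usage sketch under the assumptions visible in this diff (the g4f.Provider.Bing import path and the call signature are taken from the code above, not from a documented API):

import asyncio
from g4f.Provider.Bing import Bing  # assumed module path

async def main():
    # "copilot" routes conversation creation and the websocket to
    # copilot.microsoft.com (see the conversation.py hunks below);
    # other names are lowercased and matched against the Tones class.
    async for chunk in Bing.create_async_generator(
        model="copilot",
        messages=[{"role": "user", "content": "Hello, who are you?"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())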

View File

@@ -4,14 +4,18 @@ import re
 import json
 import base64
 import uuid
-from asyncio import get_running_loop
-from aiohttp import ClientSession, FormData, BaseConnector, CookieJar
+try:
+    from ..requests.curl_cffi import FormData
+    has_curl_cffi = True
+except ImportError:
+    has_curl_cffi = False

 from ..typing import AsyncResult, Messages, ImageType, Cookies
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, get_connector
+from .helper import format_prompt
 from ..image import to_bytes, ImageResponse
-from ..requests import WebDriver, raise_for_status, get_args_from_browser
+from ..requests import StreamSession, raise_for_status
+from ..errors import MissingRequirementsError

 class You(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://you.com"
@@ -33,8 +37,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     model_aliases = {
         "claude-v2": "claude-2"
     }
-    _args: dict = None
-    _cookie_jar: CookieJar = None
     _cookies = None
     _cookies_used = 0
@@ -45,19 +47,12 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         image: ImageType = None,
         image_name: str = None,
-        connector: BaseConnector = None,
-        webdriver: WebDriver = None,
         proxy: str = None,
         chat_mode: str = "default",
         **kwargs,
     ) -> AsyncResult:
-        if cls._args is None:
-            cls._args = get_args_from_browser(cls.url, webdriver, proxy)
-            cls._cookie_jar = CookieJar(loop=get_running_loop())
-        else:
-            if "cookies" in cls._args:
-                del cls._args["cookies"]
-            cls._cookie_jar._loop = get_running_loop()
+        if not has_curl_cffi:
+            raise MissingRequirementsError('Install "curl_cffi" package')
         if image is not None:
             chat_mode = "agent"
         elif not model or model == cls.default_model:
@@ -67,10 +62,9 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         else:
             chat_mode = "custom"
             model = cls.get_model(model)
-        async with ClientSession(
-            connector=get_connector(connector, proxy),
-            cookie_jar=cls._cookie_jar,
-            **cls._args
+        async with StreamSession(
+            proxy=proxy,
+            impersonate="chrome"
         ) as session:
             cookies = await cls.get_cookies(session) if chat_mode != "default" else None
             upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""
@@ -82,8 +76,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             #     and idx < len(questions)
             # ]
             headers = {
-                "accept": "text/event-stream",
-                "referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
+                "Accept": "text/event-stream",
+                "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
             }
             data = {
                 "userFiles": upload,
@@ -106,12 +100,12 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                 cookies=cookies
             ) as response:
                 await raise_for_status(response)
-                async for line in response.content:
+                async for line in response.iter_lines():
                     if line.startswith(b'event: '):
-                        event = line[7:-1].decode()
+                        event = line[7:].decode()
                     elif line.startswith(b'data: '):
                         if event in ["youChatUpdate", "youChatToken"]:
-                            data = json.loads(line[6:-1])
+                            data = json.loads(line[6:])
                             if event == "youChatToken" and event in data:
                                 yield data[event]
                             elif event == "youChatUpdate" and "t" in data:
@@ -122,7 +116,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                                 yield data["t"]

     @classmethod
-    async def upload_file(cls, client: ClientSession, cookies: Cookies, file: bytes, filename: str = None) -> dict:
+    async def upload_file(cls, client: StreamSession, cookies: Cookies, file: bytes, filename: str = None) -> dict:
         async with client.get(
             f"{cls.url}/api/get_nonce",
             cookies=cookies,
@@ -146,7 +140,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         return result

     @classmethod
-    async def get_cookies(cls, client: ClientSession) -> Cookies:
+    async def get_cookies(cls, client: StreamSession) -> Cookies:
         if not cls._cookies or cls._cookies_used >= 5:
             cls._cookies = await cls.create_cookies(client)
             cls._cookies_used = 0
@@ -173,7 +167,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         return f"Basic {auth}"

     @classmethod
-    async def create_cookies(cls, client: ClientSession) -> Cookies:
+    async def create_cookies(cls, client: StreamSession) -> Cookies:
         user_uuid = str(uuid.uuid4())
         async with client.post(
             "https://web.stytch.com/sdk/v1/passwords",

View File

@@ -20,7 +20,7 @@ class Conversation:
         self.clientId = clientId
         self.conversationSignature = conversationSignature

-async def create_conversation(session: ClientSession, headers: dict) -> Conversation:
+async def create_conversation(session: ClientSession, headers: dict, tone: str) -> Conversation:
     """
     Create a new conversation asynchronously.

@@ -31,7 +31,10 @@ async def create_conversation(session: ClientSession, headers: dict) -> Conversa
     Returns:
         Conversation: An instance representing the created conversation.
     """
-    url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
+    if tone == "copilot":
+        url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1634.3-nodesign2"
+    else:
+        url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
     async with session.get(url, headers=headers) as response:
         await raise_for_status(response, "Failed to create conversation")
         data = await response.json()

View File

@@ -14,6 +14,12 @@ try:
 except ImportError:
     has_arkose_generator = False

+try:
+    import webview
+    has_webview = True
+except ImportError:
+    has_webview = False
+
 try:
     from selenium.webdriver.common.by import By
     from selenium.webdriver.support.ui import WebDriverWait
@@ -25,10 +31,10 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import get_cookies
 from ...webdriver import get_browser
 from ...typing import AsyncResult, Messages, Cookies, ImageType, Union, AsyncIterator
-from ...requests import get_args_from_browser
+from ...requests import get_args_from_browser, raise_for_status
 from ...requests.aiohttp import StreamSession
 from ...image import to_image, to_bytes, ImageResponse, ImageRequest
-from ...errors import MissingRequirementsError, MissingAuthError
+from ...errors import MissingRequirementsError, MissingAuthError, ProviderNotWorkingError
 from ... import debug

 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -134,7 +140,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         }
         # Post the image data to the service and get the image data
         async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
-            response.raise_for_status()
+            cls._update_request_args()
+            await raise_for_status(response)
             image_data = {
                 **data,
                 **await response.json(),
@@ -152,14 +159,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 "x-ms-blob-type": "BlockBlob"
             }
         ) as response:
-            response.raise_for_status()
+            await raise_for_status(response)
         # Post the file ID to the service and get the download URL
         async with session.post(
             f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
             json={},
             headers=headers
         ) as response:
-            response.raise_for_status()
+            cls._update_request_args(session)
+            await raise_for_status(response)
             image_data["download_url"] = (await response.json())["download_url"]
         return ImageRequest(image_data)
@@ -178,7 +186,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if not cls.default_model:
             async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
                 cls._update_request_args(session)
-                response.raise_for_status()
+                await raise_for_status(response)
                 data = await response.json()
                 if "categories" in data:
                     cls.default_model = data["categories"][-1]["default_model"]
@@ -261,7 +269,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
         try:
             async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
-                response.raise_for_status()
+                cls._update_request_args(session)
+                await raise_for_status(response)
                 download_url = (await response.json())["download_url"]
                 return ImageResponse(download_url, prompt)
         except Exception as e:
@@ -288,6 +297,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             json={"is_visible": False},
             headers=headers
         ) as response:
+            cls._update_request_args(session)
             ...

     @classmethod
@@ -337,31 +347,32 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if parent_id is None:
             parent_id = str(uuid.uuid4())

-        # Read api_key from arguments
-        api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
         async with StreamSession(
             proxies={"https": proxy},
             impersonate="chrome",
             timeout=timeout
         ) as session:
-            # Read api_key and cookies from cache / browser config
+            api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
             if cls._headers is None or cls._expires is None or time.time() > cls._expires:
-                if api_key is None:
-                    # Read api_key from cookies
+                if cls._headers is None:
                     cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
                     api_key = cookies["access_token"] if "access_token" in cookies else api_key
-                cls._create_request_args(cookies)
+                if api_key is None:
+                    try:
+                        await cls.webview_access_token() if has_webview else None
+                    except Exception as e:
+                        if debug.logging:
+                            print(f"Use webview failed: {e}")
             else:
                 api_key = cls._api_key if api_key is None else api_key
-            # Read api_key with session cookies
-            #if api_key is None and cookies:
-            #    api_key = await cls.fetch_access_token(session, cls._headers)
-            # Load default model
-            if cls.default_model is None and api_key is not None:
+
+            if api_key is not None:
+                cls._create_request_args(cookies)
+                cls._set_api_key(api_key)
+
+            if cls.default_model is None and cls._headers is not None:
                 try:
                     if not model:
-                        cls._set_api_key(api_key)
                         cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
                     else:
                         cls.default_model = cls.get_model(model)
@@ -369,8 +380,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     if debug.logging:
                         print("OpenaiChat: Load default_model failed")
                         print(f"{e.__class__.__name__}: {e}")
-            # Browse api_key and default model
-            if api_key is None or cls.default_model is None:
+            if cls.default_model is None:
                 login_url = os.environ.get("G4F_LOGIN_URL")
                 if login_url:
                     yield f"Please login: [ChatGPT]({login_url})\n\n"
@@ -379,20 +389,21 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 except MissingRequirementsError:
                     raise MissingAuthError(f'Missing "access_token". Add a "api_key" please')
                 cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
-            else:
-                cls._set_api_key(api_key)

             async with session.post(
                 f"{cls.url}/backend-api/sentinel/chat-requirements",
                 json={"conversation_mode_kind": "primary_assistant"},
                 headers=cls._headers
             ) as response:
-                response.raise_for_status()
+                cls._update_request_args(session)
+                await raise_for_status(response)
                 data = await response.json()
+                blob = data["arkose"]["dx"]
                 need_arkose = data["arkose"]["required"]
                 chat_token = data["token"]

             if need_arkose and not has_arkose_generator:
+                raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
                 raise MissingRequirementsError('Install "py-arkose-generator" package')

             try:
@@ -407,6 +418,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             while fields.finish_reason is None:
                 conversation_id = conversation_id if fields.conversation_id is None else fields.conversation_id
                 parent_id = parent_id if fields.message_id is None else fields.message_id
+                websocket_request_id = str(uuid.uuid4())
                 data = {
                     "action": action,
                     "conversation_mode": {"kind": "primary_assistant"},
@@ -416,25 +428,29 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     "parent_message_id": parent_id,
                     "model": model,
                     "history_and_training_disabled": history_disabled and not auto_continue,
+                    "websocket_request_id": websocket_request_id
                 }
                 if action != "continue":
                     messages = messages if conversation_id is None else [messages[-1]]
                     data["messages"] = cls.create_messages(messages, image_request)
+                headers = {
+                    "Accept": "text/event-stream",
+                    "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
+                    **cls._headers
+                }
+                if need_arkose:
+                    raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
+                    headers["OpenAI-Sentinel-Arkose-Token"] = await cls.get_arkose_token(session, cls._headers, blob)
+                    headers["OpenAI-Sentinel-Chat-Requirements-Token"] = chat_token
                 async with session.post(
                     f"{cls.url}/backend-api/conversation",
                     json=data,
-                    headers={
-                        "Accept": "text/event-stream",
-                        **({"OpenAI-Sentinel-Arkose-Token": await cls.get_arkose_token(session)} if need_arkose else {}),
-                        "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
-                        **cls._headers
-                    }
+                    headers=headers
                 ) as response:
                     cls._update_request_args(session)
-                    if not response.ok:
-                        raise RuntimeError(f"Response {response.status}: {await response.text()}")
-                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
+                    await raise_for_status(response)
+                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields, websocket_request_id):
                         if response_fields:
                             response_fields = False
                             yield fields
@@ -447,21 +463,35 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     await cls.delete_conversation(session, cls._headers, fields.conversation_id)

     @staticmethod
-    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str) -> AsyncIterator:
+    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str, is_curl: bool) -> AsyncIterator:
         while True:
-            message = await ws.receive_json()
+            if is_curl:
+                message = json.loads(ws.recv()[0])
+            else:
+                message = await ws.receive_json()
             if message["conversation_id"] == conversation_id:
                 yield base64.b64decode(message["body"])

     @classmethod
-    async def iter_messages_chunk(cls, messages: AsyncIterator, session: StreamSession, fields: ResponseFields) -> AsyncIterator:
+    async def iter_messages_chunk(
+        cls,
+        messages: AsyncIterator,
+        session: StreamSession,
+        fields: ResponseFields
+    ) -> AsyncIterator:
         last_message: int = 0
         async for message in messages:
             if message.startswith(b'{"wss_url":'):
                 message = json.loads(message)
-                async with session.ws_connect(message["wss_url"]) as ws:
-                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws, message["conversation_id"]), session, fields):
+                ws = await session.ws_connect(message["wss_url"])
+                try:
+                    async for chunk in cls.iter_messages_chunk(
+                        cls.iter_messages_ws(ws, message["conversation_id"], hasattr(ws, "recv")),
+                        session, fields
+                    ):
                         yield chunk
+                finally:
+                    await ws.aclose()
                 break
             async for chunk in cls.iter_messages_line(session, message, fields):
                 if fields.finish_reason is not None:
@@ -513,6 +543,43 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     if "finish_details" in line["message"]["metadata"]:
                         fields.finish_reason = line["message"]["metadata"]["finish_details"]["type"]

+    @classmethod
+    async def webview_access_token(cls) -> str:
+        window = webview.create_window("OpenAI Chat", cls.url)
+        await asyncio.sleep(3)
+        prompt_input = None
+        while not prompt_input:
+            try:
+                await asyncio.sleep(1)
+                prompt_input = window.dom.get_element("#prompt-textarea")
+            except:
+                ...
+        window.evaluate_js("""
+            this._fetch = this.fetch;
+            this.fetch = async (url, options) => {
+                const response = await this._fetch(url, options);
+                if (url == "https://chat.openai.com/backend-api/conversation") {
+                    this._headers = options.headers;
+                    return response;
+                }
+                return response;
+            };
+        """)
+        window.evaluate_js("""
+            document.querySelector('.from-token-main-surface-secondary').click();
+        """)
+        headers = None
+        while headers is None:
+            headers = window.evaluate_js("this._headers")
+            await asyncio.sleep(1)
+        headers["User-Agent"] = window.evaluate_js("this.navigator.userAgent")
+        cookies = [list(*cookie.items()) for cookie in window.get_cookies()]
+        window.destroy()
+        cls._cookies = dict([(name, cookie.value) for name, cookie in cookies])
+        cls._headers = headers
+        cls._expires = int(time.time()) + 60 * 60 * 4
+        cls._update_cookie_header()
+
     @classmethod
     def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> None:
         """
@@ -542,10 +609,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 cls._update_cookie_header()
                 cls._set_api_key(access_token)
             finally:
                 driver.close()

     @classmethod
-    async def get_arkose_token(cls, session: StreamSession) -> str:
+    async def get_arkose_token(cls, session: StreamSession, headers: dict, blob: str) -> str:
         """
         Obtain an Arkose token for the session.
@@ -559,16 +626,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             RuntimeError: If unable to retrieve the token.
         """
         config = {
-            "pkey": "3D86FBBA-9D22-402A-B512-3420086BA6CC",
+            "pkey": "35536E1E-65B4-4D96-9D97-6ADB7EFF8147",
             "surl": "https://tcr9i.chat.openai.com",
-            "headers": {
-                "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
-            },
+            "headers": headers,
             "site": cls.url,
+            "data": {"blob": blob}
         }
         args_for_request = get_values_for_request(config)
         async with session.post(**args_for_request) as response:
-            response.raise_for_status()
+            await raise_for_status(response)
             decoded_json = await response.json()
             if "token" in decoded_json:
                 return decoded_json["token"]
@@ -591,7 +657,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):

     @classmethod
     def _create_request_args(cls, cookies: Union[Cookies, None]):
-        cls._headers = {}
+        cls._headers = {
+            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
+        }
         cls._cookies = {} if cookies is None else cookies
         cls._update_cookie_header()
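
The reworked auth flow tries an explicit token first (the api_key argument or an access_token kwarg), then browser cookies, then the new pywebview login, before falling back to a webdriver. A hedged usage sketch passing the token explicitly (module path and parameter names are inferred from this diff, not from a documented API):

import asyncio
from g4f.Provider.needs_auth.OpenaiChat import OpenaiChat  # assumed module path

async def main():
    async for chunk in OpenaiChat.create_async_generator(
        model="",  # empty: the provider loads its default model from /backend-api/models
        messages=[{"role": "user", "content": "Say hi"}],
        api_key="<access_token>",  # or access_token=... via kwargs, as read above
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())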

View File

@@ -69,11 +69,14 @@ def get_model_and_provider(model : Union[Model, str],
     if isinstance(model, Model):
         model = model.name

-    if ignored and isinstance(provider, BaseRetryProvider):
-        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
-
     if not ignore_working and not provider.working:
         raise ProviderNotWorkingError(f'{provider.__name__} is not working')

+    if not ignore_working and isinstance(provider, BaseRetryProvider):
+        provider.providers = [p for p in provider.providers if p.working]
+
+    if ignored and isinstance(provider, BaseRetryProvider):
+        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
+
     if not ignore_stream and not provider.supports_stream and stream:
         raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
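
The reordering matters: a retry provider now drops non-working children before the ignored list is applied, and both filters respect ignore_working. A small self-contained sketch of the resulting filter order (the provider objects here are hypothetical stand-ins, not g4f types):

class FakeProvider:
    def __init__(self, name: str, working: bool):
        self.__name__ = name
        self.working = working

providers = [FakeProvider("Bing", True), FakeProvider("You", False), FakeProvider("OpenaiChat", True)]
ignored, ignore_working = ["OpenaiChat"], False

if not ignore_working:
    providers = [p for p in providers if p.working]                   # drops You
if ignored:
    providers = [p for p in providers if p.__name__ not in ignored]   # drops OpenaiChat
print([p.__name__ for p in providers])  # ['Bing']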

View File

@@ -3,4 +3,5 @@ from .providers.types import ProviderType
 logging: bool = False
 version_check: bool = True
 last_provider: ProviderType = None
 last_model: str = None
+version: str = None
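
debug.py gains a version field next to the existing flags, so callers can read or pin the reported package version. A short sketch of how these module-level switches are used (the g4f.debug module path is assumed from the relative import above):

import g4f.debug as debug

debug.logging = True         # providers print diagnostics, e.g. "Use webview failed: ..."
debug.version_check = False  # hypothetical usage: skip the startup version check
print(debug.version)         # None until the package fills it in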

View File

@@ -1,5 +1,5 @@
 <!DOCTYPE html>
-<html lang="en">
+<html lang="en" data-framework="javascript">
 <head>
     <meta charset="UTF-8">
@@ -10,14 +10,14 @@
     <meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg">
     <meta property="og:description" content="A conversational AI system that listens, learns, and challenges">
     <meta property="og:url" content="https://g4f.ai">
-    <link rel="stylesheet" href="/assets/css/style.css">
-    <link rel="apple-touch-icon" sizes="180x180" href="/assets/img/apple-touch-icon.png">
-    <link rel="icon" type="image/png" sizes="32x32" href="/assets/img/favicon-32x32.png">
-    <link rel="icon" type="image/png" sizes="16x16" href="/assets/img/favicon-16x16.png">
-    <link rel="manifest" href="/assets/img/site.webmanifest">
-    <script src="/assets/js/icons.js"></script>
-    <script src="/assets/js/highlightjs-copy.min.js"></script>
-    <script src="/assets/js/chat.v1.js" defer></script>
+    <link rel="stylesheet" href="/static/css/style.css">
+    <link rel="apple-touch-icon" sizes="180x180" href="/static/img/apple-touch-icon.png">
+    <link rel="icon" type="image/png" sizes="32x32" href="/static/img/favicon-32x32.png">
+    <link rel="icon" type="image/png" sizes="16x16" href="/static/img/favicon-16x16.png">
+    <link rel="manifest" href="/static/img/site.webmanifest">
+    <script src="/static/js/icons.js"></script>
+    <script src="/static/js/highlightjs-copy.min.js"></script>
+    <script src="/static/js/chat.v1.js" defer></script>
     <script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
     <link rel="stylesheet"
         href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">
@@ -38,8 +38,8 @@
     </script>
     <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
     <script>
-        const user_image = '<img src="/assets/img/user.png" alt="your avatar">';
-        const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">';
+        const user_image = '<img src="/static/img/user.png" alt="your avatar">';
+        const gpt_image = '<img src="/static/img/gpt.png" alt="your avatar">';
     </script>
     <style>
         .hljs {
@@ -74,8 +74,8 @@
             background: #8b3dff;
         }
     </style>
-    <script src="/assets/js/highlight.min.js"></script>
-    <script>window.conversation_id = `{{chat_id}}`</script>
+    <script src="/static/js/highlight.min.js"></script>
+    <script>window.conversation_id = "{{chat_id}}"</script>
     <title>g4f - gui</title>
 </head>
@@ -94,11 +94,10 @@
                 <i class="fa-regular fa-trash"></i>
                 <span>Clear Conversations</span>
             </button>
-            <div class="info">
-                <i class="fa-brands fa-telegram"></i>
-                <span class="convo-title">tele ~ <a href="https://t.me/g4f_official">@g4f_official</a>
-                </span>
-            </div>
+            <button onclick="save_storage()">
+                <i class="fa-solid fa-download"></i>
+                <a href="" onclick="return false;">Export Conversations</a>
+            </button>
             <div class="info">
                 <i class="fa-brands fa-github"></i>
                 <span class="convo-title">github ~ <a href="https://github.com/xtekky/gpt4free">@gpt4free</a>
@@ -161,6 +160,7 @@
                 <option value="gemini-pro">gemini-pro</option>
                 <option value="">----</option>
             </select>
+            <select name="model2" id="model2" class="hidden"></select>
         </div>
         <div class="field">
             <select name="jailbreak" id="jailbreak" style="display: none;">

View File

@@ -15,7 +15,7 @@
     margin: auto;
     display: flex;
     flex-direction: column;
-    gap: 16px;
+    gap: var(--inner-gap);
     max-width: 200px;
     padding: var(--section-gap);
     overflow: none;
@@ -106,6 +106,10 @@ body {
     border: 1px solid var(--blur-border);
 }

+.hidden {
+    display: none;
+}
+
 .conversations {
     max-width: 260px;
     padding: var(--section-gap);
@@ -179,7 +183,8 @@ body {
 .conversations {
     display: flex;
     flex-direction: column;
-    gap: 16px;
+    gap: var(--inner-gap);
+    padding: var(--inner-gap);
 }

 .conversations .title {
@@ -569,7 +574,7 @@ label[for="camera"] {
     height: fit-content;
     display: flex;
     align-items: center;
-    gap: 16px;
+    gap: var(--inner-gap);
 }

 .field .about {
@@ -653,10 +658,15 @@ select {
     font-size: 14px;
 }

+.bottom_buttons button a {
+    color: var(--colour-3);
+    font-weight: 500;
+}
+
 .conversations .top {
     display: flex;
     flex-direction: column;
-    gap: 16px;
+    gap: var(--inner-gap);
     overflow: auto;
 }

View File (binary image, 8.7 KiB before and after)

View File (binary image, 17 KiB before and after)

View File (binary image, 7.8 KiB before and after)

View File (binary image, 499 B before and after)

View File (binary image, 1.0 KiB before and after)

View File (binary image, 2.8 KiB before and after)

View File (binary image, 17 KiB before and after)

View File

@@ -1,5 +1,4 @@
 const colorThemes = document.querySelectorAll('[name="theme"]');
-const markdown = window.markdownit();
 const message_box = document.getElementById(`messages`);
 const messageInput = document.getElementById(`message-input`);
 const box_conversations = document.querySelector(`.top`);
@@ -12,12 +11,15 @@ const imageInput = document.getElementById("image");
 const cameraInput = document.getElementById("camera");
 const fileInput = document.getElementById("file");
 const inputCount = document.getElementById("input-count")
+const providerSelect = document.getElementById("provider");
 const modelSelect = document.getElementById("model");
+const modelProvider = document.getElementById("model2");
 const systemPrompt = document.getElementById("systemPrompt")
+const jailbreak = document.getElementById("jailbreak");

 let prompt_lock = false;

-hljs.addPlugin(new CopyButtonPlugin());
+const options = ["switch", "model", "model2", "jailbreak", "patch", "provider", "history"];

 messageInput.addEventListener("blur", () => {
     window.scrollTo(0, 0);
@@ -34,15 +36,17 @@ appStorage = window.localStorage || {
     length: 0
 }

+const markdown = window.markdownit();
 const markdown_render = (content) => {
     return markdown.render(content
-        .replaceAll(/<!--.+-->/gm, "")
+        .replaceAll(/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, "")
         .replaceAll(/<img data-prompt="[^>]+">/gm, "")
     )
         .replaceAll("<a href=", '<a target="_blank" href=')
         .replaceAll('<code>', '<code class="language-plaintext">')
 }

+hljs.addPlugin(new CopyButtonPlugin());
 let typesetPromise = Promise.resolve();
 const highlight = (container) => {
     container.querySelectorAll('code:not(.hljs').forEach((el) => {
@@ -90,48 +94,48 @@ const handle_ask = async () => {
     window.scrollTo(0, 0);
     message = messageInput.value

-    if (message.length > 0) {
-        messageInput.value = "";
-        prompt_lock = true;
-        count_input()
-        await add_conversation(window.conversation_id, message);
-        if ("text" in fileInput.dataset) {
-            message += '\n```' + fileInput.dataset.type + '\n';
-            message += fileInput.dataset.text;
-            message += '\n```'
-        }
-        let message_index = await add_message(window.conversation_id, "user", message);
-        window.token = message_id();
-        if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
-        const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
-        if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
-        else delete imageInput.dataset.src
-        model = modelSelect.options[modelSelect.selectedIndex].value
-        message_box.innerHTML += `
-            <div class="message" data-index="${message_index}">
-                <div class="user">
-                    ${user_image}
-                    <i class="fa-solid fa-xmark"></i>
-                    <i class="fa-regular fa-phone-arrow-up-right"></i>
-                </div>
-                <div class="content" id="user_${token}">
-                    <div class="content_inner">
-                        ${markdown_render(message)}
-                        ${imageInput.dataset.src
-                            ? '<img src="' + imageInput.dataset.src + '" alt="Image upload">'
-                            : ''
-                        }
-                    </div>
-                    <div class="count">${count_words_and_tokens(message, model)}</div>
-                </div>
-            </div>
-        `;
-        await register_remove_message();
-        highlight(message_box);
-        await ask_gpt();
+    if (message.length <= 0) {
+        return;
     }
+    messageInput.value = "";
+    prompt_lock = true;
+    count_input()
+    await add_conversation(window.conversation_id, message);
+    if ("text" in fileInput.dataset) {
+        message += '\n```' + fileInput.dataset.type + '\n';
+        message += fileInput.dataset.text;
+        message += '\n```'
+    }
+    let message_index = await add_message(window.conversation_id, "user", message);
+    window.token = message_id();
+    if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
+    const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
+    if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
+    else delete imageInput.dataset.src
+    message_box.innerHTML += `
+        <div class="message" data-index="${message_index}">
+            <div class="user">
+                ${user_image}
+                <i class="fa-solid fa-xmark"></i>
+                <i class="fa-regular fa-phone-arrow-up-right"></i>
+            </div>
+            <div class="content" id="user_${token}">
+                <div class="content_inner">
+                    ${markdown_render(message)}
+                    ${imageInput.dataset.src
+                        ? '<img src="' + imageInput.dataset.src + '" alt="Image upload">'
+                        : ''
+                    }
+                </div>
+                <div class="count">${count_words_and_tokens(message, get_selected_model())}</div>
+            </div>
+        </div>
+    `;
+    highlight(message_box);
+    await ask_gpt();
 };

 const remove_cancel_button = async () => {
@@ -143,7 +147,7 @@ const remove_cancel_button = async () => {
     }, 300);
 };

-const prepare_messages = (messages, filter_last_message = true) => {
+const prepare_messages = (messages, filter_last_message=true) => {
     // Removes none user messages at end
     if (filter_last_message) {
         let last_message;
@@ -193,20 +197,54 @@ const prepare_messages = (messages, filter_last_message = true) => {
     return new_messages;
 }

+async function add_message_chunk(message) {
+    if (message.type == "conversation") {
+        console.info("Conversation used:", message.conversation)
+    } else if (message.type == "provider") {
+        window.provider_result = message.provider;
+        window.content.querySelector('.provider').innerHTML = `
+            <a href="${message.provider.url}" target="_blank">
+                ${message.provider.name}
+            </a>
+            ${message.provider.model ? ' with ' + message.provider.model : ''}
+        `
+    } else if (message.type == "message") {
+        console.error(messag.message)
+        return;
+    } else if (message.type == "error") {
+        console.error(message.error);
+        window.content_inner.innerHTML += `<p><strong>An error occured:</strong> ${message.error}</p>`;
+    } else if (message.type == "content") {
+        window.text += message.content;
+        html = markdown_render(window.text);
+        let lastElement, lastIndex = null;
+        for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
+            const index = html.lastIndexOf(element)
+            if (index - element.length > lastIndex) {
+                lastElement = element;
+                lastIndex = index;
+            }
+        }
+        if (lastIndex) {
+            html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
+        }
+        window.content_inner.innerHTML = html;
+        window.content_count.innerText = count_words_and_tokens(text, window.provider_result?.model);
+        highlight(window.content_inner);
+    }
+
+    window.scrollTo(0, 0);
+    if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
+        message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
+    }
+}
+
 const ask_gpt = async () => {
     regenerate.classList.add(`regenerate-hidden`);
     messages = await get_messages(window.conversation_id);
     total_messages = messages.length;
     messages = prepare_messages(messages);

-    window.scrollTo(0, 0);
-    window.controller = new AbortController();
-    jailbreak = document.getElementById("jailbreak");
-    provider = document.getElementById("provider");
-    window.text = '';
     stop_generating.classList.remove(`stop_generating-hidden`);
     message_box.scrollTop = message_box.scrollHeight;
@@ -229,103 +267,31 @@ const ask_gpt = async () => {
         </div>
     `;

-    content = document.getElementById(`gpt_${window.token}`);
-    content_inner = content.querySelector('.content_inner');
-    content_count = content.querySelector('.count');
+    window.controller = new AbortController();
+    window.text = "";
+    window.error = null;
+    window.provider_result = null;
+    window.content = document.getElementById(`gpt_${window.token}`);
+    window.content_inner = content.querySelector('.content_inner');
+    window.content_count = content.querySelector('.count');
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
-    error = provider_result = null;
     try {
-        let body = JSON.stringify({
+        const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput;
+        const file = input && input.files.length > 0 ? input.files[0] : null;
+        await api("conversation", {
             id: window.token,
             conversation_id: window.conversation_id,
-            model: modelSelect.options[modelSelect.selectedIndex].value,
-            jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
-            web_search: document.getElementById(`switch`).checked,
-            provider: provider.options[provider.selectedIndex].value,
-            patch_provider: document.getElementById('patch')?.checked,
+            model: get_selected_model(),
+            jailbreak: jailbreak?.options[jailbreak.selectedIndex].value,
+            web_search: document.getElementById("switch").checked,
+            provider: providerSelect.options[providerSelect.selectedIndex].value,
+            patch_provider: document.getElementById("patch")?.checked,
             messages: messages
-        });
-        const headers = {
-            accept: 'text/event-stream'
-        }
-        const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
-        if (input && input.files.length > 0) {
-            const formData = new FormData();
-            formData.append('image', input.files[0]);
-            formData.append('json', body);
-            body = formData;
-        } else {
-            headers['content-type'] = 'application/json';
-        }
-        const response = await fetch(`/backend-api/v2/conversation`, {
-            method: 'POST',
-            signal: window.controller.signal,
-            headers: headers,
-            body: body
-        });
-        const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
-        let buffer = ""
-        while (true) {
-            const { value, done } = await reader.read();
-            if (done) break;
-            for (const line of value.split("\n")) {
-                if (!line) {
-                    continue;
-                }
-                let message;
-                try {
-                    message = JSON.parse(buffer + line);
-                    buffer = "";
-                } catch {
-                    buffer += line
-                    continue;
-                }
-                if (message.type == "content") {
-                    text += message.content;
-                } else if (message.type == "provider") {
-                    provider_result = message.provider
-                    content.querySelector('.provider').innerHTML = `
-                        <a href="${provider_result.url}" target="_blank">
-                            ${provider_result.name}
-                        </a>
-                        ${provider_result.model ? ' with ' + provider_result.model : ''}
-                    `
-                } else if (message.type == "error") {
-                    error = message.error;
-                } else if (messag.type == "message") {
-                    console.error(messag.message)
-                }
-            }
-            if (error) {
-                console.error(error);
-                content_inner.innerHTML += `<p><strong>An error occured:</strong> ${error}</p>`;
-            } else {
-                html = markdown_render(text);
-                let lastElement, lastIndex = null;
-                for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
-                    const index = html.lastIndexOf(element)
-                    if (index - element.length > lastIndex) {
-                        lastElement = element;
-                        lastIndex = index;
-                    }
-                }
-                if (lastIndex) {
-                    html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
-                }
-                content_inner.innerHTML = html;
-                content_count.innerText = count_words_and_tokens(text, provider_result?.model);
-                highlight(content_inner);
-            }
-            window.scrollTo(0, 0);
-            if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
-                message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
-            }
-        }
+        }, file);

     if (!error) {
         html = markdown_render(text);
         content_inner.innerHTML = html;
@@ -350,7 +316,7 @@ const ask_gpt = async () => {
     await add_message(window.conversation_id, "assistant", text, provider_result);
     await load_conversation(window.conversation_id);
} else { } else {
let cursorDiv = document.getElementById(`cursor`); let cursorDiv = document.getElementById("cursor");
if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv); if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
} }
window.scrollTo(0, 0); window.scrollTo(0, 0);
@ -439,7 +405,7 @@ const new_conversation = async () => {
say_hello(); say_hello();
}; };
const load_conversation = async (conversation_id, scroll = true) => { const load_conversation = async (conversation_id, scroll=true) => {
let conversation = await get_conversation(conversation_id); let conversation = await get_conversation(conversation_id);
let messages = conversation?.items || []; let messages = conversation?.items || [];
@ -454,7 +420,6 @@ const load_conversation = async (conversation_id, scroll = true) => {
last_model = item.provider?.model; last_model = item.provider?.model;
let next_i = parseInt(i) + 1; let next_i = parseInt(i) + 1;
let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null); let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
let provider_link = item.provider?.name ? `<a href="${item.provider.url}" target="_blank">${item.provider.name}</a>` : ""; let provider_link = item.provider?.name ? `<a href="${item.provider.url}" target="_blank">${item.provider.name}</a>` : "";
let provider = provider_link ? ` let provider = provider_link ? `
<div class="provider"> <div class="provider">
@ -491,7 +456,6 @@ const load_conversation = async (conversation_id, scroll = true) => {
} }
message_box.innerHTML = elements; message_box.innerHTML = elements;
register_remove_message(); register_remove_message();
highlight(message_box); highlight(message_box);
@ -543,7 +507,9 @@ async function add_conversation(conversation_id, content) {
} }
async function save_system_message() { async function save_system_message() {
if (!window.conversation_id) return; if (!window.conversation_id) {
return;
}
const conversation = await get_conversation(window.conversation_id); const conversation = await get_conversation(window.conversation_id);
conversation.system = systemPrompt?.value; conversation.system = systemPrompt?.value;
await save_conversation(window.conversation_id, conversation); await save_conversation(window.conversation_id, conversation);
@ -580,7 +546,6 @@ const remove_message = async (conversation_id, index) => {
const add_message = async (conversation_id, role, content, provider) => { const add_message = async (conversation_id, role, content, provider) => {
const conversation = await get_conversation(conversation_id); const conversation = await get_conversation(conversation_id);
conversation.items.push({ conversation.items.push({
role: role, role: role,
content: content, content: content,
@ -662,15 +627,14 @@ sidebar_button.addEventListener("click", (event) => {
sidebar.classList.add("shown"); sidebar.classList.add("shown");
sidebar_button.classList.add("rotated"); sidebar_button.classList.add("rotated");
} }
window.scrollTo(0, 0); window.scrollTo(0, 0);
}); });
const register_settings_localstorage = async () => { const register_settings_storage = async () => {
for (id of ["switch", "model", "jailbreak", "patch", "provider", "history"]) { options.forEach((id) => {
element = document.getElementById(id); element = document.getElementById(id);
if (!element) { if (!element) {
continue; return;
} }
element.addEventListener('change', async (event) => { element.addEventListener('change', async (event) => {
switch (event.target.type) { switch (event.target.type) {
@ -684,14 +648,14 @@ const register_settings_localstorage = async () => {
console.warn("Unresolved element type"); console.warn("Unresolved element type");
} }
}); });
} });
} }
const load_settings_localstorage = async () => { const load_settings_storage = async () => {
for (id of ["switch", "model", "jailbreak", "patch", "provider", "history"]) { options.forEach((id) => {
element = document.getElementById(id); element = document.getElementById(id);
if (!element || !(value = appStorage.getItem(element.id))) { if (!element || !(value = appStorage.getItem(id))) {
continue; return;
} }
if (value) { if (value) {
switch (element.type) { switch (element.type) {
@ -705,7 +669,7 @@ const load_settings_localstorage = async () => {
console.warn("Unresolved element type"); console.warn("Unresolved element type");
} }
} }
} });
} }
const say_hello = async () => { const say_hello = async () => {
@ -780,13 +744,16 @@ function count_words_and_tokens(text, model) {
} }
let countFocus = messageInput; let countFocus = messageInput;
let timeoutId;
const count_input = async () => { const count_input = async () => {
if (countFocus.value) { if (timeoutId) clearTimeout(timeoutId);
model = modelSelect.options[modelSelect.selectedIndex].value; timeoutId = setTimeout(() => {
inputCount.innerText = count_words_and_tokens(countFocus.value, model); if (countFocus.value) {
} else { inputCount.innerText = count_words_and_tokens(countFocus.value, get_selected_model());
inputCount.innerHTML = "&nbsp;" } else {
} inputCount.innerHTML = "&nbsp;"
}
}, 100);
}; };
messageInput.addEventListener("keyup", count_input); messageInput.addEventListener("keyup", count_input);
systemPrompt.addEventListener("keyup", count_input); systemPrompt.addEventListener("keyup", count_input);
@ -799,9 +766,21 @@ systemPrompt.addEventListener("blur", function() {
count_input(); count_input();
}); });
window.onload = async () => { window.addEventListener('load', async function() {
setTheme(); await on_load();
if (window.conversation_id == "{{chat_id}}") {
window.conversation_id = uuid();
} else {
await on_api();
}
});
window.addEventListener('pywebviewready', async function() {
await on_api();
});
async function on_load() {
setTheme();
count_input(); count_input();
if (/\/chat\/.+/.test(window.location.href)) { if (/\/chat\/.+/.test(window.location.href)) {
@ -809,9 +788,10 @@ window.onload = async () => {
} else { } else {
say_hello() say_hello()
} }
load_conversations(); load_conversations();
}
async function on_api() {
messageInput.addEventListener("keydown", async (evt) => { messageInput.addEventListener("keydown", async (evt) => {
if (prompt_lock) return; if (prompt_lock) return;
@ -824,46 +804,17 @@ window.onload = async () => {
messageInput.style.height = messageInput.scrollHeight + "px"; messageInput.style.height = messageInput.scrollHeight + "px";
} }
}); });
sendButton.addEventListener(`click`, async () => { sendButton.addEventListener(`click`, async () => {
console.log("clicked send"); console.log("clicked send");
if (prompt_lock) return; if (prompt_lock) return;
await handle_ask(); await handle_ask();
}); });
messageInput.focus(); messageInput.focus();
register_settings_localstorage(); register_settings_storage();
};
(async () => { versions = await api("version");
response = await fetch('/backend-api/v2/models') document.title = 'g4f - ' + versions["version"];
models = await response.json()
for (model of models) {
let option = document.createElement('option');
option.value = option.text = model;
modelSelect.appendChild(option);
}
response = await fetch('/backend-api/v2/providers')
providers = await response.json()
select = document.getElementById('provider');
for (provider of providers) {
let option = document.createElement('option');
option.value = option.text = provider;
select.appendChild(option);
}
await load_settings_localstorage()
})();
(async () => {
response = await fetch('/backend-api/v2/version')
versions = await response.json()
document.title = 'g4f - gui - ' + versions["version"];
let text = "version ~ " let text = "version ~ "
if (versions["version"] != versions["latest_version"]) { if (versions["version"] != versions["latest_version"]) {
let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"]; let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
@ -873,7 +824,24 @@ window.onload = async () => {
text += versions["version"]; text += versions["version"];
} }
document.getElementById("version_text").innerHTML = text document.getElementById("version_text").innerHTML = text
})()
models = await api("models");
models.forEach((model) => {
let option = document.createElement("option");
option.value = option.text = model;
modelSelect.appendChild(option);
});
providers = await api("providers")
providers.forEach((provider) => {
let option = document.createElement("option");
option.value = option.text = provider;
providerSelect.appendChild(option);
})
await load_provider_models(appStorage.getItem("provider"));
load_settings_storage()
}
for (const el of [imageInput, cameraInput]) { for (const el of [imageInput, cameraInput]) {
el.addEventListener('click', async () => { el.addEventListener('click', async () => {
@ -889,6 +857,7 @@ fileInput.addEventListener('click', async (event) => {
fileInput.value = ''; fileInput.value = '';
delete fileInput.dataset.text; delete fileInput.dataset.text;
}); });
fileInput.addEventListener('change', async (event) => { fileInput.addEventListener('change', async (event) => {
if (fileInput.files.length) { if (fileInput.files.length) {
type = fileInput.files[0].type; type = fileInput.files[0].type;
@ -903,8 +872,21 @@ fileInput.addEventListener('change', async (event) => {
} }
fileInput.dataset.type = type fileInput.dataset.type = type
const reader = new FileReader(); const reader = new FileReader();
reader.addEventListener('load', (event) => { reader.addEventListener('load', async (event) => {
fileInput.dataset.text = event.target.result; fileInput.dataset.text = event.target.result;
if (type == "json") {
const data = JSON.parse(fileInput.dataset.text);
if ("g4f" in data.options) {
Object.keys(data).forEach(key => {
if (key != "options" && !localStorage.getItem(key)) {
appStorage.setItem(key, JSON.stringify(data[key]));
}
});
delete fileInput.dataset.text;
await load_conversations();
fileInput.value = "";
}
}
}); });
reader.readAsText(fileInput.files[0]); reader.readAsText(fileInput.files[0]);
} else { } else {
@ -914,4 +896,126 @@ fileInput.addEventListener('change', async (event) => {
systemPrompt?.addEventListener("blur", async () => { systemPrompt?.addEventListener("blur", async () => {
await save_system_message(); await save_system_message();
}); });
function get_selected_model() {
if (modelProvider.selectedIndex >= 0) {
return modelProvider.options[modelProvider.selectedIndex].value;
} else if (modelSelect.selectedIndex >= 0) {
return modelSelect.options[modelSelect.selectedIndex].value;
}
}
async function api(ressource, args=null, file=null) {
if (window?.pywebview) {
if (args) {
if (ressource == "models") {
ressource = "provider_models";
}
return pywebview.api["get_" + ressource](args);
}
return pywebview.api["get_" + ressource]();
}
if (ressource == "models" && args) {
ressource = `${ressource}/${args}`;
}
const url = `/backend-api/v2/${ressource}`;
if (ressource == "conversation") {
let body = JSON.stringify(args);
const headers = {
accept: 'text/event-stream'
}
if (file) {
const formData = new FormData();
formData.append('file', file);
formData.append('json', body);
body = formData;
} else {
headers['content-type'] = 'application/json';
}
response = await fetch(url, {
method: 'POST',
signal: window.controller.signal,
headers: headers,
body: body
});
return read_response(response);
}
response = await fetch(url);
return await response.json();
}
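The same conversation endpoint can be exercised outside the browser. A hedged Python sketch, assuming the GUI backend listens on 127.0.0.1:8080 and using the requests package; field names mirror the ask_gpt() payload above, optional fields (jailbreak, patch_provider) omitted:

import json
import requests

payload = {
    "id": "0",  # message token; any unique id
    "conversation_id": "00000000-0000-0000-0000-000000000000",
    "model": "",  # empty -> server falls back to models.default
    "provider": "Bing",
    "web_search": False,
    "messages": [{"role": "user", "content": "Hello"}],
}
with requests.post(
    "http://127.0.0.1:8080/backend-api/v2/conversation",
    json=payload,
    headers={"accept": "text/event-stream"},
    stream=True,
) as response:
    for line in response.iter_lines():
        if line:
            print(json.loads(line))  # one chunk object per line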
async function read_response(response) {
const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
let buffer = ""
while (true) {
const { value, done } = await reader.read();
if (done) {
break;
}
for (const line of value.split("\n")) {
if (!line) {
continue;
}
try {
add_message_chunk(JSON.parse(buffer + line))
buffer = "";
} catch {
buffer += line
}
}
}
}
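read_response() carries fragments of an incomplete line across reads until they parse. The same buffering idea as a self-contained Python sketch:

import json

def iter_chunks(reads):
    # Accumulate unparseable fragments until a complete JSON line forms.
    buffer = ""
    for value in reads:
        for line in value.split("\n"):
            if not line:
                continue
            try:
                yield json.loads(buffer + line)
                buffer = ""
            except json.JSONDecodeError:
                buffer += line

print(list(iter_chunks(['{"type": "con', 'tent", "content": "hi"}\n'])))
# -> [{'type': 'content', 'content': 'hi'}]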
async function load_provider_models(providerIndex=null) {
if (!providerIndex) {
providerIndex = providerSelect.selectedIndex;
}
const provider = providerSelect.options[providerIndex].value;
if (!provider) {
return;
}
const models = await api('models', provider);
modelProvider.innerHTML = '';
if (models.length > 0) {
modelSelect.classList.add("hidden");
modelProvider.classList.remove("hidden");
models.forEach((model) => {
let option = document.createElement('option');
option.value = option.text = model.model;
option.selected = model.default;
modelProvider.appendChild(option);
});
} else {
modelProvider.classList.add("hidden");
modelSelect.classList.remove("hidden");
}
};
providerSelect.addEventListener("change", () => load_provider_models());
function save_storage() {
let filename = new Date().toLocaleString()
filename += ".json"
let data = {"options": {"g4f": ""}};
for (let i = 0; i < appStorage.length; i++){
let key = appStorage.key(i);
let item = appStorage.getItem(key);
if (key.startsWith("conversation:")) {
data[key] = JSON.parse(item);
} else {
data["options"][key] = item;
}
}
data = JSON.stringify(data, null, 4);
const blob = new Blob([data], {type: 'application/json'});
if(window.navigator.msSaveOrOpenBlob) {
window.navigator.msSaveBlob(blob, filename);
} else {
const elem = window.document.createElement('a');
elem.href = window.URL.createObjectURL(blob);
elem.download = filename;
document.body.appendChild(elem);
elem.click();
document.body.removeChild(elem);
}
}
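save_storage() writes settings under "options" (with a "g4f" marker) and one "conversation:<id>" key per conversation, which is exactly what the fileInput import handler above checks for. A hypothetical Python companion that reads such a backup:

import json

def load_backup(path: str) -> dict:
    with open(path, encoding="utf-8") as f:
        data = json.load(f)
    if "g4f" not in data["options"]:
        raise ValueError("not a g4f export")
    return {
        "settings": data["options"],
        "conversations": {k: v for k, v in data.items() if k.startswith("conversation:")},
    }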

185
g4f/gui/server/api.py Normal file
View File

@ -0,0 +1,185 @@
import logging
import json
from typing import Iterator
try:
import webview
except ImportError:
...
from g4f import version, models
from g4f import get_last_provider, ChatCompletion
from g4f.errors import VersionNotFoundError
from g4f.Provider import ProviderType, __providers__, __map__
from g4f.providers.base_provider import ProviderModelMixin
from g4f.Provider.bing.create_images import patch_provider
from g4f.Provider.Bing import Conversation
conversations: dict[str, Conversation] = {}
class Api():
def get_models(self) -> list[str]:
"""
Return a list of all models.
Fetches and returns a list of all available models in the system.
Returns:
List[str]: A list of model names.
"""
return models._all_models
def get_provider_models(self, provider: str) -> list[dict]:
if provider in __map__:
provider: ProviderType = __map__[provider]
if issubclass(provider, ProviderModelMixin):
return [{"model": model, "default": model == provider.default_model} for model in provider.get_models()]
elif provider.supports_gpt_35_turbo or provider.supports_gpt_4:
return [
*([{"model": "gpt-4", "default": not provider.supports_gpt_4}] if provider.supports_gpt_4 else []),
*([{"model": "gpt-3.5-turbo", "default": not provider.supports_gpt_4}] if provider.supports_gpt_35_turbo else [])
]
else:
return []
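A usage sketch for this lookup (return values illustrative): a ProviderModelMixin provider reports its own model list with a default flag, a plain provider falls back to its gpt-3.5/gpt-4 capability flags, and an unknown name yields None, which the Flask subclass below turns into a 404:

from g4f.gui.server.api import Api

api = Api()
print(api.get_provider_models("Bing"))
# e.g. [{"model": "creative", "default": False}, {"model": "balanced", "default": True}, ...]
print(api.get_provider_models("NoSuchProvider"))  # -> None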
def get_providers(self) -> list[str]:
"""
Return a list of all working providers.
"""
return [provider.__name__ for provider in __providers__ if provider.working]
def get_version(self):
"""
Returns the current and latest version of the application.
Returns:
dict: A dictionary containing the current and latest version.
"""
try:
current_version = version.utils.current_version
except VersionNotFoundError:
current_version = None
return {
"version": current_version,
"latest_version": version.utils.latest_version,
}
def generate_title(self):
"""
Generates and returns a title based on the request data.
Returns:
dict: A dictionary with the generated title.
"""
return {'title': ''}
def get_conversation(self, options: dict, **kwargs) -> Iterator:
window = webview.active_window()
for message in self._create_response_stream(
self._prepare_conversation_kwargs(options, kwargs),
options.get("conversation_id")
):
window.evaluate_js(f"this.add_message_chunk({json.dumps(message)})")
def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
"""
Prepares arguments for chat completion based on the request data.
Reads the request and prepares the necessary arguments for handling
a chat completion request.
Returns:
dict: Arguments prepared for chat completion.
"""
provider = json_data.get('provider', None)
if "image" in kwargs and provider is None:
provider = "Bing"
if provider == 'OpenaiChat':
kwargs['auto_continue'] = True
messages = json_data['messages']
if json_data.get('web_search'):
if provider == "Bing":
kwargs['web_search'] = True
else:
from .internet import get_search_message
messages[-1]["content"] = get_search_message(messages[-1]["content"])
conversation_id = json_data.get("conversation_id")
if conversation_id and conversation_id in conversations:
kwargs["conversation"] = conversations[conversation_id]
model = json_data.get('model')
model = model if model else models.default
patch = patch_provider if json_data.get('patch_provider') else None
return {
"model": model,
"provider": provider,
"messages": messages,
"stream": True,
"ignore_stream": True,
"patch_provider": patch,
"return_conversation": True,
**kwargs
}
def _create_response_stream(self, kwargs, conversation_id: str) -> Iterator:
"""
Creates and returns a streaming response for the conversation.
Args:
kwargs (dict): Arguments for creating the chat completion.
Yields:
str: JSON formatted response chunks for the stream.
Raises:
Exception: If an error occurs during the streaming process.
"""
try:
first = True
for chunk in ChatCompletion.create(**kwargs):
if first:
first = False
yield self._format_json("provider", get_last_provider(True))
if isinstance(chunk, Conversation):
conversations[conversation_id] = chunk
yield self._format_json("conversation", conversation_id)
elif isinstance(chunk, Exception):
logging.exception(chunk)
yield self._format_json("message", get_error_message(chunk))
else:
yield self._format_json("content", chunk)
except Exception as e:
logging.exception(e)
yield self._format_json('error', get_error_message(e))
def _format_json(self, response_type: str, content):
"""
Formats and returns a JSON response.
Args:
response_type (str): The type of the response.
content: The content to be included in the response.
Returns:
str: A JSON formatted string.
"""
return {
'type': response_type,
response_type: content
}
def get_error_message(exception: Exception) -> str:
"""
Generates a formatted error message from an exception.
Args:
exception (Exception): The exception to format.
Returns:
str: A formatted error message string.
"""
return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"

View File

@ -1,3 +1,9 @@
import sys, os
from flask import Flask from flask import Flask
app = Flask(__name__, template_folder='./../client/html') if getattr(sys, 'frozen', False):
template_folder = os.path.join(sys._MEIPASS, "client")
else:
template_folder = "../client"
app = Flask(__name__, template_folder=template_folder, static_folder=f"{template_folder}/static")
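The frozen-app pattern in isolation: PyInstaller unpacks bundled data files to sys._MEIPASS at runtime, so paths to shipped assets must be resolved against it when running frozen. A generic sketch:

import os
import sys

def resource_path(relative: str) -> str:
    # Frozen builds read bundled files from the PyInstaller extraction dir.
    if getattr(sys, "frozen", False):
        return os.path.join(sys._MEIPASS, relative)
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative)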

View File

@ -1,15 +1,9 @@
import logging
import json import json
from flask import request, Flask from flask import request, Flask
from typing import Generator
from g4f import version, models
from g4f import get_last_provider, ChatCompletion
from g4f.image import is_allowed_extension, to_image from g4f.image import is_allowed_extension, to_image
from g4f.errors import VersionNotFoundError from .api import Api
from g4f.Provider import __providers__
from g4f.Provider.bing.create_images import patch_provider
class Backend_Api: class Backend_Api(Api):
""" """
Handles various endpoints in a Flask application for backend operations. Handles various endpoints in a Flask application for backend operations.
@ -33,6 +27,10 @@ class Backend_Api:
'function': self.get_models, 'function': self.get_models,
'methods': ['GET'] 'methods': ['GET']
}, },
'/backend-api/v2/models/<provider>': {
'function': self.get_provider_models,
'methods': ['GET']
},
'/backend-api/v2/providers': { '/backend-api/v2/providers': {
'function': self.get_providers, 'function': self.get_providers,
'methods': ['GET'] 'methods': ['GET']
@ -54,7 +52,7 @@ class Backend_Api:
'methods': ['POST'] 'methods': ['POST']
} }
} }
def handle_error(self): def handle_error(self):
""" """
Print the incoming request data and acknowledge it. Print the incoming request data and acknowledge it.
@ -64,49 +62,7 @@ class Backend_Api:
""" """
print(request.json) print(request.json)
return 'ok', 200 return 'ok', 200
def get_models(self):
"""
Return a list of all models.
Fetches and returns a list of all available models in the system.
Returns:
List[str]: A list of model names.
"""
return models._all_models
def get_providers(self):
"""
Return a list of all working providers.
"""
return [provider.__name__ for provider in __providers__ if provider.working]
def get_version(self):
"""
Returns the current and latest version of the application.
Returns:
dict: A dictionary containing the current and latest version.
"""
try:
current_version = version.utils.current_version
except VersionNotFoundError:
current_version = None
return {
"version": current_version,
"latest_version": version.utils.latest_version,
}
def generate_title(self):
"""
Generates and returns a title based on the request data.
Returns:
dict: A dictionary with the generated title.
"""
return {'title': ''}
def handle_conversation(self): def handle_conversation(self):
""" """
Handles conversation requests and streams responses back. Handles conversation requests and streams responses back.
@ -114,26 +70,10 @@ class Backend_Api:
Returns: Returns:
Response: A Flask response object for streaming. Response: A Flask response object for streaming.
""" """
kwargs = self._prepare_conversation_kwargs()
return self.app.response_class(
self._create_response_stream(kwargs),
mimetype='text/event-stream'
)
def _prepare_conversation_kwargs(self):
"""
Prepares arguments for chat completion based on the request data.
Reads the request and prepares the necessary arguments for handling
a chat completion request.
Returns:
dict: Arguments prepared for chat completion.
"""
kwargs = {} kwargs = {}
if "image" in request.files: if "file" in request.files:
file = request.files['image'] file = request.files['file']
if file.filename != '' and is_allowed_extension(file.filename): if file.filename != '' and is_allowed_extension(file.filename):
kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg')) kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
kwargs['image_name'] = file.filename kwargs['image_name'] = file.filename
@ -141,66 +81,20 @@ class Backend_Api:
json_data = json.loads(request.form['json']) json_data = json.loads(request.form['json'])
else: else:
json_data = request.json json_data = request.json
provider = json_data.get('provider', '').replace('g4f.Provider.', '')
provider = provider if provider and provider != "Auto" else None
if "image" in kwargs and not provider: kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
provider = "Bing"
if provider == 'OpenaiChat':
kwargs['auto_continue'] = True
messages = json_data['messages'] return self.app.response_class(
if json_data.get('web_search'): self._create_response_stream(kwargs, json_data.get("conversation_id")),
if provider == "Bing": mimetype='text/event-stream'
kwargs['web_search'] = True )
else:
# ResourceWarning: unclosed event loop
from .internet import get_search_message
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = json_data.get('model') def get_provider_models(self, provider: str):
model = model if model else models.default models = super().get_provider_models(provider)
patch = patch_provider if json_data.get('patch_provider') else None if models is None:
return "Provider not found", 404
return models
return {
"model": model,
"provider": provider,
"messages": messages,
"stream": True,
"ignore_stream": True,
"patch_provider": patch,
**kwargs
}
def _create_response_stream(self, kwargs) -> Generator[str, None, None]:
"""
Creates and returns a streaming response for the conversation.
Args:
kwargs (dict): Arguments for creating the chat completion.
Yields:
str: JSON formatted response chunks for the stream.
Raises:
Exception: If an error occurs during the streaming process.
"""
try:
first = True
for chunk in ChatCompletion.create(**kwargs):
if first:
first = False
yield self._format_json('provider', get_last_provider(True))
if isinstance(chunk, Exception):
logging.exception(chunk)
yield self._format_json('message', get_error_message(chunk))
else:
yield self._format_json('content', str(chunk))
except Exception as e:
logging.exception(e)
yield self._format_json('error', get_error_message(e))
def _format_json(self, response_type: str, content) -> str: def _format_json(self, response_type: str, content) -> str:
""" """
Formats and returns a JSON response. Formats and returns a JSON response.
@ -212,19 +106,4 @@ class Backend_Api:
Returns: Returns:
str: A JSON formatted string. str: A JSON formatted string.
""" """
return json.dumps({ return json.dumps(super()._format_json(response_type, content)) + "\n"
'type': response_type,
response_type: content
}) + "\n"
def get_error_message(exception: Exception) -> str:
"""
Generates a formatted error message from an exception.
Args:
exception (Exception): The exception to format.
Returns:
str: A formatted error message string.
"""
return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"

View File

@ -1,6 +1,5 @@
from flask import render_template, send_file, redirect import uuid
from time import time from flask import render_template, redirect
from os import urandom
class Website: class Website:
def __init__(self, app) -> None: def __init__(self, app) -> None:
@ -18,23 +17,12 @@ class Website:
'function': self._chat, 'function': self._chat,
'methods': ['GET', 'POST'] 'methods': ['GET', 'POST']
}, },
'/assets/<folder>/<file>': {
'function': self._assets,
'methods': ['GET', 'POST']
}
} }
def _chat(self, conversation_id): def _chat(self, conversation_id):
if '-' not in conversation_id: if '-' not in conversation_id:
return redirect('/chat') return redirect('/chat')
return render_template('index.html', chat_id=conversation_id)
return render_template('index.html', chat_id = conversation_id)
def _index(self): def _index(self):
return render_template('index.html', chat_id = f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}') return render_template('index.html', chat_id=str(uuid.uuid4()))
def _assets(self, folder: str, file: str):
try:
return send_file(f"./../client/{folder}/{file}", as_attachment=False)
except:
return "File not found", 404

View File

@ -1,24 +1,37 @@
import webview import webview
from functools import partial try:
from platformdirs import user_config_dir from platformdirs import user_config_dir
has_platformdirs = True
except ImportError:
has_platformdirs = False
from g4f.gui import run_gui
from g4f.gui.run import gui_parser from g4f.gui.run import gui_parser
from g4f.gui.server.api import Api
import g4f.version import g4f.version
import g4f.debug import g4f.debug
def run_webview(host: str = "0.0.0.0", port: int = 8080, debug: bool = True): def run_webview(
webview.create_window(f"g4f - {g4f.version.utils.current_version}", f"http://{host}:{port}/") debug: bool = False,
if debug: storage_path: str = None
g4f.debug.logging = True ):
webview.create_window(
f"g4f - {g4f.version.utils.current_version}",
"client/index.html",
text_select=True,
js_api=Api(),
)
if has_platformdirs and storage_path is None:
storage_path = user_config_dir("g4f-webview")
webview.start( webview.start(
partial(run_gui, host, port),
private_mode=False, private_mode=False,
storage_path=user_config_dir("g4f-webview"), storage_path=storage_path,
debug=debug debug=debug,
ssl=True
) )
if __name__ == "__main__": if __name__ == "__main__":
parser = gui_parser() parser = gui_parser()
args = parser.parse_args() args = parser.parse_args()
run_webview(args.host, args.port, args.debug) if args.debug:
g4f.debug.logging = True
run_webview(args.debug)

45
g4f/gui/webview.spec Normal file
View File

@ -0,0 +1,45 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(
['webview.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
pyz,
a.scripts,
a.binaries,
Tree('client', prefix='client'),
a.zipfiles,
a.datas,
[],
name='webview',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=False,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
)

View File

@ -70,7 +70,14 @@ class AbstractProvider(BaseProvider):
loop.run_in_executor(executor, create_func), loop.run_in_executor(executor, create_func),
timeout=kwargs.get("timeout") timeout=kwargs.get("timeout")
) )
@classmethod
def get_parameters(cls) -> dict:
return signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
).parameters
@classmethod @classmethod
@property @property
def params(cls) -> str: def params(cls) -> str:
@ -83,17 +90,12 @@ class AbstractProvider(BaseProvider):
Returns: Returns:
str: A string listing the supported parameters. str: A string listing the supported parameters.
""" """
sig = signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
)
def get_type_name(annotation: type) -> str: def get_type_name(annotation: type) -> str:
return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation) return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
args = "" args = ""
for name, param in sig.parameters.items(): for name, param in cls.get_parameters().items():
if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream): if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
continue continue
args += f"\n {name}" args += f"\n {name}"
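A minimal illustration of the introspection behind get_parameters(): inspect.signature exposes each parameter with its annotation and default, which params then formats for display (the function here is just an example):

from inspect import signature

def create_completion(model: str, messages: list, stream: bool = False, **kwargs):
    ...

for name, param in signature(create_completion).parameters.items():
    print(name, param.annotation, param.default)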

View File

@ -1,22 +1,48 @@
from __future__ import annotations from __future__ import annotations
from typing import Union
from aiohttp import ClientResponse
from requests import Response as RequestsResponse
try: try:
from curl_cffi.requests import Session, Response from curl_cffi.requests import Session, Response
from .curl_cffi import StreamResponse, StreamSession from .curl_cffi import StreamResponse, StreamSession, FormData
has_curl_cffi = True has_curl_cffi = True
except ImportError: except ImportError:
from typing import Type as Session, Type as Response from typing import Type as Session, Type as Response
from .aiohttp import StreamResponse, StreamSession from .aiohttp import StreamResponse, StreamSession, FormData
has_curl_cffi = False has_curl_cffi = False
try:
import webview
import asyncio
has_webview = True
except ImportError:
has_webview = False
from .raise_for_status import raise_for_status
from ..webdriver import WebDriver, WebDriverSession from ..webdriver import WebDriver, WebDriverSession
from ..webdriver import bypass_cloudflare, get_driver_cookies from ..webdriver import bypass_cloudflare, get_driver_cookies
from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError from ..errors import MissingRequirementsError
from .defaults import DEFAULT_HEADERS from .defaults import DEFAULT_HEADERS, WEBVIEW_HEADERS
async def get_args_from_webview(url: str) -> dict:
if not has_webview:
raise MissingRequirementsError('Install "webview" package')
window = webview.create_window("", url, hidden=True)
await asyncio.sleep(2)
body = None
while body is None:
try:
await asyncio.sleep(1)
body = window.dom.get_element("body:not(.no-js)")
except:
...
headers = {
**WEBVIEW_HEADERS,
"User-Agent": window.evaluate_js("this.navigator.userAgent"),
"Accept-Language": window.evaluate_js("this.navigator.language"),
"Referer": window.real_url
}
cookies = {}
for cookie in window.get_cookies():
    for name, morsel in cookie.items():
        cookies[name] = morsel.value
window.destroy()
return {"headers": headers, "cookies": cookies}
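A hedged usage sketch for the harvested state: the returned dict plugs straight into an aiohttp session. Note the helper opens a hidden window, so it assumes a running pywebview GUI loop:

from aiohttp import ClientSession

async def fetch_with_webview_args(url: str) -> int:
    args = await get_args_from_webview(url)
    async with ClientSession(headers=args["headers"], cookies=args["cookies"]) as session:
        async with session.get(url) as response:
            return response.status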
def get_args_from_browser( def get_args_from_browser(
url: str, url: str,
@ -79,24 +105,4 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str =
proxies={"https": proxy, "http": proxy}, proxies={"https": proxy, "http": proxy},
timeout=timeout, timeout=timeout,
impersonate="chrome" impersonate="chrome"
) )
async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
if response.status in (429, 402):
raise RateLimitError(f"Response {response.status}: Rate limit reached")
message = await response.text() if not response.ok and message is None else message
if response.status == 403 and "<title>Just a moment...</title>" in message:
raise ResponseStatusError(f"Response {response.status}: Cloudflare detected")
elif not response.ok:
raise ResponseStatusError(f"Response {response.status}: {message}")
def raise_for_status(response: Union[StreamResponse, ClientResponse, Response, RequestsResponse], message: str = None):
if isinstance(response, StreamSession) or isinstance(response, ClientResponse):
return raise_for_status_async(response, message)
if response.status_code in (429, 402):
raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
elif response.status_code == 403 and "<title>Just a moment...</title>" in response.text:
raise ResponseStatusError(f"Response {response.status_code}: Cloudflare detected")
elif not response.ok:
raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")

View File

@ -1,6 +1,6 @@
from __future__ import annotations from __future__ import annotations
from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector, FormData
from typing import AsyncIterator, Any, Optional from typing import AsyncIterator, Any, Optional
from .defaults import DEFAULT_HEADERS from .defaults import DEFAULT_HEADERS

View File

@ -1,6 +1,6 @@
from __future__ import annotations from __future__ import annotations
from curl_cffi.requests import AsyncSession, Response from curl_cffi.requests import AsyncSession, Response, CurlMime
from typing import AsyncGenerator, Any from typing import AsyncGenerator, Any
from functools import partialmethod from functools import partialmethod
import json import json
@ -65,6 +65,8 @@ class StreamSession(AsyncSession):
def request( def request(
self, method: str, url: str, **kwargs self, method: str, url: str, **kwargs
) -> StreamResponse: ) -> StreamResponse:
"""Create and return a StreamResponse object for the given HTTP request.""" """Create and return a StreamResponse object for the given HTTP request."""
if isinstance(kwargs.get("data"), CurlMime):
    kwargs["multipart"] = kwargs.pop("data")
return StreamResponse(super().request(method, url, stream=True, **kwargs)) return StreamResponse(super().request(method, url, stream=True, **kwargs))
@ -75,3 +77,7 @@ class StreamSession(AsyncSession):
put = partialmethod(request, "PUT") put = partialmethod(request, "PUT")
patch = partialmethod(request, "PATCH") patch = partialmethod(request, "PATCH")
delete = partialmethod(request, "DELETE") delete = partialmethod(request, "DELETE")
class FormData(CurlMime):
def add_field(self, name, data=None, content_type: str = None, filename: str = None) -> None:
self.addpart(name, content_type=content_type, filename=filename, data=data)

View File

@ -16,4 +16,14 @@ DEFAULT_HEADERS = {
"referer": "", "referer": "",
"accept-encoding": "gzip, deflate, br", "accept-encoding": "gzip, deflate, br",
"accept-language": "en-US", "accept-language": "en-US",
}
WEBVIEW_HEADERS = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "",
"Referer": "",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "",
} }

View File

@ -0,0 +1,34 @@
from __future__ import annotations
from typing import Union
from aiohttp import ClientResponse
from requests import Response as RequestsResponse
from ..errors import ResponseStatusError, RateLimitError
from . import Response, StreamResponse
class CloudflareError(ResponseStatusError):
...
def is_cloudflare(text: str) -> bool:
return '<div id="cf-please-wait">' in text or "<title>Just a moment...</title>" in text
async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
if response.status in (429, 402):
raise RateLimitError(f"Response {response.status}: Rate limit reached")
message = await response.text() if not response.ok and message is None else message
if response.status == 403 and is_cloudflare(message):
raise CloudflareError(f"Response {response.status}: Cloudflare detected")
elif not response.ok:
raise ResponseStatusError(f"Response {response.status}: {message}")
def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, RequestsResponse], message: str = None):
if hasattr(response, "status"):
return raise_for_status_async(response, message)
if response.status_code in (429, 402):
raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
elif response.status_code == 403 and is_cloudflare(response.text):
raise CloudflareError(f"Response {response.status_code}: Cloudflare detected")
elif not response.ok:
raise ResponseStatusError(f"Response {response.status_code}: {response.text if message is None else message}")

View File

@ -6,6 +6,7 @@ from functools import cached_property
from importlib.metadata import version as get_package_version, PackageNotFoundError from importlib.metadata import version as get_package_version, PackageNotFoundError
from subprocess import check_output, CalledProcessError, PIPE from subprocess import check_output, CalledProcessError, PIPE
from .errors import VersionNotFoundError from .errors import VersionNotFoundError
from . import debug
PACKAGE_NAME = "g4f" PACKAGE_NAME = "g4f"
GITHUB_REPOSITORY = "xtekky/gpt4free" GITHUB_REPOSITORY = "xtekky/gpt4free"
@ -64,6 +65,9 @@ class VersionUtils:
VersionNotFoundError: If the version cannot be determined from the package manager, VersionNotFoundError: If the version cannot be determined from the package manager,
Docker environment, or git repository. Docker environment, or git repository.
""" """
if debug.version:
return debug.version
# Read from package manager # Read from package manager
try: try:
return get_package_version(PACKAGE_NAME) return get_package_version(PACKAGE_NAME)
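Sketch of the new override hook: setting g4f.debug.version short-circuits package-manager, Docker and git detection, which is how pinned builds can report a fixed version (value illustrative):

import g4f.debug
g4f.debug.version = "0.0.0-dev"

from g4f.version import utils
print(utils.current_version)  # -> "0.0.0-dev"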

View File

@ -15,7 +15,6 @@ fastapi
uvicorn uvicorn
flask flask
py-arkose-generator py-arkose-generator
async-property
undetected-chromedriver>=3.5.5 undetected-chromedriver>=3.5.5
brotli brotli
beautifulsoup4 beautifulsoup4