mirror of https://github.com/xtekky/gpt4free.git
Merge pull request #1504 from hlohaus/sort
Add upload svg image support
commit 2a35052687
@@ -239,7 +239,7 @@ def main():
         if comments:
             pull.create_review(body=review, comments=comments)
         else:
-            pull.create_comment(body=review)
+            pull.create_issue_comment(body=review)
     except Exception as e:
         print(f"Error posting review: {e}")
         exit(1)
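For context: in PyGithub, PullRequest.create_comment is the review-comment endpoint and needs a commit, path and position, so the no-comments branch now uses create_issue_comment, which posts a plain comment on the pull-request conversation. A minimal sketch of the calls involved, assuming an authenticated client; the token and repository name are placeholders:

    from github import Github  # PyGithub

    github = Github("<token>")                       # placeholder credentials
    repo = github.get_repo("owner/repository")       # placeholder repository
    pull = repo.get_pull(1504)
    pull.create_issue_comment(body="Automated review text")  # plain comment on the PR thread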
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+import random
+import json
+from aiohttp import ClientSession, WSMsgType
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+API_URL = "https://labs-api.perplexity.ai/socket.io/"
+WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
+MODELS = ['pplx-7b-online', 'pplx-70b-online', 'pplx-7b-chat', 'pplx-70b-chat', 'mistral-7b-instruct',
+    'codellama-34b-instruct', 'llama-2-70b-chat', 'llava-7b-chat', 'mixtral-8x7b-instruct',
+    'mistral-medium', 'related']
+DEFAULT_MODEL = MODELS[1]
+MODEL_MAP = {
+    "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
+    "meta-llama/Llama-2-70b-chat-hf": "llama-2-70b-chat",
+    "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
+}
+
+class PerplexityLabs(AsyncGeneratorProvider):
+    url = "https://labs.perplexity.ai"
+    working = True
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        if not model:
+            model = DEFAULT_MODEL
+        elif model in MODEL_MAP:
+            model = MODEL_MAP[model]
+        elif model not in MODELS:
+            raise ValueError(f"Model is not supported: {model}")
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Referer": f"{cls.url}/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-site",
+            "TE": "trailers",
+        }
+        async with ClientSession(headers=headers) as session:
+            t = format(random.getrandbits(32), '08x')
+            async with session.get(
+                f"{API_URL}?EIO=4&transport=polling&t={t}",
+                proxy=proxy
+            ) as response:
+                text = await response.text()
+
+            sid = json.loads(text[1:])['sid']
+            post_data = '40{"jwt":"anonymous-ask-user"}'
+            async with session.post(
+                f'{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}',
+                data=post_data,
+                proxy=proxy
+            ) as response:
+                assert await response.text() == 'OK'
+
+            async with session.ws_connect(f'{WS_URL}?EIO=4&transport=websocket&sid={sid}', autoping=False) as ws:
+                await ws.send_str('2probe')
+                assert(await ws.receive_str() == '3probe')
+                await ws.send_str('5')
+                assert(await ws.receive_str())
+                assert(await ws.receive_str() == '6')
+                message_data = {
+                    'version': '2.2',
+                    'source': 'default',
+                    'model': model,
+                    'messages': messages
+                }
+                await ws.send_str('42' + json.dumps(['perplexity_playground', message_data]))
+                last_message = 0
+                while True:
+                    message = await ws.receive_str()
+                    if message == '2':
+                        await ws.send_str('3')
+                        continue
+                    try:
+                        data = json.loads(message[2:])[1]
+                        yield data["output"][last_message:]
+                        last_message = len(data["output"])
+                        if data["final"]:
+                            break
+                    except:
+                        raise RuntimeError(f"Message: {message}")
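The new provider drives Perplexity Labs over its socket.io endpoint: an HTTP polling handshake fetches the session id, the '40{"jwt":"anonymous-ask-user"}' frame authenticates anonymously, and the websocket then streams incremental "output" chunks until "final" is set. A minimal usage sketch, assuming the usual g4f.ChatCompletion.create entry point (not part of this diff); the prompt is a placeholder:

    import g4f
    from g4f.Provider import PerplexityLabs

    # "mistralai/Mistral-7B-Instruct-v0.1" is translated by MODEL_MAP to "mistral-7b-instruct"
    for chunk in g4f.ChatCompletion.create(
        model="mistralai/Mistral-7B-Instruct-v0.1",
        messages=[{"role": "user", "content": "Explain WebSockets in one paragraph."}],
        provider=PerplexityLabs,
        stream=True,
    ):
        print(chunk, end="", flush=True)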
@@ -9,10 +9,11 @@ from .needs_auth import *
 from .unfinished import *
 from .selenium import *
 
-from .Aura import Aura
 from .AiAsk import AiAsk
 from .AiChatOnline import AiChatOnline
 from .AItianhu import AItianhu
+from .Aura import Aura
+from .Bestim import Bestim
 from .Bing import Bing
 from .ChatAnywhere import ChatAnywhere
 from .ChatBase import ChatBase
@@ -45,12 +46,12 @@ from .Koala import Koala
 from .Liaobots import Liaobots
 from .Llama2 import Llama2
 from .OnlineGpt import OnlineGpt
+from .PerplexityLabs import PerplexityLabs
 from .Phind import Phind
 from .Pi import Pi
 from .Vercel import Vercel
 from .Ylokh import Ylokh
 from .You import You
-from .Bestim import Bestim
 
 import sys
 
@@ -82,13 +82,16 @@ def build_image_upload_payload(image_bin: str, tone: str) -> Tuple[str, str]:
         Tuple[str, str]: The data and boundary for the payload.
     """
     boundary = "----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
-    data = f"--{boundary}\r\n" \
-        f"Content-Disposition: form-data; name=\"knowledgeRequest\"\r\n\r\n" \
-        f"{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}\r\n" \
-        f"--{boundary}\r\n" \
-        f"Content-Disposition: form-data; name=\"imageBase64\"\r\n\r\n" \
-        f"{image_bin}\r\n" \
-        f"--{boundary}--\r\n"
+    data = f"""--{boundary}
+Content-Disposition: form-data; name="knowledgeRequest"
+
+{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}
+--{boundary}
+Content-Disposition: form-data; name="imageBase64"
+
+{image_bin}
+--{boundary}--
+"""
     return data, boundary
 
 def build_knowledge_request(tone: str) -> dict:
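Because the payload above is a hand-rolled multipart/form-data body, the caller has to advertise the same boundary in the Content-Type header. A rough sketch of that usage with aiohttp; session and upload_url are placeholders standing in for the Bing session and image-upload endpoint, neither of which appears in this diff:

    data, boundary = build_image_upload_payload(image_bin, tone)
    headers = {"Content-Type": f"multipart/form-data; boundary={boundary}"}
    async with session.post(upload_url, data=data, headers=headers) as response:
        result = await response.text()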
@@ -102,14 +105,17 @@ def build_knowledge_request(tone: str) -> dict:
         dict: The knowledge request payload.
     """
     return {
-        'invokedSkills': ["ImageById"],
-        'subscriptionId': "Bing.Chat.Multimodal",
-        'invokedSkillsRequestData': {
-            'enableFaceBlur': True
-        },
-        'convoData': {
-            'convoid': "",
-            'convotone': tone
+        "imageInfo": {},
+        "knowledgeRequest": {
+            'invokedSkills': ["ImageById"],
+            'subscriptionId': "Bing.Chat.Multimodal",
+            'invokedSkillsRequestData': {
+                'enableFaceBlur': True
+            },
+            'convoData': {
+                'convoid': "",
+                'convotone': tone
+            }
         }
     }
 
@@ -115,11 +115,11 @@
         <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
             style="white-space: pre-wrap;resize: none;"></textarea>
         <label for="image" title="Works only with Bing and OpenaiChat">
-            <input type="file" id="image" name="image" accept="image/png, image/gif, image/jpeg" required/>
+            <input type="file" id="image" name="image" accept="image/png, image/gif, image/jpeg, image/svg+xml" required/>
             <i class="fa-regular fa-image"></i>
         </label>
         <label for="file">
-            <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .svg, .log, .csv, .twig, .md" required/>
+            <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
             <i class="fa-solid fa-paperclip"></i>
         </label>
         <div id="send-button">
@@ -660,7 +660,13 @@ observer.observe(message_input, { attributes: true });
     }
     document.getElementById("version_text").innerHTML = text
 })()
+imageInput.addEventListener('click', async (event) => {
+    imageInput.value = '';
+});
+fileInput.addEventListener('click', async (event) => {
+    fileInput.value = '';
+    delete fileInput.dataset.text;
+});
 fileInput.addEventListener('change', async (event) => {
     if (fileInput.files.length) {
         type = fileInput.files[0].type;
@@ -137,7 +137,7 @@ class Backend_Api:
         if 'image' in request.files:
             file = request.files['image']
             if file.filename != '' and is_allowed_extension(file.filename):
-                kwargs['image'] = to_image(file.stream)
+                kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
         if 'json' in request.form:
             json_data = json.loads(request.form['json'])
         else:
g4f/image.py
@@ -4,9 +4,9 @@ import base64
 from .typing import ImageType, Union
 from PIL import Image
 
-ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp'}
+ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
 
-def to_image(image: ImageType) -> Image.Image:
+def to_image(image: ImageType, is_svg: bool = False) -> Image.Image:
     """
     Converts the input image to a PIL Image object.
 
@@ -16,6 +16,16 @@ def to_image(image: ImageType) -> Image.Image:
     Returns:
         Image.Image: The converted PIL Image object.
     """
+    if is_svg:
+        try:
+            import cairosvg
+        except ImportError:
+            raise RuntimeError('Install "cairosvg" package for open svg images')
+        if not isinstance(image, bytes):
+            image = image.read()
+        buffer = BytesIO()
+        cairosvg.svg2png(image, write_to=buffer)
+        image = Image.open(buffer)
     if isinstance(image, str):
         is_data_uri_an_image(image)
         image = extract_data_uri(image)
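With the branch above, SVG uploads are rasterized to PNG through cairosvg before the regular PIL handling. A minimal sketch of the call path, mirroring what the backend does; the file name is a placeholder, cairosvg must be installed separately, and is_allowed_extension is assumed to live alongside ALLOWED_EXTENSIONS in g4f.image:

    from g4f.image import is_allowed_extension, to_image

    filename = "diagram.svg"                # placeholder upload
    if is_allowed_extension(filename):      # 'svg' is now an accepted extension
        with open(filename, "rb") as file:
            image = to_image(file, filename.endswith(".svg"))  # rasterized via cairosvg
        print(image.size)                   # a regular PIL.Image.Image from here on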
@@ -153,6 +163,8 @@ def to_base64(image: Image.Image, compression_rate: float) -> str:
         str: The base64-encoded image.
     """
     output_buffer = BytesIO()
+    if image.mode != "RGB":
+        image = image.convert('RGB')
     image.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
     return base64.b64encode(output_buffer.getvalue()).decode()
 
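The two added lines matter because JPEG has no alpha channel or palette: Pillow raises an error along the lines of "OSError: cannot write mode RGBA as JPEG" when asked to save an RGBA or palette image directly, which is exactly what a transparent PNG or a rasterized SVG produces. A small illustration of the failure mode this avoids:

    from io import BytesIO
    from PIL import Image

    rgba = Image.new("RGBA", (8, 8))     # e.g. a transparent PNG or rasterized SVG
    buffer = BytesIO()
    # rgba.save(buffer, format="JPEG")   # would raise: cannot write mode RGBA as JPEG
    rgba.convert("RGB").save(buffer, format="JPEG")  # works once the alpha channel is dropped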
@@ -5,6 +5,7 @@ from .Provider import (
     Chatgpt4Online,
     ChatgptDemoAi,
     GeminiProChat,
+    PerplexityAi,
     ChatgptNext,
     HuggingChat,
     ChatgptDemo,
@@ -78,7 +79,7 @@ gpt_35_long = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
-    best_provider=RetryProvider([
+    best_provider = RetryProvider([
         GptGo, You,
         GptForLove, ChatBase,
         Chatgpt4Online,
@@ -114,20 +115,20 @@ llama2_13b = Model(
 llama2_70b = Model(
     name = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "huggingface",
-    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat, PerplexityAi])
 )
 
 # Mistal
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityAi])
 )
 
 mistral_7b = Model(
     name = "mistralai/Mistral-7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityAi])
 )
 
 # Dolphin
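With PerplexityAi appended to these RetryProvider lists, requests addressed by one of these model names gain an additional Perplexity-backed fallback when the earlier providers fail. A short sketch of that selection, again assuming the g4f.ChatCompletion.create entry point (not shown in this diff):

    import g4f

    # routed through RetryProvider([DeepInfra, HuggingChat, PerplexityAi])
    response = g4f.ChatCompletion.create(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        messages=[{"role": "user", "content": "Summarize this change in one sentence."}],
    )
    print(response)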
@@ -311,7 +312,7 @@ llama70b_v2_chat = Model(
 pi = Model(
     name = 'pi',
     base_provider = 'inflection',
-    best_provider=Pi
+    best_provider = Pi
 )
 
 class ModelUtils: