refactor V50 and Raycast

This commit is contained in:
Bagus Indrayana 2023-08-17 21:42:00 +08:00
parent 74ecdee784
commit 64e8381c32
2 changed files with 123 additions and 85 deletions

View File

@ -1,17 +1,25 @@
import json
import os
import requests
from g4f.typing import get_type_hints
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
url = "https://backend.raycast.com/api/v1/ai/chat_completions"
model = ['gpt-3.5-turbo', 'gpt-4']
class Raycast(BaseProvider):
url = "https://raycast.com"
# model = ['gpt-3.5-turbo', 'gpt-4']
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
needs_auth = True
working = True
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool,
**kwargs: Any,
) -> CreateResult:
auth = kwargs.get('auth')
headers = {
'Accept': 'application/json',
@ -36,7 +44,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
"system_instruction": "markdown",
"temperature": 0.5
}
response = requests.post(url, headers=headers, json=data, stream=True)
response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
for token in response.iter_lines():
if b'data: ' not in token:
continue
@ -45,6 +53,17 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
if token != None:
yield token
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@classmethod
@property
def params(cls):
    """Human-readable summary of the arguments this provider accepts.

    Returns:
        A string of the form
        ``g4f.provider.<Name> supports: (arg: type, ...)``.
    """
    # Fix: the original list contained ("model", "str") twice; the duplicate
    # entry has been removed so "model" is advertised only once.
    supported = [
        ("model", "str"),
        ("messages", "list[dict[str, str]]"),
        ("stream", "bool"),
        ("temperature", "float"),
        ("top_p", "int"),
        ("auth", "str"),
    ]
    joined = ", ".join(": ".join(pair) for pair in supported)
    return f"g4f.provider.{cls.__name__} supports: ({joined})"

View File

@ -1,13 +1,22 @@
import os, uuid, requests
from ..typing import get_type_hints
import uuid, requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
# NOTE(review): web-diff rendering — removed and added lines are interleaved
# without "-"/"+" markers; comments below label pre- vs post-refactor code.
# Non-streaming provider for p5.v50.ltd; no auth required.
class V50(BaseProvider):
url = 'https://p5.v50.ltd'
model = ['gpt-3.5-turbo','gpt-3.5-turbo-16k']
supports_gpt_35_turbo = True
supports_stream = False
needs_auth = False
working = True
# Old entry point (removed). Note its explicit temperature default of 0.7:
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
# New entry point (added): typed @staticmethod; temperature now read from kwargs.
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool,
**kwargs: Any,
) -> CreateResult:
# Flatten the chat history into a single "role: content" transcript.
# NOTE(review): repeated `+=` is quadratic; ''.join(...) would be linear.
conversation = ''
for message in messages:
conversation += '%s: %s\n' % (message['role'], message['content'])
@ -17,8 +26,8 @@ def _create_completion(model: str, messages: list, stream: bool, temperature: fl
# (lines hidden by the diff hunk above: start of the request payload dict)
"prompt": conversation,
"options": {},
"systemMessage": ".",
# Old payload values (removed):
"temperature": temperature,
"top_p": 1,
# New payload values (added). NOTE(review): the effective temperature default
# changed from 0.7 to 0.4 in this refactor — confirm that was intentional.
"temperature": kwargs.get("temperature", 0.4),
"top_p": kwargs.get("top_p", 0.4),
"model": model,
# Fresh random user id on every request.
"user": str(uuid.uuid4())
}
@ -39,5 +48,15 @@ def _create_completion(model: str, messages: list, stream: bool, temperature: fl
# (lines hidden by the diff hunk above: the requests.post(...) call this continues;
# an optional proxy mapping is forwarded when the caller passes kwargs['proxy'])
json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
yield response.text
# Old module-level `params` string (removed; replaced by the class-level property):
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
@classmethod
@property
def params(cls):
    """Human-readable summary of the arguments this provider accepts.

    Returns:
        A string of the form
        ``g4f.provider.<Name> supports: (arg: type, ...)``.
    """
    supported = [
        ("model", "str"),
        ("messages", "list[dict[str, str]]"),
        ("stream", "bool"),
        ("temperature", "float"),
        # Fix: advertised as "int" before, but the request payload uses
        # kwargs.get("top_p", 0.4) — a float default — so "float" is correct.
        ("top_p", "float"),
    ]
    joined = ", ".join(": ".join(pair) for pair in supported)
    return f"g4f.provider.{cls.__name__} supports: ({joined})"