Merge pull request #1149 from Luneye/patch-4

[suggestion] Adding new parameter to check if a provider 'natively' supports message history
This commit is contained in:
Tekky 2023-10-25 14:07:40 +01:00 committed by GitHub
commit a167970d76
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 18 additions and 7 deletions

View File

@@ -32,6 +32,7 @@ default_cookies = {
class Bing(AsyncGeneratorProvider): class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat" url = "https://bing.com/chat"
working = True working = True
supports_message_history = True
supports_gpt_4 = True supports_gpt_4 = True
@staticmethod @staticmethod

View File

@@ -9,6 +9,7 @@ from .base_provider import AsyncGeneratorProvider
class ChatBase(AsyncGeneratorProvider): class ChatBase(AsyncGeneratorProvider):
url = "https://www.chatbase.co" url = "https://www.chatbase.co"
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
supports_message_history = True
working = True working = True
list_incorrect_responses = ["Hmm, I am not sure. Email support@chatbase.co for more info.", list_incorrect_responses = ["Hmm, I am not sure. Email support@chatbase.co for more info.",
"I can only provide support and information about Chatbase"] "I can only provide support and information about Chatbase"]
@@ -60,4 +61,4 @@ class ChatBase(AsyncGeneratorProvider):
("stream", "bool"), ("stream", "bool"),
] ]
param = ", ".join([": ".join(p) for p in params]) param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})" return f"g4f.provider.{cls.__name__} supports: ({param})"

View File

@@ -11,6 +11,7 @@ from .base_provider import AsyncGeneratorProvider
class ChatForAi(AsyncGeneratorProvider): class ChatForAi(AsyncGeneratorProvider):
url = "https://chatforai.store" url = "https://chatforai.store"
working = True working = True
supports_message_history = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
@classmethod @classmethod
@@ -69,4 +70,4 @@ class ChatForAi(AsyncGeneratorProvider):
def generate_signature(timestamp: int, message: str, id: str): def generate_signature(timestamp: int, message: str, id: str):
buffer = f"{timestamp}:{id}:{message}:7YN8z6d6" buffer = f"{timestamp}:{id}:{message}:7YN8z6d6"
return hashlib.sha256(buffer.encode()).hexdigest() return hashlib.sha256(buffer.encode()).hexdigest()

View File

@@ -12,6 +12,7 @@ from .helper import format_prompt
class ChatgptX(AsyncGeneratorProvider): class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de" url = "https://chatgptx.de"
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
supports_message_history = True
working = True working = True
@classmethod @classmethod
@@ -96,4 +97,4 @@ class ChatgptX(AsyncGeneratorProvider):
except: except:
raise RuntimeError(f"Broken line: {line.decode()}") raise RuntimeError(f"Broken line: {line.decode()}")
if content: if content:
yield content yield content

View File

@@ -10,6 +10,7 @@ from .helper import format_prompt
class FakeGpt(AsyncGeneratorProvider): class FakeGpt(AsyncGeneratorProvider):
url = "https://chat-shared2.zhile.io" url = "https://chat-shared2.zhile.io"
supports_message_history = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
working = True working = True
_access_token = None _access_token = None
@@ -91,4 +92,4 @@ class FakeGpt(AsyncGeneratorProvider):
raise RuntimeError("No valid response") raise RuntimeError("No valid response")
def random_string(length: int = 10): def random_string(length: int = 10):
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length)) return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))

View File

@@ -12,6 +12,7 @@ domains = [
class FreeGpt(AsyncGeneratorProvider): class FreeGpt(AsyncGeneratorProvider):
url = "https://freegpts1.aifree.site/" url = "https://freegpts1.aifree.site/"
supports_message_history = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
working = True working = True

View File

@@ -11,6 +11,7 @@ from .helper import format_prompt
class GPTalk(AsyncGeneratorProvider): class GPTalk(AsyncGeneratorProvider):
url = "https://gptalk.net" url = "https://gptalk.net"
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
supports_message_history = True
working = True working = True
_auth = None _auth = None
@@ -81,4 +82,4 @@ class GPTalk(AsyncGeneratorProvider):
break break
message = json.loads(line[6:-1])["content"] message = json.loads(line[6:-1])["content"]
yield message[len(last_message):] yield message[len(last_message):]
last_message = message last_message = message

View File

@@ -9,6 +9,7 @@ from .helper import format_prompt
class GptForLove(AsyncGeneratorProvider): class GptForLove(AsyncGeneratorProvider):
url = "https://ai18.gptforlove.com" url = "https://ai18.gptforlove.com"
supports_message_history = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
working = True working = True

View File

@@ -10,6 +10,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
class You(AsyncGeneratorProvider): class You(AsyncGeneratorProvider):
url = "https://you.com" url = "https://you.com"
working = True working = True
supports_message_history = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
@@ -37,4 +38,4 @@ class You(AsyncGeneratorProvider):
start = b'data: {"youChatToken": ' start = b'data: {"youChatToken": '
async for line in response.iter_lines(): async for line in response.iter_lines():
if line.startswith(start): if line.startswith(start):
yield json.loads(line[len(start):-1]) yield json.loads(line[len(start):-1])

View File

@@ -10,6 +10,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider): class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/" url = "https://chat9.yqcloud.top/"
working = True working = True
supports_message_history = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
@staticmethod @staticmethod

View File

@@ -15,6 +15,7 @@ class BaseProvider(ABC):
supports_stream: bool = False supports_stream: bool = False
supports_gpt_35_turbo: bool = False supports_gpt_35_turbo: bool = False
supports_gpt_4: bool = False supports_gpt_4: bool = False
supports_message_history: bool = False
@staticmethod @staticmethod
@abstractmethod @abstractmethod
@@ -135,4 +136,4 @@ class AsyncGeneratorProvider(AsyncProvider):
messages: Messages, messages: Messages,
**kwargs **kwargs
) -> AsyncResult: ) -> AsyncResult:
raise NotImplementedError() raise NotImplementedError()