Merge pull request #1060 from hlohaus/don

Add ChatgptLogin, ChatgptFree and GptChatly Provider
commit 99bc58ab99
Tekky 2023-10-13 11:33:57 +01:00 (committed by GitHub)
12 changed files with 398 additions and 85 deletions


@@ -222,6 +222,7 @@ print(response)
##### Providers:
```py
import g4f
from g4f.Provider import (
    AItianhu,
    Acytoo,


@@ -0,0 +1,66 @@
from __future__ import annotations

import re
from aiohttp import ClientSession

from ..typing import Messages
from .base_provider import AsyncProvider
from .helper import format_prompt


class ChatgptFree(AsyncProvider):
    url = "https://chatgptfree.ai"
    supports_gpt_35_turbo = True
    working = True
    _post_id = None
    _nonce = None

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> str:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Origin": cls.url,
            "Alt-Used": "chatgptfree.ai",
            "Connection": "keep-alive",
            "Referer": f"{cls.url}/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            if not cls._nonce:
                async with session.get(f"{cls.url}/", proxy=proxy) as response:
                    response.raise_for_status()
                    response = await response.text()

                    result = re.search(r'data-post-id="([0-9]+)"', response)
                    if not result:
                        raise RuntimeError("No post id found")
                    cls._post_id = result.group(1)

                    result = re.search(r'data-nonce="(.*?)"', response)
                    if not result:
                        raise RuntimeError("No nonce found")
                    cls._nonce = result.group(1)

            prompt = format_prompt(messages)
            data = {
                "_wpnonce": cls._nonce,
                "post_id": cls._post_id,
                "url": cls.url,
                "action": "wpaicg_chat_shortcode_message",
                "message": prompt,
                "bot_id": "0"
            }
            async with session.post(cls.url + "/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
                response.raise_for_status()
                return (await response.json())["data"]
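
A minimal sketch of driving the new provider through g4f's async entry point (assuming the `g4f.ChatCompletion.create_async` call path and a reachable upstream; illustrative, not part of the commit):

```py
import asyncio

import g4f
from g4f.Provider import ChatgptFree

async def main():
    # AsyncProvider subclasses return the whole completion as one string.
    response = await g4f.ChatCompletion.create_async(
        model="gpt-3.5-turbo",
        provider=ChatgptFree,
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(response)

asyncio.run(main())
```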


@@ -0,0 +1,71 @@
from __future__ import annotations

import re
import time
import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class ChatgptLogin(AsyncGeneratorProvider):
    url = "https://chatgptlogin.ai"
    supports_gpt_35_turbo = True
    working = True
    _user_id = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/chat/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "chatgptlogin.ai",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache"
        }
        async with ClientSession(headers=headers) as session:
            if not cls._user_id:
                async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
                    response.raise_for_status()
                    response = await response.text()

                    result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
                    if not result:
                        raise RuntimeError("No user id found")
                    cls._user_id = result.group(1)

            async with session.post(f"{cls.url}/chat/new_chat", json={"user_id": cls._user_id}, proxy=proxy) as response:
                response.raise_for_status()
                chat_id = (await response.json())["id_"]
            if not chat_id:
                raise RuntimeError("Could not create new chat")

            prompt = format_prompt(messages)
            data = {
                "question": prompt,
                "chat_id": chat_id,
                "timestamp": int(time.time() * 1e3),
            }
            async with session.post(f"{cls.url}/chat/chat_api_stream", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: "):
                        content = json.loads(line[6:])["choices"][0]["delta"].get("content")
                        if content:
                            yield content

            async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
                response.raise_for_status()
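
Because ChatgptLogin is an `AsyncGeneratorProvider`, output arrives as streamed deltas parsed from `data:` lines, and the chat created via `/chat/new_chat` is deleted once the stream ends. A rough consumption sketch (illustrative, not part of the commit):

```py
import asyncio

from g4f.Provider import ChatgptLogin

async def main():
    # create_async_generator yields content deltas as they stream in.
    async for token in ChatgptLogin.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Count to five"}],
    ):
        print(token, end="", flush=True)

asyncio.run(main())
```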

g4f/Provider/GptChatly.py Normal file

@@ -0,0 +1,49 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import Messages
from .base_provider import AsyncProvider


class GptChatly(AsyncProvider):
    url = "https://gptchatly.com"
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    working = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> str:
        if model.startswith("gpt-4"):
            chat_url = f"{cls.url}/fetch-gpt4-response"
        else:
            chat_url = f"{cls.url}/fetch-response"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "past_conversations": messages
            }
            async with session.post(chat_url, json=data, proxy=proxy) as response:
                response.raise_for_status()
                return (await response.json())["chatGPTResponse"]
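
The model prefix picks the endpoint (`gpt-4*` goes to `/fetch-gpt4-response`, everything else to `/fetch-response`), and the raw message list is forwarded as `past_conversations`. A direct-call sketch (illustrative, assuming the upstream accepts unauthenticated requests as the code implies):

```py
import asyncio

from g4f.Provider import GptChatly

async def main():
    # "gpt-4" routes to the GPT-4 endpoint; the full history is sent as-is.
    answer = await GptChatly.create_async(
        model="gpt-4",
        messages=[{"role": "user", "content": "What is 2 + 2?"}],
    )
    print(answer)

asyncio.run(main())
```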


@@ -9,15 +9,17 @@ from .AItianhu import AItianhu
 from .AItianhuSpace import AItianhuSpace
 from .Bing import Bing
 from .ChatBase import ChatBase
-from .ChatForAi import ChatForAi
 from .Chatgpt4Online import Chatgpt4Online
 from .ChatgptAi import ChatgptAi
 from .ChatgptDemo import ChatgptDemo
 from .ChatgptDuo import ChatgptDuo
+from .ChatgptFree import ChatgptFree
+from .ChatgptLogin import ChatgptLogin
 from .ChatgptX import ChatgptX
 from .Cromicle import Cromicle
 from .FreeGpt import FreeGpt
 from .GPTalk import GPTalk
+from .GptChatly import GptChatly
 from .GptForLove import GptForLove
 from .GptGo import GptGo
 from .GptGod import GptGod
@@ -59,6 +61,7 @@ class ProviderUtils:
         'ChatgptAi': ChatgptAi,
         'ChatgptDemo': ChatgptDemo,
         'ChatgptDuo': ChatgptDuo,
+        'ChatgptFree': ChatgptFree,
         'ChatgptLogin': ChatgptLogin,
         'ChatgptX': ChatgptX,
         'CodeLinkAva': CodeLinkAva,
@@ -70,6 +73,7 @@ class ProviderUtils:
         'Forefront': Forefront,
         'FreeGpt': FreeGpt,
         'GPTalk': GPTalk,
+        'GptChatly': GptChatly,
         'GetGpt': GetGpt,
         'GptForLove': GptForLove,
         'GptGo': GptGo,
@@ -121,6 +125,7 @@ __all__ = [
     'ChatgptAi',
     'ChatgptDemo',
     'ChatgptDuo',
+    'ChatgptFree',
     'ChatgptLogin',
     'ChatgptX',
     'Cromicle',
@@ -130,6 +135,7 @@ __all__ = [
     'Forefront',
     'FreeGpt',
     'GPTalk',
+    'GptChatly',
     'GptForLove',
     'GetGpt',
     'GptGo',
@@ -156,4 +162,4 @@ __all__ = [
     'FastGpt',
     'Wuguokai',
     'V50'
 ]
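
These registry entries keep the new providers resolvable by name at runtime. A sketch, assuming the dict shown here is the `ProviderUtils.convert` mapping:

```py
import asyncio

import g4f
from g4f.Provider import ProviderUtils

async def main():
    # Resolve a provider class from a config string via the convert map.
    provider = ProviderUtils.convert["ChatgptFree"]
    print(await g4f.ChatCompletion.create_async(
        model="gpt-3.5-turbo",
        provider=provider,
        messages=[{"role": "user", "content": "ping"}],
    ))

asyncio.run(main())
```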


@ -1,14 +1,13 @@
from __future__ import annotations from __future__ import annotations
from ..typing import AsyncResult, Messages from ...typing import AsyncResult, Messages
from ..requests import StreamSession from ...requests import StreamSession
from .base_provider import AsyncGeneratorProvider from ..base_provider import AsyncGeneratorProvider
class ChatForAi(AsyncGeneratorProvider): class ChatForAi(AsyncGeneratorProvider):
url = "https://chatforai.com" url = "https://chatforai.com"
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
working = True
@classmethod @classmethod
async def create_async_generator( async def create_async_generator(
@ -40,6 +39,8 @@ class ChatForAi(AsyncGeneratorProvider):
async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response: async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
response.raise_for_status() response.raise_for_status()
async for chunk in response.iter_content(): async for chunk in response.iter_content():
if b"https://chatforai.store" in chunk:
raise RuntimeError(f"Response: {chunk.decode()}")
yield chunk.decode() yield chunk.decode()
@classmethod @classmethod
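
The two added lines are a fail-fast guard: if the upstream starts streaming its error page (which embeds chatforai.store), the provider raises instead of yielding the page as model output. The same pattern in isolation (`decode_chunk` is a hypothetical helper, not from the commit):

```py
def decode_chunk(chunk: bytes) -> str:
    # Surface upstream error pages as exceptions rather than output.
    if b"https://chatforai.store" in chunk:
        raise RuntimeError(f"Response: {chunk.decode()}")
    return chunk.decode()

assert decode_chunk(b"Hello") == "Hello"
```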


@@ -1,74 +0,0 @@
from __future__ import annotations

import os, re
from aiohttp import ClientSession

from ..base_provider import AsyncProvider, format_prompt


class ChatgptLogin(AsyncProvider):
    url = "https://opchatgpts.net"
    supports_gpt_35_turbo = True
    working = True
    _nonce = None

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        headers = {
            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept" : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin" : "https://opchatgpts.net",
            "Alt-Used" : "opchatgpts.net",
            "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
            "Sec-Fetch-Dest" : "empty",
            "Sec-Fetch-Mode" : "cors",
            "Sec-Fetch-Site" : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            if not cls._nonce:
                async with session.get(
                    "https://opchatgpts.net/chatgpt-free-use/",
                    params={"id": os.urandom(6).hex()},
                ) as response:
                    result = re.search(r'data-nonce="(.*?)"', await response.text())
                    if not result:
                        raise RuntimeError("No nonce value")
                    cls._nonce = result.group(1)
            data = {
                "_wpnonce": cls._nonce,
                "post_id": 28,
                "url": "https://opchatgpts.net/chatgpt-free-use",
                "action": "wpaicg_chat_shortcode_message",
                "message": format_prompt(messages),
                "bot_id": 0
            }
            async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
                response.raise_for_status()
                data = await response.json()
                if "data" in data:
                    return data["data"]
                elif "msg" in data:
                    raise RuntimeError(data["msg"])
                else:
                    raise RuntimeError(f"Response: {data}")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


@@ -1,7 +1,74 @@
 from __future__ import annotations

-from .ChatgptLogin import ChatgptLogin
+import os, re
+from aiohttp import ClientSession
+
+from ..base_provider import AsyncProvider, format_prompt


-class Opchatgpts(ChatgptLogin):
+class Opchatgpts(AsyncProvider):
     url = "https://opchatgpts.net"
+    supports_gpt_35_turbo = True
+    working = True
+    _nonce = None
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> str:
+        headers = {
+            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept" : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin" : "https://opchatgpts.net",
+            "Alt-Used" : "opchatgpts.net",
+            "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
+            "Sec-Fetch-Dest" : "empty",
+            "Sec-Fetch-Mode" : "cors",
+            "Sec-Fetch-Site" : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            if not cls._nonce:
+                async with session.get(
+                    "https://opchatgpts.net/chatgpt-free-use/",
+                    params={"id": os.urandom(6).hex()},
+                ) as response:
+                    result = re.search(r'data-nonce="(.*?)"', await response.text())
+                    if not result:
+                        raise RuntimeError("No nonce value")
+                    cls._nonce = result.group(1)
+            data = {
+                "_wpnonce": cls._nonce,
+                "post_id": 28,
+                "url": "https://opchatgpts.net/chatgpt-free-use",
+                "action": "wpaicg_chat_shortcode_message",
+                "message": format_prompt(messages),
+                "bot_id": 0
+            }
+            async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
+                response.raise_for_status()
+                data = await response.json()
+                if "data" in data:
+                    return data["data"]
+                elif "msg" in data:
+                    raise RuntimeError(data["msg"])
+                else:
+                    raise RuntimeError(f"Response: {data}")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
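
Opchatgpts now carries the full WordPress `wpaicg` AJAX flow itself instead of inheriting it from the removed deprecated ChatgptLogin: scrape `data-nonce` from the page once, then post the prompt to `admin-ajax.php`. A usage sketch (assuming Opchatgpts stays exported from `g4f.Provider`; illustrative only):

```py
import asyncio

from g4f.Provider import Opchatgpts

async def main():
    # The nonce is scraped lazily on first use and cached on the class.
    print(await Opchatgpts.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    ))

asyncio.run(main())
```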


@@ -11,4 +11,4 @@ from .Equing import Equing
 from .Wuguokai import Wuguokai
 from .V50 import V50
 from .FastGpt import FastGpt
-from .ChatgptLogin import ChatgptLogin
+from .ChatForAi import ChatForAi


@@ -0,0 +1,64 @@
from __future__ import annotations

import re
from aiohttp import ClientSession

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt


class ChatAiGpt(AsyncGeneratorProvider):
    url = "https://chataigpt.org"
    supports_gpt_35_turbo = True
    working = True
    _nonce = None
    _post_id = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Origin": cls.url,
            "Alt-Used": cls.url,
            "Connection": "keep-alive",
            "Referer": cls.url,
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }
        async with ClientSession(headers=headers) as session:
            if not cls._nonce:
                async with session.get(f"{cls.url}/", proxy=proxy) as response:
                    response.raise_for_status()
                    response = await response.text()

                    result = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', response)
                    if not result:
                        raise RuntimeError("No nonce found")
                    cls._nonce, cls._post_id = result.group(1), result.group(2)

            prompt = format_prompt(messages)
            data = {
                "_wpnonce": cls._nonce,
                "post_id": cls._post_id,
                "url": cls.url,
                "action": "wpaicg_chat_shortcode_message",
                "message": prompt,
                "bot_id": 0
            }
            async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
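
ChatAiGpt uses the same WordPress-plugin flow as ChatgptFree, but grabs the nonce and post id with a single regex and streams the AJAX response. Since the relative imports place it in the unfinished subpackage, a sketch would import it from there (path assumed from the imports above; illustrative only):

```py
import asyncio

from g4f.Provider.unfinished import ChatAiGpt

async def main():
    async for chunk in ChatAiGpt.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
    ):
        print(chunk, end="")

asyncio.run(main())
```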


@@ -0,0 +1,60 @@
from __future__ import annotations

from aiohttp import ClientSession

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider


class TalkAi(AsyncGeneratorProvider):
    url = "https://talkai.info"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "application/json",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/de/chat/",
            "content-type": "application/json",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache"
        }
        async with ClientSession(headers=headers) as session:
            history = [{
                "content": message["content"],
                "from": "you" if message["role"] == "user" else "chatGPT"
            } for message in messages]
            data = {
                "type": "chat",
                "message": messages[-1]["content"],
                "messagesHistory": history,
                "model": model,
                "max_tokens": 256,
                "temperature": 1,
                "top_p": 1,
                "presence_penalty": 0,
                "frequency_penalty": 0,
                **kwargs
            }
            async with session.post(f"{cls.url}/de/chat/send2/", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
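
TalkAi remaps the conversation into its own history format (`"from": "you"` / `"chatGPT"`), sends only the last message as `message`, and spreads `**kwargs` into the request body, so sampling parameters can be overridden per call. A sketch under the same unfinished-subpackage assumption:

```py
import asyncio

from g4f.Provider.unfinished import TalkAi

async def main():
    async for chunk in TalkAi.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Tell me a joke"}],
        temperature=0.7,  # forwarded into the request body via **kwargs
    ):
        print(chunk, end="")

asyncio.run(main())
```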


@@ -1,3 +1,5 @@
 from .MikuChat import MikuChat
 from .PerplexityAi import PerplexityAi
 from .Komo import Komo
+from .TalkAi import TalkAi
+from .ChatAiGpt import ChatAiGpt