Merge pull request #743 from bagusindrayana/refactor-provider

my bad (refactor/move provider from testing folder)
This commit is contained in:
xtekky 2023-07-16 18:50:00 +01:00 committed by GitHub
commit 821c8dcd47
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 74 additions and 240 deletions

View File

@ -0,0 +1,40 @@
import os,sys
import requests
from ...typing import get_type_hints
# Chat endpoint the flattened prompt is POSTed to.
url = "https://aiservice.vercel.app/api/chat/answer"
# Models this provider accepts.
model = ['gpt-3.5-turbo']
# The endpoint returns one JSON payload per request, so no streaming.
supports_stream = False
# No auth token/cookie is required.
needs_auth = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Post the conversation to the AiService endpoint and yield its reply.

    The chat history is flattened into one plain-text prompt made of
    "role: content" lines, terminated with "assistant:" so the service
    completes the assistant turn. Yields the reply text on success; on a
    non-200 status, reports the code and yields nothing.
    """
    prompt = ''.join('%s: %s\n' % (msg['role'], msg['content']) for msg in messages)
    prompt += 'assistant:'

    request_headers = {
        "accept": "*/*",
        "content-type": "text/plain;charset=UTF-8",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "Referer": "https://aiservice.vercel.app/chat",
    }
    payload = {
        "input": prompt
    }

    response = requests.post(url, headers=request_headers, json=payload)
    if response.status_code != 200:
        # Report the failure and stop iterating without yielding anything.
        print(f"Error Occurred::{response.status_code}")
        return None
    yield response.json()['data']
# Human-readable capability string, e.g.
# "g4f.Providers.AiService supports: (model: str, messages: list, stream: bool)",
# built by introspecting _create_completion's positional parameters.
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join(
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

View File

@ -0,0 +1,27 @@
import os,sys
import json
import subprocess
from ...typing import sha256, Dict, get_type_hints
# Base URL of the BingHuan service — presumably consumed by
# helpers/binghuan.py rather than by this module directly; confirm.
url = 'https://b.ai-huan.xyz'
# Models this provider accepts.
model = ['gpt-3.5-turbo', 'gpt-4']
# Output is streamed line by line from the helper subprocess.
supports_stream = True
# No auth token/cookie is required.
needs_auth = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Delegate the request to helpers/binghuan.py in a subprocess and
    stream its stdout back line by line.

    The conversation and model are serialized into one compact-JSON argv
    entry; stderr is merged into stdout so helper errors are streamed to
    the caller too.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))
    # Fix: launch the helper with the interpreter running this module rather
    # than whatever "python" happens to resolve to on PATH (which may be
    # Python 2 or missing entirely).
    cmd = [sys.executable, f'{path}/helpers/binghuan.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in iter(p.stdout.readline, b''):
        # NOTE(review): cp1252 cannot represent emoji, which matches the
        # reported emoji-encoding problem for this provider — utf-8 is the
        # likely fix, but confirm the helper's actual output encoding first.
        yield line.decode('cp1252')
# Human-readable capability string, e.g.
# "g4f.Providers.BingHuan supports: (model: str, messages: list, stream: bool)",
# built by introspecting _create_completion's positional parameters.
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join(
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

View File

@ -4,7 +4,7 @@ import json
import random
import time
import string
# from ...typing import sha256, Dict, get_type_hints
from ...typing import sha256, Dict, get_type_hints
# Reverse-engineered Android API endpoint; per the provider's note, the web
# endpoint (/gptapi/v1/web/turbo) is rate-limited, this one is not.
url = "https://wewordle.org/gptapi/v1/android/turbo"
# Models this provider accepts.
model = ['gpt-3.5-turbo']
@ -68,30 +68,6 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
print(f"Error Occurred::{response.status_code}")
return None
# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
# '(%s)' % ', '.join(
# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
# Temporary For ChatCompletion Class
class ChatCompletion:
    """Temporary stand-in for the package-level ChatCompletion entry point,
    so this provider module can be exercised directly."""

    @staticmethod
    def create(model: str, messages: list, provider=None, stream: bool = False, auth: str = False, **kwargs):
        """Run _create_completion; return the generator when stream is True,
        otherwise the joined full response string.

        Exits the process (status 1) when auth is required but missing, or
        when an unsupported keyword argument reaches _create_completion.
        """
        # Fix: the original annotated provider as `None or str`, which
        # evaluates to plain `str` at def time and is misleading; a None
        # default is backward-compatible and matches the truthiness check.
        kwargs['auth'] = auth
        if provider and needs_auth and not auth:
            print(
                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
            sys.exit(1)
        try:
            return (_create_completion(model, messages, stream, **kwargs)
                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
        except TypeError as e:
            print(e)
            # Fix: the original indexed split("'")[1] unconditionally, which
            # itself raised IndexError for TypeErrors without a quoted name.
            parts = str(e).split("'")
            arg: str = parts[1] if len(parts) > 1 else str(e)
            print(
                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
            sys.exit(1)
# Human-readable capability string, e.g.
# "g4f.Providers.Wewordle supports: (model: str, messages: list, stream: bool)",
# built by introspecting _create_completion's positional parameters.
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join(
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

View File

@ -19,6 +19,9 @@ from .Providers import (
EasyChat,
Acytoo,
DFEHub,
AiService,
BingHuan,
Wewordle
)
Palm = Bard  # alias: the name "Palm" resolves to the Bard provider

View File

@ -1,62 +0,0 @@
import os,sys
import requests
# from ...typing import get_type_hints
# Chat endpoint the flattened prompt is POSTed to.
url = "https://aiservice.vercel.app/api/chat/answer"
# Models this provider accepts.
model = ['gpt-3.5-turbo']
# The endpoint returns one JSON payload per request, so no streaming.
supports_stream = False
# No auth token/cookie is required.
needs_auth = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Post the conversation to the AiService endpoint and yield its reply.

    The chat history is flattened into one plain-text prompt made of
    "role: content" lines, terminated with "assistant:" so the service
    completes the assistant turn. Yields the reply text on success; on a
    non-200 status, reports the code and yields nothing.
    """
    prompt = ''.join('%s: %s\n' % (msg['role'], msg['content']) for msg in messages)
    prompt += 'assistant:'

    request_headers = {
        "accept": "*/*",
        "content-type": "text/plain;charset=UTF-8",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "Referer": "https://aiservice.vercel.app/chat",
    }
    payload = {
        "input": prompt
    }

    response = requests.post(url, headers=request_headers, json=payload)
    if response.status_code != 200:
        # Report the failure and stop iterating without yielding anything.
        print(f"Error Occurred::{response.status_code}")
        return None
    yield response.json()['data']
# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
# '(%s)' % ', '.join(
# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
# Temporary For ChatCompletion Class
class ChatCompletion:
    """Temporary stand-in for the package-level ChatCompletion entry point,
    so this provider module can be exercised directly."""

    @staticmethod
    def create(model: str, messages: list, provider=None, stream: bool = False, auth: str = False, **kwargs):
        """Run _create_completion; return the generator when stream is True,
        otherwise the joined full response string.

        Exits the process (status 1) when auth is required but missing, or
        when an unsupported keyword argument reaches _create_completion.
        """
        # Fix: the original annotated provider as `None or str`, which
        # evaluates to plain `str` at def time and is misleading; a None
        # default is backward-compatible and matches the truthiness check.
        kwargs['auth'] = auth
        if provider and needs_auth and not auth:
            print(
                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
            sys.exit(1)
        try:
            return (_create_completion(model, messages, stream, **kwargs)
                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
        except TypeError as e:
            print(e)
            # Fix: the original indexed split("'")[1] unconditionally, which
            # itself raised IndexError for TypeErrors without a quoted name.
            parts = str(e).split("'")
            arg: str = parts[1] if len(parts) > 1 else str(e)
            print(
                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
            sys.exit(1)

View File

@ -1,2 +0,0 @@
https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431
probably gpt-3.5

View File

@ -1,30 +0,0 @@
from AiService import ChatCompletion


def _run(messages):
    # Every case goes through the temporary ChatCompletion facade with
    # streaming disabled; only the conversation varies.
    return ChatCompletion.create(model="gpt-3.5-turbo",
                                 provider="AiService",
                                 stream=False,
                                 messages=messages)


# Test 1: single-turn identity prompt
print(_run([{'role': 'user', 'content': 'who are you?'}]))

# Test 2: capability prompt
print(_run([{'role': 'user', 'content': 'what you can do?'}]))

# Test 3: multi-turn conversation with an assistant turn in the history
print(_run([
    {'role': 'user', 'content': 'now your name is Bob'},
    {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
    {'role': 'user', 'content': 'what your name again?'},
]))

View File

@ -1,49 +0,0 @@
import os,sys
import json
import subprocess
# from ...typing import sha256, Dict, get_type_hints
# Base URL of the BingHuan service — presumably consumed by
# helpers/binghuan.py rather than by this module directly; confirm.
url = 'https://b.ai-huan.xyz'
# Models this provider accepts.
model = ['gpt-3.5-turbo', 'gpt-4']
# Output is streamed line by line from the helper subprocess.
supports_stream = True
# No auth token/cookie is required.
needs_auth = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Delegate the request to helpers/binghuan.py in a subprocess and
    stream its stdout back line by line.

    The conversation and model are serialized into one compact-JSON argv
    entry; stderr is merged into stdout so helper errors are streamed to
    the caller too.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))
    # Fix: launch the helper with the interpreter running this module rather
    # than whatever "python" happens to resolve to on PATH (which may be
    # Python 2 or missing entirely).
    cmd = [sys.executable, f'{path}/helpers/binghuan.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in iter(p.stdout.readline, b''):
        # NOTE(review): cp1252 cannot represent emoji, which matches the
        # reported emoji-encoding problem for this provider — utf-8 is the
        # likely fix, but confirm the helper's actual output encoding first.
        yield line.decode('cp1252')
# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
# '(%s)' % ', '.join(
# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
# Temporary For ChatCompletion Class
class ChatCompletion:
    """Temporary stand-in for the package-level ChatCompletion entry point,
    so this provider module can be exercised directly."""

    @staticmethod
    def create(model: str, messages: list, provider=None, stream: bool = False, auth: str = False, **kwargs):
        """Run _create_completion; return the generator when stream is True,
        otherwise the joined full response string.

        Exits the process (status 1) when auth is required but missing, or
        when an unsupported keyword argument reaches _create_completion.
        """
        # Fix: the original annotated provider as `None or str`, which
        # evaluates to plain `str` at def time and is misleading; a None
        # default is backward-compatible and matches the truthiness check.
        kwargs['auth'] = auth
        if provider and needs_auth and not auth:
            print(
                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
            sys.exit(1)
        try:
            return (_create_completion(model, messages, stream, **kwargs)
                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
        except TypeError as e:
            print(e)
            # Fix: the original indexed split("'")[1] unconditionally, which
            # itself raised IndexError for TypeErrors without a quoted name.
            parts = str(e).split("'")
            arg: str = parts[1] if len(parts) > 1 else str(e)
            print(
                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
            sys.exit(1)

View File

@ -1,7 +0,0 @@
https://github.com/xtekky/gpt4free/issues/40#issuecomment-1630946450
The chat flow is really like the real Bing (create a conversation, listen to a websocket, and more),
so I reused the Bing Provider code from the https://gitler.moe/g4f/gpt4free/ version, replaced the API endpoint and some conversation styles, and it works fine.
However, Bing doesn't really support multi-turn/continuous conversation; it is emulated using the prompt template from the original Provider (def convert(messages): https://github.com/xtekky/gpt4free/blob/e594500c4e7a8443e9b3f4af755c72f42dae83f0/g4f/Provider/Providers/Bing.py#L322).
I also have a problem with emoji encoding and don't yet know how to fix it.

View File

@ -1,31 +0,0 @@
from BingHuan import ChatCompletion


def _run(model, messages):
    # Every case goes through the temporary ChatCompletion facade with
    # streaming disabled; only the model and conversation vary.
    return ChatCompletion.create(model=model,
                                 provider="BingHuan",
                                 stream=False,
                                 messages=messages)


# Test 1: single-turn identity prompt
print(_run("gpt-3.5-turbo", [{'role': 'user', 'content': 'who are you?'}]))

# Test 2: capability prompt
# this prompt will return emoji in end of response
print(_run("gpt-3.5-turbo", [{'role': 'user', 'content': 'what you can do?'}]))

# Test 3: multi-turn conversation, exercised against gpt-4
print(_run("gpt-4", [
    {'role': 'user', 'content': 'now your name is Bob'},
    {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
    {'role': 'user', 'content': 'what your name again?'},
]))

View File

@ -1 +0,0 @@
Originally from the website https://chat-gpt.com/chat (https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431). I first found the API https://wewordle.org/gptapi/v1/web/turbo, but it is rate-limited, so I reverse-engineered their Android app and found https://wewordle.org/gptapi/v1/android/turbo; randomizing the user id bypasses the limit.

View File

@ -1,30 +0,0 @@
from Wewordle import ChatCompletion


def _run(messages):
    # Every case goes through the temporary ChatCompletion facade with
    # streaming disabled; only the conversation varies.
    return ChatCompletion.create(model="gpt-3.5-turbo",
                                 provider="Wewordle",
                                 stream=False,
                                 messages=messages)


# Test 1: single-turn identity prompt
print(_run([{'role': 'user', 'content': 'who are you?'}]))

# Test 2: capability prompt
print(_run([{'role': 'user', 'content': 'what you can do?'}]))

# Test 3: multi-turn conversation with an assistant turn in the history
print(_run([
    {'role': 'user', 'content': 'now your name is Bob'},
    {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
    {'role': 'user', 'content': 'what your name again?'},
]))