Merge pull request #737 from AlephZero255/main

Fix empty H2o output
This commit is contained in:
xtekky 2023-07-11 20:24:46 +02:00 committed by GitHub
commit 4133ac9200
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -18,89 +18,77 @@ models = {
} }
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Send the chat history to gpt-gm.h2o.ai and return the generated reply.

    :param model: key into the module-level ``models`` mapping (resolved to the
        backend model identifier sent to the service)
    :param messages: list of ``{'role': ..., 'content': ...}`` dicts
    :param stream: accepted for interface compatibility; the HTTP response is
        read in full and the final decoded text is returned in one piece
    :param kwargs: optional generation overrides — ``temperature``,
        ``truncate``, ``max_new_tokens``, ``do_sample``,
        ``repetition_penalty``, ``return_full_text``, ``id``, ``response_id``
    :return: the model's generated text (``generated_text`` field of the
        final server-sent event)
    """
    # Flatten the chat history into a single plain-text prompt, ending with
    # the 'assistant: ' cue the model completes.
    conversation = ''
    for message in messages:
        conversation += '%s: %s\n' % (message['role'], message['content'])
    conversation += 'assistant: '

    # One Session so cookies set by the landing page and the settings POST
    # persist across the subsequent conversation requests.
    session = requests.Session()
    response = session.get("https://gpt-gm.h2o.ai/")  # fetched for cookies; body unused

    # Browser-like headers for the form POST that accepts the ethics modal —
    # without this the service refuses to start a conversation.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/x-www-form-urlencoded",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
    }
    data = {
        "ethicsModalAccepted": "true",
        "shareConversationsWithModelAuthors": "true",
        "ethicsModalAcceptedAt": "",
        "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
        "searchEnabled": "true"
    }
    response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)

    # JSON headers for the conversation API calls.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
        "Accept": "*/*",
        "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
        "Content-Type": "application/json",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "Referer": "https://gpt-gm.h2o.ai/"
    }
    data = {
        "model": models[model]
    }
    # Create a conversation; the response JSON carries its 'conversationId'.
    conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)

    data = {
        "inputs": conversation,
        "parameters": {
            "temperature": kwargs.get('temperature', 0.4),
            "truncate": kwargs.get('truncate', 2048),
            "max_new_tokens": kwargs.get('max_new_tokens', 1024),
            "do_sample": kwargs.get('do_sample', True),
            "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
            "return_full_text": kwargs.get('return_full_text', False)
        },
        "stream": True,
        "options": {
            "id": kwargs.get('id', str(uuid4())),
            "response_id": kwargs.get('response_id', str(uuid4())),
            "is_retry": False,
            "use_cache": False,
            "web_search_id": ""
        }
    }
    response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)

    # The body is a stream of 'data:{json}' server-sent events; the last
    # event holds the complete 'generated_text'.
    generated_text = response.text.replace("\n", "").split("data:")
    generated_text = json.loads(generated_text[-1])

    return generated_text["generated_text"]
# Human-readable summary of the provider's supported signature, built by
# introspecting _create_completion's positional parameters and their
# annotated types.
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])