Merge pull request #1383 from xtekky/aura

Fix api and None provider
This commit is contained in:
H Lohaus 2023-12-23 20:51:30 +01:00 committed by GitHub
commit 6d09dbf4a9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 39 additions and 20 deletions

27
etc/testing/test_api.py Normal file
View File

@@ -0,0 +1,27 @@
import openai
# Set your Hugging Face token as the API key if you use embeddings.
# If you don't use embeddings, leave it empty.
openai.api_key = "YOUR_HUGGING_FACE_TOKEN" # Replace with your actual token
# Point the client at the local g4f interference API instead of api.openai.com
# (port 1337 is the default for the local development server).
openai.api_base = "http://localhost:1337/v1"
def main():
    """Send a chat-completion request to the local g4f API and print the reply.

    Handles both response shapes the API may return: a single dict
    (non-streaming) or an iterable of delta chunks (streaming).
    """
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "write a poem about a tree"}],
        stream=True,
    )
    if isinstance(response, dict):
        # Not streaming: a plain dict has no attribute access, so index into
        # it (the original `response.choices[0].message.content` would raise
        # AttributeError on a real dict).
        print(response["choices"][0]["message"]["content"])
    else:
        # Streaming: each chunk carries an optional "content" delta.
        for token in response:
            content = token["choices"][0]["delta"].get("content")
            if content is not None:
                print(content, end="", flush=True)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

View File

@@ -83,28 +83,17 @@ class Api:
model = item_data.get('model')
stream = True if item_data.get("stream") == "True" else False
messages = item_data.get('messages')
conversation = item_data.get('conversation') if item_data.get('conversation') != None else None
provider = item_data.get('provider').replace('g4f.Provider.', '')
provider = item_data.get('provider', '').replace('g4f.Provider.', '')
provider = provider if provider and provider != "Auto" else None
if provider != None:
provider = g4f.Provider.ProviderUtils.convert.get(provider)
try:
if model == 'pi':
response = g4f.ChatCompletion.create(
model=model,
stream=stream,
messages=messages,
conversation=conversation,
provider = provider,
ignored=self.list_ignored_providers)
else:
response = g4f.ChatCompletion.create(
model=model,
stream=stream,
messages=messages,
provider = provider,
ignored=self.list_ignored_providers)
response = g4f.ChatCompletion.create(
model=model,
stream=stream,
messages=messages,
provider = provider,
ignored=self.list_ignored_providers
)
except Exception as e:
logging.exception(e)
return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
@@ -179,9 +168,12 @@ class Api:
content = json.dumps(end_completion_data, separators=(',', ':'))
yield f'data: {content}\n\n'
except GeneratorExit:
pass
except Exception as e:
logging.exception(e)
content=json.dumps({"error": "An error occurred while generating the response."}, indent=4)
yield f'data: {content}\n\n'
return StreamingResponse(streaming(), media_type="text/event-stream")