# gpt4free/g4f/interference/__init__.py
import json
import time
import random
import string
from typing import Any
from flask import Flask, request
from flask_cors import CORS
from g4f import ChatCompletion
# Flask application exposing an OpenAI-compatible "interference" API.
app = Flask(__name__)
# Enable CORS so browser-based clients on other origins can call the API.
CORS(app)
@app.route('/')
def index():
    """Root endpoint: a plain-text banner confirming the API is up."""
    banner = 'interference api, url: http://127.0.0.1:1337'
    return banner
@app.route('/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible `/chat/completions` endpoint.

    Reads `model`, `stream` and `messages` from the JSON request body,
    forwards them to g4f's `ChatCompletion.create`, and returns either a
    single `chat.completion` object (stream=False) or a `text/event-stream`
    of `chat.completion.chunk` SSE events (stream=True).
    """
    # Parse the request body once instead of re-reading it for every field.
    body = request.get_json()
    model = body.get('model', 'gpt-3.5-turbo')
    stream = body.get('stream', False)
    messages = body.get('messages')

    response = ChatCompletion.create(model=model,
                                     stream=stream, messages=messages)

    # Pseudo-random id in the style of OpenAI's 'chatcmpl-...' identifiers.
    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
    completion_timestamp = int(time.time())

    if not stream:
        return {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'message': {
                        'role': 'assistant',
                        'content': response,
                    },
                    'finish_reason': 'stop',
                }
            ],
            # Token accounting is not available from g4f, so usage is null.
            'usage': {
                'prompt_tokens': None,
                'completion_tokens': None,
                'total_tokens': None,
            },
        }

    def streaming():
        # Emit one SSE 'data:' event per generated chunk.
        for chunk in response:
            completion_data = {
                'id': f'chatcmpl-{completion_id}',
                'object': 'chat.completion.chunk',
                'created': completion_timestamp,
                'model': model,
                'choices': [
                    {
                        'index': 0,
                        'delta': {
                            'content': chunk,
                        },
                        'finish_reason': None,
                    }
                ],
            }
            content = json.dumps(completion_data, separators=(',', ':'))
            yield f'data: {content}\n\n'
            time.sleep(0.1)  # pace the stream for client-side rendering

        # Terminal chunk: empty delta with finish_reason 'stop' signals the end.
        end_completion_data: dict[str, Any] = {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion.chunk',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'delta': {},
                    'finish_reason': 'stop',
                }
            ],
        }
        content = json.dumps(end_completion_data, separators=(',', ':'))
        yield f'data: {content}\n\n'

    return app.response_class(streaming(), mimetype='text/event-stream')
def run_interference(host: str = '0.0.0.0', port: int = 1337, debug: bool = True) -> None:
    """Start the interference API server (blocking).

    Args:
        host: Interface to bind. The default '0.0.0.0' listens on all
            interfaces (the index banner advertises http://127.0.0.1:1337).
        port: TCP port to serve on; defaults to 1337.
        debug: Enable Flask debug mode. Defaults to True to preserve the
            original behavior. NOTE(security): the Werkzeug debugger permits
            arbitrary code execution — never combine debug=True with binding
            all interfaces outside a trusted network.
    """
    app.run(host=host, port=port, debug=debug)