author    | t.me/xtekky <98614666+xtekky@users.noreply.github.com> | 2023-06-24 03:47:00 +0200
committer | t.me/xtekky <98614666+xtekky@users.noreply.github.com> | 2023-06-24 03:47:00 +0200
commit    | 5db58fd87f230fbe5bae599bb4b120ab42cad3be (patch)
tree      | 770be13bca77c5d04dfe3265f378431df788706f /interference/app.py
parent    | Merge pull request #664 from LopeKinz/main (diff)
Diffstat
-rw-r--r-- | interference/app.py | 86
1 file changed, 86 insertions, 0 deletions
diff --git a/interference/app.py b/interference/app.py
new file mode 100644
index 00000000..afe15df7
--- /dev/null
+++ b/interference/app.py
@@ -0,0 +1,86 @@
+import os
+import time
+import json
+import random
+
+from g4f import Model, ChatCompletion, Provider
+from flask import Flask, request, Response
+from flask_cors import CORS
+
+app = Flask(__name__)
+CORS(app)
+
+@app.route("/chat/completions", methods=['POST'])
+def chat_completions():
+    streaming = request.json.get('stream', False)
+    model = request.json.get('model', 'gpt-3.5-turbo')
+    messages = request.json.get('messages')
+
+    response = ChatCompletion.create(model=model, stream=streaming,
+                                     messages=messages)
+
+    if not streaming:
+        while 'curl_cffi.requests.errors.RequestsError' in response:
+            response = ChatCompletion.create(model=model, stream=streaming,
+                                             messages=messages)
+
+        completion_timestamp = int(time.time())
+        completion_id = ''.join(random.choices(
+            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
+        return {
+            'id': 'chatcmpl-%s' % completion_id,
+            'object': 'chat.completion',
+            'created': completion_timestamp,
+            'model': model,
+            'usage': {
+                'prompt_tokens': None,
+                'completion_tokens': None,
+                'total_tokens': None
+            },
+            'choices': [{
+                'message': {
+                    'role': 'assistant',
+                    'content': response
+                },
+                'finish_reason': 'stop',
+                'index': 0
+            }]
+        }
+
+    def stream():
+        for token in response:
+            completion_timestamp = int(time.time())
+            completion_id = ''.join(random.choices(
+                'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
+            completion_data = {
+                'id': f'chatcmpl-{completion_id}',
+                'object': 'chat.completion.chunk',
+                'created': completion_timestamp,
+                'model': 'gpt-3.5-turbo-0301',
+                'choices': [
+                    {
+                        'delta': {
+                            'content': token
+                        },
+                        'index': 0,
+                        'finish_reason': None
+                    }
+                ]
+            }
+
+            yield 'data: %s\n\n' % json.dumps(completion_data, separators=(',', ':'))
+            time.sleep(0.1)
+
+    return app.response_class(stream(), mimetype='text/event-stream')
+
+
+if __name__ == '__main__':
+    config = {
+        'host': '0.0.0.0',
+        'port': 1337,
+        'debug': True
+    }
+
+    app.run(**config)
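The new endpoint mirrors the shape of OpenAI's /chat/completions response, so any plain HTTP client can drive it. Below is a minimal non-streaming sketch (an illustration, not part of the commit), assuming the server above is running with its default config on port 1337 and that the requests package is installed:

import requests

# Hypothetical smoke test against the interference server started above.
resp = requests.post(
    'http://127.0.0.1:1337/chat/completions',
    json={
        'model': 'gpt-3.5-turbo',
        'stream': False,
        'messages': [{'role': 'user', 'content': 'Say hello in one sentence.'}],
    },
)

# The server returns an OpenAI-style completion object; the reply text
# lives at choices[0].message.content.
print(resp.json()['choices'][0]['message']['content'])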
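When stream is true, the handler instead returns a text/event-stream response whose events are "data: {json chunk}" lines, one token per event every 0.1 s, with no terminating "data: [DONE]" sentinel. A sketch of consuming that stream (again hypothetical, under the same local-server assumption):

import json
import requests

# Hypothetical streaming client; URL and payload mirror the
# non-streaming example, with stream switched on.
with requests.post(
    'http://127.0.0.1:1337/chat/completions',
    json={
        'model': 'gpt-3.5-turbo',
        'stream': True,
        'messages': [{'role': 'user', 'content': 'Count to five.'}],
    },
    stream=True,
) as resp:
    for line in resp.iter_lines():
        # Each SSE event is a single "data: ..." line; blank lines separate events.
        if line.startswith(b'data: '):
            chunk = json.loads(line[len(b'data: '):])
            print(chunk['choices'][0]['delta'].get('content', ''), end='', flush=True)

Because the server never emits a [DONE] event, the loop simply ends when the connection closes.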