# Reconstructed from the patch payload: g4f/interference/__init__.py
"""Local OpenAI-compatible "interference" API served with Flask.

Exposes `/chat/completions` in the OpenAI wire format, backed by
`g4f.ChatCompletion`, so OpenAI client libraries can be pointed at
http://127.0.0.1:1337 instead of the real API.
"""
import json
import time
import random
import string

from typing import Any

from flask import Flask, request
from flask_cors import CORS

from g4f import ChatCompletion

app = Flask(__name__)
CORS(app)


@app.route('/')
def index():
    """Landing route: confirms the interference API is up and where it lives."""
    return 'interference api, url: http://127.0.0.1:1337'


@app.route('/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat-completions endpoint.

    Reads ``model``, ``stream`` and ``messages`` from the JSON request body
    and returns either a single completion object or, when ``stream`` is
    truthy, a ``text/event-stream`` of completion chunks mimicking the
    OpenAI API response shape.
    """
    # Parse the request body ONCE. The original called request.get_json()
    # three times, re-parsing the raw body for every field.
    body = request.get_json()
    model = body.get('model', 'gpt-3.5-turbo')
    stream = body.get('stream', False)
    messages = body.get('messages')

    response = ChatCompletion.create(model=model, stream=stream,
                                     messages=messages)

    # Random 28-char id mimicking OpenAI's `chatcmpl-...` identifiers.
    completion_id = ''.join(
        random.choices(string.ascii_letters + string.digits, k=28))
    completion_timestamp = int(time.time())

    if not stream:
        return {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'message': {
                        'role': 'assistant',
                        'content': response,
                    },
                    'finish_reason': 'stop',
                }
            ],
            'usage': {
                # Token accounting is not available from g4f providers.
                'prompt_tokens': None,
                'completion_tokens': None,
                'total_tokens': None,
            },
        }

    def streaming():
        # Emit one SSE `data:` event per generated chunk, then a final
        # event carrying an empty delta and finish_reason 'stop'.
        for chunk in response:
            completion_data = {
                'id': f'chatcmpl-{completion_id}',
                'object': 'chat.completion.chunk',
                'created': completion_timestamp,
                'model': model,
                'choices': [
                    {
                        'index': 0,
                        'delta': {
                            'content': chunk,
                        },
                        'finish_reason': None,
                    }
                ],
            }

            content = json.dumps(completion_data, separators=(',', ':'))
            yield f'data: {content}\n\n'
            # Pacing delay between chunks, preserved from the original.
            time.sleep(0.1)

        end_completion_data: dict[str, Any] = {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion.chunk',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'delta': {},
                    'finish_reason': 'stop',
                }
            ],
        }
        content = json.dumps(end_completion_data, separators=(',', ':'))
        yield f'data: {content}\n\n'

    return app.response_class(streaming(), mimetype='text/event-stream')


def run_interference():
    """Start the Flask development server on all interfaces, port 1337."""
    # NOTE(review): debug=True exposes the Werkzeug debugger (arbitrary code
    # execution) on a 0.0.0.0 bind — confirm this never runs outside local
    # development.
    app.run(host='0.0.0.0', port=1337, debug=True)


# Reconstructed from the patch payload: g4f/interference/run.py
# (its `from g4f.interference import run_interference` is elided here
# because the function is defined directly above in this flattened view)
if __name__ == '__main__':
    run_interference()