path: root/g4f/gui/server/backend.py
author     Commenter123321 <36051603+Commenter123321@users.noreply.github.com>  2023-10-10 14:15:12 +0200
committer  Commenter123321 <36051603+Commenter123321@users.noreply.github.com>  2023-10-10 14:15:12 +0200
commit     0e4297494dd46533738f786e4ac675541586177a (patch)
tree       6c2999e14ed831f7f37cc60991571cd040772918 /g4f/gui/server/backend.py
parent     add cool testing for gpt-3.5 and gpt-4 (diff)
parent     Update Aivvm.py (diff)
Diffstat (limited to 'g4f/gui/server/backend.py')
-rw-r--r--   g4f/gui/server/backend.py   19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 9a1dbbf8..8f4b529f 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,9 +1,11 @@
 import g4f
 from flask import request
-from threading import Thread
 from .internet import search
 from .config import special_instructions
+from .provider import get_provider
+
+g4f.logging = True
 
 
 class Backend_Api:
     def __init__(self, app) -> None:
@@ -31,22 +33,25 @@ class Backend_Api:
             conversation = request.json['meta']['content']['conversation']
             prompt = request.json['meta']['content']['parts'][0]
             model = request.json['model']
+            provider = get_provider(request.json.get('provider'))
             messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
 
             def stream():
-                answer = g4f.ChatCompletion.create(model = model,
-                    messages = messages, stream=True)
+                if provider:
+                    answer = g4f.ChatCompletion.create(model=model,
+                        provider=provider, messages=messages, stream=True)
+                else:
+                    answer = g4f.ChatCompletion.create(model=model,
+                        messages=messages, stream=True)
 
                 for token in answer:
                     yield token
 
-
+
             return self.app.response_class(stream(), mimetype='text/event-stream')
 
-        except Exception as e:
-            print(e)
+        except Exception as e:
             return {
-                '_token': 'anerroroccuredmf',
                 '_action': '_ask',
                 'success': False,
                 "error": f"an error occured {str(e)}"}, 400
\ No newline at end of file