author | H Lohaus <hlohaus@users.noreply.github.com> | 2024-01-02 01:10:31 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-01-02 01:10:31 +0100 |
commit | b1b8ed40a4e8c7c3490b1c6b7cf6b55d0776f366 (patch) | |
tree | 6cd09fb2eb4c144e28a82759a2a9a2fa7f30d311 /g4f/gui | |
parent | Merge pull request #1414 from hlohaus/lia (diff) | |
parent | Fix markdown replace (diff) | |
Diffstat (limited to 'g4f/gui')
-rw-r--r-- | g4f/gui/client/css/style.css | 7 |
-rw-r--r-- | g4f/gui/client/js/chat.v1.js | 67 |
-rw-r--r-- | g4f/gui/server/backend.py | 33 |
3 files changed, 68 insertions(+), 39 deletions(-)

```diff
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index b6d73650..e619b409 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -295,11 +295,12 @@ body {
     gap: 18px;
 }
 
-.message .content p,
-.message .content li,
-.message .content code {
+.message .content,
+.message .content a:link,
+.message .content a:visited{
     font-size: 15px;
     line-height: 1.3;
+    color: var(--colour-3);
 }
 .message .content pre {
     white-space: pre-wrap;
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 644ff77a..a335a3cc 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -20,7 +20,9 @@ message_input.addEventListener("focus", () => {
 });
 
 const markdown_render = (content) => {
-    return markdown.render(content).replace("<a href=", '<a target="_blank" href=').replace('<code>', '<code class="language-plaintext">')
+    return markdown.render(content)
+        .replaceAll("<a href=", '<a target="_blank" href=')
+        .replaceAll('<code>', '<code class="language-plaintext">')
 }
 
 const delete_conversations = async () => {
@@ -73,7 +75,7 @@ const ask_gpt = async () => {
     provider = document.getElementById("provider");
     model = document.getElementById("model");
     prompt_lock = true;
-    window.text = ``;
+    window.text = '';
 
     stop_generating.classList.remove(`stop_generating-hidden`);
 
@@ -88,10 +90,13 @@ const ask_gpt = async () => {
                 ${gpt_image} <i class="fa-regular fa-phone-arrow-down-left"></i>
             </div>
             <div class="content" id="gpt_${window.token}">
-                <div id="cursor"></div>
+                <div class="provider"></div>
+                <div class="content_inner"><div id="cursor"></div></div>
             </div>
         </div>
     `;
+    content = document.getElementById(`gpt_${window.token}`);
+    content_inner = content.querySelector('.content_inner');
 
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
@@ -123,28 +128,38 @@ const ask_gpt = async () => {
         await new Promise((r) => setTimeout(r, 1000));
         window.scrollTo(0, 0);
 
-        const reader = response.body.getReader();
+        const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+
+        error = provider = null;
         while (true) {
             const { value, done } = await reader.read();
             if (done) break;
-
-            chunk = new TextDecoder().decode(value);
-
-            text += chunk;
-
-            document.getElementById(`gpt_${window.token}`).innerHTML = markdown_render(text);
-            document.querySelectorAll(`code`).forEach((el) => {
-                hljs.highlightElement(el);
-            });
+            for (const line of value.split("\n")) {
+                if (!line) continue;
+                const message = JSON.parse(line);
+                if (message["type"] == "content") {
+                    text += message["content"];
+                } else if (message["type"] == "provider") {
+                    provider = message["provider"];
+                    content.querySelector('.provider').innerHTML =
+                        '<a href="' + provider.url + '" target="_blank">' + provider.name + "</a>"
+                } else if (message["type"] == "error") {
+                    error = message["error"];
+                }
+            }
+            if (error) {
+                console.error(error);
+                content_inner.innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
+            } else {
+                content_inner.innerHTML = markdown_render(text);
+                document.querySelectorAll('code').forEach((el) => {
+                    hljs.highlightElement(el);
+                });
+            }
 
             window.scrollTo(0, 0);
             message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
         }
-
-        if (text.includes(`G4F_ERROR`)) {
-            console.log("response", text);
-            document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
-        }
     }
     catch (e) {
         console.log(e);
@@ -153,13 +168,13 @@ const ask_gpt = async () => {
         if (e.name != `AbortError`) {
             text = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;
 
-            document.getElementById(`gpt_${window.token}`).innerHTML = text;
+            content_inner.innerHTML = text;
         } else {
-            document.getElementById(`gpt_${window.token}`).innerHTML += ` [aborted]`;
+            content_inner.innerHTML += ` [aborted]`;
             text += ` [aborted]`
         }
     }
-    add_message(window.conversation_id, "assistant", text);
+    add_message(window.conversation_id, "assistant", text, provider);
     message_box.scrollTop = message_box.scrollHeight;
     await remove_cancel_button();
     prompt_lock = false;
@@ -259,10 +274,11 @@ const load_conversation = async (conversation_id) => {
                 }
             </div>
             <div class="content">
-                ${item.role == "assistant"
-                    ? markdown_render(item.content)
-                    : item.content
+                ${item.provider
+                    ? '<div class="provider"><a href="' + item.provider.url + '" target="_blank">' + item.provider.name + '</a></div>'
+                    : ''
                 }
+                <div class="content_inner">${markdown_render(item.content)}</div>
             </div>
         </div>
     `;
@@ -323,12 +339,13 @@ const remove_last_message = async (conversation_id) => {
     );
 };
 
-const add_message = async (conversation_id, role, content) => {
+const add_message = async (conversation_id, role, content, provider) => {
     const conversation = await get_conversation(conversation_id);
 
     conversation.items.push({
         role: role,
        content: content,
+        provider: provider
     });
 
     localStorage.setItem(
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 105edb43..595f5aa1 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,10 +1,11 @@
+import logging
 import g4f
 from g4f.Provider import __providers__
 import json
 from flask import request, Flask
 
 from .internet import get_search_message
-from g4f import debug
+from g4f import debug, version
 
 debug.logging = True
 
@@ -53,8 +54,8 @@ class Backend_Api:
 
     def version(self):
         return {
-            "version": debug.get_version(),
-            "lastet_version": debug.get_latest_version(),
+            "version": version.utils.current_version,
+            "lastet_version": version.utils.latest_version,
         }
 
     def _gen_title(self):
@@ -65,7 +66,7 @@
     def _conversation(self):
         #jailbreak = request.json['jailbreak']
         messages = request.json['meta']['content']['parts']
-        if request.json['internet_access']:
+        if request.json.get('internet_access'):
             messages[-1]["content"] = get_search_message(messages[-1]["content"])
         model = request.json.get('model')
         model = model if model else g4f.models.default
@@ -74,20 +75,30 @@
 
         def try_response():
             try:
-                yield from g4f.ChatCompletion.create(
+                first = True
+                for chunk in g4f.ChatCompletion.create(
                     model=model,
                     provider=provider,
                     messages=messages,
                     stream=True,
                     ignore_stream_and_auth=True
-                )
+                ):
+                    if first:
+                        first = False
+                        yield json.dumps({
+                            'type' : 'provider',
+                            'provider': g4f.get_last_provider(True)
+                        }) + "\n"
+                    yield json.dumps({
+                        'type' : 'content',
+                        'content': chunk,
+                    }) + "\n"
+
             except Exception as e:
-                print(e)
+                logging.exception(e)
                 yield json.dumps({
-                    'code' : 'G4F_ERROR',
-                    '_action': '_ask',
-                    'success': False,
-                    'error' : f'{e.__class__.__name__}: {e}'
+                    'type' : 'error',
+                    'error': f'{e.__class__.__name__}: {e}'
                 })
 
         return self.app.response_class(try_response(), mimetype='text/event-stream')
\ No newline at end of file
```
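
With this commit the backend no longer streams raw text chunks: `try_response()` yields one JSON object per line over the `text/event-stream` response, each tagged with a `type` of `provider`, `content`, or `error`, and the frontend pipes the body through `TextDecoderStream` and `JSON.parse`s each line. Below is a minimal sketch of a standalone consumer of that stream. The base URL, route, and most payload fields are assumptions for illustration; only the `meta.content.parts`, `model`, and `internet_access` keys and the per-line message format come from this diff, so check the Flask routes registered in `backend.py` before relying on it.

```python
# Hypothetical client for the line-delimited JSON stream produced by try_response().
# BASE_URL and the route below are placeholders, not confirmed by this diff.
import json
import requests

BASE_URL = "http://localhost:8080"  # assumption: locally running GUI server

def stream_chat(prompt: str) -> str:
    payload = {
        "model": "",                 # falsy -> backend falls back to g4f.models.default
        "internet_access": False,    # read via request.json.get('internet_access')
        "meta": {"content": {"parts": [{"role": "user", "content": prompt}]}},
    }
    text = ""
    # stream=True keeps the connection open so lines arrive as the backend yields them
    with requests.post(f"{BASE_URL}/backend-api/v2/conversation", json=payload, stream=True) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines(decode_unicode=True):
            if not line:
                continue
            message = json.loads(line)
            if message["type"] == "provider":
                # the provider object carries name/url, as used by chat.v1.js
                print(f"[provider] {message['provider']['name']}")
            elif message["type"] == "content":
                text += message["content"]
            elif message["type"] == "error":
                raise RuntimeError(message["error"])
    return text

if __name__ == "__main__":
    print(stream_chat("Hello!"))
```

Because every event is a complete JSON document terminated by a newline, no SSE parser is needed: splitting on line breaks, exactly as the updated `chat.v1.js` does, is sufficient.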