From bf41cfc5d1d7ef6e2e8dde6237e54757c22b66b3 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sun, 10 Dec 2023 21:46:11 +0100
Subject: Add G4F_PROXY environment
 Add regenerate button in gui

---
 g4f/__init__.py                |   6 +-
 g4f/gui/client/css/style.css   |  18 ++---
 g4f/gui/client/html/index.html |   6 ++
 g4f/gui/client/js/chat.v1.js   | 151 +++++++++++++++++++----------------------
 g4f/gui/server/backend.py      |  52 +++++++-------
 5 files changed, 113 insertions(+), 120 deletions(-)

(limited to 'g4f')

diff --git a/g4f/__init__.py b/g4f/__init__.py
index 92bce194..c7909bd4 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -96,6 +96,10 @@ class ChatCompletion:
 
         if auth:
             kwargs['auth'] = auth
+
+        proxy = os.environ.get("G4F_PROXY")
+        if proxy and "proxy" not in kwargs:
+            kwargs['proxy'] = proxy
 
         result = provider.create_completion(model.name, messages, stream, **kwargs)
         return result if stream else ''.join(result)
@@ -112,7 +116,7 @@ class ChatCompletion:
         if stream:
             if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
                 return await provider.create_async_generator(model.name, messages, **kwargs)
-            raise ValueError(f'{provider.__name__} does not support "stream" argument')
+            raise ValueError(f'{provider.__name__} does not support "stream" argument in "create_async"')
 
         return await provider.create_async(model.name, messages, **kwargs)
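Usage note: with the hunk above, ChatCompletion.create falls back to the G4F_PROXY environment variable whenever no explicit proxy keyword is passed, and an explicit proxy argument still takes precedence. A minimal sketch of how a caller might rely on this, assuming the patched package is installed; the proxy URLs and model name are placeholders:

    import os
    import g4f

    # Placeholder proxy URL: point this at your own HTTP proxy.
    os.environ["G4F_PROXY"] = "http://127.0.0.1:8080"

    # No proxy kwarg is given, so create() injects the G4F_PROXY value itself.
    print(g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    ))

    # Passing proxy explicitly still wins, because the fallback only fires
    # when "proxy" is not already present in kwargs.
    print(g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        proxy="http://10.0.0.5:3128",  # placeholder
    ))
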
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index 254a4b15..b6d73650 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -301,6 +301,9 @@ body {
     font-size: 15px;
     line-height: 1.3;
 }
+.message .content pre {
+    white-space: pre-wrap;
+}
 
 .message .user i {
     position: absolute;
@@ -338,19 +341,15 @@ body {
     font-size: 14px;
 }
 
-
-.stop_generating {
+.stop_generating, .regenerate {
     position: absolute;
-    bottom: 118px;
-    /* left: 10px;
-    bottom: 125px;
-    right: 8px; */
+    bottom: 158px;
     left: 50%;
     transform: translateX(-50%);
     z-index: 1000000;
 }
 
-.stop_generating button {
+.stop_generating button, .regenerate button{
     backdrop-filter: blur(20px);
     -webkit-backdrop-filter: blur(20px);
     background-color: var(--blur-bg);
@@ -380,11 +379,8 @@ body {
     }
 }
 
-.stop_generating-hiding button {
+.stop_generating-hidden #cancelButton, .regenerate-hidden #regenerateButton {
     animation: hide_popup 0.4s;
-}
-
-.stop_generating-hidden button {
     display: none;
 }
 
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index 53c028d7..da7aeefb 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -101,6 +101,12 @@
+
+
+
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 2a1bdd73..64c52275 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -5,15 +5,12 @@ const message_input = document.getElementById(`message-input`);
 const box_conversations = document.querySelector(`.top`);
 const spinner = box_conversations.querySelector(".spinner");
 const stop_generating = document.querySelector(`.stop_generating`);
+const regenerate = document.querySelector(`.regenerate`);
 const send_button = document.querySelector(`#send-button`);
 let prompt_lock = false;
 
 hljs.addPlugin(new CopyButtonPlugin());
 
-const format = (text) => {
-    return text.replace(/(?:\r\n|\r|\n)/g, "<br>");
-};
-
 message_input.addEventListener("blur", () => {
     window.scrollTo(0, 0);
 });
@@ -22,6 +19,10 @@ message_input.addEventListener("focus", () => {
     document.documentElement.scrollTop = document.documentElement.scrollHeight;
 });
 
+const markdown_render = (content) => {
+    return markdown.render(content).replace("', '')
+}
+
 const delete_conversations = async () => {
     localStorage.clear();
     await new_conversation();
@@ -30,38 +31,25 @@ const handle_ask = async () => {
     message_input.style.height = `80px`;
     message_input.focus();
-
-    let txtMsgs = [];
-    const divTags = document.getElementsByClassName("message");
-    for(let i=0;i 0) {
-        message_input.value = ``;
-        await ask_gpt(txtMsgs);
+        message_input.value = '';
+        await add_conversation(window.conversation_id, message);
+        await add_message(window.conversation_id, "user", message);
+        window.token = message_id();
+        message_box.innerHTML += `
+
+
+            ${user_image}
+
+
+
+            ${markdown_render(message)}
+
+
+        `;
+        await ask_gpt();
     }
 };
@@ -74,13 +62,10 @@ const remove_cancel_button = async () => {
     }, 300);
 };
 
-const ask_gpt = async (txtMsgs) => {
+const ask_gpt = async () => {
+    regenerate.classList.add(`regenerate-hidden`);
+    messages = await get_messages(window.conversation_id);
     try {
-        message_input.value = ``;
-        message_input.innerHTML = ``;
-        message_input.innerText = ``;
-
-        add_conversation(window.conversation_id, txtMsgs[0].content);
         window.scrollTo(0, 0);
         window.controller = new AbortController();
 
@@ -89,22 +74,9 @@ const ask_gpt = async (txtMsgs) => {
         model = document.getElementById("model");
         prompt_lock = true;
         window.text = ``;
-        window.token = message_id();
 
         stop_generating.classList.remove(`stop_generating-hidden`);
 
-        message_box.innerHTML += `
-
-
-            ${user_image}
-
-
-
-            ${format(txtMsgs[txtMsgs.length-1].content)}
-
-
-        `;
-
         message_box.scrollTop = message_box.scrollHeight;
         window.scrollTo(0, 0);
         await new Promise((r) => setTimeout(r, 500));
@@ -138,14 +110,13 @@ const ask_gpt = async (txtMsgs) => {
                 action: `_ask`,
                 model: model.options[model.selectedIndex].value,
                 jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
+                internet_access: document.getElementById(`switch`).checked,
                 provider: provider.options[provider.selectedIndex].value,
                 meta: {
                     id: window.token,
                     content: {
-                        conversation: await get_conversation(window.conversation_id),
-                        internet_access: document.getElementById(`switch`).checked,
                         content_type: `text`,
-                        parts: txtMsgs,
+                        parts: messages,
                     },
                 },
             }),
@@ -161,7 +132,7 @@ const ask_gpt = async (txtMsgs) => {
 
                 text += chunk;
 
-                document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text).replace("
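For reference, after this patch the JSON body that ask_gpt() posts to the GUI backend carries internet_access at the top level and sends the messages loaded via get_messages() as meta.content.parts, replacing the old DOM-scraped txtMsgs and the separate conversation field. A rough sketch of that shape as a Python dict; the field names follow the hunk above, while every concrete value is illustrative only:

    import json

    # Illustrative payload shape for the `_ask` request built in ask_gpt().
    payload = {
        "action": "_ask",
        "model": "gpt-3.5-turbo",          # selected model in the GUI (example value)
        "jailbreak": "default",            # selected jailbreak option (example value)
        "internet_access": False,          # web-access switch, now a top-level field
        "provider": "Bing",                # selected provider (example value)
        "meta": {
            "id": "1702241171123",         # window.token, produced by message_id()
            "content": {
                "content_type": "text",
                "parts": [                 # conversation messages from get_messages()
                    {"role": "user", "content": "Hello"},
                ],
            },
        },
    }
    print(json.dumps(payload, indent=4))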