From 42805ac7894d62996d68e4038c37900b26a14a9e Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Sun, 26 Jan 2025 16:32:32 +0100
Subject: Add legacy port to docker-compose files

Update demo model list
Disable upload cookies in demo
Track usage in demo mode
Add messages without asking the AI
Add hint for browser usage in provider list
Add qwen2 prompt template to HuggingFace provider
Trim automatic messages in HuggingFaceAPI
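
The new /backend-api/v2/usage endpoint appends one JSON line per request
to a per-day .jsonl file under the cookies directory. A minimal client
sketch (field names taken from chat.v1.js; URL and port assume the
docker-compose defaults):

    import requests

    requests.post(
        "http://localhost:8080/backend-api/v2/usage",
        json={
            "user": "demo",                # set by demo.html after login
            "model": "gpt-4o",
            "provider": "PollinationsAI",
            "prompt_tokens": 12,
            "completion_tokens": 30,
            "total_tokens": 42,            # prompt + completion
        },
        timeout=10,
    )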
---
docker-compose-slim.yml | 3 +-
docker-compose.yml | 1 +
g4f/Provider/Cloudflare.py | 3 +-
g4f/Provider/Copilot.py | 1 -
g4f/Provider/Pi.py | 6 +-
g4f/Provider/You.py | 2 +
g4f/Provider/mini_max/HailuoAI.py | 1 +
g4f/Provider/needs_auth/CopilotAccount.py | 1 +
g4f/Provider/needs_auth/Gemini.py | 1 +
g4f/Provider/needs_auth/HuggingChat.py | 6 +-
g4f/Provider/needs_auth/HuggingFace.py | 12 ++
g4f/Provider/needs_auth/HuggingFaceAPI.py | 17 ++-
g4f/Provider/needs_auth/MicrosoftDesigner.py | 1 +
g4f/Provider/needs_auth/OpenaiChat.py | 1 +
g4f/api/__init__.py | 2 +-
g4f/cli.py | 4 +-
g4f/gui/client/demo.html | 1 +
g4f/gui/client/index.html | 14 +-
g4f/gui/client/static/js/chat.v1.js | 184 +++++++++++++++++++--------
g4f/gui/server/api.py | 1 +
g4f/gui/server/backend_api.py | 71 ++++++++---
g4f/models.py | 12 +-
22 files changed, 252 insertions(+), 93 deletions(-)
diff --git a/docker-compose-slim.yml b/docker-compose-slim.yml
index 7a02810f..39754f72 100644
--- a/docker-compose-slim.yml
+++ b/docker-compose-slim.yml
@@ -11,4 +11,5 @@ services:
volumes:
- .:/app
ports:
- - '8080:8080'
\ No newline at end of file
+ - '8080:8080'
+ - '1337:8080'
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index 1984bc0b..8d9dd722 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -11,6 +11,7 @@ services:
- .:/app
ports:
- '8080:8080'
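+      # 1337 is the legacy g4f API port, kept for older clients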
+ - '1337:8080'
- '7900:7900'
environment:
- OLLAMA_HOST=host.docker.internal
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
index e6f0dab3..89ee4193 100644
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -15,9 +15,10 @@ from ..errors import ResponseStatusError, ModelNotFoundError
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
label = "Cloudflare AI"
url = "https://playground.ai.cloudflare.com"
+ working = True
+ use_nodriver = True
api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
models_url = "https://playground.ai.cloudflare.com/api/models"
- working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index bdbbcc4b..4a828c96 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -69,7 +69,6 @@ class Copilot(AbstractProvider, ProviderModelMixin):
conversation: BaseConversation = None,
return_conversation: bool = False,
api_key: str = None,
- web_search: bool = False,
**kwargs
) -> CreateResult:
if not has_curl_cffi:
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 6aabe7b1..8b07c0fe 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -7,9 +7,10 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
class Pi(AsyncGeneratorProvider):
- url = "https://pi.ai/talk"
- working = True
+ url = "https://pi.ai/talk"
+ working = True
+ use_nodriver = True
supports_stream = True
default_model = "pi"
models = [default_model]
_headers: dict = None
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 71b92837..7e5df0fd 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -75,6 +75,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
try:
cookies = get_cookies(".you.com")
except MissingRequirementsError:
+ pass
+ if not cookies or "afUserId" not in cookies:
browser = await get_nodriver(proxy=proxy)
try:
page = await browser.get(cls.url)
diff --git a/g4f/Provider/mini_max/HailuoAI.py b/g4f/Provider/mini_max/HailuoAI.py
index c9b3e49c..d8c145a1 100644
--- a/g4f/Provider/mini_max/HailuoAI.py
+++ b/g4f/Provider/mini_max/HailuoAI.py
@@ -22,6 +22,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin):
label = "Hailuo AI"
url = "https://www.hailuo.ai"
working = True
+ use_nodriver = True
supports_stream = True
default_model = "MiniMax"
diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py
index 6d7964d1..527432c3 100644
--- a/g4f/Provider/needs_auth/CopilotAccount.py
+++ b/g4f/Provider/needs_auth/CopilotAccount.py
@@ -4,6 +4,7 @@ from ..Copilot import Copilot
class CopilotAccount(Copilot):
needs_auth = True
+ use_nodriver = True
parent = "Copilot"
default_model = "Copilot"
default_vision_model = default_model
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 0f563b32..e6d71255 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -58,6 +58,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
needs_auth = True
working = True
+ use_nodriver = True
default_model = 'gemini'
default_image_model = default_model
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index bf5abb8f..e72612b9 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -32,6 +32,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
+ use_nodriver = True
supports_stream = True
needs_auth = True
@@ -68,10 +69,11 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
### Image ###
"flux-dev": "black-forest-labs/FLUX.1-dev",
"flux-schnell": "black-forest-labs/FLUX.1-schnell",
- ### API ###
+ ### Used in other providers ###
"qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct",
"gemma-2-27b": "google/gemma-2-27b-it",
- "qvq-72b": "Qwen/QVQ-72B-Preview"
+ "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
+ "qvq-72b": "Qwen/QVQ-72B-Preview",
}
@classmethod
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index b9ef5418..07e110b3 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -102,6 +102,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
if payload is None:
async with session.get(f"https://huggingface.co/api/models/{model}") as response:
+ if response.status == 404:
+ raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
await raise_for_status(response)
model_data = await response.json()
model_type = None
@@ -172,6 +174,14 @@ def format_prompt_qwen(messages: Messages, do_continue: bool = False) -> str:
return prompt[:-len("\n<|im_end|>\n")]
return prompt
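+# Chat template for models whose eos token is <|end▁of▁sentence|>:
+# each message becomes <|Role|>content<|end▁of▁sentence|>, and a trailing
+# <|Assistant|> cues the model to answer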
+def format_prompt_qwen2(messages: Messages, do_continue: bool = False) -> str:
+    prompt = "".join([
+        f"<|{message['role'].capitalize()}|>{message['content']}<|end▁of▁sentence|>" for message in messages
+    ]) + ("" if do_continue else "<|Assistant|>")
+    if do_continue:
+        return prompt[:-len("<|end▁of▁sentence|>")]
+    return prompt
+
def format_prompt_llama(messages: Messages, do_continue: bool = False) -> str:
prompt = "<|begin_of_text|>" + "".join([
f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}\n<|eot_id|>\n" for message in messages
@@ -199,6 +209,8 @@ def get_inputs(messages: Messages, model_data: dict, model_type: str, do_continu
inputs = format_prompt_custom(messages, eos_token, do_continue)
elif eos_token == "<|im_end|>":
inputs = format_prompt_qwen(messages, do_continue)
+ elif "content" in eos_token and eos_token["content"] == "\u003C|end▁of▁sentence|\u003E":
+ inputs = format_prompt_qwen2(messages, do_continue)
elif eos_token == "<|eot_id|>":
inputs = format_prompt_llama(messages, do_continue)
else:
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
index dc783f4e..59c8f6ee 100644
--- a/g4f/Provider/needs_auth/HuggingFaceAPI.py
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -3,6 +3,7 @@ from __future__ import annotations
from .OpenaiTemplate import OpenaiTemplate
from .HuggingChat import HuggingChat
from ...providers.types import Messages
+from ... import debug
class HuggingFaceAPI(OpenaiTemplate):
label = "HuggingFace (Inference API)"
@@ -31,6 +32,7 @@ class HuggingFaceAPI(OpenaiTemplate):
messages: Messages,
api_base: str = None,
max_tokens: int = 2048,
+        max_inputs_length: int = 10000,
**kwargs
):
if api_base is None:
@@ -38,5 +40,18 @@ class HuggingFaceAPI(OpenaiTemplate):
if model in cls.model_aliases:
model_name = cls.model_aliases[model]
api_base = f"https://api-inference.huggingface.co/models/{model_name}/v1"
+        start = calculate_length(messages)
+        if start > max_inputs_length:
+            if len(messages) > 6:
+                messages = messages[:3] + messages[-3:]
+            if calculate_length(messages) > max_inputs_length:
+                if len(messages) > 2:
+                    messages = [m for m in messages if m["role"] == "system"] + messages[-1:]
+                if len(messages) > 1 and calculate_length(messages) > max_inputs_length:
+                    messages = [messages[-1]]
+            debug.log(f"Messages trimmed from: {start} to: {calculate_length(messages)}")
async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs):
- yield chunk
\ No newline at end of file
+ yield chunk
+
+def calculate_length(messages: Messages) -> int:
+    return sum([len(message["content"]) + 16 for message in messages])
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/MicrosoftDesigner.py b/g4f/Provider/needs_auth/MicrosoftDesigner.py
index 2a89e8be..c413c19b 100644
--- a/g4f/Provider/needs_auth/MicrosoftDesigner.py
+++ b/g4f/Provider/needs_auth/MicrosoftDesigner.py
@@ -21,6 +21,7 @@ class MicrosoftDesigner(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Designer"
url = "https://designer.microsoft.com"
working = True
+ use_nodriver = True
needs_auth = True
default_image_model = "dall-e-3"
image_models = [default_image_model, "1024x1024", "1024x1792", "1792x1024"]
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index a8a4018e..29ada409 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -91,6 +91,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
+ use_nodriver = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 896367e4..eb69c656 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -169,7 +169,7 @@ class Api:
except HTTPException:
user_g4f_api_key = None
path = request.url.path
- if path.startswith("/v1"):
+ if path.startswith("/v1") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'):
if user_g4f_api_key is None:
return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
diff --git a/g4f/cli.py b/g4f/cli.py
index af872489..5bb5c460 100644
--- a/g4f/cli.py
+++ b/g4f/cli.py
@@ -12,7 +12,7 @@ def get_api_parser():
api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
api_parser.add_argument("--port", "-p", default=None, help="Change the port of the server.")
api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
- api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Add gui to the api.")
+ api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Start also the gui.")
api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)")
@@ -28,7 +28,7 @@ def get_api_parser():
api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers],
default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)")
api_parser.add_argument("--reload", action="store_true", help="Enable reloading.")
- api_parser.add_argument("--demo", action="store_true", help="Enable demo modus.")
+ api_parser.add_argument("--demo", action="store_true", help="Enable demo mode.")
return api_parser
def main():
diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html
index dd0c078f..ff5b993b 100644
--- a/g4f/gui/client/demo.html
+++ b/g4f/gui/client/demo.html
@@ -215,6 +215,7 @@
}
localStorage.setItem("HuggingFace-api_key", accessToken);
localStorage.setItem("HuggingFace-user", JSON.stringify(user));
+ localStorage.setItem("user", user.name);
location.href = "/chat/";
}
input.addEventListener("input", () => check_access_token());
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 46b348d1..fe6eda18 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -142,6 +142,11 @@
+                    <div class="field">
+                        <span class="label">Track usage</span>
+                        <input type="checkbox" id="track_usage"/>
+                        <label for="track_usage" class="toogle"></label>
+                    </div>
@@ -184,6 +189,12 @@
Show log
+
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index dfedb2aa..9f43d499 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -361,7 +361,7 @@ const delete_conversations = async () => {
await new_conversation();
};
-const handle_ask = async () => {
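+// When do_ask_gpt is false, the message is only added to the conversation
+// and no model response is requested.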
+const handle_ask = async (do_ask_gpt = true) => {
messageInput.style.height = "82px";
messageInput.focus();
await scroll_to_bottom();
@@ -377,17 +377,19 @@ const handle_ask = async () => {
let message_index = await add_message(window.conversation_id, "user", message);
let message_id = get_message_id();
- if (imageInput.dataset.objects) {
- imageInput.dataset.objects.split(" ").forEach((object)=>URL.revokeObjectURL(object))
- delete imageInput.dataset.objects;
- }
- const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
- images = [];
- if (input.files.length > 0) {
- for (const file of input.files) {
- images.push(URL.createObjectURL(file));
+ let images = [];
+ if (do_ask_gpt) {
+ if (imageInput.dataset.objects) {
+ imageInput.dataset.objects.split(" ").forEach((object)=>URL.revokeObjectURL(object))
+ delete imageInput.dataset.objects;
+ }
+ const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
+ if (input.files.length > 0) {
+ for (const file of input.files) {
+ images.push(URL.createObjectURL(file));
+ }
+ imageInput.dataset.objects = images.join(" ");
}
- imageInput.dataset.objects = images.join(" ");
}
message_box.innerHTML += `
@@ -408,18 +410,21 @@ const handle_ask = async () => {
`;
highlight(message_box);
-
- const all_pinned = document.querySelectorAll(".buttons button.pinned")
- if (all_pinned.length > 0) {
- all_pinned.forEach((el, idx) => ask_gpt(
- idx == 0 ? message_id : get_message_id(),
- -1,
- idx != 0,
- el.dataset.provider,
- el.dataset.model
- ));
+ if (do_ask_gpt) {
+ const all_pinned = document.querySelectorAll(".buttons button.pinned")
+ if (all_pinned.length > 0) {
+ all_pinned.forEach((el, idx) => ask_gpt(
+ idx == 0 ? message_id : get_message_id(),
+ -1,
+ idx != 0,
+ el.dataset.provider,
+ el.dataset.model
+ ));
+ } else {
+ await ask_gpt(message_id);
+ }
} else {
- await ask_gpt(message_id);
+ await lazy_scroll_to_bottom();
}
};
@@ -450,11 +455,14 @@ stop_generating.addEventListener("click", async () => {
for (key in controller_storage) {
if (!controller_storage[key].signal.aborted) {
console.log(`aborted ${window.conversation_id} #${key}`);
- controller_storage[key].abort();
- let message = message_storage[key];
- if (message) {
- content_storage[key].inner.innerHTML += " [aborted]";
- message_storage[key] += " [aborted]";
+ try {
+ controller_storage[key].abort();
+ } finally {
+ let message = message_storage[key];
+ if (message) {
+ content_storage[key].inner.innerHTML += " [aborted]";
+ message_storage[key] += " [aborted]";
+ }
}
}
}
@@ -486,7 +494,7 @@ const prepare_messages = (messages, message_index = -1, do_continue = false, do_
}
}
}
- // Combine messages with same role
+ // Combine assistant messages
let last_message;
let new_messages = [];
messages.forEach((message) => {
@@ -503,21 +511,25 @@ const prepare_messages = (messages, message_index = -1, do_continue = false, do_
messages = new_messages;
// Insert system prompt as first message
- new_messages = [];
+ let final_messages = [];
if (chatPrompt?.value) {
- new_messages.push({
+ final_messages.push({
"role": "system",
"content": chatPrompt.value
});
}
- // Remove history, if it's selected
- if (document.getElementById('history')?.checked && do_filter) {
- if (message_index == null) {
- messages = [messages.pop(), messages.pop()];
- } else {
- messages = [messages.pop()];
+ // Remove history, only add new user messages
+ let filtered_messages = [];
+    // message_index is null when counting total tokens
+ if (document.getElementById('history')?.checked && do_filter && message_index != null) {
+ while (last_message = messages.pop()) {
+ if (last_message["role"] == "user") {
+ filtered_messages.push(last_message);
+ break;
+ }
}
+ messages = filtered_messages.reverse();
}
messages.forEach((new_message, i) => {
@@ -845,28 +857,63 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
}
if (message_storage[message_id]) {
const message_provider = message_id in provider_storage ? provider_storage[message_id] : null;
+ let usage;
+ if (usage_storage[message_id]) {
+ usage = usage_storage[message_id];
+ delete usage_storage[message_id];
+ }
+        // Calculate usage if we have no usage result yet
+ if (document.getElementById("track_usage").checked && !usage && window.GPTTokenizer_cl100k_base) {
+ const prompt_token_model = model?.startsWith("gpt-3") ? "gpt-3.5-turbo" : "gpt-4"
+ const prompt_tokens = GPTTokenizer_cl100k_base?.encodeChat(messages, prompt_token_model).length;
+ const completion_tokens = count_tokens(message_provider?.model, message_storage[message_id]);
+ usage = {
+ model: message_provider?.model,
+ provider: message_provider?.name,
+ prompt_tokens: prompt_tokens,
+ completion_tokens: completion_tokens,
+ total_tokens: prompt_tokens + completion_tokens
+ }
+ }
+        // It is not a regeneration if it is the first response to a new question
+ if (regenerate && message_index == -1) {
+ let conversation = await get_conversation(window.conversation_id);
+ regenerate = conversation.items[conversation.items.length-1]["role"] != "user";
+ }
+ // Create final message content
+ const final_message = message_storage[message_id]
+ + (error_storage[message_id] ? " [error]" : "")
+ + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : "")
+ // Save message in local storage
await add_message(
window.conversation_id,
"assistant",
- message_storage[message_id] + (error_storage[message_id] ? " [error]" : "") + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : ""),
+ final_message,
message_provider,
message_index,
synthesize_storage[message_id],
regenerate,
title_storage[message_id],
finish_storage[message_id],
- usage_storage[message_id],
+ usage,
reasoning_storage[message_id],
action=="continue"
);
delete message_storage[message_id];
+ // Send usage to the server
+ if (document.getElementById("track_usage").checked) {
+ const user = localStorage.getItem("user");
+ if (user) {
+ usage = {user: user, ...usage};
+ }
+ api("usage", usage);
+ }
}
+ // Update controller storage
if (controller_storage[message_id]) {
- if (!controller_storage[message_id].signal.aborted) {
- controller_storage[message_id].abort();
- }
delete controller_storage[message_id];
}
+ // Reload conversation if no error
if (!error_storage[message_id]) {
await safe_load_conversation(window.conversation_id, scroll);
}
@@ -1485,22 +1532,27 @@ async function hide_sidebar() {
settings.classList.add("hidden");
chat.classList.remove("hidden");
log_storage.classList.add("hidden");
+ await hide_settings();
if (window.location.pathname == "/menu/" || window.location.pathname == "/settings/") {
history.back();
}
}
-window.addEventListener('popstate', hide_sidebar, false);
-
-sidebar_button.addEventListener("click", async () => {
+async function hide_settings() {
settings.classList.add("hidden");
    let provider_forms = document.querySelectorAll(".provider_forms form");
Array.from(provider_forms).forEach((form) => form.classList.add("hidden"));
+}
+
+window.addEventListener('popstate', hide_sidebar, false);
+
+sidebar_button.addEventListener("click", async () => {
if (sidebar.classList.contains("shown")) {
await hide_sidebar();
} else {
sidebar.classList.add("shown");
sidebar_button.classList.add("rotated");
+ await hide_settings();
add_url_to_history("/menu/");
}
window.scrollTo(0, 0);
@@ -1797,13 +1849,16 @@ async function on_api() {
messageInput.style.height = messageInput.scrollHeight + "px";
}
});
- sendButton.addEventListener(`click`, async () => {
+ sendButton.querySelector(".fa-paper-plane").addEventListener(`click`, async () => {
console.log("clicked send");
if (prompt_lock) return;
prompt_lock = true;
setTimeout(()=>prompt_lock=false, 3000);
await handle_ask();
});
+ sendButton.querySelector(".fa-square-plus").addEventListener(`click`, async () => {
+ await handle_ask(false);
+ });
messageInput.focus();
let provider_options = [];
models = await api("models");
@@ -1817,13 +1872,16 @@ async function on_api() {
});
let login_urls;
if (is_demo) {
- if (!localStorage.getItem("HuggingFace-api_key")) {
+ if (!localStorage.getItem("user")) {
location.href = "/";
return;
}
-        providerSelect.innerHTML = ''
-        document.getElementById("pin")?.remove();
+        providerSelect.innerHTML = ''
+ document.getElementById("pin").disabled = true;
document.getElementById("refine")?.parentElement.remove();
+ const track_usage = document.getElementById("track_usage");
+ track_usage.checked = true;
+ track_usage.disabled = true;
Array.from(modelSelect.querySelectorAll(':not([data-providers])')).forEach((option)=>{
if (!option.disabled && option.value) {
option.remove();
@@ -1844,8 +1902,8 @@ async function on_api() {
option.text = provider.label
+ (provider.vision ? " (Image Upload)" : "")
+ (provider.image ? " (Image Generation)" : "")
- + (provider.webdriver ? " (Webdriver)" : "")
- + (provider.auth ? " (Auth)" : "");
+ + (provider.nodriver ? " (Browser)" : "")
+ + (!provider.nodriver && provider.auth ? " (Auth)" : "");
if (provider.parent)
option.dataset.parent = provider.parent;
providerSelect.appendChild(option);
@@ -2151,6 +2209,8 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
return pywebview.api[`get_${ressource}`]();
}
const headers = {};
+ const url = new URL(`/backend-api/v2/${ressource}`, window?.location || "http://localhost:8080");
+ let response;
if (ressource == "models" && args) {
api_key = get_api_key_by_provider(args);
if (api_key) {
@@ -2161,9 +2221,7 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
headers.x_api_base = api_base;
}
ressource = `${ressource}/${args}`;
- }
- const url = new URL(`/backend-api/v2/${ressource}`, window?.location || "http://localhost:8080");
- if (ressource == "conversation") {
+ } else if (ressource == "conversation") {
let body = JSON.stringify(args);
headers.accept = 'text/event-stream';
if (files !== null) {
@@ -2210,8 +2268,17 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
await finish_message();
return;
}
+ } else if (args) {
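+        // POST any other resource (e.g. "usage") as a JSON body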
+ headers['content-type'] = 'application/json';
+ response = await fetch(url, {
+ method: 'POST',
+ headers: headers,
+ body: JSON.stringify(args),
+ });
+ }
+ if (!response) {
+ response = await fetch(url, {headers: headers});
}
- response = await fetch(url, {headers: headers});
if (response.status != 200) {
console.error(response);
}
@@ -2349,7 +2416,8 @@ function save_storage() {
}
}
-function add_memory() {
+function import_memory() {
+ hide_sidebar();
let conversations = [];
for (let i = 0; i < appStorage.length; i++) {
if (appStorage.key(i).startsWith("conversation:")) {
@@ -2357,7 +2425,8 @@ function add_memory() {
conversations.push(JSON.parse(conversation));
}
}
- conversations.sort((a, b) => (b.updated||0)-(a.updated||0));
+ conversations.sort((a, b) => (a.updated||0)-(b.updated||0));
+ let count = 0;
conversations.forEach(async (conversation)=>{
let body = JSON.stringify(conversation);
response = await fetch("/backend-api/v2/memory", {
@@ -2365,6 +2434,9 @@ function add_memory() {
body: body,
headers: {"content-type": "application/json"}
});
+ const result = await response.json();
+ count += result.count;
+        inputCount.innerText = `${count} messages imported`;
});
}
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 748a3280..a21f7128 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -65,6 +65,7 @@ class Api:
"parent": getattr(provider, "parent", None),
"image": bool(getattr(provider, "image_models", False)),
"vision": getattr(provider, "default_vision_model", None) is not None,
+ "nodriver": getattr(provider, "use_nodriver", False),
"auth": provider.needs_auth,
"login_url": getattr(provider, "login_url", None),
} for provider in __providers__ if provider.working]
diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py
index 9c2d5261..8d9b4f2d 100644
--- a/g4f/gui/server/backend_api.py
+++ b/g4f/gui/server/backend_api.py
@@ -7,6 +7,7 @@ import logging
import asyncio
import shutil
import random
+import datetime
from flask import Flask, Response, request, jsonify
from typing import Generator
from pathlib import Path
@@ -61,7 +62,7 @@ class Backend_Api(Api):
"""
self.app: Flask = app
- if has_flask_limiter:
+ if has_flask_limiter and app.demo:
limiter = Limiter(
get_remote_address,
app=app,
@@ -71,8 +72,8 @@ class Backend_Api(Api):
else:
class Dummy():
def limit(self, value):
- def callback(v):
- return v
+ def callback(value):
+ return value
return callback
limiter = Dummy()
@@ -111,7 +112,7 @@ class Backend_Api(Api):
for model, providers in models.demo_models.values()]
@app.route('/backend-api/v2/conversation', methods=['POST'])
- @limiter.limit("4 per minute")
+ @limiter.limit("4 per minute") # 1 request in 15 seconds
def handle_conversation():
"""
Handles conversation requests and streams responses back.
@@ -150,6 +151,41 @@ class Backend_Api(Api):
mimetype='text/event-stream'
)
+ @app.route('/backend-api/v2/usage', methods=['POST'])
+ def add_usage():
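+            # Append one JSON line per report to a per-day usage log,
+            # e.g. <cookies_dir>/.usage/2025-01-26.jsonl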
+ cache_dir = Path(get_cookies_dir()) / ".usage"
+ cache_file = cache_dir / f"{datetime.date.today()}.jsonl"
+ cache_dir.mkdir(parents=True, exist_ok=True)
+ with cache_file.open("a" if cache_file.exists() else "w") as f:
+ f.write(f"{json.dumps(request.json)}\n")
+ return {}
+
+ @app.route('/backend-api/v2/memory', methods=['POST'])
+ def add_memory():
+ api_key = request.headers.get("x_api_key")
+ json_data = request.json
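+            # mem0 is imported lazily, so the dependency is only needed
+            # when the memory endpoints are used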
+ from mem0 import MemoryClient
+ client = MemoryClient(api_key=api_key)
+ client.add(
+ [{"role": item["role"], "content": item["content"]} for item in json_data.get("items")],
+ user_id="user",
+ metadata={"conversation_id": json_data.get("id"), "title": json_data.get("title")}
+ )
+ return {"count": len(json_data.get("items"))}
+
+        @app.route('/backend-api/v2/memory/<user_id>', methods=['GET'])
+ def read_memory(user_id: str):
+ api_key = request.headers.get("x_api_key")
+ from mem0 import MemoryClient
+ client = MemoryClient(api_key=api_key)
+            if request.args.get("search"):
+                return client.search(
+                    request.args.get("search"),
+                    user_id=user_id,
+                    metadata=json.loads(request.args["metadata"]) if request.args.get("metadata") else None
+                )
+ )
+ return {}
+
self.routes = {
'/backend-api/v2/version': {
'function': self.get_version,
@@ -159,10 +195,6 @@ class Backend_Api(Api):
'function': self.handle_synthesize,
'methods': ['GET']
},
- '/backend-api/v2/upload_cookies': {
- 'function': self.upload_cookies,
- 'methods': ['POST']
- },
'/images/': {
'function': self.serve_images,
'methods': ['GET']
@@ -301,17 +333,18 @@ class Backend_Api(Api):
except Exception as e:
return jsonify({"error": {"message": f"Error uploading file: {str(e)}"}}), 500
- def upload_cookies(self):
- file = None
- if "file" in request.files:
- file = request.files['file']
- if file.filename == '':
- return 'No selected file', 400
- if file and file.filename.endswith(".json") or file.filename.endswith(".har"):
- filename = secure_filename(file.filename)
- file.save(os.path.join(get_cookies_dir(), filename))
- return "File saved", 200
- return 'Not supported file', 400
+ @app.route('/backend-api/v2/upload_cookies', methods=['POST'])
+        def upload_cookies():
+ file = None
+ if "file" in request.files:
+ file = request.files['file']
+ if file.filename == '':
+ return 'No selected file', 400
+            if file and (file.filename.endswith(".json") or file.filename.endswith(".har")):
+ filename = secure_filename(file.filename)
+ file.save(os.path.join(get_cookies_dir(), filename))
+ return "File saved", 200
+ return 'Not supported file', 400
def handle_synthesize(self, provider: str):
try:
diff --git a/g4f/models.py b/g4f/models.py
index d0ca6d27..802a4c20 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -771,22 +771,22 @@ class ModelUtils:
}
demo_models = {
- gpt_4o.name: [gpt_4o, [PollinationsAI]],
+ gpt_4o.name: [gpt_4o, [PollinationsAI, Blackbox]],
"default": [llama_3_2_11b, [HuggingFaceAPI]],
qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]],
- qvq_72b.name: [qvq_72b, [HuggingSpace]],
+ qvq_72b.name: [qvq_72b, [HuggingSpace, HuggingFaceAPI]],
deepseek_r1.name: [deepseek_r1, [HuggingFace, HuggingFaceAPI]],
- claude_3_haiku.name: [claude_3_haiku, [DDG]],
+ claude_3_haiku.name: [claude_3_haiku, [DDG, Jmuz]],
command_r.name: [command_r, [HuggingSpace]],
command_r_plus.name: [command_r_plus, [HuggingSpace]],
command_r7b.name: [command_r7b, [HuggingSpace]],
gemma_2_27b.name: [gemma_2_27b, [HuggingFaceAPI]],
- qwen_2_72b.name: [qwen_2_72b, [HuggingFace]],
+ qwen_2_72b.name: [qwen_2_72b, [HuggingFaceAPI]],
qwen_2_5_coder_32b.name: [qwen_2_5_coder_32b, [HuggingFace]],
qwq_32b.name: [qwq_32b, [HuggingFace]],
llama_3_3_70b.name: [llama_3_3_70b, [HuggingFace]],
- sd_3_5.name: [sd_3_5, [HuggingSpace]],
- flux_dev.name: [flux_dev, [HuggingSpace]],
+ sd_3_5.name: [sd_3_5, [HuggingSpace, HuggingFace]],
+ flux_dev.name: [flux_dev, [HuggingSpace, HuggingFace]],
flux_schnell.name: [flux_schnell, [HuggingFace]],
}
--
cgit v1.2.3