From 42805ac7894d62996d68e4038c37900b26a14a9e Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sun, 26 Jan 2025 16:32:32 +0100 Subject: Add legacy port to docker-compose files Update demo model list Disable upload cookies in demo Track usage in demo mode Add messages without asking the ai Add hint for browser usage in provider list Add qwen2 prompt template to HuggingFace provider Trim automatic messages in HuggingFaceAPI --- docker-compose-slim.yml | 3 +- docker-compose.yml | 1 + g4f/Provider/Cloudflare.py | 3 +- g4f/Provider/Copilot.py | 1 - g4f/Provider/Pi.py | 6 +- g4f/Provider/You.py | 2 + g4f/Provider/mini_max/HailuoAI.py | 1 + g4f/Provider/needs_auth/CopilotAccount.py | 1 + g4f/Provider/needs_auth/Gemini.py | 1 + g4f/Provider/needs_auth/HuggingChat.py | 6 +- g4f/Provider/needs_auth/HuggingFace.py | 12 ++ g4f/Provider/needs_auth/HuggingFaceAPI.py | 17 ++- g4f/Provider/needs_auth/MicrosoftDesigner.py | 1 + g4f/Provider/needs_auth/OpenaiChat.py | 1 + g4f/api/__init__.py | 2 +- g4f/cli.py | 4 +- g4f/gui/client/demo.html | 1 + g4f/gui/client/index.html | 14 +- g4f/gui/client/static/js/chat.v1.js | 184 +++++++++++++++++++-------- g4f/gui/server/api.py | 1 + g4f/gui/server/backend_api.py | 71 ++++++++--- g4f/models.py | 12 +- 22 files changed, 252 insertions(+), 93 deletions(-) diff --git a/docker-compose-slim.yml b/docker-compose-slim.yml index 7a02810f..39754f72 100644 --- a/docker-compose-slim.yml +++ b/docker-compose-slim.yml @@ -11,4 +11,5 @@ services: volumes: - .:/app ports: - - '8080:8080' \ No newline at end of file + - '8080:8080' + - '1337:8080' \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 1984bc0b..8d9dd722 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,6 +11,7 @@ services: - .:/app ports: - '8080:8080' + - '1337:8080' - '7900:7900' environment: - OLLAMA_HOST=host.docker.internal diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py index 
e6f0dab3..89ee4193 100644 --- a/g4f/Provider/Cloudflare.py +++ b/g4f/Provider/Cloudflare.py @@ -15,9 +15,10 @@ from ..errors import ResponseStatusError, ModelNotFoundError class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin): label = "Cloudflare AI" url = "https://playground.ai.cloudflare.com" + working = True + use_nodriver = True api_endpoint = "https://playground.ai.cloudflare.com/api/inference" models_url = "https://playground.ai.cloudflare.com/api/models" - working = True supports_stream = True supports_system_message = True supports_message_history = True diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py index bdbbcc4b..4a828c96 100644 --- a/g4f/Provider/Copilot.py +++ b/g4f/Provider/Copilot.py @@ -69,7 +69,6 @@ class Copilot(AbstractProvider, ProviderModelMixin): conversation: BaseConversation = None, return_conversation: bool = False, api_key: str = None, - web_search: bool = False, **kwargs ) -> CreateResult: if not has_curl_cffi: diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py index 6aabe7b1..8b07c0fe 100644 --- a/g4f/Provider/Pi.py +++ b/g4f/Provider/Pi.py @@ -7,9 +7,11 @@ from .base_provider import AsyncGeneratorProvider, format_prompt from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies class Pi(AsyncGeneratorProvider): - url = "https://pi.ai/talk" - working = True + url = "https://pi.ai/talk" + working = True + use_nodriver = True supports_stream = True + use_nodriver = True default_model = "pi" models = [default_model] _headers: dict = None diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py index 71b92837..7e5df0fd 100644 --- a/g4f/Provider/You.py +++ b/g4f/Provider/You.py @@ -75,6 +75,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin): try: cookies = get_cookies(".you.com") except MissingRequirementsError: + pass + if not cookies or "afUserId" not in cookies: browser = await get_nodriver(proxy=proxy) try: page = await browser.get(cls.url) diff --git 
a/g4f/Provider/mini_max/HailuoAI.py b/g4f/Provider/mini_max/HailuoAI.py index c9b3e49c..d8c145a1 100644 --- a/g4f/Provider/mini_max/HailuoAI.py +++ b/g4f/Provider/mini_max/HailuoAI.py @@ -22,6 +22,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin): label = "Hailuo AI" url = "https://www.hailuo.ai" working = True + use_nodriver = True supports_stream = True default_model = "MiniMax" diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py index 6d7964d1..527432c3 100644 --- a/g4f/Provider/needs_auth/CopilotAccount.py +++ b/g4f/Provider/needs_auth/CopilotAccount.py @@ -4,6 +4,7 @@ from ..Copilot import Copilot class CopilotAccount(Copilot): needs_auth = True + use_nodriver = True parent = "Copilot" default_model = "Copilot" default_vision_model = default_model diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py index 0f563b32..e6d71255 100644 --- a/g4f/Provider/needs_auth/Gemini.py +++ b/g4f/Provider/needs_auth/Gemini.py @@ -58,6 +58,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin): needs_auth = True working = True + use_nodriver = True default_model = 'gemini' default_image_model = default_model diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py index bf5abb8f..e72612b9 100644 --- a/g4f/Provider/needs_auth/HuggingChat.py +++ b/g4f/Provider/needs_auth/HuggingChat.py @@ -32,6 +32,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin): url = "https://huggingface.co/chat" working = True + use_nodriver = True supports_stream = True needs_auth = True @@ -68,10 +69,11 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin): ### Image ### "flux-dev": "black-forest-labs/FLUX.1-dev", "flux-schnell": "black-forest-labs/FLUX.1-schnell", - ### API ### + ### Used in other providers ### "qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct", "gemma-2-27b": "google/gemma-2-27b-it", - "qvq-72b": "Qwen/QVQ-72B-Preview" + "qwen-2-72b": 
"Qwen/Qwen2-72B-Instruct", + "qvq-72b": "Qwen/QVQ-72B-Preview", } @classmethod diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py index b9ef5418..07e110b3 100644 --- a/g4f/Provider/needs_auth/HuggingFace.py +++ b/g4f/Provider/needs_auth/HuggingFace.py @@ -102,6 +102,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): ) as session: if payload is None: async with session.get(f"https://huggingface.co/api/models/{model}") as response: + if response.status == 404: + raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}") await raise_for_status(response) model_data = await response.json() model_type = None @@ -172,6 +174,14 @@ def format_prompt_qwen(messages: Messages, do_continue: bool = False) -> str: return prompt[:-len("\n<|im_end|>\n")] return prompt +def format_prompt_qwen2(messages: Messages, do_continue: bool = False) -> str: + prompt = "".join([ + f"\u003C|{message['role'].capitalize()}|\u003E{message['content']}\u003C|end▁of▁sentence|\u003E" for message in messages + ]) + ("" if do_continue else "\u003C|Assistant|\u003E") + if do_continue: + return prompt[:-len("\u003C|Assistant|\u003E")] + return prompt + def format_prompt_llama(messages: Messages, do_continue: bool = False) -> str: prompt = "<|begin_of_text|>" + "".join([ f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}\n<|eot_id|>\n" for message in messages @@ -199,6 +209,8 @@ def get_inputs(messages: Messages, model_data: dict, model_type: str, do_continu inputs = format_prompt_custom(messages, eos_token, do_continue) elif eos_token == "<|im_end|>": inputs = format_prompt_qwen(messages, do_continue) + elif "content" in eos_token and eos_token["content"] == "\u003C|end▁of▁sentence|\u003E": + inputs = format_prompt_qwen2(messages, do_continue) elif eos_token == "<|eot_id|>": inputs = format_prompt_llama(messages, do_continue) else: diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py 
b/g4f/Provider/needs_auth/HuggingFaceAPI.py index dc783f4e..59c8f6ee 100644 --- a/g4f/Provider/needs_auth/HuggingFaceAPI.py +++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py @@ -3,6 +3,7 @@ from __future__ import annotations from .OpenaiTemplate import OpenaiTemplate from .HuggingChat import HuggingChat from ...providers.types import Messages +from ... import debug class HuggingFaceAPI(OpenaiTemplate): label = "HuggingFace (Inference API)" @@ -31,6 +32,7 @@ class HuggingFaceAPI(OpenaiTemplate): messages: Messages, api_base: str = None, max_tokens: int = 2048, + max_inputs_lenght: int = 10000, **kwargs ): if api_base is None: @@ -38,5 +40,18 @@ class HuggingFaceAPI(OpenaiTemplate): if model in cls.model_aliases: model_name = cls.model_aliases[model] api_base = f"https://api-inference.huggingface.co/models/{model_name}/v1" + start = calculate_lenght(messages) + if start > max_inputs_lenght: + if len(messages) > 6: + messages = messages[:3] + messages[-3:] + if calculate_lenght(messages) > max_inputs_lenght: + if len(messages) > 2: + messages = [m for m in messages if m["role"] == "system"] + messages[-1:] + if len(messages) > 1 and calculate_lenght(messages) > max_inputs_lenght: + messages = [messages[-1]] + debug.log(f"Messages trimmed from: {start} to: {calculate_lenght(messages)}") async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs): - yield chunk \ No newline at end of file + yield chunk + +def calculate_lenght(messages: Messages) -> int: + return sum([len(message["content"]) + 16 for message in messages]) \ No newline at end of file diff --git a/g4f/Provider/needs_auth/MicrosoftDesigner.py b/g4f/Provider/needs_auth/MicrosoftDesigner.py index 2a89e8be..c413c19b 100644 --- a/g4f/Provider/needs_auth/MicrosoftDesigner.py +++ b/g4f/Provider/needs_auth/MicrosoftDesigner.py @@ -21,6 +21,7 @@ class MicrosoftDesigner(AsyncGeneratorProvider, ProviderModelMixin): label = "Microsoft Designer" url = 
"https://designer.microsoft.com" working = True + use_nodriver = True needs_auth = True default_image_model = "dall-e-3" image_models = [default_image_model, "1024x1024", "1024x1792", "1792x1024"] diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index a8a4018e..29ada409 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -91,6 +91,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): label = "OpenAI ChatGPT" url = "https://chatgpt.com" working = True + use_nodriver = True supports_gpt_4 = True supports_message_history = True supports_system_message = True diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 896367e4..eb69c656 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -169,7 +169,7 @@ class Api: except HTTPException: user_g4f_api_key = None path = request.url.path - if path.startswith("/v1"): + if path.startswith("/v1") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'): if user_g4f_api_key is None: return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED) if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key): diff --git a/g4f/cli.py b/g4f/cli.py index af872489..5bb5c460 100644 --- a/g4f/cli.py +++ b/g4f/cli.py @@ -12,7 +12,7 @@ def get_api_parser(): api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)") api_parser.add_argument("--port", "-p", default=None, help="Change the port of the server.") api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.") - api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Add gui to the api.") + api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Start also the gui.") api_parser.add_argument("--model", default=None, help="Default model for chat completion. 
(incompatible with --reload and --workers)") api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working], default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)") @@ -28,7 +28,7 @@ def get_api_parser(): api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers], default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)") api_parser.add_argument("--reload", action="store_true", help="Enable reloading.") - api_parser.add_argument("--demo", action="store_true", help="Enable demo modus.") + api_parser.add_argument("--demo", action="store_true", help="Enable demo mode.") return api_parser def main(): diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html index dd0c078f..ff5b993b 100644 --- a/g4f/gui/client/demo.html +++ b/g4f/gui/client/demo.html @@ -215,6 +215,7 @@ } localStorage.setItem("HuggingFace-api_key", accessToken); localStorage.setItem("HuggingFace-user", JSON.stringify(user)); + localStorage.setItem("user", user.name); location.href = "/chat/"; } input.addEventListener("input", () => check_access_token()); diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index 46b348d1..fe6eda18 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -142,6 +142,11 @@ +
+ Track usage + + +
@@ -184,6 +189,12 @@ Show log
+