From ab04d1d8943557e41191241686c10d5aa89bddd3 Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sun, 26 Jan 2025 02:48:43 +0100 Subject: Increase max token in HuggingfaceAPI Restore browser instance on start up errors in nodriver Restored instances can be used as usual or to stop the browser Add demo modus to web ui for HuggingSpace Add rate limit support to web ui. Simply install flask_limiter Add home for demo with Access Token input and validation Add stripped model list for demo Add ignores for encoding error in web_search and file upload --- g4f/Provider/CablyAI.py | 7 +- g4f/Provider/DeepInfraChat.py | 1 - g4f/Provider/mini_max/HailuoAI.py | 4 +- g4f/Provider/mini_max/crypt.py | 17 +- g4f/Provider/needs_auth/HuggingChat.py | 5 +- g4f/Provider/needs_auth/HuggingFaceAPI.py | 10 +- g4f/api/__init__.py | 17 +- g4f/cli.py | 2 + g4f/gui/__init__.py | 3 +- g4f/gui/client/demo.html | 257 ++++++++++++++++++++++++++++++ g4f/gui/server/api.py | 5 +- g4f/gui/server/backend_api.py | 127 +++++++++------ g4f/gui/server/website.py | 7 +- g4f/models.py | 64 +++++--- g4f/requests/__init__.py | 20 ++- g4f/tools/files.py | 4 +- g4f/tools/web_search.py | 12 +- 17 files changed, 444 insertions(+), 118 deletions(-) create mode 100644 g4f/gui/client/demo.html diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py index b82ee5d2..07e9fe7f 100644 --- a/g4f/Provider/CablyAI.py +++ b/g4f/Provider/CablyAI.py @@ -1,16 +1,15 @@ from __future__ import annotations from ..typing import AsyncResult, Messages -from .needs_auth import OpenaiAPI +from .needs_auth.OpenaiTemplate import OpenaiTemplate -class CablyAI(OpenaiAPI): - label = "CablyAI" +class CablyAI(OpenaiTemplate): url = "https://cablyai.com" login_url = None needs_auth = False api_base = "https://cablyai.com/v1" working = True - + default_model = "Cably-80B" models = [default_model] model_aliases = {"cably-80b": default_model} diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py index e42d9a18..7aa1fd09 100644 --- a/g4f/Provider/DeepInfraChat.py +++ b/g4f/Provider/DeepInfraChat.py @@ -4,7 +4,6 @@ from ..typing import AsyncResult, Messages from .needs_auth.OpenaiTemplate import OpenaiTemplate class DeepInfraChat(OpenaiTemplate): - label = "DeepInfraChat" url = "https://deepinfra.com/chat" api_base = "https://api.deepinfra.com/v1/openai" working = True diff --git a/g4f/Provider/mini_max/HailuoAI.py b/g4f/Provider/mini_max/HailuoAI.py index 810120f7..c9b3e49c 100644 --- a/g4f/Provider/mini_max/HailuoAI.py +++ b/g4f/Provider/mini_max/HailuoAI.py @@ -7,7 +7,7 @@ from aiohttp import ClientSession, FormData from ...typing import AsyncResult, Messages from ..base_provider import AsyncAuthedProvider, ProviderModelMixin, format_prompt -from ..mini_max.crypt import CallbackResults, get_browser_callback, generate_yy_header +from ..mini_max.crypt import CallbackResults, get_browser_callback, generate_yy_header, get_body_to_yy from ...requests import get_args_from_nodriver, raise_for_status from ...providers.response import AuthResult, JsonConversation, RequestLogin, TitleGeneration from ... 
import debug @@ -71,7 +71,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin): data.add_field(name, str(value)) headers = { "token": token, - "yy": generate_yy_header(auth_result.path_and_query, form_data, "POST", timestamp) + "yy": generate_yy_header(auth_result.path_and_query, get_body_to_yy(form_data), timestamp) } async with session.post(f"{cls.url}{path_and_query}", data=data, headers=headers) as response: await raise_for_status(response) diff --git a/g4f/Provider/mini_max/crypt.py b/g4f/Provider/mini_max/crypt.py index c96491f4..197bda6e 100644 --- a/g4f/Provider/mini_max/crypt.py +++ b/g4f/Provider/mini_max/crypt.py @@ -22,27 +22,17 @@ def hash_function(base_string: str) -> str: """ return hashlib.md5(base_string.encode()).hexdigest() -def generate_yy_header(has_search_params_path: str, body: dict, method: str, time: int) -> str: +def generate_yy_header(has_search_params_path: str, body_to_yy: dict, time: int) -> str: """ Python equivalent of the generateYYHeader function. """ - body_to_yy=get_body_to_yy(body) - - if method and method.lower() == 'post': - s = body or {} - else: - s = {} - - s = json.dumps(s, ensure_ascii=True, sort_keys=True) - if body_to_yy: - s = body_to_yy # print("Encoded Path:", quote(has_search_params_path, "")) # print("Stringified Body:", s) # print("Hashed Time:", hash_function(str(time))) encoded_path = quote(has_search_params_path, "") time_hash = hash_function(str(time)) - combined_string = f"{encoded_path}_{s}{time_hash}ooui" + combined_string = f"{encoded_path}_{body_to_yy}{time_hash}ooui" # print("Combined String:", combined_string) # print("Hashed Combined String:", hash_function(combined_string)) @@ -56,6 +46,9 @@ def get_body_to_yy(l): # print("bodyToYY:", M) return M +def get_body_json(s): + return json.dumps(s, ensure_ascii=True, sort_keys=True) + async def get_browser_callback(auth_result: CallbackResults): async def callback(page: Tab): while not auth_result.token: diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py index 36d292b7..bf5abb8f 100644 --- a/g4f/Provider/needs_auth/HuggingChat.py +++ b/g4f/Provider/needs_auth/HuggingChat.py @@ -65,10 +65,13 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin): "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct", "mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407", "phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct", - ### Image ### "flux-dev": "black-forest-labs/FLUX.1-dev", "flux-schnell": "black-forest-labs/FLUX.1-schnell", + ### API ### + "qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct", + "gemma-2-27b": "google/gemma-2-27b-it", + "qvq-72b": "Qwen/QVQ-72B-Preview" } @classmethod diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py index b36f0e0a..000dac90 100644 --- a/g4f/Provider/needs_auth/HuggingFaceAPI.py +++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py @@ -14,6 +14,8 @@ class HuggingFaceAPI(OpenaiTemplate): default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct" default_vision_model = default_model + vision_models = [default_vision_model, "Qwen/Qwen2-VL-7B-Instruct"] + model_aliases = HuggingChat.model_aliases @classmethod def get_models(cls, **kwargs): @@ -28,9 +30,13 @@ class HuggingFaceAPI(OpenaiTemplate): model: str, messages: Messages, api_base: str = None, + max_tokens: int = 2048, **kwargs ): if api_base is None: - api_base = f"https://api-inference.huggingface.co/models/{model}/v1" - async for chunk in super().create_async_generator(model, messages, api_base=api_base, 
**kwargs): + model_name = model + if model in cls.model_aliases: + model_name = cls.model_aliases[model] + api_base = f"https://api-inference.huggingface.co/models/{model_name}/v1" + async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs): yield chunk \ No newline at end of file diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index c0c26104..896367e4 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -73,7 +73,7 @@ def create_app(): api.register_validation_exception_handler() if AppConfig.gui: - gui_app = WSGIMiddleware(get_gui_app()) + gui_app = WSGIMiddleware(get_gui_app(AppConfig.demo)) app.mount("/", gui_app) # Read cookie files if not ignored @@ -96,6 +96,12 @@ def create_app_with_gui_and_debug(): AppConfig.gui = True return create_app() +def create_app_with_demo_and_debug(): + g4f.debug.logging = True + AppConfig.gui = True + AppConfig.demo = True + return create_app() + class ErrorResponse(Response): media_type = "application/json" @@ -121,6 +127,7 @@ class AppConfig: image_provider: str = None proxy: str = None gui: bool = False + demo: bool = False @classmethod def set_config(cls, **data): @@ -156,7 +163,7 @@ class Api: print(f"Register authentication key: {''.join(['*' for _ in range(len(AppConfig.g4f_api_key))])}") @self.app.middleware("http") async def authorization(request: Request, call_next): - if AppConfig.g4f_api_key is not None: + if AppConfig.g4f_api_key is not None or AppConfig.demo: try: user_g4f_api_key = await self.get_g4f_api_key(request) except HTTPException: @@ -167,7 +174,7 @@ class Api: return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED) if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key): return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN) - else: + elif not AppConfig.demo: if user_g4f_api_key is not None and path.startswith("/images/"): if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key): return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN) @@ -562,7 +569,9 @@ def run_api( host, port = bind.split(":") if port is None: port = DEFAULT_PORT - if AppConfig.gui and debug: + if AppConfig.demo and debug: + method = "create_app_with_demo_and_debug" + elif AppConfig.gui and debug: method = "create_app_with_gui_and_debug" else: method = "create_app_debug" if debug else "create_app" diff --git a/g4f/cli.py b/g4f/cli.py index 51721548..af872489 100644 --- a/g4f/cli.py +++ b/g4f/cli.py @@ -28,6 +28,7 @@ def get_api_parser(): api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers], default=[], help="List of browsers to access or retrieve cookies from. 
(incompatible with --reload and --workers)") api_parser.add_argument("--reload", action="store_true", help="Enable reloading.") + api_parser.add_argument("--demo", action="store_true", help="Enable demo modus.") return api_parser def main(): @@ -57,6 +58,7 @@ def run_api_args(args): proxy=args.proxy, model=args.model, gui=args.gui, + demo=args.demo, ) if args.cookie_browsers: g4f.cookies.browsers = [g4f.cookies[browser] for browser in args.cookie_browsers] diff --git a/g4f/gui/__init__.py b/g4f/gui/__init__.py index 4dc286ad..8dab783a 100644 --- a/g4f/gui/__init__.py +++ b/g4f/gui/__init__.py @@ -8,10 +8,11 @@ try: except ImportError as e: import_error = e -def get_gui_app(): +def get_gui_app(demo: bool = False): if import_error is not None: raise MissingRequirementsError(f'Install "gui" requirements | pip install -U g4f[gui]\n{import_error}') app = create_app() + app.demo = demo site = Website(app) for route in site.routes: diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html new file mode 100644 index 00000000..dd0c078f --- /dev/null +++ b/g4f/gui/client/demo.html @@ -0,0 +1,257 @@ + + + + + + + G4F DEMO + + + + + + + + + + + + +
[demo.html markup lost in extraction. Recoverable content: a "G4F DEMO" heading, the welcome text "Welcome to the G4F Web UI! Your AI assistant is ready to assist you.", an Access Token input with validation, and a "Get Access Token" link.]
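The page's token validation step can be sketched in a few lines. This is an illustrative Python equivalent of the browser-side check, not the committed JavaScript; the `whoami-v2` endpoint name and the `hf_...` token placeholder are assumptions based on Hugging Face's public API.

```python
# Hedged sketch (not the committed code): validate a Hugging Face access
# token the way the demo page does before redirecting to /chat/.
# Assumes the public https://huggingface.co/api/whoami-v2 endpoint.
import requests

def validate_hf_token(token: str) -> dict | None:
    """Return the user payload if the token is valid, else None."""
    response = requests.get(
        "https://huggingface.co/api/whoami-v2",
        headers={"Authorization": f"Bearer {token}"},
        timeout=10,
    )
    if response.status_code != 200:
        return None
    # The demo stores the token under "HuggingFace-api_key" and the user
    # payload under "HuggingFace-user" in localStorage before redirecting.
    return response.json()

if __name__ == "__main__":
    user = validate_hf_token("hf_...")  # hypothetical token placeholder
    print("valid" if user else "invalid")
```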
+ + + \ No newline at end of file diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py index 42f9d9fc..748a3280 100644 --- a/g4f/gui/server/api.py +++ b/g4f/gui/server/api.py @@ -28,6 +28,7 @@ class Api: return [{ "name": model.name, "image": isinstance(model, models.ImageModel), + "vision": isinstance(model, models.VisionModel), "providers": [ getattr(provider, "parent", provider.__name__) for provider in providers @@ -84,7 +85,7 @@ class Api: return send_from_directory(os.path.abspath(images_dir), name) def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict): - model = json_data.get('model') or models.default + model = json_data.get('model') provider = json_data.get('provider') messages = json_data.get('messages') api_key = json_data.get("api_key") @@ -180,7 +181,7 @@ class Api: conversations[provider][conversation_id] = chunk if isinstance(chunk, JsonConversation): yield self._format_json("conversation", { - provider: chunk.get_dict() + provider.__name__ if isinstance(provider, type) else provider: chunk.get_dict() }) else: yield self._format_json("conversation_id", conversation_id) diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py index f9264da4..a6775847 100644 --- a/g4f/gui/server/backend_api.py +++ b/g4f/gui/server/backend_api.py @@ -6,12 +6,19 @@ import os import logging import asyncio import shutil +import random from flask import Flask, Response, request, jsonify from typing import Generator from pathlib import Path from urllib.parse import quote_plus from hashlib import sha256 from werkzeug.utils import secure_filename +try: + from flask_limiter import Limiter + from flask_limiter.util import get_remote_address + has_flask_limiter = True +except ImportError: + has_flask_limiter = False from ...image import is_allowed_extension, to_image from ...client.service import convert_to_provider @@ -22,6 +29,7 @@ from ...tools.run_tools import iter_run_tools from ...errors import ProviderNotFoundError from ...cookies import get_cookies_dir from ... import ChatCompletion +from ... 
import models from .api import Api logger = logging.getLogger(__name__) @@ -53,45 +61,98 @@ class Backend_Api(Api): """ self.app: Flask = app + if has_flask_limiter: + limiter = Limiter( + get_remote_address, + app=app, + default_limits=["200 per day", "50 per hour"], + storage_uri="memory://", + ) + else: + class Dummy(): + def limit(self, value): + pass + limiter = Dummy() + + @app.route('/backend-api/v2/models', methods=['GET']) def jsonify_models(**kwargs): - response = self.get_models(**kwargs) + response = get_demo_models() if app.demo else self.get_models(**kwargs) if isinstance(response, list): return jsonify(response) return response + @app.route('/backend-api/v2/models/', methods=['GET']) def jsonify_provider_models(**kwargs): response = self.get_provider_models(**kwargs) if isinstance(response, list): return jsonify(response) return response + @app.route('/backend-api/v2/providers', methods=['GET']) def jsonify_providers(**kwargs): response = self.get_providers(**kwargs) if isinstance(response, list): return jsonify(response) return response + def get_demo_models(): + return [{ + "name": model.name, + "image": isinstance(model, models.ImageModel), + "vision": isinstance(model, models.VisionModel), + "providers": [ + getattr(provider, "parent", provider.__name__) + for provider in providers + ], + "demo": True + } + for model, providers in models.demo_models.values()] + + @app.route('/backend-api/v2/conversation', methods=['POST']) + @limiter.limit("4 per minute") + def handle_conversation(): + """ + Handles conversation requests and streams responses back. + + Returns: + Response: A Flask response object for streaming. + """ + kwargs = {} + if "files[]" in request.files: + images = [] + for file in request.files.getlist('files[]'): + if file.filename != '' and is_allowed_extension(file.filename): + images.append((to_image(file.stream, file.filename.endswith('.svg')), file.filename)) + kwargs['images'] = images + if "json" in request.form: + json_data = json.loads(request.form['json']) + else: + json_data = request.json + + if app.demo: + model = json_data.get("model") + if model != "default" and model in models.demo_models: + json_data["provider"] = random.choice(models.demo_models[model][1]) + else: + json_data["model"] = models.demo_models["default"][0].name + json_data["provider"] = random.choice(models.demo_models["default"][1]) + + kwargs = self._prepare_conversation_kwargs(json_data, kwargs) + return self.app.response_class( + self._create_response_stream( + kwargs, + json_data.get("conversation_id"), + json_data.get("provider"), + json_data.get("download_images", True), + ), + mimetype='text/event-stream' + ) + self.routes = { - '/backend-api/v2/models': { - 'function': jsonify_models, - 'methods': ['GET'] - }, - '/backend-api/v2/models/': { - 'function': jsonify_provider_models, - 'methods': ['GET'] - }, - '/backend-api/v2/providers': { - 'function': jsonify_providers, - 'methods': ['GET'] - }, '/backend-api/v2/version': { 'function': self.get_version, 'methods': ['GET'] }, - '/backend-api/v2/conversation': { - 'function': self.handle_conversation, - 'methods': ['POST'] - }, '/backend-api/v2/synthesize/': { 'function': self.handle_synthesize, 'methods': ['GET'] @@ -250,38 +311,6 @@ class Backend_Api(Api): return "File saved", 200 return 'Not supported file', 400 - def handle_conversation(self): - """ - Handles conversation requests and streams responses back. - - Returns: - Response: A Flask response object for streaming. 
- """ - - kwargs = {} - if "files[]" in request.files: - images = [] - for file in request.files.getlist('files[]'): - if file.filename != '' and is_allowed_extension(file.filename): - images.append((to_image(file.stream, file.filename.endswith('.svg')), file.filename)) - kwargs['images'] = images - if "json" in request.form: - json_data = json.loads(request.form['json']) - else: - json_data = request.json - - kwargs = self._prepare_conversation_kwargs(json_data, kwargs) - - return self.app.response_class( - self._create_response_stream( - kwargs, - json_data.get("conversation_id"), - json_data.get("provider"), - json_data.get("download_images", True), - ), - mimetype='text/event-stream' - ) - def handle_synthesize(self, provider: str): try: provider_handler = convert_to_provider(provider) diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py index 1af8aa08..76ca4438 100644 --- a/g4f/gui/server/website.py +++ b/g4f/gui/server/website.py @@ -9,7 +9,7 @@ class Website: self.app = app self.routes = { '/': { - 'function': self._home, + 'function': self._demo if app.demo else self._home, 'methods': ['GET', 'POST'] }, '/chat/': { @@ -43,4 +43,7 @@ class Website: return render_template('index.html', chat_id=str(uuid.uuid4())) def _home(self): - return render_template('home.html') \ No newline at end of file + return render_template('home.html') + + def _demo(self): + return render_template('demo.html') \ No newline at end of file diff --git a/g4f/models.py b/g4f/models.py index 950094e9..d0ca6d27 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -71,6 +71,9 @@ class Model: class ImageModel(Model): pass +class VisionModel(Model): + pass + ### Default ### default = Model( name = "", @@ -128,7 +131,7 @@ gpt_4 = Model( ) # gpt-4o -gpt_4o = Model( +gpt_4o = VisionModel( name = 'gpt-4o', base_provider = 'OpenAI', best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, ChatGpt, Liaobots, OpenaiChat]) @@ -218,7 +221,7 @@ llama_3_2_1b = Model( best_provider = Cloudflare ) -llama_3_2_11b = Model( +llama_3_2_11b = VisionModel( name = "llama-3.2-11b", base_provider = "Meta Llama", best_provider = IterListProvider([Jmuz, HuggingChat, HuggingFace]) @@ -338,6 +341,11 @@ gemini_2_0_flash_thinking = Model( best_provider = Liaobots ) +gemma_2_27b = Model( + name = 'gemma-2-27b', + base_provider = 'Google DeepMind', + best_provider = HuggingFace +) ### Anthropic ### # claude 3 @@ -407,27 +415,26 @@ command_r7b = Model( ) ### Qwen ### -# qwen 1_5 qwen_1_5_7b = Model( name = 'qwen-1.5-7b', base_provider = 'Qwen', best_provider = Cloudflare ) - -# qwen 2 qwen_2_72b = Model( name = 'qwen-2-72b', base_provider = 'Qwen', best_provider = HuggingSpace ) - -# qwen 2.5 +qwen_2_vl_7b = VisionModel( + name = "qwen-2-vl-7b", + base_provider = 'Qwen', + best_provider = HuggingFaceAPI +) qwen_2_5_72b = Model( name = 'qwen-2.5-72b', base_provider = 'Qwen', best_provider = IterListProvider([DeepInfraChat, PollinationsAI, Jmuz]) ) - qwen_2_5_coder_32b = Model( name = 'qwen-2.5-coder-32b', base_provider = 'Qwen', @@ -441,7 +448,7 @@ qwq_32b = Model( best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, HuggingChat]) ) -qvq_72b = Model( +qvq_72b = VisionModel( name = 'qvq-72b', base_provider = 'Qwen', best_provider = HuggingSpace @@ -674,20 +681,15 @@ class ModelUtils: wizardlm_2_8x22b.name: wizardlm_2_8x22b, ### Google ### - # gemini + ### Gemini gemini.name: gemini, - - # gemini-exp gemini_exp.name: gemini_exp, - - # gemini-1.5 gemini_1_5_pro.name: 
gemini_1_5_pro, gemini_1_5_flash.name: gemini_1_5_flash, - - # gemini-2.0 gemini_2_0_flash.name: gemini_2_0_flash, gemini_2_0_flash_thinking.name: gemini_2_0_flash_thinking, - + ### Gemma + gemma_2_27b.name: gemma_2_27b, ### Anthropic ### # claude 3 @@ -714,16 +716,12 @@ class ModelUtils: gigachat.name: gigachat, ### Qwen ### - # qwen 1_5 qwen_1_5_7b.name: qwen_1_5_7b, - - # qwen 2 qwen_2_72b.name: qwen_2_72b, - - # qwen 2.5 + qwen_2_vl_7b.name: qwen_2_vl_7b, qwen_2_5_72b.name: qwen_2_5_72b, qwen_2_5_coder_32b.name: qwen_2_5_coder_32b, - + # qwq/qvq qwq_32b.name: qwq_32b, qvq_72b.name: qvq_72b, @@ -772,6 +770,26 @@ class ModelUtils: midjourney.name: midjourney, } +demo_models = { + gpt_4o.name: [gpt_4o, [PollinationsAI]], + "default": [llama_3_2_11b, [HuggingFaceAPI]], + qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]], + qvq_72b.name: [qvq_72b, [HuggingSpace]], + deepseek_r1.name: [deepseek_r1, [HuggingFace, HuggingFaceAPI]], + claude_3_haiku.name: [claude_3_haiku, [DDG]], + command_r.name: [command_r, [HuggingSpace]], + command_r_plus.name: [command_r_plus, [HuggingSpace]], + command_r7b.name: [command_r7b, [HuggingSpace]], + gemma_2_27b.name: [gemma_2_27b, [HuggingFaceAPI]], + qwen_2_72b.name: [qwen_2_72b, [HuggingFace]], + qwen_2_5_coder_32b.name: [qwen_2_5_coder_32b, [HuggingFace]], + qwq_32b.name: [qwq_32b, [HuggingFace]], + llama_3_3_70b.name: [llama_3_3_70b, [HuggingFace]], + sd_3_5.name: [sd_3_5, [HuggingSpace]], + flux_dev.name: [flux_dev, [HuggingSpace]], + flux_schnell.name: [flux_schnell, [HuggingFace]], +} + # Create a list of all models and his providers __models__ = { model.name: (model, providers) diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py index 2811f046..2eb1f3e5 100644 --- a/g4f/requests/__init__.py +++ b/g4f/requests/__init__.py @@ -25,7 +25,7 @@ try: import nodriver from nodriver.cdp.network import CookieParam from nodriver.core.config import find_chrome_executable - from nodriver import Browser, Tab + from nodriver import Browser, Tab, util has_nodriver = True except ImportError: from typing import Type as Tab @@ -158,12 +158,18 @@ async def get_nodriver( break lock_file.write_text(str(time.time())) debug.log(f"Open nodriver with user_dir: {user_data_dir}") - browser = await nodriver.start( - user_data_dir=user_data_dir, - browser_args=None if proxy is None else [f"--proxy-server={proxy}"], - browser_executable_path=browser_executable_path, - **kwargs - ) + try: + browser = await nodriver.start( + user_data_dir=user_data_dir, + browser_args=None if proxy is None else [f"--proxy-server={proxy}"], + browser_executable_path=browser_executable_path, + **kwargs + ) + except: + if util.get_registered_instances(): + browser = util.get_registered_instances()[-1] + else: + raise stop = browser.stop def on_stop(): try: diff --git a/g4f/tools/files.py b/g4f/tools/files.py index d1d45e13..f23f931b 100644 --- a/g4f/tools/files.py +++ b/g4f/tools/files.py @@ -225,9 +225,9 @@ def cache_stream(stream: Iterator[str], bucket_dir: Path) -> Iterator[str]: for chunk in read_path_chunked(cache_file): yield chunk return - with open(tmp_file, "w") as f: + with open(tmp_file, "wb") as f: for chunk in stream: - f.write(chunk) + f.write(chunk.encode(errors="replace")) yield chunk tmp_file.rename(cache_file) diff --git a/g4f/tools/web_search.py b/g4f/tools/web_search.py index d9b40554..12a374ea 100644 --- a/g4f/tools/web_search.py +++ b/g4f/tools/web_search.py @@ -130,7 +130,7 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No try: 
bucket_dir: Path = Path(get_cookies_dir()) / ".scrape_cache" / "fetch_and_scrape" bucket_dir.mkdir(parents=True, exist_ok=True) - md5_hash = hashlib.md5(url.encode()).hexdigest() + md5_hash = hashlib.md5(url.encode(errors="ignore")).hexdigest() cache_file = bucket_dir / f"{quote_plus(url.split('?')[0].split('//')[1].replace('/', ' ')[:48])}.{datetime.date.today()}.{md5_hash[:16]}.cache" if cache_file.exists(): return cache_file.read_text() @@ -138,8 +138,8 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No if response.status == 200: html = await response.text(errors="replace") text = "".join(scrape_text(html, max_words, add_source)) - with open(cache_file, "w") as f: - f.write(text) + with open(cache_file, "wb") as f: + f.write(text.encode(errors="replace")) return text except (ClientError, asyncio.TimeoutError): return @@ -194,7 +194,7 @@ async def search(query: str, max_results: int = 5, max_words: int = 2500, backen async def do_search(prompt: str, query: str = None, instructions: str = DEFAULT_INSTRUCTIONS, **kwargs) -> str: if query is None: query = spacy_get_keywords(prompt) - json_bytes = json.dumps({"query": query, **kwargs}, sort_keys=True).encode() + json_bytes = json.dumps({"query": query, **kwargs}, sort_keys=True).encode(errors="ignore") md5_hash = hashlib.md5(json_bytes).hexdigest() bucket_dir: Path = Path(get_cookies_dir()) / ".scrape_cache" / f"web_search" / f"{datetime.date.today()}" bucket_dir.mkdir(parents=True, exist_ok=True) @@ -205,8 +205,8 @@ async def do_search(prompt: str, query: str = None, instructions: str = DEFAULT_ else: search_results = await search(query, **kwargs) if search_results.results: - with cache_file.open("w") as f: - f.write(str(search_results)) + with cache_file.open("wb") as f: + f.write(str(search_results).encode(errors="replace")) if instructions: new_prompt = f""" {search_results} -- cgit v1.2.3 From ca323b117f64fe40296fb142257c26a83a238611 Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sun, 26 Jan 2025 04:23:20 +0100 Subject: Fix missing model in unittest --- g4f/Provider/needs_auth/HuggingFaceAPI.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py index 000dac90..dc783f4e 100644 --- a/g4f/Provider/needs_auth/HuggingFaceAPI.py +++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py @@ -19,9 +19,9 @@ class HuggingFaceAPI(OpenaiTemplate): @classmethod def get_models(cls, **kwargs): - HuggingChat.get_models() - cls.models = HuggingChat.text_models - cls.vision_models = HuggingChat.vision_models + if not cls.models: + HuggingChat.get_models() + cls.models = list(set(HuggingChat.text_models + cls.vision_models)) return cls.models @classmethod -- cgit v1.2.3 From 5a0afdb11009352a283dc0396789198fdbac26a7 Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sun, 26 Jan 2025 04:25:21 +0100 Subject: Handle demo modus in Web UI --- g4f/gui/client/static/js/chat.v1.js | 290 +++++++++++++++++++++++------------- g4f/gui/server/backend_api.py | 4 +- 2 files changed, 193 insertions(+), 101 deletions(-) diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index d0116c80..dfedb2aa 100644 --- a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -36,6 +36,7 @@ let parameters_storage = {}; let finish_storage = {}; let usage_storage = {}; let reasoning_storage = {} +let is_demo = false; 
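The encoding fixes in the first commit (g4f/tools/files.py and g4f/tools/web_search.py) all follow one pattern: open cache files in binary mode and encode with errors="replace", so scraped text containing unencodable characters cannot raise UnicodeEncodeError. A minimal sketch of that pattern, with an illustrative file name:

```python
# Minimal sketch of the encoding-tolerant cache I/O adopted in
# g4f/tools/files.py and g4f/tools/web_search.py. Binary mode sidesteps
# the platform-dependent default text encoding, and errors="replace"
# substitutes characters UTF-8 cannot encode (such as lone surrogates
# from scraped pages) instead of raising UnicodeEncodeError.
from pathlib import Path

def write_cache(cache_file: Path, text: str) -> None:
    with open(cache_file, "wb") as f:
        f.write(text.encode(errors="replace"))

def read_cache(cache_file: Path) -> str:
    # Mirror the tolerant write with a tolerant read.
    return cache_file.read_text(encoding="utf-8", errors="replace")

if __name__ == "__main__":
    path = Path("example.cache")  # illustrative path, not from the patch
    write_cache(path, "ok \ud800 ok")  # the lone surrogate becomes "?"
    print(read_cache(path))
```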
messageInput.addEventListener("blur", () => { window.scrollTo(0, 0); @@ -683,7 +684,7 @@ async function load_provider_parameters(provider) { } } -async function add_message_chunk(message, message_id, provider, scroll) { +async function add_message_chunk(message, message_id, provider, scroll, finish_message=null) { content_map = content_storage[message_id]; if (message.type == "conversation") { const conversation = await get_conversation(window.conversation_id); @@ -737,6 +738,9 @@ async function add_message_chunk(message, message_id, provider, scroll) { update_message(content_map, message_id, markdown_render(message.login), scroll); } else if (message.type == "finish") { finish_storage[message_id] = message.finish; + if (finish_message) { + await finish_message(); + } } else if (message.type == "usage") { usage_storage[message_id] = message.usage; } else if (message.type == "reasoning") { @@ -829,11 +833,67 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi if (scroll) { await lazy_scroll_to_bottom(); } + async function finish_message() { + content_map.update_timeouts.forEach((timeoutId)=>clearTimeout(timeoutId)); + content_map.update_timeouts = []; + if (!error_storage[message_id] && message_storage[message_id]) { + html = markdown_render(message_storage[message_id]); + content_map.inner.innerHTML = html; + highlight(content_map.inner); + if (imageInput) imageInput.value = ""; + if (cameraInput) cameraInput.value = ""; + } + if (message_storage[message_id]) { + const message_provider = message_id in provider_storage ? provider_storage[message_id] : null; + await add_message( + window.conversation_id, + "assistant", + message_storage[message_id] + (error_storage[message_id] ? " [error]" : "") + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : ""), + message_provider, + message_index, + synthesize_storage[message_id], + regenerate, + title_storage[message_id], + finish_storage[message_id], + usage_storage[message_id], + reasoning_storage[message_id], + action=="continue" + ); + delete message_storage[message_id]; + } + if (controller_storage[message_id]) { + if (!controller_storage[message_id].signal.aborted) { + controller_storage[message_id].abort(); + } + delete controller_storage[message_id]; + } + if (!error_storage[message_id]) { + await safe_load_conversation(window.conversation_id, scroll); + } + let cursorDiv = message_el.querySelector(".cursor"); + if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv); + if (scroll) { + await lazy_scroll_to_bottom(); + } + await safe_remove_cancel_button(); + await register_message_buttons(); + await load_conversations(); + regenerate_button.classList.remove("regenerate-hidden"); + } try { + let api_key; + if (is_demo && provider != "Custom") { + api_key = localStorage.getItem("HuggingFace-api_key"); + if (!api_key) { + location.href = "/"; + return; + } + } else { + api_key = get_api_key_by_provider(provider); + } const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput; const files = input && input.files.length > 0 ? input.files : null; const download_images = document.getElementById("download_images")?.checked; - const api_key = get_api_key_by_provider(provider); const api_base = provider == "Custom" ? 
document.getElementById(`${provider}-api_base`).value : null; const ignored = Array.from(settings.querySelectorAll("input.provider:not(:checked)")).map((el)=>el.value); await api("conversation", { @@ -849,54 +909,15 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi api_key: api_key, api_base: api_base, ignored: ignored, - }, files, message_id, scroll); - content_map.update_timeouts.forEach((timeoutId)=>clearTimeout(timeoutId)); - content_map.update_timeouts = []; - if (!error_storage[message_id]) { - html = markdown_render(message_storage[message_id]); - content_map.inner.innerHTML = html; - highlight(content_map.inner); - if (imageInput) imageInput.value = ""; - if (cameraInput) cameraInput.value = ""; - } + }, files, message_id, scroll, finish_message); } catch (e) { console.error(e); if (e.name != "AbortError") { error_storage[message_id] = true; content_map.inner.innerHTML += markdown_render(`**An error occured:** ${e}`); } + await finish_message(); } - if (message_storage[message_id]) { - const message_provider = message_id in provider_storage ? provider_storage[message_id] : null; - await add_message( - window.conversation_id, - "assistant", - message_storage[message_id] + (error_storage[message_id] ? " [error]" : "") + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : ""), - message_provider, - message_index, - synthesize_storage[message_id], - regenerate, - title_storage[message_id], - finish_storage[message_id], - usage_storage[message_id], - reasoning_storage[message_id], - action=="continue" - ); - delete controller_storage[message_id]; - delete message_storage[message_id]; - if (!error_storage[message_id]) { - await safe_load_conversation(window.conversation_id, scroll); - } - } - let cursorDiv = message_el.querySelector(".cursor"); - if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv); - if (scroll) { - await lazy_scroll_to_bottom(); - } - await safe_remove_cancel_button(); - await register_message_buttons(); - await load_conversations(); - regenerate_button.classList.remove("regenerate-hidden"); }; async function scroll_to_bottom() { @@ -1789,70 +1810,93 @@ async function on_api() { models.forEach((model) => { let option = document.createElement("option"); option.value = model.name; - option.text = model.name + (model.image ? " (Image Generation)" : ""); + option.text = model.name + (model.image ? " (Image Generation)" : "") + (model.vision ? " (Image Upload)" : ""); option.dataset.providers = model.providers.join(" "); modelSelect.appendChild(option); + is_demo = model.demo; }); - providers = await api("providers") - providers.sort((a, b) => a.label.localeCompare(b.label)); - let login_urls = {}; - providers.forEach((provider) => { - let option = document.createElement("option"); - option.value = provider.name; - option.dataset.label = provider.label; - option.text = provider.label - + (provider.vision ? " (Image Upload)" : "") - + (provider.image ? " (Image Generation)" : "") - + (provider.webdriver ? " (Webdriver)" : "") - + (provider.auth ? 
" (Auth)" : ""); - if (provider.parent) - option.dataset.parent = provider.parent; - providerSelect.appendChild(option); - - if (provider.parent) { - if (!login_urls[provider.parent]) { - login_urls[provider.parent] = [provider.label, provider.login_url, [provider.name]]; - } else { - login_urls[provider.parent][2].push(provider.name); + let login_urls; + if (is_demo) { + if (!localStorage.getItem("HuggingFace-api_key")) { + location.href = "/"; + return; + } + providerSelect.innerHTML = '' + document.getElementById("pin")?.remove(); + document.getElementById("refine")?.parentElement.remove(); + Array.from(modelSelect.querySelectorAll(':not([data-providers])')).forEach((option)=>{ + if (!option.disabled && option.value) { + option.remove(); } - } else if (provider.login_url) { - if (!login_urls[provider.name]) { - login_urls[provider.name] = [provider.label, provider.login_url, []]; - } else { - login_urls[provider.name][0] = provider.label; - login_urls[provider.name][1] = provider.login_url; + }); + login_urls = { + "Custom": ["Custom Provider", "", []], + "HuggingFace": ["HuggingFace", "", []], + }; + } else { + providers = await api("providers") + providers.sort((a, b) => a.label.localeCompare(b.label)); + login_urls = {}; + providers.forEach((provider) => { + let option = document.createElement("option"); + option.value = provider.name; + option.dataset.label = provider.label; + option.text = provider.label + + (provider.vision ? " (Image Upload)" : "") + + (provider.image ? " (Image Generation)" : "") + + (provider.webdriver ? " (Webdriver)" : "") + + (provider.auth ? " (Auth)" : ""); + if (provider.parent) + option.dataset.parent = provider.parent; + providerSelect.appendChild(option); + + if (provider.parent) { + if (!login_urls[provider.parent]) { + login_urls[provider.parent] = [provider.label, provider.login_url, [provider.name]]; + } else { + login_urls[provider.parent][2].push(provider.name); + } + } else if (provider.login_url) { + if (!login_urls[provider.name]) { + login_urls[provider.name] = [provider.label, provider.login_url, []]; + } else { + login_urls[provider.name][0] = provider.label; + login_urls[provider.name][1] = provider.login_url; + } } - } - }); + }); + providers.forEach((provider) => { + if (!provider.parent) { + option = document.createElement("div"); + option.classList.add("field"); + option.innerHTML = ` + Enable ${provider.label} + + + `; + option.querySelector("input").addEventListener("change", (event) => load_provider_option(event.target, provider.name)); + settings.querySelector(".paper").appendChild(option); + provider_options[provider.name] = option; + } + }); + await load_provider_models(appStorage.getItem("provider")) + } for (let [name, [label, login_url, childs]] of Object.entries(login_urls)) { - if (!login_url) { + if (!login_url && !is_demo) { continue; } option = document.createElement("div"); - option.classList.add("field", "box", "hidden"); + option.classList.add("field", "box"); + if (!is_demo) { + option.classList.add("hidden"); + } childs = childs.map((child)=>`${child}-api_key`).join(" "); option.innerHTML = ` - - Get API key - `; + + ` + (login_url ? 
`Get API key` : ""); settings.querySelector(".paper").appendChild(option); } - providers.forEach((provider) => { - if (!provider.parent) { - option = document.createElement("div"); - option.classList.add("field"); - option.innerHTML = ` - Enable ${provider.label} - - - `; - option.querySelector("input").addEventListener("change", (event) => load_provider_option(event.target, provider.name)); - settings.querySelector(".paper").appendChild(option); - provider_options[provider.name] = option; - } - }); - await load_provider_models(appStorage.getItem("provider")) register_settings_storage(); await load_settings_storage() @@ -2093,7 +2137,7 @@ function get_selected_model() { } } -async function api(ressource, args=null, files=null, message_id=null, scroll=true) { +async function api(ressource, args=null, files=null, message_id=null, scroll=true, finish_message=null) { if (window?.pywebview) { if (args !== null) { if (ressource == "conversation") { @@ -2138,16 +2182,43 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru headers: headers, body: body, }); - return read_response(response, message_id, args.provider || null, scroll); + // On Ratelimit + if (response.status == 429) { + // They are still pending requests? + for (let key in controller_storage) { + if (!controller_storage[key].signal.aborted) { + console.error(response); + await finish_message(); + return; + } + } + setTimeout(async () => { + response = await fetch(url, { + method: 'POST', + signal: controller_storage[message_id].signal, + headers: headers, + body: body, + }); + if (response.status != 200) { + console.error(response); + } + await read_response(response, message_id, args.provider || null, scroll, finish_message); + await finish_message(); + }, 20000) // Wait 20 secounds on rate limit + } else { + await read_response(response, message_id, args.provider || null, scroll, finish_message); + await finish_message(); + return; + } } response = await fetch(url, {headers: headers}); - if (response.status == 200) { - return await response.json(); + if (response.status != 200) { + console.error(response); } - console.error(response); + return await response.json(); } -async function read_response(response, message_id, provider, scroll) { +async function read_response(response, message_id, provider, scroll, finish_message) { const reader = response.body.pipeThrough(new TextDecoderStream()).getReader(); let buffer = "" while (true) { @@ -2160,7 +2231,7 @@ async function read_response(response, message_id, provider, scroll) { continue; } try { - add_message_chunk(JSON.parse(buffer + line), message_id, provider, scroll); + add_message_chunk(JSON.parse(buffer + line), message_id, provider, scroll, finish_message); buffer = ""; } catch { buffer += line @@ -2278,6 +2349,25 @@ function save_storage() { } } +function add_memory() { + let conversations = []; + for (let i = 0; i < appStorage.length; i++) { + if (appStorage.key(i).startsWith("conversation:")) { + let conversation = appStorage.getItem(appStorage.key(i)); + conversations.push(JSON.parse(conversation)); + } + } + conversations.sort((a, b) => (b.updated||0)-(a.updated||0)); + conversations.forEach(async (conversation)=>{ + let body = JSON.stringify(conversation); + response = await fetch("/backend-api/v2/memory", { + method: 'POST', + body: body, + headers: {"content-type": "application/json"} + }); + }); +} + const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; if (SpeechRecognition) { const mircoIcon = 
microLabel.querySelector("i"); diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py index a6775847..9c2d5261 100644 --- a/g4f/gui/server/backend_api.py +++ b/g4f/gui/server/backend_api.py @@ -71,7 +71,9 @@ class Backend_Api(Api): else: class Dummy(): def limit(self, value): - pass + def callback(v): + return v + return callback limiter = Dummy() @app.route('/backend-api/v2/models', methods=['GET']) -- cgit v1.2.3 From 42805ac7894d62996d68e4038c37900b26a14a9e Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sun, 26 Jan 2025 16:32:32 +0100 Subject: Add leagcy port to docker-compose files Update demo model list Disable upload cookies in demo Track usage in demo mode Add messages without asking the ai Add hint for browser usage in provider list Add qwen2 prompt template to HuggingFace provider Trim automatic messages in HuggingFaceAPI --- docker-compose-slim.yml | 3 +- docker-compose.yml | 1 + g4f/Provider/Cloudflare.py | 3 +- g4f/Provider/Copilot.py | 1 - g4f/Provider/Pi.py | 6 +- g4f/Provider/You.py | 2 + g4f/Provider/mini_max/HailuoAI.py | 1 + g4f/Provider/needs_auth/CopilotAccount.py | 1 + g4f/Provider/needs_auth/Gemini.py | 1 + g4f/Provider/needs_auth/HuggingChat.py | 6 +- g4f/Provider/needs_auth/HuggingFace.py | 12 ++ g4f/Provider/needs_auth/HuggingFaceAPI.py | 17 ++- g4f/Provider/needs_auth/MicrosoftDesigner.py | 1 + g4f/Provider/needs_auth/OpenaiChat.py | 1 + g4f/api/__init__.py | 2 +- g4f/cli.py | 4 +- g4f/gui/client/demo.html | 1 + g4f/gui/client/index.html | 14 +- g4f/gui/client/static/js/chat.v1.js | 184 +++++++++++++++++++-------- g4f/gui/server/api.py | 1 + g4f/gui/server/backend_api.py | 71 ++++++++--- g4f/models.py | 12 +- 22 files changed, 252 insertions(+), 93 deletions(-) diff --git a/docker-compose-slim.yml b/docker-compose-slim.yml index 7a02810f..39754f72 100644 --- a/docker-compose-slim.yml +++ b/docker-compose-slim.yml @@ -11,4 +11,5 @@ services: volumes: - .:/app ports: - - '8080:8080' \ No newline at end of file + - '8080:8080' + - '1337:8080' \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 1984bc0b..8d9dd722 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,6 +11,7 @@ services: - .:/app ports: - '8080:8080' + - '1337:8080' - '7900:7900' environment: - OLLAMA_HOST=host.docker.internal diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py index e6f0dab3..89ee4193 100644 --- a/g4f/Provider/Cloudflare.py +++ b/g4f/Provider/Cloudflare.py @@ -15,9 +15,10 @@ from ..errors import ResponseStatusError, ModelNotFoundError class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin): label = "Cloudflare AI" url = "https://playground.ai.cloudflare.com" + working = True + use_nodriver = True api_endpoint = "https://playground.ai.cloudflare.com/api/inference" models_url = "https://playground.ai.cloudflare.com/api/models" - working = True supports_stream = True supports_system_message = True supports_message_history = True diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py index bdbbcc4b..4a828c96 100644 --- a/g4f/Provider/Copilot.py +++ b/g4f/Provider/Copilot.py @@ -69,7 +69,6 @@ class Copilot(AbstractProvider, ProviderModelMixin): conversation: BaseConversation = None, return_conversation: bool = False, api_key: str = None, - web_search: bool = False, **kwargs ) -> CreateResult: if not has_curl_cffi: diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py index 6aabe7b1..8b07c0fe 100644 --- a/g4f/Provider/Pi.py +++ b/g4f/Provider/Pi.py @@ -7,9 
+7,11 @@ from .base_provider import AsyncGeneratorProvider, format_prompt from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies class Pi(AsyncGeneratorProvider): - url = "https://pi.ai/talk" - working = True + url = "https://pi.ai/talk" + working = True + use_nodriver = True supports_stream = True + use_nodriver = True default_model = "pi" models = [default_model] _headers: dict = None diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py index 71b92837..7e5df0fd 100644 --- a/g4f/Provider/You.py +++ b/g4f/Provider/You.py @@ -75,6 +75,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin): try: cookies = get_cookies(".you.com") except MissingRequirementsError: + pass + if not cookies or "afUserId" not in cookies: browser = await get_nodriver(proxy=proxy) try: page = await browser.get(cls.url) diff --git a/g4f/Provider/mini_max/HailuoAI.py b/g4f/Provider/mini_max/HailuoAI.py index c9b3e49c..d8c145a1 100644 --- a/g4f/Provider/mini_max/HailuoAI.py +++ b/g4f/Provider/mini_max/HailuoAI.py @@ -22,6 +22,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin): label = "Hailuo AI" url = "https://www.hailuo.ai" working = True + use_nodriver = True supports_stream = True default_model = "MiniMax" diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py index 6d7964d1..527432c3 100644 --- a/g4f/Provider/needs_auth/CopilotAccount.py +++ b/g4f/Provider/needs_auth/CopilotAccount.py @@ -4,6 +4,7 @@ from ..Copilot import Copilot class CopilotAccount(Copilot): needs_auth = True + use_nodriver = True parent = "Copilot" default_model = "Copilot" default_vision_model = default_model diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py index 0f563b32..e6d71255 100644 --- a/g4f/Provider/needs_auth/Gemini.py +++ b/g4f/Provider/needs_auth/Gemini.py @@ -58,6 +58,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin): needs_auth = True working = True + use_nodriver = True default_model = 'gemini' default_image_model = default_model diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py index bf5abb8f..e72612b9 100644 --- a/g4f/Provider/needs_auth/HuggingChat.py +++ b/g4f/Provider/needs_auth/HuggingChat.py @@ -32,6 +32,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin): url = "https://huggingface.co/chat" working = True + use_nodriver = True supports_stream = True needs_auth = True @@ -68,10 +69,11 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin): ### Image ### "flux-dev": "black-forest-labs/FLUX.1-dev", "flux-schnell": "black-forest-labs/FLUX.1-schnell", - ### API ### + ### Used in other providers ### "qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct", "gemma-2-27b": "google/gemma-2-27b-it", - "qvq-72b": "Qwen/QVQ-72B-Preview" + "qwen-2-72b": "Qwen/Qwen2-72B-Instruct", + "qvq-72b": "Qwen/QVQ-72B-Preview", } @classmethod diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py index b9ef5418..07e110b3 100644 --- a/g4f/Provider/needs_auth/HuggingFace.py +++ b/g4f/Provider/needs_auth/HuggingFace.py @@ -102,6 +102,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): ) as session: if payload is None: async with session.get(f"https://huggingface.co/api/models/{model}") as response: + if response.status == 404: + raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}") await raise_for_status(response) model_data = await response.json() model_type = None @@ -172,6 
+174,14 @@ def format_prompt_qwen(messages: Messages, do_continue: bool = False) -> str: return prompt[:-len("\n<|im_end|>\n")] return prompt +def format_prompt_qwen2(messages: Messages, do_continue: bool = False) -> str: + prompt = "".join([ + f"\u003C|{message['role'].capitalize()}|\u003E{message['content']}\u003C|end▁of▁sentence|\u003E" for message in messages + ]) + ("" if do_continue else "\u003C|Assistant|\u003E") + if do_continue: + return prompt[:-len("\u003C|Assistant|\u003E")] + return prompt + def format_prompt_llama(messages: Messages, do_continue: bool = False) -> str: prompt = "<|begin_of_text|>" + "".join([ f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}\n<|eot_id|>\n" for message in messages @@ -199,6 +209,8 @@ def get_inputs(messages: Messages, model_data: dict, model_type: str, do_continu inputs = format_prompt_custom(messages, eos_token, do_continue) elif eos_token == "<|im_end|>": inputs = format_prompt_qwen(messages, do_continue) + elif "content" in eos_token and eos_token["content"] == "\u003C|end▁of▁sentence|\u003E": + inputs = format_prompt_qwen2(messages, do_continue) elif eos_token == "<|eot_id|>": inputs = format_prompt_llama(messages, do_continue) else: diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py index dc783f4e..59c8f6ee 100644 --- a/g4f/Provider/needs_auth/HuggingFaceAPI.py +++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py @@ -3,6 +3,7 @@ from __future__ import annotations from .OpenaiTemplate import OpenaiTemplate from .HuggingChat import HuggingChat from ...providers.types import Messages +from ... import debug class HuggingFaceAPI(OpenaiTemplate): label = "HuggingFace (Inference API)" @@ -31,6 +32,7 @@ class HuggingFaceAPI(OpenaiTemplate): messages: Messages, api_base: str = None, max_tokens: int = 2048, + max_inputs_lenght: int = 10000, **kwargs ): if api_base is None: @@ -38,5 +40,18 @@ class HuggingFaceAPI(OpenaiTemplate): if model in cls.model_aliases: model_name = cls.model_aliases[model] api_base = f"https://api-inference.huggingface.co/models/{model_name}/v1" + start = calculate_lenght(messages) + if start > max_inputs_lenght: + if len(messages) > 6: + messages = messages[:3] + messages[-3:] + if calculate_lenght(messages) > max_inputs_lenght: + if len(messages) > 2: + messages = [m for m in messages if m["role"] == "system"] + messages[-1:] + if len(messages) > 1 and calculate_lenght(messages) > max_inputs_lenght: + messages = [messages[-1]] + debug.log(f"Messages trimmed from: {start} to: {calculate_lenght(messages)}") async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs): - yield chunk \ No newline at end of file + yield chunk + +def calculate_lenght(messages: Messages) -> int: + return sum([len(message["content"]) + 16 for message in messages]) \ No newline at end of file diff --git a/g4f/Provider/needs_auth/MicrosoftDesigner.py b/g4f/Provider/needs_auth/MicrosoftDesigner.py index 2a89e8be..c413c19b 100644 --- a/g4f/Provider/needs_auth/MicrosoftDesigner.py +++ b/g4f/Provider/needs_auth/MicrosoftDesigner.py @@ -21,6 +21,7 @@ class MicrosoftDesigner(AsyncGeneratorProvider, ProviderModelMixin): label = "Microsoft Designer" url = "https://designer.microsoft.com" working = True + use_nodriver = True needs_auth = True default_image_model = "dall-e-3" image_models = [default_image_model, "1024x1024", "1024x1792", "1792x1024"] diff --git a/g4f/Provider/needs_auth/OpenaiChat.py 
b/g4f/Provider/needs_auth/OpenaiChat.py index a8a4018e..29ada409 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -91,6 +91,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): label = "OpenAI ChatGPT" url = "https://chatgpt.com" working = True + use_nodriver = True supports_gpt_4 = True supports_message_history = True supports_system_message = True diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 896367e4..eb69c656 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -169,7 +169,7 @@ class Api: except HTTPException: user_g4f_api_key = None path = request.url.path - if path.startswith("/v1"): + if path.startswith("/v1") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'): if user_g4f_api_key is None: return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED) if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key): diff --git a/g4f/cli.py b/g4f/cli.py index af872489..5bb5c460 100644 --- a/g4f/cli.py +++ b/g4f/cli.py @@ -12,7 +12,7 @@ def get_api_parser(): api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)") api_parser.add_argument("--port", "-p", default=None, help="Change the port of the server.") api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.") - api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Add gui to the api.") + api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Start also the gui.") api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)") api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working], default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)") @@ -28,7 +28,7 @@ def get_api_parser(): api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers], default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)") api_parser.add_argument("--reload", action="store_true", help="Enable reloading.") - api_parser.add_argument("--demo", action="store_true", help="Enable demo modus.") + api_parser.add_argument("--demo", action="store_true", help="Enable demo mode.") return api_parser def main(): diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html index dd0c078f..ff5b993b 100644 --- a/g4f/gui/client/demo.html +++ b/g4f/gui/client/demo.html @@ -215,6 +215,7 @@ } localStorage.setItem("HuggingFace-api_key", accessToken); localStorage.setItem("HuggingFace-user", JSON.stringify(user)); + localStorage.setItem("user", user.name); location.href = "/chat/"; } input.addEventListener("input", () => check_access_token()); diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index 46b348d1..fe6eda18 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -142,6 +142,11 @@ +
[index.html markup lost in extraction. Recoverable content: the hunk at @@ -142,6 +142,11 @@ adds a "Track usage" settings toggle, and the hunk at @@ -184,6 +189,12 @@ extends the settings section near "Show log".]
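The rate-limit support introduced in the first commit ("Simply install flask_limiter") rests on an optional-dependency pattern: real limits when flask_limiter is importable, a no-op decorator otherwise. The follow-up commit corrects the fallback to return a pass-through callback, since returning None from limit() would break @limiter.limit(...). A self-contained sketch of the corrected pattern; the route and limit values mirror the patch, while the handler body is a placeholder:

```python
# Sketch of the optional flask_limiter integration from backend_api.py:
# if the package is installed, real limits apply; otherwise a no-op
# decorator keeps the decorated routes working unchanged.
from flask import Flask

try:
    from flask_limiter import Limiter
    from flask_limiter.util import get_remote_address
    has_flask_limiter = True
except ImportError:
    has_flask_limiter = False

app = Flask(__name__)

if has_flask_limiter:
    limiter = Limiter(
        get_remote_address,
        app=app,
        default_limits=["200 per day", "50 per hour"],
        storage_uri="memory://",
    )
else:
    class DummyLimiter:
        def limit(self, value):
            # Return a pass-through decorator so @limiter.limit(...)
            # still works without flask_limiter installed.
            def decorator(func):
                return func
            return decorator
    limiter = DummyLimiter()

@app.route("/backend-api/v2/conversation", methods=["POST"])
@limiter.limit("4 per minute")
def handle_conversation():
    return "ok"  # placeholder body; the real handler streams responses
```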