From ca2b609e82b67633815c3953c057019d99169bd8 Mon Sep 17 00:00:00 2001 From: kqlio67 <166700875+kqlio67@users.noreply.github.com> Date: Sun, 15 Dec 2024 22:27:10 +0000 Subject: Add support for multiple AI models and enhance agent functionality in Blackbox provider (#2484) * refactor(g4f/Provider/Airforce.py): improve model handling and filtering - Add hidden_models set to exclude specific models - Add evil alias for uncensored model handling - Extend filtering for model-specific response tokens - Add response buffering for streamed content - Update model fetching with error handling * refactor(g4f/Provider/Blackbox.py): improve caching and model handling - Add caching system for validated values with file-based storage - Rename 'flux' model to 'ImageGeneration' and update references - Add temperature, top_p and max_tokens parameters to generator - Simplify HTTP headers and remove redundant options - Add model alias mapping for ImageGeneration - Add file system utilities for cache management * feat(g4f/Provider/RobocodersAPI.py): add caching and error handling - Add file-based caching system for access tokens and sessions - Add robust error handling with specific error messages - Add automatic dialog continuation on resource limits - Add HTML parsing with BeautifulSoup for token extraction - Add debug logging for error tracking - Add timeout configuration for API requests * refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases - Change default model from llama-3-405b to llama-3-70b - Remove llama-3-405b from supported models list - Remove llama-3.1-405b from model aliases * feat(g4f/Provider/Blackbox2.py): add image generation support - Add image model 'flux' with dedicated API endpoint - Refactor generator to support both text and image outputs - Extract headers into reusable static method - Add type hints for AsyncGenerator return type - Split generation logic into _generate_text and _generate_image methods - Add ImageResponse handling for 
image generation results BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult * refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration - Update models list to include gpt-3.5-turbo - Remove chatgpt-4o-latest from supported models - Remove model_aliases mapping for gpt-4o * feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support - Add Accept-Language header for internationalization - Maintain existing header configuration - Improve request compatibility with language preferences * refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance - Add ProviderModelMixin to class inheritance - Import ProviderModelMixin from base_provider - Move BaseConversation import to base_provider imports * refactor(g4f/Provider/Liaobots.py): update model details and aliases - Add version suffix to o1 model IDs - Update model aliases for o1-preview and o1-mini - Standardize version format across model definitions * refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation - Split generation logic into dedicated image/text methods - Add additional text models including sur and claude - Add width/height parameters for image generation - Add model existence validation - Add hasattr checks for model lists initialization * chore(gitignore): add provider cache directory - Add g4f/Provider/.cache to gitignore patterns * refactor(g4f/Provider/ReplicateHome.py): update model configuration - Update default model to gemma-2b-it - Add default_image_model configuration - Remove llava-13b from supported models - Simplify request headers * feat(g4f/models.py): expand provider and model support - Add new providers DarkAI and PollinationsAI - Add new models for Mistral, Flux and image generation - Update provider lists for existing models - Add P1 and Evil models with experimental providers BREAKING CHANGE: Remove llava-13b model support * refactor(Airforce): Update type hint for 
split_message return - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import. - Maintain overall functionality and structure of the class. - Ensure compatibility with type hinting standards in Python. * refactor(g4f/Provider/Airforce.py): Update type hint for split_message return - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import. - Maintain overall functionality and structure of the 'Airforce' class. - Ensure compatibility with type hinting standards in Python. * feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency - Introduce a check for the BeautifulSoup library and handle its absence gracefully. - Raise an error if BeautifulSoup is not installed, prompting the user to install it. - Remove direct import of BeautifulSoup to avoid import errors when the library is missing. * fix: Updating provider documentation and small fixes in providers * Disabled the provider (RobocodersAPI) * Fix: Conflicting file g4f/models.py * Update g4f/models.py g4f/Provider/Airforce.py * Update docs/providers-and-models.md g4f/models.py g4f/Provider/Airforce.py g4f/Provider/PollinationsAI.py * Update docs/providers-and-models.md * Update .gitignore * Update g4f/models.py * Update g4f/Provider/PollinationsAI.py * feat(g4f/Provider/Blackbox.py): add support for additional AI models and agents - Introduce new agent modes for Meta-Llama, Mistral, DeepSeek, DBRX, Qwen, and Nous-Hermes - Update model aliases to include newly supported models * Update (g4f/Provider/Blackbox.py) * Update (g4f/Provider/Blackbox.py) * feat(g4f/Provider/Blackbox2.py): add license key caching and validation - Add cache file management for license key persistence - Implement async license key extraction from JavaScript files - Add license key validation to text generation requests - Update type hints for async generators - Add error handling for cache file operations Breaking changes: - Text generation now requires license key validation 
--------- Co-authored-by: kqlio67 <> --- g4f/Provider/Blackbox2.py | 94 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 81 insertions(+), 13 deletions(-) (limited to 'g4f/Provider/Blackbox2.py') diff --git a/g4f/Provider/Blackbox2.py b/g4f/Provider/Blackbox2.py index d09cbf12..b47021f2 100644 --- a/g4f/Provider/Blackbox2.py +++ b/g4f/Provider/Blackbox2.py @@ -2,12 +2,16 @@ from __future__ import annotations import random import asyncio +import re +import json +from pathlib import Path from aiohttp import ClientSession from typing import AsyncGenerator from ..typing import AsyncResult, Messages from ..image import ImageResponse from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..cookies import get_cookies_dir from .. import debug @@ -28,23 +32,84 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin): image_models = ['flux'] models = [*chat_models, *image_models] + @classmethod + def _get_cache_file(cls) -> Path: + """Returns the path to the cache file.""" + dir = Path(get_cookies_dir()) + dir.mkdir(exist_ok=True) + return dir / 'blackbox2.json' + + @classmethod + def _load_cached_license(cls) -> str | None: + """Loads the license key from the cache.""" + cache_file = cls._get_cache_file() + if cache_file.exists(): + try: + with open(cache_file, 'r') as f: + data = json.load(f) + return data.get('license_key') + except Exception as e: + debug.log(f"Error reading cache file: {e}") + return None + + @classmethod + def _save_cached_license(cls, license_key: str): + """Saves the license key to the cache.""" + cache_file = cls._get_cache_file() + try: + with open(cache_file, 'w') as f: + json.dump({'license_key': license_key}, f) + except Exception as e: + debug.log(f"Error writing to cache file: {e}") + + @classmethod + async def _get_license_key(cls, session: ClientSession) -> str: + """Gets the license key from the cache or from JavaScript files.""" + cached_license = cls._load_cached_license() + if cached_license: + 
return cached_license + + try: + async with session.get(cls.url) as response: + html = await response.text() + js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', html) + + license_pattern = re.compile(r'j="(\d{6}-\d{6}-\d{6}-\d{6}-\d{6})"') + + for js_file in js_files: + js_url = f"{cls.url}/_next/{js_file}" + async with session.get(js_url) as js_response: + js_content = await js_response.text() + if license_match := license_pattern.search(js_content): + license_key = license_match.group(1) + cls._save_cached_license(license_key) + return license_key + + raise ValueError("License key not found") + except Exception as e: + debug.log(f"Error getting license key: {str(e)}") + raise + @classmethod async def create_async_generator( cls, model: str, messages: Messages, + prompt: str = None, proxy: str = None, max_retries: int = 3, delay: int = 1, + max_tokens: int = None, **kwargs - ) -> AsyncResult: + ) -> AsyncGenerator[str, None]: if not model: model = cls.default_model + if model in cls.chat_models: - async for result in cls._generate_text(model, messages, proxy, max_retries, delay): + async for result in cls._generate_text(model, messages, proxy, max_retries, delay, max_tokens): yield result elif model in cls.image_models: - prompt = messages[-1]["content"] if prompt is None else prompt + prompt = messages[-1]["content"] async for result in cls._generate_image(model, prompt, proxy): yield result else: @@ -57,17 +122,21 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin): messages: Messages, proxy: str = None, max_retries: int = 3, - delay: int = 1 - ) -> AsyncGenerator: + delay: int = 1, + max_tokens: int = None, + ) -> AsyncGenerator[str, None]: headers = cls._get_headers() - api_endpoint = cls.api_endpoints[model] - - data = { - "messages": messages, - "max_tokens": None - } async with ClientSession(headers=headers) as session: + license_key = await cls._get_license_key(session) + api_endpoint = cls.api_endpoints[model] + + data = { + 
"messages": messages, + "max_tokens": max_tokens, + "validated": license_key + } + for attempt in range(max_retries): try: async with session.post(api_endpoint, json=data, proxy=proxy) as response: @@ -92,7 +161,7 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin): model: str, prompt: str, proxy: str = None - ) -> AsyncGenerator: + ) -> AsyncGenerator[ImageResponse, None]: headers = cls._get_headers() api_endpoint = cls.api_endpoints[model] @@ -116,7 +185,6 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin): 'accept-language': 'en-US,en;q=0.9', 'content-type': 'text/plain;charset=UTF-8', 'origin': 'https://www.blackbox.ai', - 'priority': 'u=1, i', 'referer': 'https://www.blackbox.ai', 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' } -- cgit v1.2.3