From ca2b609e82b67633815c3953c057019d99169bd8 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sun, 15 Dec 2024 22:27:10 +0000
Subject: Add support for multiple AI models and enhance agent functionality in
Blackbox provider (#2484)
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
- Add hidden_models set to exclude specific models
- Add evil alias for uncensored model handling
- Extend filtering for model-specific response tokens
- Add response buffering for streamed content (sketched below)
- Update model fetching with error handling
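A minimal sketch of the buffering-and-filtering idea above, assuming a plain iterator of text chunks (the function name and token list are illustrative, not the provider's actual API):

    # Buffer streamed chunks so model-specific tokens that may be split
    # across chunk boundaries can be removed before yielding (sketch only).
    def filter_stream(chunks, stop_tokens=("</s>", "<|im_end|>")):
        buffer = ""
        tail = max(len(t) for t in stop_tokens) - 1
        for chunk in chunks:
            buffer += chunk
            for token in stop_tokens:
                buffer = buffer.replace(token, "")
            if len(buffer) > tail:
                yield buffer[:-tail]       # safe to emit
                buffer = buffer[-tail:]    # keep a tail that could start a token
        if buffer:
            yield buffer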
* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
- Add caching system for validated values with file-based storage (sketched below)
- Rename 'flux' model to 'ImageGeneration' and update references
- Add temperature, top_p and max_tokens parameters to generator
- Simplify HTTP headers and remove redundant options
- Add model alias mapping for ImageGeneration
- Add file system utilities for cache management
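The file-based cache boils down to something like this (a hedged sketch: the cache path and key name are assumptions, not the exact implementation in Blackbox.py):

    import json
    from pathlib import Path

    CACHE_FILE = Path(".cache") / "blackbox.json"  # assumed location

    def load_validated() -> str | None:
        # Return the cached validated value, or None on any failure.
        if CACHE_FILE.exists():
            try:
                return json.loads(CACHE_FILE.read_text()).get("validated_value")
            except (OSError, json.JSONDecodeError):
                return None
        return None

    def save_validated(value: str) -> None:
        # Persist the validated value for reuse across sessions.
        CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
        CACHE_FILE.write_text(json.dumps({"validated_value": value}))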
* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
- Add file-based caching system for access tokens and sessions
- Add robust error handling with specific error messages
- Add automatic dialog continuation on resource limits
- Add HTML parsing with BeautifulSoup for token extraction (sketched below)
- Add debug logging for error tracking
- Add timeout configuration for API requests
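The BeautifulSoup-based token extraction is roughly this shape (the tag targeted here is hypothetical; the real page structure may differ):

    from bs4 import BeautifulSoup  # optional dependency

    def extract_token(html: str) -> str | None:
        # Parse the auth page and pull the access token out of a <pre>
        # block (the element choice is an assumption for illustration).
        soup = BeautifulSoup(html, "html.parser")
        element = soup.find("pre")
        return element.get_text(strip=True) if element else None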
* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
- Change default model from llama-3-405b to llama-3-70b
- Remove llama-3-405b from supported models list
- Remove llama-3.1-405b from model aliases
* feat(g4f/Provider/Blackbox2.py): add image generation support
- Add image model 'flux' with dedicated API endpoint
- Refactor generator to support both text and image outputs
- Extract headers into reusable static method
- Add type hints for AsyncGenerator return type
- Split generation logic into _generate_text and _generate_image methods
- Add ImageResponse handling for image generation results
BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult
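For callers this means the result must now be consumed with "async for"; a minimal usage sketch, assuming the provider is importable as g4f.Provider.Blackbox2 (model and message are placeholders):

    import asyncio
    from g4f.Provider import Blackbox2

    async def main():
        # create_async_generator is an async generator after this change,
        # so iterate it rather than awaiting a single result.
        async for item in Blackbox2.create_async_generator(
            model="flux",
            messages=[{"role": "user", "content": "a watercolor fox"}],
        ):
            print(item)

    asyncio.run(main())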
* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
- Update models list to include gpt-3.5-turbo
- Remove chatgpt-4o-latest from supported models
- Remove model_aliases mapping for gpt-4o
* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
- Add Accept-Language header for internationalization (sketched below)
- Maintain existing header configuration
- Improve request compatibility with language preferences
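The change itself is a single extra header entry, along these lines (the surrounding header values are illustrative):

    headers = {
        "Content-Type": "application/json",
        "Accept-Language": "en-US,en;q=0.9",  # new: language preference
    }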
* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
- Add ProviderModelMixin to class inheritance
- Import ProviderModelMixin from base_provider
- Move BaseConversation import to base_provider imports
* refactor(g4f/Provider/Liaobots.py): update model details and aliases
- Add version suffix to o1 model IDs
- Update model aliases for o1-preview and o1-mini
- Standardize version format across model definitions
* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
- Split generation logic into dedicated image/text methods
- Add additional text models including sur and claude
- Add width/height parameters for image generation
- Add model existence validation (sketched below)
- Add hasattr checks for model lists initialization
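The validation and hasattr bullets amount to a pattern like this (class and attribute names are assumptions for illustration):

    class PollinationsAISketch:
        default_model = "openai"

        @classmethod
        def get_model(cls, model: str) -> str:
            # Initialize class-level model lists lazily, guarded by hasattr.
            if not hasattr(cls, "models"):
                cls.models = [cls.default_model]  # would be fetched remotely
            if not hasattr(cls, "image_models"):
                cls.image_models = []
            # Validate that the requested model actually exists.
            if model not in cls.models and model not in cls.image_models:
                raise ValueError(f"Unknown model: {model}")
            return model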
* chore(gitignore): add provider cache directory
- Add g4f/Provider/.cache to gitignore patterns
* refactor(g4f/Provider/ReplicateHome.py): update model configuration
- Update default model to gemma-2b-it
- Add default_image_model configuration
- Remove llava-13b from supported models
- Simplify request headers
* feat(g4f/models.py): expand provider and model support
- Add new providers DarkAI and PollinationsAI
- Add new models for Mistral, Flux and image generation
- Update provider lists for existing models
- Add P1 and Evil models with experimental providers
BREAKING CHANGE: Remove llava-13b model support
* refactor(g4f/Provider/Airforce.py): Update type hint for split_message return
- Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import.
- Maintain overall functionality and structure of the 'Airforce' class.
- Ensure compatibility with type hinting standards in Python.
* feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency
- Introduce a check for the BeautifulSoup library and handle its absence gracefully.
- Raise a descriptive error if BeautifulSoup is not installed, prompting the user to install it (the pattern is sketched below).
- Remove direct import of BeautifulSoup to avoid import errors when the library is missing.
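The optional-dependency handling follows the usual try/except import pattern (the exact error class g4f raises is not shown in this patch, so a plain ImportError stands in):

    try:
        from bs4 import BeautifulSoup
        HAS_BS4 = True
    except ImportError:
        HAS_BS4 = False

    def require_bs4() -> None:
        # Fail early with an actionable message when bs4 is missing.
        if not HAS_BS4:
            raise ImportError(
                'Install "beautifulsoup4" to use this provider: '
                "pip install -U beautifulsoup4"
            )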
* fix: Update provider documentation and apply small fixes in providers
* Disabled the provider (RobocodersAPI)
* Fix: Conflicting file g4f/models.py
* Update g4f/models.py g4f/Provider/Airforce.py
* Update docs/providers-and-models.md g4f/models.py g4f/Provider/Airforce.py g4f/Provider/PollinationsAI.py
* Update docs/providers-and-models.md
* Update .gitignore
* Update g4f/models.py
* Update g4f/Provider/PollinationsAI.py
* feat(g4f/Provider/Blackbox.py): add support for additional AI models and agents
- Introduce new agent modes for Meta-Llama, Mistral, DeepSeek, DBRX, Qwen, and Nous-Hermes
- Update model aliases to include newly supported models
* Update (g4f/Provider/Blackbox.py)
* Update (g4f/Provider/Blackbox.py)
* feat(g4f/Provider/Blackbox2.py): add license key caching and validation
- Add cache file management for license key persistence
- Implement async license key extraction from JavaScript files
- Add license key validation to text generation requests
- Update type hints for async generators
- Add error handling for cache file operations
BREAKING CHANGE: Text generation now requires license key validation
---------
Co-authored-by: kqlio67 <>
---
g4f/Provider/Blackbox.py | 57 +++++++++++++++++-----------
g4f/Provider/Blackbox2.py | 94 ++++++++++++++++++++++++++++++++++++++++-------
g4f/gui/client/index.html | 1 +
g4f/models.py | 43 ++++++++++++++++++++--
4 files changed, 157 insertions(+), 38 deletions(-)
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 68ac7852..716ad5a4 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -35,7 +35,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
agentMode = {
- 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}
+ 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+ 'meta-llama/Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
+ 'mistralai/Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
+ 'deepseek-ai/deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
+ 'databricks/dbrx-instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro': {'mode': True, 'id': "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro", 'name': "Meta-Llama-3.1-405B-Instruct-Turbo"}, #
+ 'Qwen/QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
+ 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"}
}
trendingAgentMode = {
@@ -102,6 +109,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-4": "gpt-4o",
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
+ "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+ "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
+ "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
+ "dbrx-instruct": "databricks/dbrx-instruct",
+ "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro",
+ "qwq-32b": "Qwen/QwQ-32B-Preview",
+ "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
### image ###
"flux": "ImageGeneration",
@@ -198,9 +212,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
web_search: bool = False,
images: ImagesType = None,
- top_p: float = 0.9,
- temperature: float = 0.5,
- max_tokens: int = 1024,
+ top_p: float = None,
+ temperature: float = None,
+ max_tokens: int = None,
**kwargs
) -> AsyncResult:
message_id = cls.generate_id()
@@ -233,30 +247,31 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
}
data = {
- "messages": messages,
- "id": message_id,
- "previewToken": None,
- "userId": None,
- "codeModelMode": True,
"agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
- "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
- "isMicMode": False,
- "userSystemPrompt": None,
- "maxTokens": max_tokens,
- "playgroundTopP": top_p,
- "playgroundTemperature": temperature,
- "isChromeExt": False,
- "githubToken": None,
"clickedAnswer2": False,
"clickedAnswer3": False,
"clickedForceWebSearch": False,
- "visitFromDelta": False,
+ "codeModelMode": True,
+ "deepSearchMode": False,
+ "githubToken": None,
+ "id": message_id,
+ "imageGenerationMode": False,
+ "isChromeExt": False,
+ "isMicMode": False,
+ "maxTokens": max_tokens,
+ "messages": messages,
"mobileClient": False,
+ "playgroundTemperature": temperature,
+ "playgroundTopP": top_p,
+ "previewToken": None,
+ "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
+ "userId": None,
"userSelectedModel": model if model in cls.userSelectedModel else None,
- "webSearchMode": web_search,
+ "userSystemPrompt": None,
"validated": validated_value,
- "imageGenerationMode": False,
- "webSearchModePrompt": web_search
+ "visitFromDelta": False,
+ "webSearchModePrompt": False,
+ "webSearchMode": web_search
}
async with ClientSession(headers=headers) as session:
diff --git a/g4f/Provider/Blackbox2.py b/g4f/Provider/Blackbox2.py
index d09cbf12..b47021f2 100644
--- a/g4f/Provider/Blackbox2.py
+++ b/g4f/Provider/Blackbox2.py
@@ -2,12 +2,16 @@ from __future__ import annotations
import random
import asyncio
+import re
+import json
+from pathlib import Path
from aiohttp import ClientSession
from typing import AsyncGenerator
from ..typing import AsyncResult, Messages
from ..image import ImageResponse
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..cookies import get_cookies_dir
from .. import debug
@@ -28,23 +32,84 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ['flux']
models = [*chat_models, *image_models]
+ @classmethod
+ def _get_cache_file(cls) -> Path:
+ """Returns the path to the cache file."""
+ dir = Path(get_cookies_dir())
+ dir.mkdir(exist_ok=True)
+ return dir / 'blackbox2.json'
+
+ @classmethod
+ def _load_cached_license(cls) -> str | None:
+ """Loads the license key from the cache."""
+ cache_file = cls._get_cache_file()
+ if cache_file.exists():
+ try:
+ with open(cache_file, 'r') as f:
+ data = json.load(f)
+ return data.get('license_key')
+ except Exception as e:
+ debug.log(f"Error reading cache file: {e}")
+ return None
+
+ @classmethod
+ def _save_cached_license(cls, license_key: str):
+ """Saves the license key to the cache."""
+ cache_file = cls._get_cache_file()
+ try:
+ with open(cache_file, 'w') as f:
+ json.dump({'license_key': license_key}, f)
+ except Exception as e:
+ debug.log(f"Error writing to cache file: {e}")
+
+ @classmethod
+ async def _get_license_key(cls, session: ClientSession) -> str:
+ """Gets the license key from the cache or from JavaScript files."""
+ cached_license = cls._load_cached_license()
+ if cached_license:
+ return cached_license
+
+ try:
+ async with session.get(cls.url) as response:
+ html = await response.text()
+ js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', html)
+
+ license_pattern = re.compile(r'j="(\d{6}-\d{6}-\d{6}-\d{6}-\d{6})"')
+
+ for js_file in js_files:
+ js_url = f"{cls.url}/_next/{js_file}"
+ async with session.get(js_url) as js_response:
+ js_content = await js_response.text()
+ if license_match := license_pattern.search(js_content):
+ license_key = license_match.group(1)
+ cls._save_cached_license(license_key)
+ return license_key
+
+ raise ValueError("License key not found")
+ except Exception as e:
+ debug.log(f"Error getting license key: {str(e)}")
+ raise
+
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
+ prompt: str = None,
proxy: str = None,
max_retries: int = 3,
delay: int = 1,
+ max_tokens: int = None,
**kwargs
- ) -> AsyncResult:
+ ) -> AsyncGenerator[str, None]:
if not model:
model = cls.default_model
+
if model in cls.chat_models:
- async for result in cls._generate_text(model, messages, proxy, max_retries, delay):
+ async for result in cls._generate_text(model, messages, proxy, max_retries, delay, max_tokens):
yield result
elif model in cls.image_models:
- prompt = messages[-1]["content"] if prompt is None else prompt
+ prompt = messages[-1]["content"]
async for result in cls._generate_image(model, prompt, proxy):
yield result
else:
@@ -57,17 +122,21 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
messages: Messages,
proxy: str = None,
max_retries: int = 3,
- delay: int = 1
- ) -> AsyncGenerator:
+ delay: int = 1,
+ max_tokens: int = None,
+ ) -> AsyncGenerator[str, None]:
headers = cls._get_headers()
- api_endpoint = cls.api_endpoints[model]
-
- data = {
- "messages": messages,
- "max_tokens": None
- }
async with ClientSession(headers=headers) as session:
+ license_key = await cls._get_license_key(session)
+ api_endpoint = cls.api_endpoints[model]
+
+ data = {
+ "messages": messages,
+ "max_tokens": max_tokens,
+ "validated": license_key
+ }
+
for attempt in range(max_retries):
try:
async with session.post(api_endpoint, json=data, proxy=proxy) as response:
@@ -92,7 +161,7 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
prompt: str,
proxy: str = None
- ) -> AsyncGenerator:
+ ) -> AsyncGenerator[ImageResponse, None]:
headers = cls._get_headers()
api_endpoint = cls.api_endpoints[model]
@@ -116,7 +185,6 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
'accept-language': 'en-US,en;q=0.9',
'content-type': 'text/plain;charset=UTF-8',
'origin': 'https://www.blackbox.ai',
- 'priority': 'u=1, i',
'referer': 'https://www.blackbox.ai',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
}
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 9c066eea..7f8f7c02 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -285,6 +285,7 @@
+
diff --git a/g4f/models.py b/g4f/models.py
index cb7d9697..96fead58 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -76,8 +76,9 @@ default = Model(
DeepInfraChat,
Airforce,
Cloudflare,
- Mhystical,
PollinationsAI,
+ ChatGptEs,
+ ChatGpt,
])
)
@@ -195,10 +196,16 @@ llama_3_2_11b = Model(
llama_3_3_70b = Model(
name = "llama-3.3-70b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([HuggingChat, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([Blackbox, HuggingChat, HuggingFace, PerplexityLabs])
)
### Mistral ###
+mixtral_7b = Model(
+ name = "mixtral-7b",
+ base_provider = "Mistral",
+ best_provider = Blackbox
+)
+
mixtral_8x7b = Model(
name = "mixtral-8x7b",
base_provider = "Mistral",
@@ -221,7 +228,7 @@ mistral_large = Model(
hermes_2_dpo = Model(
name = "hermes-2-dpo",
base_provider = "NousResearch",
- best_provider = Airforce
+ best_provider = IterListProvider([Blackbox, Airforce])
)
hermes_2_pro = Model(
@@ -368,7 +375,7 @@ qwen_2_5_coder_32b = Model(
qwq_32b = Model(
name = 'qwq-32b',
base_provider = 'Qwen',
- best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([DeepInfraChat, Blackbox, HuggingChat, HuggingFace])
)
### Inflection ###
@@ -379,6 +386,12 @@ pi = Model(
)
### DeepSeek ###
+deepseek_chat = Model(
+ name = 'deepseek-chat',
+ base_provider = 'DeepSeek',
+ best_provider = Blackbox
+)
+
deepseek_coder = Model(
name = 'deepseek-coder',
base_provider = 'DeepSeek',
@@ -463,6 +476,20 @@ neural_7b = Model(
best_provider = Airforce
)
+### Databricks ###
+dbrx_instruct = Model(
+ name = 'dbrx-instruct',
+ base_provider = 'Databricks',
+ best_provider = Blackbox
+)
+
+### PollinationsAI ###
+p1 = Model(
+ name = 'p1',
+ base_provider = 'PollinationsAI',
+ best_provider = PollinationsAI
+)
+
### Uncensored AI ###
evil = Model(
name = 'evil',
@@ -654,6 +681,7 @@ class ModelUtils:
llama_3_3_70b.name: llama_3_3_70b,
### Mistral ###
+ mixtral_7b.name: mixtral_7b,
mixtral_8x7b.name: mixtral_8x7b,
mistral_nemo.name: mistral_nemo,
mistral_large.name: mistral_large,
@@ -728,6 +756,7 @@ class ModelUtils:
sonar_chat.name: sonar_chat,
### DeepSeek ###
+ deepseek_chat.name: deepseek_chat,
deepseek_coder.name: deepseek_coder,
### TheBloke ###
@@ -748,6 +777,12 @@ class ModelUtils:
### Inferless ###
neural_7b.name: neural_7b,
+ ### Databricks ###
+ dbrx_instruct.name: dbrx_instruct,
+
+ ### PollinationsAI ###
+ p1.name: p1,
+
### Uncensored AI ###
evil.name: evil,
--
cgit v1.2.3