From 831c722a9fd9907bd9066c1f3c3feb7704f93b09 Mon Sep 17 00:00:00 2001 From: kqlio67 <> Date: Sun, 12 Jan 2025 13:25:41 +0200 Subject: Update providers, documentation improvements and bug fixes --- g4f/Provider/Airforce.py | 37 +---- g4f/Provider/AutonomousAI.py | 90 ++++++++++++ g4f/Provider/BlackboxCreateAgent.py | 2 +- g4f/Provider/CablyAI.py | 71 ++++++++++ g4f/Provider/ChatGLM.py | 92 ++++++++++++ g4f/Provider/ChatGptEs.py | 34 ++--- g4f/Provider/ChatGptt.py | 70 +++++++++ g4f/Provider/DDG.py | 157 +++++++++------------ g4f/Provider/DarkAI.py | 53 ++++--- g4f/Provider/DeepInfraChat.py | 6 +- g4f/Provider/ImageLabs.py | 95 +++++++++++++ g4f/Provider/Jmuz.py | 10 +- g4f/Provider/PollinationsAI.py | 75 ++++++---- g4f/Provider/__init__.py | 5 + g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py | 13 +- .../hf_space/BlackForestLabsFlux1Schnell.py | 10 +- g4f/Provider/hf_space/Qwen_QVQ_72B.py | 7 +- g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py | 124 ++++++++++++++++ g4f/Provider/hf_space/StableDiffusion35Large.py | 6 +- g4f/Provider/hf_space/VoodoohopFlux1Schnell.py | 9 +- g4f/Provider/hf_space/__init__.py | 7 +- g4f/Provider/needs_auth/DeepSeek.py | 3 +- g4f/Provider/needs_auth/HuggingChat.py | 7 +- g4f/models.py | 124 +++++++++++----- 24 files changed, 853 insertions(+), 254 deletions(-) create mode 100644 g4f/Provider/AutonomousAI.py create mode 100644 g4f/Provider/CablyAI.py create mode 100644 g4f/Provider/ChatGLM.py create mode 100644 g4f/Provider/ChatGptt.py create mode 100644 g4f/Provider/ImageLabs.py create mode 100644 g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py (limited to 'g4f') diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py index d8091fdc..88b6f439 100644 --- a/g4f/Provider/Airforce.py +++ b/g4f/Provider/Airforce.py @@ -31,7 +31,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): api_endpoint_completions = "https://api.airforce/chat/completions" api_endpoint_imagine2 = "https://api.airforce/imagine2" - working = True + working = False supports_stream = True supports_system_message = True supports_message_history = True @@ -115,29 +115,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): """Get the actual model name from alias""" return cls.model_aliases.get(model, model or cls.default_model) - @classmethod - async def check_api_key(cls, api_key: str) -> bool: - """ - Always returns True to allow all models. - """ - if not api_key or api_key == "null": - return True # No restrictions if no key. 
- - headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36", - "Accept": "*/*", - } - try: - async with ClientSession(headers=headers) as session: - async with session.get(f"https://api.airforce/check?key={api_key}") as response: - if response.status == 200: - data = await response.json() - return data.get('info') in ['Sponsor key', 'Premium key'] - return False - except Exception as e: - print(f"Error checking API key: {str(e)}") - return False - @classmethod def _filter_content(cls, part_response: str) -> str: """ @@ -177,7 +154,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): cls, model: str, prompt: str, - api_key: str, size: str, seed: int, proxy: str = None @@ -188,7 +164,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate, br", "Content-Type": "application/json", - "Authorization": f"Bearer {api_key}", } params = {"model": model, "prompt": prompt, "size": size, "seed": seed} @@ -210,7 +185,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): temperature: float, top_p: float, stream: bool, - api_key: str, proxy: str = None ) -> AsyncResult: """ @@ -222,7 +196,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): "Accept-Language": "en-US,en;q=0.5", "Accept-Encoding": "gzip, deflate, br", "Content-Type": "application/json", - "Authorization": f"Bearer {api_key}", } final_messages = [] @@ -286,22 +259,18 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): temperature: float = 1, top_p: float = 1, stream: bool = True, - api_key: str = None, size: str = "1:1", seed: int = None, **kwargs ) -> AsyncResult: - if not await cls.check_api_key(api_key): - pass - model = cls.get_model(model) if model in cls.image_models: if prompt is None: prompt = messages[-1]['content'] if seed is None: seed = random.randint(0, 10000) - async for result in cls.generate_image(model, prompt, api_key, size, seed, proxy): + async for result in cls.generate_image(model, prompt, size, seed, proxy): yield result else: - async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, api_key, proxy): + async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, proxy): yield result diff --git a/g4f/Provider/AutonomousAI.py b/g4f/Provider/AutonomousAI.py new file mode 100644 index 00000000..dcd81c57 --- /dev/null +++ b/g4f/Provider/AutonomousAI.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import base64 +import json + +from ..typing import AsyncResult, Messages +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://www.autonomous.ai/anon/" + api_endpoints = { + "llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat", + "qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat", + "hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes", + "vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision", + "summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary" + } + + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = "llama" + models = [default_model, "qwen_coder", "hermes", "vision", "summary"] + + model_aliases 
= { + "llama-3.3-70b": default_model, + "qwen-2.5-coder-32b": "qwen_coder", + "hermes-3": "hermes", + "llama-3.2-90b": "vision", + "llama-3.3-70b": "summary" + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + stream: bool = False, + **kwargs + ) -> AsyncResult: + api_endpoint = cls.api_endpoints[model] + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'country-code': 'US', + 'origin': 'https://www.autonomous.ai', + 'referer': 'https://www.autonomous.ai/', + 'time-zone': 'America/New_York', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + + # Encode message + message = [{"role": "user", "content": prompt}] + message_json = json.dumps(message) + encoded_message = base64.b64encode(message_json.encode('utf-8')).decode('utf-8') + + data = { + "messages": encoded_message, + "threadId": model, + "stream": stream, + "aiAgent": model + } + + async with session.post(api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + async for chunk in response.content: + if chunk: + chunk_str = chunk.decode() + if chunk_str == "data: [DONE]": + continue + + try: + # Remove "data: " prefix and parse JSON + chunk_data = json.loads(chunk_str.replace("data: ", "")) + if "choices" in chunk_data and chunk_data["choices"]: + delta = chunk_data["choices"][0].get("delta", {}) + if "content" in delta: + yield delta["content"] + except json.JSONDecodeError: + continue diff --git a/g4f/Provider/BlackboxCreateAgent.py b/g4f/Provider/BlackboxCreateAgent.py index 1257e1ab..f0c9ee91 100644 --- a/g4f/Provider/BlackboxCreateAgent.py +++ b/g4f/Provider/BlackboxCreateAgent.py @@ -72,7 +72,7 @@ class BlackboxCreateAgent(AsyncGeneratorProvider, ProviderModelMixin): return cached_value js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js' - v_pattern = r'j\s*=\s*[\'"]([0-9a-fA-F-]{36})[\'"]' + v_pattern = r'L\s*=\s*[\'"]([0-9a-fA-F-]{36})[\'"]' def is_valid_context(text: str) -> bool: """Checks if the context is valid.""" diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py new file mode 100644 index 00000000..594a866d --- /dev/null +++ b/g4f/Provider/CablyAI.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import json + +from ..typing import AsyncResult, Messages +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class CablyAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://cablyai.com" + api_endpoint = "https://cablyai.com/v1/chat/completions" + + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = "Cably-80B" + models = [default_model] + + model_aliases = {"cably-80b": default_model} + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = False, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + headers = { + 'Accept': '*/*', + 'Accept-Language': 'en-US,en;q=0.9', + 'Content-Type': 'application/json', + 'Origin': 'https://cablyai.com', + 'Referer': 'https://cablyai.com/chat', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, 
like Gecko) Chrome/131.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "messages": messages, + "stream": stream + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + buffer = "" + async for chunk in response.content: + if chunk: + buffer += chunk.decode() + while "\n\n" in buffer: + chunk_data, buffer = buffer.split("\n\n", 1) + if chunk_data.startswith("data: "): + try: + json_data = json.loads(chunk_data[6:]) + if "choices" in json_data and json_data["choices"]: + content = json_data["choices"][0]["delta"].get("content", "") + if content: + yield content + except json.JSONDecodeError: + # Skip invalid JSON + pass + elif chunk_data.strip() == "data: [DONE]": + return diff --git a/g4f/Provider/ChatGLM.py b/g4f/Provider/ChatGLM.py new file mode 100644 index 00000000..7869f849 --- /dev/null +++ b/g4f/Provider/ChatGLM.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import uuid +import json + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://chatglm.cn" + api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream" + + working = True + supports_stream = True + supports_system_message = False + supports_message_history = True + + default_model = "all-tools-230b" + models = [default_model] + model_aliases = {"glm-4": default_model} + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + device_id = str(uuid.uuid4()).replace('-', '') + + headers = { + 'Accept-Language': 'en-US,en;q=0.9', + 'App-Name': 'chatglm', + 'Authorization': 'undefined', + 'Content-Type': 'application/json', + 'Origin': 'https://chatglm.cn', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', + 'X-App-Platform': 'pc', + 'X-App-Version': '0.0.1', + 'X-Device-Id': device_id, + 'Accept': 'text/event-stream' + } + + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + data = { + "assistant_id": "65940acff94777010aa6b796", + "conversation_id": "", + "meta_data": { + "if_plus_model": False, + "is_test": False, + "input_question_type": "xxxx", + "channel": "", + "draft_id": "", + "quote_log_id": "", + "platform": "pc" + }, + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": prompt + } + ] + } + ] + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + async for chunk in response.content: + if chunk: + decoded_chunk = chunk.decode('utf-8') + if decoded_chunk.startswith('data: '): + try: + json_data = json.loads(decoded_chunk[6:]) + parts = json_data.get('parts', []) + if parts: + content = parts[0].get('content', []) + if content: + text = content[0].get('text', '') + if text: + yield text + except json.JSONDecodeError: + pass diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py index d42baffd..9222131e 100644 --- a/g4f/Provider/ChatGptEs.py +++ b/g4f/Provider/ChatGptEs.py @@ -1,11 +1,13 @@ from __future__ import annotations -from aiohttp import ClientSession import os -import json import re +import 
json + +from aiohttp import ClientSession from ..typing import AsyncResult, Messages +from ..requests.raise_for_status import raise_for_status from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import format_prompt @@ -19,16 +21,15 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin): supports_message_history = True default_model = 'gpt-4o' - models = ['gpt-4', 'gpt-4o', 'gpt-4o-mini'] + models = ['gpt-4', default_model, 'gpt-4o-mini'] + + SYSTEM_PROMPT = "Your default language is English. Always respond in English unless the user's message is in a different language. If the user's message is not in English, respond in the language of the user's message. Maintain this language behavior throughout the conversation unless explicitly instructed otherwise. User input:" @classmethod def get_model(cls, model: str) -> str: if model in cls.models: return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model + return cls.model_aliases[model] @classmethod async def create_async_generator( @@ -52,29 +53,22 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin): initial_response = await session.get(cls.url) nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0] post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0] - - formatted_prompt = format_prompt(messages) - - conversation_history = [] - - for message in messages[:-1]: - if message['role'] == "user": - conversation_history.append(f"Human: {message['content']}") - else: - conversation_history.append(f"AI: {message['content']}") + + prompt = f"{cls.SYSTEM_PROMPT} {format_prompt(messages)}" payload = { - 'wpaicg_user_agree': '1', + 'check_51710191': '1', '_wpnonce': nonce_, 'post_id': post_id, 'url': cls.url, 'action': 'wpaicg_chat_shortcode_message', - 'message': formatted_prompt, + 'message': prompt, 'bot_id': '0', 'chatbot_identity': 'shortcode', 'wpaicg_chat_client_id': os.urandom(5).hex(), - 'wpaicg_chat_history': json.dumps(conversation_history) + 'wpaicg_chat_history': None } + print(payload['message']) async with session.post(cls.api_endpoint, headers=headers, data=payload) as response: response.raise_for_status() diff --git a/g4f/Provider/ChatGptt.py b/g4f/Provider/ChatGptt.py new file mode 100644 index 00000000..19c7287a --- /dev/null +++ b/g4f/Provider/ChatGptt.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import os +import re +import json + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class ChatGptt(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://chatgptt.me" + api_endpoint = "https://chatgptt.me/wp-admin/admin-ajax.php" + + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o' + models = ['gpt-4', default_model, 'gpt-4o-mini'] + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + return cls.model_aliases[model] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "authority": "chatgptt.me", + "accept": "application/json", + "origin": cls.url, + "referer": f"{cls.url}/chat", + "user-agent": 
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36", + } + + async with ClientSession(headers=headers) as session: + initial_response = await session.get(cls.url) + nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0] + post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0] + + payload = { + '_wpnonce': nonce_, + 'post_id': post_id, + 'url': cls.url, + 'action': 'wpaicg_chat_shortcode_message', + 'message': format_prompt(messages), + 'bot_id': '0', + 'chatbot_identity': 'shortcode', + 'wpaicg_chat_client_id': os.urandom(5).hex(), + 'wpaicg_chat_history': None + } + + async with session.post(cls.api_endpoint, headers=headers, data=payload) as response: + await raise_for_status(response) + result = await response.json() + yield result['data'] diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py index 19ba747a..22f32ef3 100644 --- a/g4f/Provider/DDG.py +++ b/g4f/Provider/DDG.py @@ -1,33 +1,27 @@ from __future__ import annotations -import asyncio -from aiohttp import ClientSession, ClientTimeout, ClientError, ClientResponseError +from aiohttp import ClientSession, ClientTimeout, ClientError import json +import asyncio +import random from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation -from ..providers.response import FinishReason -from .. import debug +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt -class Conversation(BaseConversation): - vqd: str = None - message_history: Messages = [] - cookies: dict = {} - - def __init__(self, model: str): - self.model = model class DDG(AsyncGeneratorProvider, ProviderModelMixin): label = "DuckDuckGo AI Chat" url = "https://duckduckgo.com/aichat" api_endpoint = "https://duckduckgo.com/duckchat/v1/chat" - + status_url = "https://duckduckgo.com/duckchat/v1/status" + working = True - needs_auth = False supports_stream = True supports_system_message = True supports_message_history = True - + default_model = "gpt-4o-mini" models = [default_model, "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"] @@ -39,100 +33,83 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin): } @classmethod - async def fetch_vqd(cls, session: ClientSession) -> str: + async def fetch_vqd(cls, session: ClientSession, max_retries: int = 3) -> str: """ - Fetches the required VQD token for the chat session. - - Args: - session (ClientSession): The active HTTP session. - - Returns: - str: The VQD token. - - Raises: - Exception: If the token cannot be fetched. + Fetches the required VQD token for the chat session with retries. 
""" - async with session.get("https://duckduckgo.com/duckchat/v1/status", headers={"x-vqd-accept": "1"}) as response: - if response.status == 200: - vqd = response.headers.get("x-vqd-4", "") - if not vqd: - raise Exception("Failed to fetch VQD token: Empty token.") - return vqd - else: - raise Exception(f"Failed to fetch VQD token: {response.status} {await response.text()}") + headers = { + "accept": "text/event-stream", + "content-type": "application/json", + "x-vqd-accept": "1", + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" + } + + for attempt in range(max_retries): + try: + async with session.get(cls.status_url, headers=headers) as response: + if response.status == 200: + vqd = response.headers.get("x-vqd-4", "") + if vqd: + return vqd + elif response.status == 429: + if attempt < max_retries - 1: + wait_time = random.uniform(1, 3) * (attempt + 1) + await asyncio.sleep(wait_time) + continue + response_text = await response.text() + raise Exception(f"Failed to fetch VQD token: {response.status} {response_text}") + except Exception as e: + if attempt < max_retries - 1: + wait_time = random.uniform(1, 3) * (attempt + 1) + await asyncio.sleep(wait_time) + else: + raise Exception(f"Failed to fetch VQD token after {max_retries} attempts: {str(e)}") + + raise Exception("Failed to fetch VQD token: Maximum retries exceeded") @classmethod async def create_async_generator( cls, model: str, messages: Messages, - conversation: Conversation = None, - return_conversation: bool = False, proxy: str = None, - headers: dict = { - "Content-Type": "application/json", - }, - cookies: dict = None, - max_retries: int = 0, + timeout: int = 30, **kwargs - ) -> AsyncResult: - if cookies is None and conversation is not None: - cookies = conversation.cookies - async with ClientSession(headers=headers, cookies=cookies, timeout=ClientTimeout(total=30)) as session: - # Fetch VQD token - if conversation is None: - conversation = Conversation(model) - conversation.cookies = session.cookie_jar - conversation.vqd = await cls.fetch_vqd(session) - - if conversation.vqd is not None: - headers["x-vqd-4"] = conversation.vqd - - if return_conversation: - yield conversation - - if len(messages) >= 2: - conversation.message_history.extend([messages[-2], messages[-1]]) - elif len(messages) == 1: - conversation.message_history.append(messages[-1]) - - payload = { - "model": conversation.model, - "messages": conversation.message_history, - } - + ) -> AsyncResult: + model = cls.get_model(model) + + async with ClientSession(timeout=ClientTimeout(total=timeout)) as session: try: - async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response: - conversation.vqd = response.headers.get("x-vqd-4") - response.raise_for_status() - reason = None + # Fetch VQD token with retries + vqd = await cls.fetch_vqd(session) + + headers = { + "accept": "text/event-stream", + "content-type": "application/json", + "x-vqd-4": vqd, + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36" + } + + data = { + "model": model, + "messages": [{"role": "user", "content": format_prompt(messages)}], + } + + async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response: + await raise_for_status(response) async for line in response.content: line = line.decode("utf-8").strip() if line.startswith("data:"): try: message = 
json.loads(line[5:].strip()) - if "message" in message and message["message"]: - yield message["message"] - reason = "max_tokens" - elif message.get("message") == '': - reason = "stop" + if "message" in message: + yield message["message"] except json.JSONDecodeError: continue - if reason is not None: - yield FinishReason(reason) - except ClientResponseError as e: - if e.code in (400, 429) and max_retries > 0: - debug.log(f"Retry: max_retries={max_retries}, wait={512 - max_retries * 48}: {e}") - await asyncio.sleep(512 - max_retries * 48) - is_started = False - async for chunk in cls.create_async_generator(model, messages, conversation, return_conversation, max_retries=max_retries-1, **kwargs): - if chunk: - yield chunk - is_started = True - if is_started: - return - raise e + except ClientError as e: raise Exception(f"HTTP ClientError occurred: {e}") except asyncio.TimeoutError: raise Exception("Request timed out.") + except Exception as e: + raise Exception(f"An error occurred: {str(e)}") diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py index 9d3d1d38..1562703a 100644 --- a/g4f/Provider/DarkAI.py +++ b/g4f/Provider/DarkAI.py @@ -1,7 +1,7 @@ from __future__ import annotations import json -from aiohttp import ClientSession +from aiohttp import ClientSession, ClientTimeout, StreamReader from ..typing import AsyncResult, Messages from ..requests.raise_for_status import raise_for_status @@ -16,8 +16,8 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): default_model = 'llama-3-70b' models = [ - 'gpt-4o', # Uncensored - 'gpt-3.5-turbo', # Uncensored + 'gpt-4o', + 'gpt-3.5-turbo', default_model, ] model_aliases = { @@ -39,7 +39,10 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): "content-type": "application/json", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" } - async with ClientSession(headers=headers) as session: + + timeout = ClientTimeout(total=600) # Increase timeout to 10 minutes + + async with ClientSession(headers=headers, timeout=timeout) as session: prompt = format_prompt(messages) data = { "query": prompt, @@ -47,23 +50,27 @@ class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): } async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: await raise_for_status(response) - first = True - async for line in response.content: - if line: - try: - line_str = line.decode().strip() - if line_str.startswith('data: '): - chunk_data = json.loads(line_str[6:]) - if chunk_data['event'] == 'text-chunk': - chunk = chunk_data['data']['text'] - if first: - chunk = chunk.lstrip() - if chunk: - first = False + reader: StreamReader = response.content + buffer = b"" + while True: + chunk = await reader.read(1024) # Read in smaller chunks + if not chunk: + break + buffer += chunk + while b"\n" in buffer: + line, buffer = buffer.split(b"\n", 1) + line = line.strip() + if line: + try: + line_str = line.decode() + if line_str.startswith('data: '): + chunk_data = json.loads(line_str[6:]) + if chunk_data['event'] == 'text-chunk': + chunk = chunk_data['data']['text'] yield chunk - elif chunk_data['event'] == 'stream-end': - return - except json.JSONDecodeError: - pass - except Exception: - pass \ No newline at end of file + elif chunk_data['event'] == 'stream-end': + return + except json.JSONDecodeError: + pass + except Exception: + pass diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py index e947383d..5db74012 100644 --- 
a/g4f/Provider/DeepInfraChat.py +++ b/g4f/Provider/DeepInfraChat.py @@ -10,18 +10,18 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): url = "https://deepinfra.com/chat" api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions" + working = True - needs_auth = False supports_stream = True supports_system_message = True supports_message_history = True - default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' + default_model = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' models = [ 'meta-llama/Llama-3.3-70B-Instruct', 'meta-llama/Meta-Llama-3.1-8B-Instruct', - 'meta-llama/Llama-3.3-70B-Instruct-Turbo', default_model, + 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 'Qwen/QwQ-32B-Preview', 'microsoft/WizardLM-2-8x22B', 'Qwen/Qwen2.5-72B-Instruct', diff --git a/g4f/Provider/ImageLabs.py b/g4f/Provider/ImageLabs.py new file mode 100644 index 00000000..c52860d0 --- /dev/null +++ b/g4f/Provider/ImageLabs.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import time +import asyncio + +from ..typing import AsyncResult, Messages +from ..image import ImageResponse +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin + + +class ImageLabs(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://editor.imagelabs.net" + api_endpoint = "https://editor.imagelabs.net/txt2img" + + working = True + supports_stream = False + supports_system_message = False + supports_message_history = False + + default_model = 'general' + default_image_model = default_model + image_models = [default_image_model] + models = image_models + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + # Image + prompt: str = None, + negative_prompt: str = "", + width: int = 1152, + height: int = 896, + **kwargs + ) -> AsyncResult: + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'origin': cls.url, + 'referer': f'{cls.url}/', + 'x-requested-with': 'XMLHttpRequest', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + prompt = messages[-1]["content"] + + # Generate image + payload = { + "prompt": prompt, + "seed": str(int(time.time())), + "subseed": str(int(time.time() * 1000)), + "attention": 0, + "width": width, + "height": height, + "tiling": False, + "negative_prompt": negative_prompt, + "reference_image": "", + "reference_image_type": None, + "reference_strength": 30 + } + + async with session.post(f'{cls.url}/txt2img', json=payload, proxy=proxy) as generate_response: + generate_data = await generate_response.json() + task_id = generate_data.get('task_id') + + # Poll for progress + while True: + async with session.post(f'{cls.url}/progress', json={"task_id": task_id}, proxy=proxy) as progress_response: + progress_data = await progress_response.json() + + # Check for completion or error states + if progress_data.get('status') == 'Done' or progress_data.get('final_image_url'): + # Yield ImageResponse with the final image URL + yield ImageResponse( + images=[progress_data.get('final_image_url')], + alt=prompt + ) + break + + # Check for queue or error states + if 'error' in progress_data.get('status', '').lower(): + raise Exception(f"Image generation error: {progress_data}") + + # Wait 
between polls + await asyncio.sleep(1) + + @classmethod + def get_model(cls, model: str) -> str: + return cls.default_model diff --git a/g4f/Provider/Jmuz.py b/g4f/Provider/Jmuz.py index e9d1820e..c713398b 100644 --- a/g4f/Provider/Jmuz.py +++ b/g4f/Provider/Jmuz.py @@ -5,6 +5,7 @@ from .needs_auth.OpenaiAPI import OpenaiAPI class Jmuz(OpenaiAPI): label = "Jmuz" + url = "https://jmuz.me" login_url = None api_base = "https://jmuz.me/gpt/api/v2" api_key = "prod" @@ -15,7 +16,12 @@ class Jmuz(OpenaiAPI): supports_system_message = False default_model = 'gpt-4o' - + model_aliases = { + "gemini": "gemini-exp", + "deepseek-chat": "deepseek-2.5", + "qwq-32b": "qwq-32b-preview" + } + @classmethod def get_models(cls): if not cls.models: @@ -54,4 +60,4 @@ class Jmuz(OpenaiAPI): stream=cls.supports_stream, headers=headers, **kwargs - ) \ No newline at end of file + ) diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py index d4b46ea0..f7328b30 100644 --- a/g4f/Provider/PollinationsAI.py +++ b/g4f/Provider/PollinationsAI.py @@ -11,13 +11,13 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from ..requests.raise_for_status import raise_for_status from ..typing import AsyncResult, Messages from ..image import ImageResponse +from .helper import format_prompt class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): label = "Pollinations AI" url = "https://pollinations.ai" working = True - needs_auth = True supports_stream = True supports_system_message = True supports_message_history = True @@ -26,8 +26,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): api_base = "https://text.pollinations.ai/openai" # API endpoints - text_api_endpoint = "https://text.pollinations.ai" - image_api_endpoint = "https://image.pollinations.ai" + text_api_endpoint = "https://text.pollinations.ai/" + image_api_endpoint = "https://image.pollinations.ai/" # Models configuration default_model = "openai" @@ -37,15 +37,19 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): models = [] additional_models_image = ["midjourney", "dall-e-3"] - additional_models_text = ["sur", "sur-mistral", "claude"] + additional_models_text = ["claude", "karma", "command-r", "llamalight", "mistral-large", "sur", "sur-mistral"] model_aliases = { - "gpt-4o": "openai", + "gpt-4o": default_model, + "qwen-2-72b": "qwen", + "qwen-2.5-coder-32b": "qwen-coder", + "llama-3.3-70b": "llama", "mistral-nemo": "mistral", - "llama-3.1-70b": "llama", + #"": "karma", "gpt-4": "searchgpt", "gpt-4": "claude", - "qwen-2.5-coder-32b": "qwen-coder", "claude-3.5-sonnet": "sur", + "deepseek-chat": "deepseek", + "llama-3.2-3b": "llamalight", } @classmethod @@ -91,12 +95,11 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): enhance: bool = False, safe: bool = False, # Text specific parameters - api_key: str = None, temperature: float = 0.5, presence_penalty: float = 0, top_p: float = 1, frequency_penalty: float = 0, - stream: bool = True, + stream: bool = False, **kwargs ) -> AsyncResult: model = cls.get_model(model) @@ -124,7 +127,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): model=model, messages=messages, proxy=proxy, - api_key=api_key, temperature=temperature, presence_penalty=presence_penalty, top_p=top_p, @@ -170,9 +172,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): params = {k: v for k, v in params.items() if v is not None} async with ClientSession(headers=headers) as session: - prompt = messages[-1]["content"] if 
prompt is None else prompt + prompt = quote(messages[-1]["content"] if prompt is None else prompt) param_string = "&".join(f"{k}={v}" for k, v in params.items()) - url = f"{cls.image_api_endpoint}/prompt/{quote(prompt)}?{param_string}" + url = f"{cls.image_api_endpoint}/prompt/{prompt}?{param_string}" async with session.head(url, proxy=proxy) as response: if response.status == 200: @@ -185,24 +187,23 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: str, - api_key: str, temperature: float, presence_penalty: float, top_p: float, frequency_penalty: float, - stream: bool - ) -> AsyncResult: - if api_key is None: - api_key = "dummy" # Default value if api_key is not provided - + stream: bool, + seed: Optional[int] = None + ) -> AsyncResult: headers = { "accept": "*/*", "accept-language": "en-US,en;q=0.9", - "authorization": f"Bearer {api_key}", "content-type": "application/json", "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" } - + + if seed is None: + seed = random.randint(0, 10000) + async with ClientSession(headers=headers) as session: data = { "messages": messages, @@ -212,7 +213,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): "top_p": top_p, "frequency_penalty": frequency_penalty, "jsonMode": False, - "stream": stream + "stream": stream, + "seed": seed, + "cache": False } async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response: @@ -220,9 +223,31 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): async for chunk in response.content: if chunk: decoded_chunk = chunk.decode() + + # Skip [DONE]. + if "data: [DONE]" in decoded_chunk: + continue + + # Processing plain text + if not decoded_chunk.startswith("data:"): + clean_text = decoded_chunk.strip() + if clean_text: + yield clean_text + continue + + # Processing JSON format try: - json_response = json.loads(decoded_chunk) - content = json_response['choices'][0]['message']['content'] - yield content + # Remove the prefix “data: “ and parse JSON + json_str = decoded_chunk.replace("data:", "").strip() + json_response = json.loads(json_str) + + if "choices" in json_response and json_response["choices"]: + if "delta" in json_response["choices"][0]: + content = json_response["choices"][0]["delta"].get("content") + if content: + # Remove escaped slashes before parentheses + clean_content = content.replace("\\(", "(").replace("\\)", ")") + yield clean_content except json.JSONDecodeError: - pass + # If JSON could not be parsed, skip + continue diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index c2a16a6d..b095fa73 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -14,10 +14,14 @@ from .hf_space import HuggingSpace from .Airforce import Airforce from .AmigoChat import AmigoChat +from .AutonomousAI import AutonomousAI from .Blackbox import Blackbox from .BlackboxCreateAgent import BlackboxCreateAgent +from .CablyAI import CablyAI +from .ChatGLM import ChatGLM from .ChatGpt import ChatGpt from .ChatGptEs import ChatGptEs +from .ChatGptt import ChatGptt from .ClaudeSon import ClaudeSon from .Cloudflare import Cloudflare from .Copilot import Copilot @@ -27,6 +31,7 @@ from .DeepInfraChat import DeepInfraChat from .Free2GPT import Free2GPT from .FreeGpt import FreeGpt from .GizAI import GizAI +from .ImageLabs import ImageLabs from .Jmuz import Jmuz from .Liaobots import Liaobots from .Mhystical import Mhystical diff --git 
a/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py index 74c9502d..a640dd1b 100644 --- a/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py +++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py @@ -14,13 +14,17 @@ class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin): working = True - default_model = 'flux-dev' - models = [default_model] - image_models = [default_model] + default_model = 'black-forest-labs-flux-1-dev' + default_image_model = default_model + image_models = [default_image_model] + models = image_models + model_aliases = {"flux-dev": default_model} @classmethod async def create_async_generator( - cls, model: str, messages: Messages, + cls, + model: str, + messages: Messages, prompt: str = None, api_key: str = None, proxy: str = None, @@ -32,6 +36,7 @@ class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin): randomize_seed: bool = True, **kwargs ) -> AsyncResult: + model = cls.get_model(model) headers = { "Content-Type": "application/json", "Accept": "application/json", diff --git a/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py index 2dd129d2..5b2f517f 100644 --- a/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py +++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py @@ -15,11 +15,11 @@ class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin): working = True - default_model = "flux-schnell" + default_model = "black-forest-labs-flux-1-schnell" default_image_model = default_model image_models = [default_image_model] - models = [*image_models] - model_aliases = {"flux-schnell-black-forest-labs": default_model} + models = image_models + model_aliases = {"flux-schnell": default_model} @classmethod async def create_async_generator( @@ -36,6 +36,8 @@ class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin): **kwargs ) -> AsyncResult: + model = cls.get_model(model) + width = max(32, width - (width % 8)) height = max(32, height - (height % 8)) @@ -74,4 +76,4 @@ class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin): json_data = json.loads(data) image_url = json_data[0]['url'] yield ImageResponse(images=[image_url], alt=prompt) - return \ No newline at end of file + return diff --git a/g4f/Provider/hf_space/Qwen_QVQ_72B.py b/g4f/Provider/hf_space/Qwen_QVQ_72B.py index 853b9770..a9d224ea 100644 --- a/g4f/Provider/hf_space/Qwen_QVQ_72B.py +++ b/g4f/Provider/hf_space/Qwen_QVQ_72B.py @@ -16,8 +16,9 @@ class Qwen_QVQ_72B(AsyncGeneratorProvider, ProviderModelMixin): working = True - default_model = "Qwen/QwQ-32B-Preview" + default_model = "qwen-qvq-72b-preview" models = [default_model] + model_aliases = {"qwq-32b": default_model} @classmethod async def create_async_generator( @@ -37,7 +38,7 @@ class Qwen_QVQ_72B(AsyncGeneratorProvider, ProviderModelMixin): data = FormData() data_bytes = to_bytes(images[0][0]) data.add_field("files", data_bytes, content_type=is_accepted_format(data_bytes), filename=images[0][1]) - url = f"https://qwen-qvq-72b-preview.hf.space/gradio_api/upload?upload_id={get_random_string()}" + url = f"{cls.url}/gradio_api/upload?upload_id={get_random_string()}" async with session.post(url, data=data, proxy=proxy) as response: await raise_for_status(response) image = await response.json() @@ -67,4 +68,4 @@ class Qwen_QVQ_72B(AsyncGeneratorProvider, ProviderModelMixin): yield data[0][text_position:] text_position = len(data[0]) else: - break \ No newline at 
end of file + break diff --git a/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py b/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py new file mode 100644 index 00000000..573c64b8 --- /dev/null +++ b/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py @@ -0,0 +1,124 @@ +from __future__ import annotations + +import asyncio +import aiohttp +import json +import uuid +import re + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + + +class Qwen_Qwen_2_72B_Instruct(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://qwen-qwen2-72b-instruct.hf.space" + api_endpoint = "https://qwen-qwen2-72b-instruct.hf.space/queue/join?" + + working = True + supports_stream = True + supports_system_message = True + supports_message_history = False + + default_model = "qwen-qwen2-72b-instruct" + models = [default_model] + model_aliases = {"qwen-2.5-72b": default_model} + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + def generate_session_hash(): + """Generate a unique session hash.""" + return str(uuid.uuid4()).replace('-', '')[:12] + + # Generate a unique session hash + session_hash = generate_session_hash() + + headers_join = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'origin': f'{cls.url}', + 'referer': f'{cls.url}/', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' + } + + # Prepare the prompt + prompt = format_prompt(messages) + + payload_join = { + "data": [prompt, [], ""], + "event_data": None, + "fn_index": 0, + "trigger_id": 11, + "session_hash": session_hash + } + + async with aiohttp.ClientSession() as session: + # Send join request + async with session.post(cls.api_endpoint, headers=headers_join, json=payload_join) as response: + event_id = (await response.json())['event_id'] + + # Prepare data stream request + url_data = f'{cls.url}/queue/data' + + headers_data = { + 'accept': 'text/event-stream', + 'accept-language': 'en-US,en;q=0.9', + 'referer': f'{cls.url}/', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' + } + + params_data = { + 'session_hash': session_hash + } + + # Send data stream request + async with session.get(url_data, headers=headers_data, params=params_data) as response: + full_response = "" + final_full_response = "" + async for line in response.content: + decoded_line = line.decode('utf-8') + if decoded_line.startswith('data: '): + try: + json_data = json.loads(decoded_line[6:]) + + # Look for generation stages + if json_data.get('msg') == 'process_generating': + if 'output' in json_data and 'data' in json_data['output']: + output_data = json_data['output']['data'] + if len(output_data) > 1 and len(output_data[1]) > 0: + for item in output_data[1]: + if isinstance(item, list) and len(item) > 1: + fragment = str(item[1]) + # Ignore [0, 1] type fragments and duplicates + if not re.match(r'^\[.*\]$', fragment) and fragment not in full_response: + full_response += fragment + yield fragment + + # Check for completion + if json_data.get('msg') == 'process_completed': + # Final check to ensure we get the complete response + if 'output' in json_data and 'data' in json_data['output']: + output_data = json_data['output']['data'] + if len(output_data) > 1 and len(output_data[1]) > 0: + 
final_full_response = output_data[1][0][1] + + # Clean up the final response + if final_full_response.startswith(full_response): + final_full_response = final_full_response[len(full_response):] + + # Yield the remaining part of the final response + if final_full_response: + yield final_full_response + break + + except json.JSONDecodeError: + print("Could not parse JSON:", decoded_line) + except Exception as e: + print(f"Error processing response: {e}") diff --git a/g4f/Provider/hf_space/StableDiffusion35Large.py b/g4f/Provider/hf_space/StableDiffusion35Large.py index 445ce7ea..e047181d 100644 --- a/g4f/Provider/hf_space/StableDiffusion35Large.py +++ b/g4f/Provider/hf_space/StableDiffusion35Large.py @@ -14,9 +14,11 @@ class StableDiffusion35Large(AsyncGeneratorProvider, ProviderModelMixin): working = True - default_model = 'stable-diffusion-3.5-large' - models = [default_model] + default_model = 'stabilityai-stable-diffusion-3-5-large' + default_image_model = default_model image_models = [default_model] + models = image_models + model_aliases = {"sd-3.5": default_model} @classmethod async def create_async_generator( diff --git a/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py index c1778d94..3679d323 100644 --- a/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py +++ b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py @@ -12,13 +12,14 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin class VoodoohopFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin): url = "https://voodoohop-flux-1-schnell.hf.space" api_endpoint = "https://voodoohop-flux-1-schnell.hf.space/call/infer" + working = True - default_model = "flux-schnell" + default_model = "voodoohop-flux-1-schnell" default_image_model = default_model image_models = [default_image_model] - models = [*image_models] - model_aliases = {"flux-schnell-voodoohop": default_model} + models = image_models + model_aliases = {"flux-schnell": default_model} @classmethod async def create_async_generator( @@ -72,4 +73,4 @@ class VoodoohopFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin): json_data = json.loads(data) image_url = json_data[0]['url'] yield ImageResponse(images=[image_url], alt=prompt) - return \ No newline at end of file + return diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py index 5ab7ad22..64be9d34 100644 --- a/g4f/Provider/hf_space/__init__.py +++ b/g4f/Provider/hf_space/__init__.py @@ -9,14 +9,17 @@ from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell from .StableDiffusion35Large import StableDiffusion35Large from .Qwen_QVQ_72B import Qwen_QVQ_72B +from .Qwen_Qwen_2_72B_Instruct import Qwen_Qwen_2_72B_Instruct class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin): url = "https://huggingface.co/spaces" parent = "HuggingFace" + working = True + default_model = BlackForestLabsFlux1Dev.default_model default_vision_model = Qwen_QVQ_72B.default_model - providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, Qwen_QVQ_72B] + providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, Qwen_QVQ_72B, Qwen_Qwen_2_72B_Instruct] @classmethod def get_parameters(cls, **kwargs) -> dict: @@ -63,4 +66,4 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin): raise e error = e if not is_started and error is not None: - raise error \ No newline at 
end of file + raise error diff --git a/g4f/Provider/needs_auth/DeepSeek.py b/g4f/Provider/needs_auth/DeepSeek.py index 88b187be..0f1b96d6 100644 --- a/g4f/Provider/needs_auth/DeepSeek.py +++ b/g4f/Provider/needs_auth/DeepSeek.py @@ -11,4 +11,5 @@ class DeepSeek(OpenaiAPI): needs_auth = True supports_stream = True supports_message_history = True - default_model = "deepseek-chat" \ No newline at end of file + default_model = "deepseek-chat" + models = [default_model] diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py index 9557a6f9..4ddfd597 100644 --- a/g4f/Provider/needs_auth/HuggingChat.py +++ b/g4f/Provider/needs_auth/HuggingChat.py @@ -36,7 +36,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): "black-forest-labs/FLUX.1-schnell", ] models = [ - default_model, + 'Qwen/Qwen2.5-Coder-32B-Instruct', 'meta-llama/Llama-3.3-70B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', 'Qwen/QwQ-32B-Preview', @@ -46,11 +46,10 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'NousResearch/Hermes-3-Llama-3.1-8B', 'mistralai/Mistral-Nemo-Instruct-2407', 'microsoft/Phi-3.5-mini-instruct', - *image_models - ] + ] + image_models model_aliases = { ### Chat ### - "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct", + "qwen-2.5-72b": "Qwen/Qwen2.5-Coder-32B-Instruct", "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct", "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "qwq-32b": "Qwen/QwQ-32B-Preview", diff --git a/g4f/models.py b/g4f/models.py index 6ceb9ed5..b688e41d 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -4,11 +4,16 @@ from dataclasses import dataclass from .Provider import IterListProvider, ProviderType from .Provider import ( + Airforce, + AutonomousAI, Blackbox, BlackboxCreateAgent, BingCreateImages, + CablyAI, + ChatGLM, ChatGpt, ChatGptEs, + ChatGptt, ClaudeSon, Cloudflare, Copilot, @@ -21,9 +26,10 @@ from .Provider import ( GeminiPro, HuggingChat, HuggingFace, + HuggingSpace, + Jmuz, Liaobots, Mhystical, - Airforce, MetaAI, MicrosoftDesigner, OpenaiChat, @@ -36,7 +42,6 @@ from .Provider import ( ReplicateHome, RubiksAI, TeachAnything, - HuggingSpace, ) @dataclass(unsafe_hash=True) @@ -70,13 +75,17 @@ default = Model( Pizzagpt, Blackbox, Copilot, - DeepInfraChat, - Airforce, - Cloudflare, + ChatGptEs, + ChatGptt, PollinationsAI, + Jmuz, + CablyAI, OpenaiChat, - Mhystical, + DarkAI, ClaudeSon, + DeepInfraChat, + Airforce, + Cloudflare, ]) ) @@ -96,20 +105,20 @@ gpt_35_turbo = Model( gpt_4 = Model( name = 'gpt-4', base_provider = 'OpenAI', - best_provider = IterListProvider([DDG, Blackbox, ChatGptEs, PollinationsAI, Copilot, OpenaiChat, Liaobots, Mhystical]) + best_provider = IterListProvider([DDG, Blackbox, Jmuz, ChatGptEs, ChatGptt, PollinationsAI, Copilot, OpenaiChat, Liaobots, Mhystical]) ) # gpt-4o gpt_4o = Model( name = 'gpt-4o', base_provider = 'OpenAI', - best_provider = IterListProvider([Blackbox, PollinationsAI, DarkAI, ChatGpt, Liaobots, OpenaiChat]) + best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, ChatGpt, Liaobots, OpenaiChat]) ) gpt_4o_mini = Model( name = 'gpt-4o-mini', base_provider = 'OpenAI', - best_provider = IterListProvider([DDG, Pizzagpt, ChatGpt, RubiksAI, Liaobots, OpenaiChat]) + best_provider = IterListProvider([DDG, Pizzagpt, ChatGptEs, ChatGptt, Jmuz, ChatGpt, RubiksAI, Liaobots, OpenaiChat]) ) # o1 @@ -162,13 +171,19 @@ llama_3_8b = Model( llama_3_1_8b = Model( name = "llama-3.1-8b", base_provider = "Meta Llama", - best_provider = 
IterListProvider([Blackbox, DeepInfraChat, Cloudflare, Airforce, PerplexityLabs]) + best_provider = IterListProvider([Blackbox, Jmuz, DeepInfraChat, Cloudflare, Airforce, PerplexityLabs]) ) llama_3_1_70b = Model( name = "llama-3.1-70b", base_provider = "Meta Llama", - best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, BlackboxCreateAgent, TeachAnything, DarkAI, Airforce, RubiksAI, PerplexityLabs]) + best_provider = IterListProvider([DDG, Jmuz, Blackbox, DeepInfraChat, BlackboxCreateAgent, TeachAnything, DarkAI, Airforce, RubiksAI, PerplexityLabs]) +) + +llama_3_1_405b = Model( + name = "llama-3.1-405b", + base_provider = "Meta Llama", + best_provider = IterListProvider([Blackbox, Jmuz]) ) # llama 3.2 @@ -178,17 +193,29 @@ llama_3_2_1b = Model( best_provider = Cloudflare ) +llama_3_2_3b = Model( + name = "llama-3.2-3b", + base_provider = "Meta Llama", + best_provider = PollinationsAI +) + llama_3_2_11b = Model( name = "llama-3.2-11b", base_provider = "Meta Llama", - best_provider = IterListProvider([HuggingChat, HuggingFace]) + best_provider = IterListProvider([Jmuz, HuggingChat, HuggingFace]) +) + +llama_3_2_90b = Model( + name = "llama-3.2-90b", + base_provider = "Meta Llama", + best_provider = IterListProvider([AutonomousAI, Jmuz]) ) # llama 3.3 llama_3_3_70b = Model( name = "llama-3.3-70b", base_provider = "Meta Llama", - best_provider = IterListProvider([Blackbox, HuggingChat, HuggingFace, PerplexityLabs]) + best_provider = IterListProvider([Blackbox, PollinationsAI, AutonomousAI, Jmuz, HuggingChat, HuggingFace, PerplexityLabs]) ) ### Mistral ### @@ -201,7 +228,7 @@ mixtral_7b = Model( mixtral_8x7b = Model( name = "mixtral-8x7b", base_provider = "Mistral", - best_provider = DDG + best_provider = IterListProvider([DDG, Jmuz]) ) mistral_nemo = Model( @@ -212,7 +239,8 @@ mistral_nemo = Model( mistral_large = Model( name = "mistral-large", - base_provider = "Mistral" + base_provider = "Mistral", + best_provider = PollinationsAI ) ### NousResearch ### @@ -231,7 +259,7 @@ hermes_2_pro = Model( hermes_3 = Model( name = "hermes-3", base_provider = "NousResearch", - best_provider = IterListProvider([HuggingChat, HuggingFace]) + best_provider = IterListProvider([AutonomousAI, HuggingChat, HuggingFace]) ) @@ -253,20 +281,20 @@ phi_3_5_mini = Model( gemini = Model( name = 'gemini', base_provider = 'Google', - best_provider = Gemini + best_provider = IterListProvider([Jmuz, Gemini]) ) # gemini-1.5 gemini_1_5_pro = Model( name = 'gemini-1.5-pro', base_provider = 'Google DeepMind', - best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots]) + best_provider = IterListProvider([Blackbox, Jmuz, Gemini, GeminiPro, Liaobots]) ) gemini_1_5_flash = Model( name = 'gemini-1.5-flash', base_provider = 'Google DeepMind', - best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots]) + best_provider = IterListProvider([Blackbox, Jmuz, Gemini, GeminiPro, Liaobots]) ) # gemini-2.0 @@ -291,10 +319,10 @@ gemma_2b = Model( ### Anthropic ### # claude 3 -claude_3_opus = Model( - name = 'claude-3-opus', +claude_3_haiku = Model( + name = 'claude-3-haiku', base_provider = 'Anthropic', - best_provider = Liaobots + best_provider = IterListProvider([DDG, Jmuz]) ) claude_3_sonnet = Model( @@ -303,17 +331,18 @@ claude_3_sonnet = Model( best_provider = Liaobots ) -claude_3_haiku = Model( - name = 'claude-3-haiku', +claude_3_opus = Model( + name = 'claude-3-opus', base_provider = 'Anthropic', - best_provider = DDG + best_provider = IterListProvider([Jmuz, Liaobots]) ) + # claude 3.5 
claude_3_5_sonnet = Model( name = 'claude-3.5-sonnet', base_provider = 'Anthropic', - best_provider = IterListProvider([Blackbox, PollinationsAI, ClaudeSon, Liaobots]) + best_provider = IterListProvider([Blackbox, PollinationsAI, Jmuz, ClaudeSon, Liaobots]) ) ### Reka AI ### @@ -346,6 +375,7 @@ command_r_plus = Model( command_r = Model( name = 'command-r', base_provider = 'CohereForAI', + best_provider = PollinationsAI ) ### Qwen ### @@ -360,26 +390,26 @@ qwen_1_5_7b = Model( qwen_2_72b = Model( name = 'qwen-2-72b', base_provider = 'Qwen', - best_provider = DeepInfraChat + best_provider = IterListProvider([PollinationsAI, DeepInfraChat]) ) # qwen 2.5 qwen_2_5_72b = Model( name = 'qwen-2.5-72b', base_provider = 'Qwen', - best_provider = IterListProvider([HuggingChat, HuggingFace]) + best_provider = IterListProvider([Jmuz, HuggingSpace]) ) qwen_2_5_coder_32b = Model( name = 'qwen-2.5-coder-32b', base_provider = 'Qwen', - best_provider = IterListProvider([DeepInfraChat, PollinationsAI, HuggingChat, HuggingFace]) + best_provider = IterListProvider([Jmuz, PollinationsAI, AutonomousAI, DeepInfraChat, HuggingChat]) ) qwq_32b = Model( name = 'qwq-32b', base_provider = 'Qwen', - best_provider = IterListProvider([DeepInfraChat, Blackbox, HuggingChat, HuggingFace]) + best_provider = IterListProvider([Blackbox, Jmuz, HuggingSpace, DeepInfraChat, HuggingChat]) ) ### Inflection ### @@ -393,7 +423,7 @@ pi = Model( deepseek_chat = Model( name = 'deepseek-chat', base_provider = 'DeepSeek', - best_provider = Blackbox + best_provider = IterListProvider([Blackbox, Jmuz, PollinationsAI]) ) deepseek_coder = Model( @@ -406,7 +436,7 @@ deepseek_coder = Model( wizardlm_2_8x22b = Model( name = 'wizardlm-2-8x22b', base_provider = 'WizardLM', - best_provider = DeepInfraChat + best_provider = IterListProvider([Jmuz, DeepInfraChat]) ) ### OpenChat ### @@ -494,6 +524,20 @@ p1 = Model( best_provider = PollinationsAI ) +### CablyAI ### +cably_80b = Model( + name = 'cably-80b', + base_provider = 'CablyAI', + best_provider = CablyAI +) + +### THUDM ### +glm_4 = Model( + name = 'glm-4', + base_provider = 'THUDM', + best_provider = ChatGLM +) + ### Uncensored AI ### evil = Model( name = 'evil', @@ -543,6 +587,12 @@ sd_3 = ImageModel( best_provider = ReplicateHome ) +sd_3_5 = ImageModel( + name = 'sd-3.5', + base_provider = 'Stability AI', + best_provider = HuggingSpace +) + ### Playground ### playground_v2_5 = ImageModel( name = 'playground-v2.5', @@ -681,10 +731,13 @@ class ModelUtils: # llama-3.1 llama_3_1_8b.name: llama_3_1_8b, llama_3_1_70b.name: llama_3_1_70b, + llama_3_1_405b.name: llama_3_1_405b, # llama-3.2 llama_3_2_1b.name: llama_3_2_1b, + llama_3_2_3b.name: llama_3_2_3b, llama_3_2_11b.name: llama_3_2_11b, + llama_3_2_90b.name: llama_3_2_90b, # llama-3.3 llama_3_3_70b.name: llama_3_3_70b, @@ -798,6 +851,12 @@ class ModelUtils: ### PollinationsAI ### p1.name: p1, + ### CablyAI ### + cably_80b.name: cably_80b, + + ### THUDM ### + glm_4.name: glm_4, + ### Uncensored AI ### evil.name: evil, @@ -814,6 +873,7 @@ class ModelUtils: ### Stability AI ### sdxl.name: sdxl, sd_3.name: sd_3, + sd_3_5.name: sd_3_5, ### Playground ### playground_v2_5.name: playground_v2_5, -- cgit v1.2.3
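
For reference, below is a minimal usage sketch for one of the providers this patch introduces. It is not part of the commit itself and assumes only the interface visible in the CablyAI hunk above: the class is re-exported from g4f.Provider, defines default_model = "Cably-80B", and exposes an async create_async_generator classmethod that yields text chunks. Treat it as an illustration of how such a provider is typically driven, not as a tested client.

import asyncio

from g4f.Provider import CablyAI  # exported via the __init__.py change in this patch


async def main() -> None:
    # Plain OpenAI-style message list, passed straight through by the provider above.
    messages = [{"role": "user", "content": "Say hello in one sentence."}]

    # create_async_generator is an async generator; it yields streamed text chunks.
    async for chunk in CablyAI.create_async_generator(
        model=CablyAI.default_model,  # "Cably-80B"; the alias "cably-80b" also resolves to it
        messages=messages,
        stream=True,
    ):
        print(chunk, end="", flush=True)


if __name__ == "__main__":
    asyncio.run(main())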