From 64e6e47c305014fcc18bc20e9fd6e3bf2964cdba Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sun, 12 Jan 2025 15:15:15 +0100
Subject: Add conversation and continue support in DDG

---
 g4f/Provider/AutonomousAI.py                      | 15 ++--
 g4f/Provider/CablyAI.py                           | 58 +++-----------
 g4f/Provider/ChatGLM.py                           | 13 ++--
 g4f/Provider/ChatGptEs.py                         | 14 +---
 g4f/Provider/ChatGptt.py                          | 11 +--
 g4f/Provider/DDG.py                               | 94 ++++++++++++++---------
 g4f/Provider/ImageLabs.py                         |  2 +-
 g4f/Provider/Pizzagpt.py                          |  2 +
 g4f/Provider/PollinationsAI.py                    |  7 +-
 g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py | 19 +++--
 g4f/Provider/needs_auth/DeepSeek.py               |  2 +-
 g4f/Provider/needs_auth/OpenaiAPI.py              |  1 +
 12 files changed, 104 insertions(+), 134 deletions(-)

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/AutonomousAI.py b/g4f/Provider/AutonomousAI.py
index dcd81c57..990fb48e 100644
--- a/g4f/Provider/AutonomousAI.py
+++ b/g4f/Provider/AutonomousAI.py
@@ -6,8 +6,8 @@ import json
 
 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
+from ..providers.response import FinishReason
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
 
 class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.autonomous.ai/anon/"
@@ -32,7 +32,6 @@ class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
         "qwen-2.5-coder-32b": "qwen_coder",
         "hermes-3": "hermes",
         "llama-3.2-90b": "vision",
-        "llama-3.3-70b": "summary"
     }
 
     @classmethod
@@ -57,12 +56,8 @@ class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
         }
 
         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-
-            # Encode message
-            message = [{"role": "user", "content": prompt}]
-            message_json = json.dumps(message)
-            encoded_message = base64.b64encode(message_json.encode('utf-8')).decode('utf-8')
+            message_json = json.dumps(messages)
+            encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")
 
             data = {
                 "messages": encoded_message,
@@ -84,7 +79,9 @@ class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
                         chunk_data = json.loads(chunk_str.replace("data: ", ""))
                         if "choices" in chunk_data and chunk_data["choices"]:
                             delta = chunk_data["choices"][0].get("delta", {})
-                            if "content" in delta:
+                            if "content" in delta and delta["content"]:
                                 yield delta["content"]
+                        if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
+                            yield FinishReason(chunk_data["finish_reason"])
                     except json.JSONDecodeError:
                         continue

diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py
index 594a866d..7bbbb430 100644
--- a/g4f/Provider/CablyAI.py
+++ b/g4f/Provider/CablyAI.py
@@ -1,38 +1,26 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession
-import json
-
 from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
+from .needs_auth import OpenaiAPI
 
-class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
+class CablyAI(OpenaiAPI):
     url = "https://cablyai.com"
-    api_endpoint = "https://cablyai.com/v1/chat/completions"
-
+    login_url = None
+    needs_auth = False
+    api_base = "https://cablyai.com/v1"
     working = True
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
 
     default_model = "Cably-80B"
     models = [default_model]
-
     model_aliases = {"cably-80b": default_model}
 
     @classmethod
-    async def create_async_generator(
+    def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        stream: bool = False,
-        proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        model = cls.get_model(model)
         headers = {
             'Accept': '*/*',
             'Accept-Language': 'en-US,en;q=0.9',
@@ -41,31 +29,9 @@ class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
             'Referer': 'https://cablyai.com/chat',
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
         }
-
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "model": model,
-                "messages": messages,
-                "stream": stream
-            }
-
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                await raise_for_status(response)
-                buffer = ""
-                async for chunk in response.content:
-                    if chunk:
-                        buffer += chunk.decode()
-                        while "\n\n" in buffer:
-                            chunk_data, buffer = buffer.split("\n\n", 1)
-                            if chunk_data.startswith("data: "):
-                                try:
-                                    json_data = json.loads(chunk_data[6:])
-                                    if "choices" in json_data and json_data["choices"]:
-                                        content = json_data["choices"][0]["delta"].get("content", "")
-                                        if content:
-                                            yield content
-                                except json.JSONDecodeError:
-                                    # Skip invalid JSON
-                                    pass
-                            elif chunk_data.strip() == "data: [DONE]":
-                                return
+        return super().create_async_generator(
+            model=model,
+            messages=messages,
+            headers=headers,
+            **kwargs
+        )
\ No newline at end of file

diff --git a/g4f/Provider/ChatGLM.py b/g4f/Provider/ChatGLM.py
index 7869f849..f0b2da98 100644
--- a/g4f/Provider/ChatGLM.py
+++ b/g4f/Provider/ChatGLM.py
@@ -8,7 +8,6 @@ from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
 
 class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatglm.cn"
@@ -17,7 +16,7 @@ class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_stream = True
     supports_system_message = False
-    supports_message_history = True
+    supports_message_history = False
 
     default_model = "all-tools-230b"
     models = [default_model]
@@ -47,7 +46,6 @@ class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
         }
 
         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
             data = {
                 "assistant_id": "65940acff94777010aa6b796",
                 "conversation_id": "",
@@ -62,17 +60,19 @@ class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
                 },
                 "messages": [
                     {
-                        "role": "user",
+                        "role": message["role"],
                         "content": [
                             {
                                 "type": "text",
-                                "text": prompt
+                                "text": message["content"]
                             }
                         ]
                     }
+                    for message in messages
                 ]
             }
 
+            yield_text = 0
             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 await raise_for_status(response)
                 async for chunk in response.content:
@@ -85,8 +85,9 @@ class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
                             if parts:
                                 content = parts[0].get('content', [])
                                 if content:
-                                    text = content[0].get('text', '')
+                                    text = content[0].get('text', '')[yield_text:]
                                     if text:
                                         yield text
+                                        yield_text += len(text)
                         except json.JSONDecodeError:
                             pass

diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
index 9222131e..f5e2ece6 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/ChatGptEs.py
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import os
 import re
-import json
 
 from aiohttp import ClientSession
 
@@ -17,20 +16,14 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
 
     working = True
     supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
+    supports_system_message = False
+    supports_message_history = False
 
     default_model = 'gpt-4o'
     models = ['gpt-4', default_model, 'gpt-4o-mini']
 
     SYSTEM_PROMPT = "Your default language is English. Always respond in English unless the user's message is in a different language. If the user's message is not in English, respond in the language of the user's message. Maintain this language behavior throughout the conversation unless explicitly instructed otherwise. User input:"
 
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        return cls.model_aliases[model]
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -68,10 +61,9 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
                 'wpaicg_chat_client_id': os.urandom(5).hex(),
                 'wpaicg_chat_history': None
             }
-            print(payload['message'])
 
             async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 result = await response.json()
                 if "Du musst das Kästchen anklicken!" in result['data']:
                     raise ValueError(result['data'])

diff --git a/g4f/Provider/ChatGptt.py b/g4f/Provider/ChatGptt.py
index 19c7287a..f61b95fe 100644
--- a/g4f/Provider/ChatGptt.py
+++ b/g4f/Provider/ChatGptt.py
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import os
 import re
-import json
 
 from aiohttp import ClientSession
 
@@ -23,12 +22,6 @@ class ChatGptt(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = 'gpt-4o'
     models = ['gpt-4', default_model, 'gpt-4o-mini']
 
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        return cls.model_aliases[model]
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -64,7 +57,7 @@ class ChatGptt(AsyncGeneratorProvider, ProviderModelMixin):
             'wpaicg_chat_history': None
         }
 
-        async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
+        async with session.post(cls.api_endpoint, headers=headers, data=payload, proxy=proxy) as response:
             await raise_for_status(response)
             result = await response.json()
-            yield result['data']
+            yield result['data']
\ No newline at end of file

diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 22f32ef3..81d7e3fc 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -1,15 +1,23 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession, ClientTimeout, ClientError
+from aiohttp import ClientSession, ClientTimeout
 import json
 import asyncio
 import random
 
-from ..typing import AsyncResult, Messages
+from ..typing import AsyncResult, Messages, Cookies
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
+from ..providers.response import FinishReason, JsonConversation
 
+class Conversation(JsonConversation):
+    vqd: str = None
+    message_history: Messages = []
+    cookies: dict = {}
+
+    def __init__(self, model: str):
+        self.model = model
 
 class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     label = "DuckDuckGo AI Chat"
@@ -74,42 +82,54 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         proxy: str = None,
         timeout: int = 30,
+        cookies: Cookies = None,
+        conversation: Conversation = None,
+        return_conversation: bool = False,
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
-
-        async with ClientSession(timeout=ClientTimeout(total=timeout)) as session:
-            try:
-                # Fetch VQD token with retries
-                vqd = await cls.fetch_vqd(session)
-
-                headers = {
-                    "accept": "text/event-stream",
-                    "content-type": "application/json",
-                    "x-vqd-4": vqd,
-                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
-                }
-
-                data = {
-                    "model": model,
-                    "messages": [{"role": "user", "content": format_prompt(messages)}],
-                }
-
-                async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
-                    await raise_for_status(response)
-                    async for line in response.content:
-                        line = line.decode("utf-8").strip()
-                        if line.startswith("data:"):
-                            try:
-                                message = json.loads(line[5:].strip())
-                                if "message" in message:
+        if cookies is None and conversation is not None:
+            cookies = conversation.cookies
+        async with ClientSession(timeout=ClientTimeout(total=timeout), cookies=cookies) as session:
+            # Fetch VQD token
+            if conversation is None:
+                conversation = Conversation(model)
+                conversation.vqd = await cls.fetch_vqd(session)
+                conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
+            else:
+                conversation.message_history.append(messages[-1])
+            headers = {
+                "accept": "text/event-stream",
+                "content-type": "application/json",
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
+                "x-vqd-4": conversation.vqd,
+            }
+            data = {
+                "model": model,
+                "messages": conversation.message_history,
+            }
+            async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
+                await raise_for_status(response)
+                reason = None
+                full_message = ""
+                async for line in response.content:
+                    line = line.decode("utf-8").strip()
+                    if line.startswith("data:"):
+                        try:
+                            message = json.loads(line[5:].strip())
+                            if "message" in message:
+                                if message["message"]:
                                     yield message["message"]
-                            except json.JSONDecodeError:
-                                continue
-
-            except ClientError as e:
-                raise Exception(f"HTTP ClientError occurred: {e}")
-            except asyncio.TimeoutError:
-                raise Exception("Request timed out.")
-            except Exception as e:
-                raise Exception(f"An error occurred: {str(e)}")
+                                    full_message += message["message"]
+                                    reason = "length"
+                                else:
+                                    reason = "stop"
+                        except json.JSONDecodeError:
+                            continue
+                if return_conversation:
+                    conversation.message_history.append({"role": "assistant", "content": full_message})
+                    conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd)
+                    conversation.cookies = {n: c.value for n, c in session.cookie_jar.filter_cookies(cls.url).items()}
+                    yield conversation
+                if reason is not None:
+                    yield FinishReason(reason)
\ No newline at end of file

diff --git a/g4f/Provider/ImageLabs.py b/g4f/Provider/ImageLabs.py
index c52860d0..2493cf4a 100644
--- a/g4f/Provider/ImageLabs.py
+++ b/g4f/Provider/ImageLabs.py
@@ -48,7 +48,7 @@ class ImageLabs(AsyncGeneratorProvider, ProviderModelMixin):
         }
 
         async with ClientSession(headers=headers) as session:
-            prompt = messages[-1]["content"]
+            prompt = messages[-1]["content"] if prompt is None else prompt
 
             # Generate image
             payload = {

diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 16946a1e..9829e59d 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -46,4 +46,6 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
                 response_json = await response.json()
                 content = response_json.get("answer", response_json).get("content")
                 if content:
+                    if "misuse detected. please get in touch" in content:
+                        raise ValueError(content)
                     yield content

diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index f7328b30..7755c930 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -11,14 +11,13 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..requests.raise_for_status import raise_for_status
 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
-from .helper import format_prompt
 
 
 class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Pollinations AI"
     url = "https://pollinations.ai"
 
     working = True
-    supports_stream = True
+    supports_stream = False
     supports_system_message = True
     supports_message_history = True
@@ -172,9 +171,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         params = {k: v for k, v in params.items() if v is not None}
 
         async with ClientSession(headers=headers) as session:
-            prompt = quote(messages[-1]["content"] if prompt is None else prompt)
+            prompt = messages[-1]["content"] if prompt is None else prompt
             param_string = "&".join(f"{k}={v}" for k, v in params.items())
-            url = f"{cls.image_api_endpoint}/prompt/{prompt}?{param_string}"
+            url = f"{cls.image_api_endpoint}/prompt/{quote(prompt)}?{param_string}"
 
             async with session.head(url, proxy=proxy) as response:
                 if response.status == 200:

diff --git a/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py b/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py
index 573c64b8..a49a0deb 100644
--- a/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py
+++ b/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import asyncio
 import aiohttp
 import json
 import uuid
@@ -9,7 +8,7 @@ import re
 from ...typing import AsyncResult, Messages
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_prompt
-
+from ... import debug
 
 class Qwen_Qwen_2_72B_Instruct(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://qwen-qwen2-72b-instruct.hf.space"
@@ -49,10 +48,12 @@ class Qwen_Qwen_2_72B_Instruct(AsyncGeneratorProvider, ProviderModelMixin):
         }
 
         # Prepare the prompt
+        system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+        messages = [message for message in messages if message["role"] != "system"]
         prompt = format_prompt(messages)
 
         payload_join = {
-            "data": [prompt, [], ""],
+            "data": [prompt, [], system_prompt],
             "event_data": None,
             "fn_index": 0,
             "trigger_id": 11,
@@ -87,7 +88,7 @@ class Qwen_Qwen_2_72B_Instruct(AsyncGeneratorProvider, ProviderModelMixin):
                     if decoded_line.startswith('data: '):
                         try:
                             json_data = json.loads(decoded_line[6:])
-                            
+
                             # Look for generation stages
                             if json_data.get('msg') == 'process_generating':
                                 if 'output' in json_data and 'data' in json_data['output']:
@@ -97,10 +98,10 @@ class Qwen_Qwen_2_72B_Instruct(AsyncGeneratorProvider, ProviderModelMixin):
                                     if isinstance(item, list) and len(item) > 1:
                                         fragment = str(item[1])
                                         # Ignore [0, 1] type fragments and duplicates
-                                        if not re.match(r'^\[.*\]$', fragment) and fragment not in full_response:
+                                        if not re.match(r'^\[.*\]$', fragment) and not full_response.endswith(fragment):
                                             full_response += fragment
                                             yield fragment
-                            
+
                             # Check for completion
                             if json_data.get('msg') == 'process_completed':
                                 # Final check to ensure we get the complete response
@@ -117,8 +118,6 @@ class Qwen_Qwen_2_72B_Instruct(AsyncGeneratorProvider, ProviderModelMixin):
                                     if final_full_response:
                                         yield final_full_response
                                 break
-                            
+
                         except json.JSONDecodeError:
-                            print("Could not parse JSON:", decoded_line)
-                        except Exception as e:
-                            print(f"Error processing response: {e}")
+                            debug.log("Could not parse JSON:", decoded_line)

diff --git a/g4f/Provider/needs_auth/DeepSeek.py b/g4f/Provider/needs_auth/DeepSeek.py
index 0f1b96d6..b0a40898 100644
--- a/g4f/Provider/needs_auth/DeepSeek.py
+++ b/g4f/Provider/needs_auth/DeepSeek.py
@@ -12,4 +12,4 @@ class DeepSeek(OpenaiAPI):
     supports_stream = True
     supports_message_history = True
     default_model = "deepseek-chat"
-    models = [default_model]
+    fallback_models = [default_model]

diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 75c3574f..927acd69 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -136,6 +136,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
                         finish = cls.read_finish_reason(choice)
                         if finish is not None:
                             yield finish
+                            break
 
     @staticmethod
     def read_finish_reason(choice: dict) -> Optional[FinishReason]:
-- 
cgit v1.2.3
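Note (not part of the patch): the DDG changes above let create_async_generator accept conversation and return_conversation keyword arguments and yield a Conversation object (carrying the x-vqd-4 token, message history, and cookies) followed by a FinishReason. The sketch below shows how a caller might use that to continue a chat across two calls. It is illustrative only and assumes the surrounding g4f package layout (DDG importable from g4f.Provider.DDG, FinishReason and JsonConversation from g4f.providers.response), that "gpt-4o-mini" is an accepted DDG model name, and that FinishReason exposes a reason attribute; none of those details are defined by the patch itself.

# Usage sketch only -- not part of the commit; import paths, model name and
# the FinishReason.reason attribute are assumptions, see the note above.
import asyncio

from g4f.Provider.DDG import DDG
from g4f.providers.response import FinishReason, JsonConversation

async def main():
    conversation = None

    # First turn: request the Conversation object so the vqd token,
    # message history and cookies can be reused for the next turn.
    async for chunk in DDG.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello, who are you?"}],
        return_conversation=True,
    ):
        if isinstance(chunk, JsonConversation):
            conversation = chunk
        elif isinstance(chunk, FinishReason):
            print(f"\n[finished: {chunk.reason}]")
        else:
            print(chunk, end="")

    # Second turn: pass the stored conversation back in to continue the chat
    # instead of re-sending the whole prompt as a fresh request.
    async for chunk in DDG.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Summarize that in one sentence."}],
        conversation=conversation,
    ):
        if not isinstance(chunk, (JsonConversation, FinishReason)):
            print(chunk, end="")

asyncio.run(main())

The second call reuses the stored x-vqd-4 token and cookies rather than fetching a new VQD token, which is the conversation/continue behaviour this commit adds to DDG.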