From 30381f687e3b284c34fcb12ea36539ac02923029 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Thu, 30 Jan 2025 00:35:51 +0100
Subject: Add Janus_Pro_7B provider, add proof-of-concept DeepSeekAPI
 provider, and add BackendApi provider from HuggingSpace
---
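
The new Janus_Pro_7B provider is registered under g4f.Provider.hf_space and
picked up via HuggingSpace.providers. A minimal usage sketch (assuming the
g4f client interface of this release; not part of the patch itself):

    from g4f.client import Client
    from g4f.Provider.hf_space import Janus_Pro_7B

    client = Client(provider=Janus_Pro_7B)
    response = client.chat.completions.create(
        model="janus-pro-7b",  # default_model introduced by this patch
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)
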
g4f/Provider/CablyAI.py | 2 +-
g4f/Provider/DeepInfraChat.py | 2 +-
g4f/Provider/Jmuz.py | 2 +-
g4f/Provider/Mhystical.py | 2 +-
g4f/Provider/OIVSCode.py | 2 +-
g4f/Provider/__init__.py | 2 +
g4f/Provider/hf_space/Janus_Pro_7B.py | 144 +++++++++++++++++++++++
g4f/Provider/hf_space/__init__.py | 8 +-
g4f/Provider/mini_max/MiniMax.py | 2 +-
g4f/Provider/needs_auth/CopilotAccount.py | 12 +-
g4f/Provider/needs_auth/Custom.py | 2 +-
g4f/Provider/needs_auth/DeepInfra.py | 2 +-
g4f/Provider/needs_auth/DeepSeekAPI.py | 105 +++++++++++++++++
g4f/Provider/needs_auth/GlhfChat.py | 2 +-
g4f/Provider/needs_auth/Groq.py | 2 +-
g4f/Provider/needs_auth/HuggingFaceAPI.py | 2 +-
g4f/Provider/needs_auth/OpenaiAPI.py | 2 +-
g4f/Provider/needs_auth/OpenaiTemplate.py | 187 ------------------------------
g4f/Provider/needs_auth/PerplexityApi.py | 2 +-
g4f/Provider/needs_auth/ThebApi.py | 2 +-
g4f/Provider/needs_auth/__init__.py | 1 +
g4f/Provider/needs_auth/xAI.py | 2 +-
g4f/Provider/template/BackendApi.py | 124 ++++++++++++++++++++
g4f/Provider/template/OpenaiTemplate.py | 187 ++++++++++++++++++++++++++++++
g4f/Provider/template/__init__.py | 2 +
g4f/cookies.py | 2 +
g4f/gui/server/api.py | 3 +
g4f/providers/base_provider.py | 6 +-
g4f/providers/retry_provider.py | 2 +-
g4f/requests/__init__.py | 2 +-
30 files changed, 602 insertions(+), 215 deletions(-)
create mode 100644 g4f/Provider/hf_space/Janus_Pro_7B.py
create mode 100644 g4f/Provider/needs_auth/DeepSeekAPI.py
delete mode 100644 g4f/Provider/needs_auth/OpenaiTemplate.py
create mode 100644 g4f/Provider/template/BackendApi.py
create mode 100644 g4f/Provider/template/OpenaiTemplate.py
create mode 100644 g4f/Provider/template/__init__.py
diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py
index 07e9fe7f..d13d7a58 100644
--- a/g4f/Provider/CablyAI.py
+++ b/g4f/Provider/CablyAI.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
class CablyAI(OpenaiTemplate):
url = "https://cablyai.com"
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index f384a3ff..9b968e9f 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
class DeepInfraChat(OpenaiTemplate):
url = "https://deepinfra.com/chat"
diff --git a/g4f/Provider/Jmuz.py b/g4f/Provider/Jmuz.py
index 27c10233..5b4c6632 100644
--- a/g4f/Provider/Jmuz.py
+++ b/g4f/Provider/Jmuz.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
class Jmuz(OpenaiTemplate):
url = "https://discord.gg/Ew6JzjA2NR"
diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py
index 1257a80f..3a60e958 100644
--- a/g4f/Provider/Mhystical.py
+++ b/g4f/Provider/Mhystical.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
class Mhystical(OpenaiTemplate):
url = "https://mhystical.cc"
diff --git a/g4f/Provider/OIVSCode.py b/g4f/Provider/OIVSCode.py
index 6a918869..ef88c5ed 100644
--- a/g4f/Provider/OIVSCode.py
+++ b/g4f/Provider/OIVSCode.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
class OIVSCode(OpenaiTemplate):
label = "OI VSCode Server"
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 5db86513..0e0b2f4d 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,6 +11,7 @@ from .not_working import *
from .local import *
from .hf_space import HuggingSpace
from .mini_max import HailuoAI, MiniMax
+from .template import OpenaiTemplate, BackendApi
from .AIChatFree import AIChatFree
from .AIUncensored import AIUncensored
@@ -55,6 +56,7 @@ __providers__: list[ProviderType] = [
if isinstance(provider, type)
and issubclass(provider, BaseProvider)
]
+__providers__ = __providers__ + HuggingSpace.providers
__all__: list[str] = [
provider.__name__ for provider in __providers__
]
diff --git a/g4f/Provider/hf_space/Janus_Pro_7B.py b/g4f/Provider/hf_space/Janus_Pro_7B.py
new file mode 100644
index 00000000..43e5f518
--- /dev/null
+++ b/g4f/Provider/hf_space/Janus_Pro_7B.py
@@ -0,0 +1,144 @@
+from __future__ import annotations
+
+import json
+import uuid
+import re
+from datetime import datetime, timezone, timedelta
+import urllib.parse
+
+from ...typing import AsyncResult, Messages, Cookies
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...providers.response import JsonConversation, ImageResponse
+from ...requests.aiohttp import StreamSession, StreamResponse
+from ...requests.raise_for_status import raise_for_status
+from ...cookies import get_cookies
+from ...errors import ResponseError
+from ... import debug
+
+class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://huggingface.co/spaces/deepseek-ai/Janus-Pro-7B"
+ api_url = "https://deepseek-ai-janus-pro-7b.hf.space"
+ referer = f"{api_url}?__theme=light"
+
+ working = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = "janus-pro-7b"
+ default_image_model = "janus-pro-7b-image"
+ models = [default_model, default_image_model]
+ image_models = [default_image_model]
+
+ @classmethod
+ def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation):
+ if method == "post":
+ return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
+ "headers": {
+ "content-type": "application/json",
+ "x-zerogpu-token": conversation.zerogpu_token,
+ "x-zerogpu-uuid": conversation.uuid,
+ "referer": cls.referer,
+ },
+ "json": {"data":[None,prompt,42,0.95,0.1],"event_data":None,"fn_index":2,"trigger_id":10,"session_hash":conversation.session_hash},
+ })
+ elif method == "image":
+ return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
+ "headers": {
+ "content-type": "application/json",
+ "x-zerogpu-token": conversation.zerogpu_token,
+ "x-zerogpu-uuid": conversation.uuid,
+ "referer": cls.referer,
+ },
+ "json": {"data":[prompt,1234,5,1],"event_data":None,"fn_index":3,"trigger_id":20,"session_hash":conversation.session_hash},
+ })
+ return session.get(f"{cls.api_url}/gradio_api/queue/data?session_hash={conversation.session_hash}", **{
+ "headers": {
+ "accept": "text/event-stream",
+ "content-type": "application/json",
+ "referer": cls.referer,
+ }
+ })
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ prompt: str = None,
+ proxy: str = None,
+ cookies: Cookies = None,
+ return_conversation: bool = False,
+ conversation: JsonConversation = None,
+ **kwargs
+ ) -> AsyncResult:
+ def generate_session_hash():
+ """Generate a unique session hash."""
+ return str(uuid.uuid4()).replace('-', '')[:12]
+
+ method = "post"
+ if model == cls.default_image_model or prompt is not None:
+ method = "image"
+
+ prompt = format_prompt(messages) if prompt is None and conversation is None else prompt
+ prompt = messages[-1]["content"] if prompt is None else prompt
+
+ session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
+ async with StreamSession(proxy=proxy, impersonate="chrome") as session:
+ session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
+ user_uuid = None if conversation is None else getattr(conversation, "user_uuid", None)
+ zerogpu_token = "[object Object]"
+
+ cookies = get_cookies("huggingface.co", raise_requirements_error=False) if cookies is None else cookies
+ if cookies:
+ # Get current UTC time + 10 minutes
+ dt = (datetime.now(timezone.utc) + timedelta(minutes=10)).isoformat(timespec='milliseconds')
+ encoded_dt = urllib.parse.quote(dt)
+ async with session.get(f"https://huggingface.co/api/spaces/deepseek-ai/Janus-Pro-7B/jwt?expiration={encoded_dt}&include_pro_status=true", cookies=cookies) as response:
+ zerogpu_token = (await response.json())
+ zerogpu_token = zerogpu_token["token"]
+ if user_uuid is None:
+ async with session.get(cls.url, cookies=cookies) as response:
+ match = re.search(r"&quot;token&quot;:&quot;([^&]+?)&quot;", await response.text())
+ if match:
+ zerogpu_token = match.group(1)
+ match = re.search(r"&quot;sessionUuid&quot;:&quot;([^&]+?)&quot;", await response.text())
+ if match:
+ user_uuid = match.group(1)
+
+ if conversation is None or not hasattr(conversation, "session_hash"):
+ conversation = JsonConversation(session_hash=session_hash, zerogpu_token=zerogpu_token, uuid=user_uuid)
+ conversation.zerogpu_token = zerogpu_token
+ if return_conversation:
+ yield conversation
+
+ async with cls.run(method, session, prompt, conversation) as response:
+ await raise_for_status(response)
+
+ async with cls.run("get", session, prompt, conversation) as response:
+ response: StreamResponse = response
+ async for line in response.iter_lines():
+ decoded_line = line.decode(errors="replace")
+ if decoded_line.startswith('data: '):
+ try:
+ json_data = json.loads(decoded_line[6:])
+ if json_data.get('msg') == 'log':
+ debug.log(json_data["log"])
+
+ if json_data.get('msg') == 'process_generating':
+ if 'output' in json_data and 'data' in json_data['output']:
+ yield f"data: {json.dumps(json_data['output']['data'])}"
+
+ if json_data.get('msg') == 'process_completed':
+ if 'output' in json_data and 'error' in json_data['output']:
+ raise ResponseError("Text model is not working. Try out image model" if "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
+ if 'output' in json_data and 'data' in json_data['output']:
+ if "image" in json_data['output']['data'][0][0]:
+ yield ImageResponse([image["image"]["url"] for image in json_data['output']['data'][0]], prompt)
+ else:
+ yield f"data: {json.dumps(json_data['output']['data'])}"
+ break
+
+ except json.JSONDecodeError:
+ debug.log("Could not parse JSON:", decoded_line)
\ No newline at end of file
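
The provider above speaks the two-step Gradio queue protocol: a POST to
/gradio_api/queue/join enqueues the job, then a GET on /gradio_api/queue/data
streams results as server-sent events for the same session_hash. A minimal
standalone sketch with aiohttp (the x-zerogpu-token/x-zerogpu-uuid headers
handled by the provider are omitted here, so unauthenticated calls may be
rejected):

    import asyncio, json, uuid
    import aiohttp

    API = "https://deepseek-ai-janus-pro-7b.hf.space"

    async def gradio_roundtrip(prompt: str):
        session_hash = uuid.uuid4().hex[:12]  # same scheme as generate_session_hash()
        async with aiohttp.ClientSession() as session:
            # Step 1: enqueue; fn_index=2 is the text endpoint per this patch.
            await session.post(f"{API}/gradio_api/queue/join", json={
                "data": [None, prompt, 42, 0.95, 0.1],
                "event_data": None, "fn_index": 2, "trigger_id": 10,
                "session_hash": session_hash,
            })
            # Step 2: read SSE "data:" lines until the job reports completion.
            async with session.get(f"{API}/gradio_api/queue/data",
                                   params={"session_hash": session_hash}) as response:
                async for raw in response.content:
                    line = raw.decode(errors="replace").strip()
                    if line.startswith("data: "):
                        msg = json.loads(line[6:])
                        if msg.get("msg") == "process_completed":
                            return msg.get("output", {}).get("data")

    print(asyncio.run(gradio_roundtrip("Hello")))
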
diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py
index accd00d0..3e40a37b 100644
--- a/g4f/Provider/hf_space/__init__.py
+++ b/g4f/Provider/hf_space/__init__.py
@@ -10,6 +10,7 @@ from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
from .CohereForAI import CohereForAI
+from .Janus_Pro_7B import Janus_Pro_7B
from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5M_Demo import Qwen_Qwen_2_5M_Demo
from .Qwen_Qwen_2_72B_Instruct import Qwen_Qwen_2_72B_Instruct
@@ -25,8 +26,11 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
default_image_model = BlackForestLabsFlux1Dev.default_model
default_vision_model = Qwen_QVQ_72B.default_model
providers = [
- BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell,
- CohereForAI, Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct, StableDiffusion35Large
+ BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell,
+ VoodoohopFlux1Schnell,
+ CohereForAI, Janus_Pro_7B,
+ Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct,
+ StableDiffusion35Large
]
@classmethod
diff --git a/g4f/Provider/mini_max/MiniMax.py b/g4f/Provider/mini_max/MiniMax.py
index c954fb62..fbca13ea 100644
--- a/g4f/Provider/mini_max/MiniMax.py
+++ b/g4f/Provider/mini_max/MiniMax.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from ..needs_auth.OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
class MiniMax(OpenaiTemplate):
label = "MiniMax API"
diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py
index d46eae8d..a532e0bb 100644
--- a/g4f/Provider/needs_auth/CopilotAccount.py
+++ b/g4f/Provider/needs_auth/CopilotAccount.py
@@ -38,7 +38,7 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
raise h
yield AuthResult(
api_key=cls._access_token,
- cookies=cls._cookies,
+ cookies=cls._cookies if isinstance(cls._cookies, dict) else {c.name: c.value for c in cls._cookies},
)
@classmethod
@@ -49,9 +49,9 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
auth_result: AuthResult,
**kwargs
) -> AsyncResult:
- Copilot._access_token = getattr(auth_result, "api_key")
- Copilot._cookies = getattr(auth_result, "cookies")
- Copilot.needs_auth = cls.needs_auth
- for chunk in Copilot.create_completion(model, messages, **kwargs):
+ cls._access_token = getattr(auth_result, "api_key")
+ cls._cookies = getattr(auth_result, "cookies")
+ cls.needs_auth = cls.needs_auth
+ for chunk in cls.create_completion(model, messages, **kwargs):
yield chunk
- auth_result.cookies = Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}
\ No newline at end of file
+ auth_result.cookies = cls._cookies if isinstance(cls._cookies, dict) else {c.name: c.value for c in cls._cookies}
\ No newline at end of file
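
The cookie handling above normalizes both shapes Copilot may cache: a plain
dict, or an iterable cookie jar of objects exposing .name/.value. The guard
in isolation (a sketch, not part of the patch):

    def cookies_as_dict(cookies) -> dict:
        # Pass plain dicts through; flatten any iterable of cookie objects
        # exposing .name/.value (e.g. an http.cookiejar.CookieJar).
        return cookies if isinstance(cookies, dict) else {c.name: c.value for c in cookies}

    assert cookies_as_dict({"session": "abc"}) == {"session": "abc"}
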
diff --git a/g4f/Provider/needs_auth/Custom.py b/g4f/Provider/needs_auth/Custom.py
index 2bd7c014..4e86e523 100644
--- a/g4f/Provider/needs_auth/Custom.py
+++ b/g4f/Provider/needs_auth/Custom.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
class Custom(OpenaiTemplate):
label = "Custom Provider"
diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index 6c077a5e..a5f6350a 100644
--- a/g4f/Provider/needs_auth/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -4,7 +4,7 @@ import requests
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...image import ImageResponse
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
class DeepInfra(OpenaiTemplate):
url = "https://deepinfra.com"
diff --git a/g4f/Provider/needs_auth/DeepSeekAPI.py b/g4f/Provider/needs_auth/DeepSeekAPI.py
new file mode 100644
index 00000000..8158d2c6
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepSeekAPI.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import os
+import json
+import time
+from typing import AsyncIterator
+import asyncio
+
+from ..base_provider import AsyncAuthedProvider
+from ...requests import get_args_from_nodriver
+from ...providers.response import AuthResult, RequestLogin, Reasoning, JsonConversation, FinishReason
+from ...typing import AsyncResult, Messages
+
+try:
+ from curl_cffi import requests
+ from dsk.api import DeepSeekAPI, AuthenticationError, DeepSeekPOW
+except ImportError:
+ pass
+
+class DeepSeekAPIArgs(DeepSeekAPI):
+ def __init__(self, args: dict):
+ args.pop("headers")
+ self.auth_token = args.pop("api_key")
+ if not self.auth_token or not isinstance(self.auth_token, str):
+ raise AuthenticationError("Invalid auth token provided")
+ self.args = args
+ self.pow_solver = DeepSeekPOW()
+
+ def _make_request(self, method: str, endpoint: str, json_data: dict, pow_required: bool = False):
+ url = f"{self.BASE_URL}{endpoint}"
+ headers = self._get_headers()
+ if pow_required:
+ challenge = self._get_pow_challenge()
+ pow_response = self.pow_solver.solve_challenge(challenge)
+ headers = self._get_headers(pow_response)
+
+ response = requests.request(
+ method=method,
+ url=url,
+ json=json_data, **{
+ "headers":headers,
+ "impersonate":'chrome',
+ "timeout":None,
+ **self.args
+ }
+ )
+ return response.json()
+
+class DeepSeekAPI(AsyncAuthedProvider):
+ url = "https://chat.deepseek.com"
+ working = False
+ needs_auth = True
+ use_nodriver = True
+ _access_token = None
+
+ @classmethod
+ async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
+ yield RequestLogin(cls.__name__, os.environ.get("G4F_LOGIN_URL") or "")
+ async def callback(page):
+ while True:
+ await asyncio.sleep(1)
+ cls._access_token = json.loads(await page.evaluate("localStorage.getItem('userToken')") or "{}").get("value")
+ if cls._access_token:
+ break
+ args = await get_args_from_nodriver(cls.url, proxy, callback=callback)
+ yield AuthResult(
+ api_key=cls._access_token,
+ **args
+ )
+
+ @classmethod
+ async def create_authed(
+ cls,
+ model: str,
+ messages: Messages,
+ auth_result: AuthResult,
+ conversation: JsonConversation = None,
+ **kwargs
+ ) -> AsyncResult:
+ # Initialize with your auth token
+ api = DeepSeekAPIArgs(auth_result.get_dict())
+
+ # Create a new chat session
+ if conversation is None:
+ chat_id = api.create_chat_session()
+ conversation = JsonConversation(chat_id=chat_id)
+
+ is_thinking = 0
+ for chunk in api.chat_completion(
+ conversation.chat_id,
+ messages[-1]["content"],
+ thinking_enabled=True
+ ):
+ if chunk['type'] == 'thinking':
+ if not is_thinking:
+ yield Reasoning(None, "Is thinking...")
+ is_thinking = time.time()
+ yield Reasoning(chunk['content'])
+ elif chunk['type'] == 'text':
+ if is_thinking:
+ yield Reasoning(None, f"Thought for {time.time() - is_thinking:.2f}s")
+ is_thinking = 0
+ yield chunk['content']
+ if chunk['finish_reason']:
+ yield FinishReason(chunk['finish_reason'])
\ No newline at end of file
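
The nodriver callback above polls localStorage until DeepSeek's userToken
entry appears, then extracts its "value" field. The same parsing step in
isolation, with a hypothetical payload standing in for the browser value:

    import json

    raw = '{"value": "sk-example-token", "__exp": 1738195200}'  # hypothetical
    access_token = json.loads(raw or "{}").get("value")
    assert access_token == "sk-example-token"
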
diff --git a/g4f/Provider/needs_auth/GlhfChat.py b/g4f/Provider/needs_auth/GlhfChat.py
index fbd7ebcd..3d2c7bbf 100644
--- a/g4f/Provider/needs_auth/GlhfChat.py
+++ b/g4f/Provider/needs_auth/GlhfChat.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
class GlhfChat(OpenaiTemplate):
url = "https://glhf.chat"
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index 4299d211..a2af3941 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
class Groq(OpenaiTemplate):
url = "https://console.groq.com/playground"
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
index 59c8f6ee..c9a15260 100644
--- a/g4f/Provider/needs_auth/HuggingFaceAPI.py
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
from .HuggingChat import HuggingChat
from ...providers.types import Messages
from ... import debug
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 1720564e..34b1c739 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from . OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
class OpenaiAPI(OpenaiTemplate):
label = "OpenAI API"
diff --git a/g4f/Provider/needs_auth/OpenaiTemplate.py b/g4f/Provider/needs_auth/OpenaiTemplate.py
deleted file mode 100644
index 62a44c55..00000000
--- a/g4f/Provider/needs_auth/OpenaiTemplate.py
+++ /dev/null
@@ -1,187 +0,0 @@
-from __future__ import annotations
-
-import json
-import time
-import requests
-
-from ..helper import filter_none
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
-from ...typing import Union, Optional, AsyncResult, Messages, ImagesType
-from ...requests import StreamSession, raise_for_status
-from ...providers.response import FinishReason, ToolCalls, Usage, Reasoning, ImageResponse
-from ...errors import MissingAuthError, ResponseError
-from ...image import to_data_uri
-from ... import debug
-
-class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
- api_base = ""
- supports_message_history = True
- supports_system_message = True
- default_model = ""
- fallback_models = []
- sort_models = True
- ssl = None
-
- @classmethod
- def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
- if not cls.models:
- try:
- headers = {}
- if api_base is None:
- api_base = cls.api_base
- if api_key is not None:
- headers["authorization"] = f"Bearer {api_key}"
- response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
- raise_for_status(response)
- data = response.json()
- data = data.get("data") if isinstance(data, dict) else data
- cls.image_models = [model.get("id") for model in data if model.get("image")]
- cls.models = [model.get("id") for model in data]
- if cls.sort_models:
- cls.models.sort()
- except Exception as e:
- debug.log(e)
- return cls.fallback_models
- return cls.models
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- timeout: int = 120,
- images: ImagesType = None,
- api_key: str = None,
- api_endpoint: str = None,
- api_base: str = None,
- temperature: float = None,
- max_tokens: int = None,
- top_p: float = None,
- stop: Union[str, list[str]] = None,
- stream: bool = False,
- prompt: str = None,
- headers: dict = None,
- impersonate: str = None,
- tools: Optional[list] = None,
- extra_data: dict = {},
- **kwargs
- ) -> AsyncResult:
- if cls.needs_auth and api_key is None:
- raise MissingAuthError('Add a "api_key"')
- async with StreamSession(
- proxy=proxy,
- headers=cls.get_headers(stream, api_key, headers),
- timeout=timeout,
- impersonate=impersonate,
- ) as session:
- model = cls.get_model(model, api_key=api_key, api_base=api_base)
- if api_base is None:
- api_base = cls.api_base
-
- # Proxy for image generation feature
- if model and model in cls.image_models:
- data = {
- "prompt": messages[-1]["content"] if prompt is None else prompt,
- "model": model,
- }
- async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data, ssl=cls.ssl) as response:
- data = await response.json()
- cls.raise_error(data)
- await raise_for_status(response)
- yield ImageResponse([image["url"] for image in data["data"]], prompt)
- return
-
- if images is not None and messages:
- if not model and hasattr(cls, "default_vision_model"):
- model = cls.default_vision_model
- last_message = messages[-1].copy()
- last_message["content"] = [
- *[{
- "type": "image_url",
- "image_url": {"url": to_data_uri(image)}
- } for image, _ in images],
- {
- "type": "text",
- "text": messages[-1]["content"]
- }
- ]
- messages[-1] = last_message
- data = filter_none(
- messages=messages,
- model=model,
- temperature=temperature,
- max_tokens=max_tokens,
- top_p=top_p,
- stop=stop,
- stream=stream,
- tools=tools,
- **extra_data
- )
- if api_endpoint is None:
- api_endpoint = f"{api_base.rstrip('/')}/chat/completions"
- async with session.post(api_endpoint, json=data, ssl=cls.ssl) as response:
- content_type = response.headers.get("content-type", "text/event-stream" if stream else "application/json")
- if content_type.startswith("application/json"):
- data = await response.json()
- cls.raise_error(data)
- await raise_for_status(response)
- choice = data["choices"][0]
- if "content" in choice["message"] and choice["message"]["content"]:
- yield choice["message"]["content"].strip()
- elif "tool_calls" in choice["message"]:
- yield ToolCalls(choice["message"]["tool_calls"])
- if "usage" in data:
- yield Usage(**data["usage"])
- if "finish_reason" in choice and choice["finish_reason"] is not None:
- yield FinishReason(choice["finish_reason"])
- return
- elif content_type.startswith("text/event-stream"):
- await raise_for_status(response)
- first = True
- is_thinking = 0
- async for line in response.iter_lines():
- if line.startswith(b"data: "):
- chunk = line[6:]
- if chunk == b"[DONE]":
- break
- data = json.loads(chunk)
- cls.raise_error(data)
- choice = data["choices"][0]
- if "content" in choice["delta"] and choice["delta"]["content"]:
- delta = choice["delta"]["content"]
- if first:
- delta = delta.lstrip()
- if delta:
- first = False
- if is_thinking:
- if "" in delta:
- yield Reasoning(None, f"Finished in {round(time.time()-is_thinking, 2)} seconds")
- is_thinking = 0
- else:
- yield Reasoning(delta)
- elif "" in delta:
- is_thinking = time.time()
- yield Reasoning(None, "Is thinking...")
- else:
- yield delta
- if "usage" in data and data["usage"]:
- yield Usage(**data["usage"])
- if "finish_reason" in choice and choice["finish_reason"] is not None:
- yield FinishReason(choice["finish_reason"])
- break
- else:
- await raise_for_status(response)
- raise ResponseError(f"Not supported content-type: {content_type}")
-
- @classmethod
- def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
- return {
- "Accept": "text/event-stream" if stream else "application/json",
- "Content-Type": "application/json",
- **(
- {"Authorization": f"Bearer {api_key}"}
- if api_key else {}
- ),
- **({} if headers is None else headers)
- }
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 57b67cb4..4d8e0ccd 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
class PerplexityApi(OpenaiTemplate):
label = "Perplexity API"
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 50b95a1c..41717bd2 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from ...typing import CreateResult, Messages
from ..helper import filter_none
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
models = {
"theb-ai": "TheB.AI",
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 348d9056..691bc5be 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -6,6 +6,7 @@ from .Custom import Custom
from .Custom import Feature
from .DeepInfra import DeepInfra
from .DeepSeek import DeepSeek
+from .DeepSeekAPI import DeepSeekAPI
from .Gemini import Gemini
from .GeminiPro import GeminiPro
from .GigaChat import GigaChat
diff --git a/g4f/Provider/needs_auth/xAI.py b/g4f/Provider/needs_auth/xAI.py
index 154d18d0..8f21e9cc 100644
--- a/g4f/Provider/needs_auth/xAI.py
+++ b/g4f/Provider/needs_auth/xAI.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from .OpenaiTemplate import OpenaiTemplate
+from ..template.OpenaiTemplate import OpenaiTemplate
class xAI(OpenaiTemplate):
url = "https://console.x.ai"
diff --git a/g4f/Provider/template/BackendApi.py b/g4f/Provider/template/BackendApi.py
new file mode 100644
index 00000000..caf28219
--- /dev/null
+++ b/g4f/Provider/template/BackendApi.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+
+import re
+import json
+import time
+from urllib.parse import quote_plus
+
+from ...typing import Messages, AsyncResult
+from ...requests import StreamSession
+from ...providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...providers.response import ProviderInfo, JsonConversation, PreviewResponse, SynthesizeData, TitleGeneration, RequestLogin
+from ...providers.response import Parameters, FinishReason, Usage, Reasoning
+from ...errors import ModelNotSupportedError
+from ..needs_auth.OpenaiAccount import OpenaiAccount
+from ..needs_auth.HuggingChat import HuggingChat
+from ... import debug
+
+class BackendApi(AsyncGeneratorProvider, ProviderModelMixin):
+ ssl = False
+
+ models = [
+ *OpenaiAccount.get_models(),
+ *HuggingChat.get_models(),
+ "flux",
+ "flux-pro",
+ "MiniMax-01",
+ "Microsoft Copilot",
+ ]
+
+ @classmethod
+ def get_model(cls, model: str):
+ if "MiniMax" in model:
+ model = "MiniMax"
+ elif "Copilot" in model:
+ model = "Copilot"
+ elif "FLUX" in model:
+ model = f"flux-{model.split('-')[-1]}"
+ elif "flux" in model:
+ model = model.split(' ')[-1]
+ elif model in OpenaiAccount.get_models():
+ pass
+ elif model in HuggingChat.get_models():
+ pass
+ else:
+ raise ModelNotSupportedError(f"Model: {model}")
+ return model
+
+ @classmethod
+ def get_provider(cls, model: str):
+ if model.startswith("MiniMax"):
+ return "HailuoAI"
+ elif model == "Copilot":
+ return "CopilotAccount"
+ elif model in OpenaiAccount.get_models():
+ return "OpenaiAccount"
+ elif model in HuggingChat.get_models():
+ return "HuggingChat"
+ return None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_key: str = None,
+ proxy: str = None,
+ timeout: int = 0,
+ **kwargs
+ ) -> AsyncResult:
+ debug.log(f"{__name__}: {api_key}")
+
+ async with StreamSession(
+ proxy=proxy,
+ headers={"Accept": "text/event-stream"},
+ timeout=timeout
+ ) as session:
+ model = cls.get_model(model)
+ provider = cls.get_provider(model)
+ async with session.post(f"{cls.url}/backend-api/v2/conversation", json={
+ "model": model,
+ "messages": messages,
+ "provider": provider,
+ **kwargs
+ }, ssl=cls.ssl) as response:
+ async for line in response.iter_lines():
+ data = json.loads(line)
+ data_type = data.pop("type")
+ if data_type == "provider":
+ yield ProviderInfo(**data[data_type])
+ provider = data[data_type]["name"]
+ elif data_type == "conversation":
+ yield JsonConversation(**data[data_type][provider] if provider in data[data_type] else data[data_type][""])
+ elif data_type == "conversation_id":
+ pass
+ elif data_type == "message":
+ yield Exception(data)
+ elif data_type == "preview":
+ yield PreviewResponse(data[data_type])
+ elif data_type == "content":
+ def on_image(match):
+ extension = match.group(3).split(".")[-1].split("?")[0]
+ extension = "" if not extension or len(extension) > 4 else f".{extension}"
+ filename = f"{int(time.time())}_{quote_plus(match.group(1)[:100], '')}{extension}"
+ download_url = f"/download/{filename}?url={cls.url}{match.group(3)}"
+ return f"[](/images/{filename})"
+ yield re.sub(r'\[\!\[(.+?)\]\(([^)]+?)\)\]\(([^)]+?)\)', on_image, data["content"])
+ elif data_type == "synthesize":
+ yield SynthesizeData(**data[data_type])
+ elif data_type == "parameters":
+ yield Parameters(**data[data_type])
+ elif data_type == "usage":
+ yield Usage(**data[data_type])
+ elif data_type == "reasoning":
+ yield Reasoning(**data)
+ elif data_type == "login":
+ pass
+ elif data_type == "title":
+ yield TitleGeneration(data[data_type])
+ elif data_type == "finish":
+ yield FinishReason(data[data_type]["reason"])
+ elif data_type == "log":
+ debug.log(data[data_type])
+ else:
+ debug.log(f"Unknown data: ({data_type}) {data}")
\ No newline at end of file
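
The on_image callback above rewrites nested markdown image links through the
backend's /download/ route. A standalone demonstration using the same regex
(BASE_URL stands in for cls.url; illustrative only):

    import re, time
    from urllib.parse import quote_plus

    BASE_URL = "https://example.backend"  # stands in for cls.url

    def on_image(match):
        extension = match.group(3).split(".")[-1].split("?")[0]
        extension = "" if not extension or len(extension) > 4 else f".{extension}"
        filename = f"{int(time.time())}_{quote_plus(match.group(1)[:100], '')}{extension}"
        download_url = f"/download/{filename}?url={BASE_URL}{match.group(3)}"
        return f"[![{match.group(1)}]({download_url})](/images/{filename})"

    text = "[![a cat](/preview.jpg)](/full/cat.jpg)"
    print(re.sub(r'\[\!\[(.+?)\]\(([^)]+?)\)\]\(([^)]+?)\)', on_image, text))
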
diff --git a/g4f/Provider/template/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py
new file mode 100644
index 00000000..62a44c55
--- /dev/null
+++ b/g4f/Provider/template/OpenaiTemplate.py
@@ -0,0 +1,187 @@
+from __future__ import annotations
+
+import json
+import time
+import requests
+
+from ..helper import filter_none
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
+from ...typing import Union, Optional, AsyncResult, Messages, ImagesType
+from ...requests import StreamSession, raise_for_status
+from ...providers.response import FinishReason, ToolCalls, Usage, Reasoning, ImageResponse
+from ...errors import MissingAuthError, ResponseError
+from ...image import to_data_uri
+from ... import debug
+
+class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
+ api_base = ""
+ supports_message_history = True
+ supports_system_message = True
+ default_model = ""
+ fallback_models = []
+ sort_models = True
+ ssl = None
+
+ @classmethod
+ def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
+ if not cls.models:
+ try:
+ headers = {}
+ if api_base is None:
+ api_base = cls.api_base
+ if api_key is not None:
+ headers["authorization"] = f"Bearer {api_key}"
+ response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
+ raise_for_status(response)
+ data = response.json()
+ data = data.get("data") if isinstance(data, dict) else data
+ cls.image_models = [model.get("id") for model in data if model.get("image")]
+ cls.models = [model.get("id") for model in data]
+ if cls.sort_models:
+ cls.models.sort()
+ except Exception as e:
+ debug.log(e)
+ return cls.fallback_models
+ return cls.models
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ images: ImagesType = None,
+ api_key: str = None,
+ api_endpoint: str = None,
+ api_base: str = None,
+ temperature: float = None,
+ max_tokens: int = None,
+ top_p: float = None,
+ stop: Union[str, list[str]] = None,
+ stream: bool = False,
+ prompt: str = None,
+ headers: dict = None,
+ impersonate: str = None,
+ tools: Optional[list] = None,
+ extra_data: dict = {},
+ **kwargs
+ ) -> AsyncResult:
+ if cls.needs_auth and api_key is None:
+ raise MissingAuthError('Add a "api_key"')
+ async with StreamSession(
+ proxy=proxy,
+ headers=cls.get_headers(stream, api_key, headers),
+ timeout=timeout,
+ impersonate=impersonate,
+ ) as session:
+ model = cls.get_model(model, api_key=api_key, api_base=api_base)
+ if api_base is None:
+ api_base = cls.api_base
+
+ # Proxy for image generation feature
+ if model and model in cls.image_models:
+ data = {
+ "prompt": messages[-1]["content"] if prompt is None else prompt,
+ "model": model,
+ }
+ async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data, ssl=cls.ssl) as response:
+ data = await response.json()
+ cls.raise_error(data)
+ await raise_for_status(response)
+ yield ImageResponse([image["url"] for image in data["data"]], prompt)
+ return
+
+ if images is not None and messages:
+ if not model and hasattr(cls, "default_vision_model"):
+ model = cls.default_vision_model
+ last_message = messages[-1].copy()
+ last_message["content"] = [
+ *[{
+ "type": "image_url",
+ "image_url": {"url": to_data_uri(image)}
+ } for image, _ in images],
+ {
+ "type": "text",
+ "text": messages[-1]["content"]
+ }
+ ]
+ messages[-1] = last_message
+ data = filter_none(
+ messages=messages,
+ model=model,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ top_p=top_p,
+ stop=stop,
+ stream=stream,
+ tools=tools,
+ **extra_data
+ )
+ if api_endpoint is None:
+ api_endpoint = f"{api_base.rstrip('/')}/chat/completions"
+ async with session.post(api_endpoint, json=data, ssl=cls.ssl) as response:
+ content_type = response.headers.get("content-type", "text/event-stream" if stream else "application/json")
+ if content_type.startswith("application/json"):
+ data = await response.json()
+ cls.raise_error(data)
+ await raise_for_status(response)
+ choice = data["choices"][0]
+ if "content" in choice["message"] and choice["message"]["content"]:
+ yield choice["message"]["content"].strip()
+ elif "tool_calls" in choice["message"]:
+ yield ToolCalls(choice["message"]["tool_calls"])
+ if "usage" in data:
+ yield Usage(**data["usage"])
+ if "finish_reason" in choice and choice["finish_reason"] is not None:
+ yield FinishReason(choice["finish_reason"])
+ return
+ elif content_type.startswith("text/event-stream"):
+ await raise_for_status(response)
+ first = True
+ is_thinking = 0
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk == b"[DONE]":
+ break
+ data = json.loads(chunk)
+ cls.raise_error(data)
+ choice = data["choices"][0]
+ if "content" in choice["delta"] and choice["delta"]["content"]:
+ delta = choice["delta"]["content"]
+ if first:
+ delta = delta.lstrip()
+ if delta:
+ first = False
+ if is_thinking:
+ if "" in delta:
+ yield Reasoning(None, f"Finished in {round(time.time()-is_thinking, 2)} seconds")
+ is_thinking = 0
+ else:
+ yield Reasoning(delta)
+ elif "" in delta:
+ is_thinking = time.time()
+ yield Reasoning(None, "Is thinking...")
+ else:
+ yield delta
+ if "usage" in data and data["usage"]:
+ yield Usage(**data["usage"])
+ if "finish_reason" in choice and choice["finish_reason"] is not None:
+ yield FinishReason(choice["finish_reason"])
+ break
+ else:
+ await raise_for_status(response)
+ raise ResponseError(f"Not supported content-type: {content_type}")
+
+ @classmethod
+ def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
+ return {
+ "Accept": "text/event-stream" if stream else "application/json",
+ "Content-Type": "application/json",
+ **(
+ {"Authorization": f"Bearer {api_key}"}
+ if api_key else {}
+ ),
+ **({} if headers is None else headers)
+ }
\ No newline at end of file
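
With OpenaiTemplate relocated to g4f.Provider.template, a new OpenAI-compatible
backend is wired up the same way the providers touched by this patch do it;
the names and URLs below are placeholders, not part of the change:

    from g4f.Provider.template import OpenaiTemplate

    class ExampleChat(OpenaiTemplate):
        label = "Example Chat"                   # display name
        url = "https://chat.example.com"         # hypothetical frontend
        api_base = "https://api.example.com/v1"  # OpenAI-style API root
        working = True
        default_model = "example-model"
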
diff --git a/g4f/Provider/template/__init__.py b/g4f/Provider/template/__init__.py
new file mode 100644
index 00000000..758bc5b4
--- /dev/null
+++ b/g4f/Provider/template/__init__.py
@@ -0,0 +1,2 @@
+from .BackendApi import BackendApi
+from .OpenaiTemplate import OpenaiTemplate
\ No newline at end of file
diff --git a/g4f/cookies.py b/g4f/cookies.py
index 2328f0f2..88d1f019 100644
--- a/g4f/cookies.py
+++ b/g4f/cookies.py
@@ -60,6 +60,8 @@ DOMAINS = [
"chatgpt.com",
".cerebras.ai",
"github.com",
+ "huggingface.co",
+ ".huggingface.co"
]
if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
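
With huggingface.co added to DOMAINS, browser cookies for Hugging Face can be
loaded the same way the Janus_Pro_7B provider does above (a usage sketch):

    from g4f.cookies import get_cookies

    # raise_requirements_error=False avoids raising when no supported
    # browser cookie store is available.
    cookies = get_cookies("huggingface.co", raise_requirements_error=False)
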
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 12ec1b60..713cce72 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -157,6 +157,8 @@ class Api:
yield self._format_json('error', type(e).__name__, message=get_error_message(e))
return
if not isinstance(provider_handler, BaseRetryProvider):
+ if not provider:
+ provider = provider_handler.__name__
yield self.handle_provider(provider_handler, model)
if hasattr(provider_handler, "get_parameters"):
yield self._format_json("parameters", provider_handler.get_parameters(as_json=True))
@@ -221,6 +223,7 @@ class Api:
return {
'type': response_type,
response_type: content,
+ **kwargs
}
return {
'type': response_type,
diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py
index d46ed3a8..d27b9035 100644
--- a/g4f/providers/base_provider.py
+++ b/g4f/providers/base_provider.py
@@ -415,10 +415,10 @@ class AsyncAuthedProvider(AsyncGeneratorProvider):
model: str,
messages: Messages,
**kwargs
- ) -> CreateResult:
+) -> CreateResult:
+ auth_result = AuthResult()
+ cache_file = cls.get_cache_file()
try:
- auth_result = AuthResult()
- cache_file = cls.get_cache_file()
if cache_file.exists():
with cache_file.open("r") as f:
auth_result = AuthResult(**json.load(f))
diff --git a/g4f/providers/retry_provider.py b/g4f/providers/retry_provider.py
index dcd46d3c..d80db83f 100644
--- a/g4f/providers/retry_provider.py
+++ b/g4f/providers/retry_provider.py
@@ -87,7 +87,7 @@ class IterListProvider(BaseRetryProvider):
for provider in self.get_providers(stream and not ignore_stream, ignored):
self.last_provider = provider
debug.log(f"Using {provider.__name__} provider")
- yield ProviderInfo(provider.get_dict())
+ yield ProviderInfo(**provider.get_dict())
try:
response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
if hasattr(response, "__aiter__"):
diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py
index 0dfe6e68..03516951 100644
--- a/g4f/requests/__init__.py
+++ b/g4f/requests/__init__.py
@@ -167,7 +167,7 @@ async def get_nodriver(
)
except:
if util.get_registered_instances():
- browser = util.get_registered_instances()[-1]
+ browser = util.get_registered_instances().pop()
else:
raise
stop = browser.stop
--