path: root/g4f/Provider/needs_auth
Diffstat (limited to 'g4f/Provider/needs_auth')
-rw-r--r--  g4f/Provider/needs_auth/BingCreateImages.py | 54
-rw-r--r--  g4f/Provider/needs_auth/CopilotAccount.py | 8
-rw-r--r--  g4f/Provider/needs_auth/DeepInfra.py | 58
-rw-r--r--  g4f/Provider/needs_auth/DeepInfraImage.py | 80
-rw-r--r--  g4f/Provider/needs_auth/Gemini.py | 84
-rw-r--r--  g4f/Provider/needs_auth/GeminiPro.py | 111
-rw-r--r--  g4f/Provider/needs_auth/Groq.py | 27
-rw-r--r--  g4f/Provider/needs_auth/HuggingFace.py | 91
-rw-r--r--  g4f/Provider/needs_auth/MetaAI.py | 239
-rw-r--r--  g4f/Provider/needs_auth/MetaAIAccount.py | 23
-rw-r--r--  g4f/Provider/needs_auth/OpenRouter.py | 32
-rw-r--r--  g4f/Provider/needs_auth/OpenaiAPI.py (renamed from g4f/Provider/needs_auth/Openai.py) | 7
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py | 356
-rw-r--r--  g4f/Provider/needs_auth/PerplexityApi.py | 7
-rw-r--r--  g4f/Provider/needs_auth/Poe.py | 1
-rw-r--r--  g4f/Provider/needs_auth/Raycast.py | 8
-rw-r--r--  g4f/Provider/needs_auth/Replicate.py | 88
-rw-r--r--  g4f/Provider/needs_auth/Theb.py | 1
-rw-r--r--  g4f/Provider/needs_auth/ThebApi.py | 6
-rw-r--r--  g4f/Provider/needs_auth/WhiteRabbitNeo.py | 57
-rw-r--r--  g4f/Provider/needs_auth/__init__.py | 32
-rw-r--r--  g4f/Provider/needs_auth/gigachat/GigaChat.py | 92
-rw-r--r--  g4f/Provider/needs_auth/gigachat/__init__.py | 2
-rw-r--r--  g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt | 33
24 files changed, 1181 insertions(+), 316 deletions(-)
diff --git a/g4f/Provider/needs_auth/BingCreateImages.py b/g4f/Provider/needs_auth/BingCreateImages.py
new file mode 100644
index 00000000..80984d40
--- /dev/null
+++ b/g4f/Provider/needs_auth/BingCreateImages.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from ...cookies import get_cookies
+from ...image import ImageResponse
+from ...errors import MissingAuthError
+from ...typing import AsyncResult, Messages, Cookies
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..bing.create_images import create_images, create_session
+
+class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Microsoft Designer in Bing"
+ parent = "Bing"
+ url = "https://www.bing.com/images/create"
+ working = True
+ needs_auth = True
+ image_models = ["dall-e"]
+
+ def __init__(self, cookies: Cookies = None, proxy: str = None, api_key: str = None) -> None:
+ if api_key is not None:
+ if cookies is None:
+ cookies = {}
+ cookies["_U"] = api_key
+ self.cookies = cookies
+ self.proxy = proxy
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_key: str = None,
+ cookies: Cookies = None,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ session = BingCreateImages(cookies, proxy, api_key)
+ yield await session.generate(messages[-1]["content"])
+
+ async def generate(self, prompt: str) -> ImageResponse:
+ """
+ Asynchronously creates a markdown formatted string with images based on the prompt.
+
+ Args:
+ prompt (str): Prompt to generate images.
+
+ Returns:
+ ImageResponse: The generated images; renders as a markdown formatted string with images.
+ """
+ cookies = self.cookies or get_cookies(".bing.com", False)
+ if cookies is None or "_U" not in cookies:
+ raise MissingAuthError('Missing "_U" cookie')
+ async with create_session(cookies, self.proxy) as session:
+ images = await create_images(session, prompt)
+ return ImageResponse(images, prompt, {"preview": "{image}?w=200&h=200"} if len(images) > 1 else {}) \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py
new file mode 100644
index 00000000..fa43867e
--- /dev/null
+++ b/g4f/Provider/needs_auth/CopilotAccount.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from ..Copilot import Copilot
+
+class CopilotAccount(Copilot):
+ needs_auth = True
+ parent = "Copilot"
+ default_model = "" \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
new file mode 100644
index 00000000..35e7ca7f
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import requests
+from ...typing import AsyncResult, Messages
+from .OpenaiAPI import OpenaiAPI
+
+class DeepInfra(OpenaiAPI):
+ label = "DeepInfra"
+ url = "https://deepinfra.com"
+ working = True
+ needs_auth = True
+ supports_stream = True
+ supports_message_history = True
+ default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ url = 'https://api.deepinfra.com/models/featured'
+ models = requests.get(url).json()
+ cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
+ return cls.models
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ api_base: str = "https://api.deepinfra.com/v1/openai",
+ temperature: float = 0.7,
+ max_tokens: int = 1028,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Accept-Language': 'en-US',
+ 'Connection': 'keep-alive',
+ 'Origin': 'https://deepinfra.com',
+ 'Referer': 'https://deepinfra.com/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-site',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+ 'X-Deepinfra-Source': 'web-embed',
+ 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ }
+ return super().create_async_generator(
+ model, messages,
+ stream=stream,
+ api_base=api_base,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ headers=headers,
+ **kwargs
+ )
diff --git a/g4f/Provider/needs_auth/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py
new file mode 100644
index 00000000..2310c1c8
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepInfraImage.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+import requests
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...image import ImageResponse
+
+class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://deepinfra.com"
+ parent = "DeepInfra"
+ working = True
+ needs_auth = True
+ default_model = ''
+ image_models = [default_model]
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ url = 'https://api.deepinfra.com/models/featured'
+ models = requests.get(url).json()
+ cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"]
+ cls.image_models = cls.models
+ return cls.models
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ **kwargs
+ ) -> AsyncResult:
+ yield await cls.create_async(messages[-1]["content"], model, **kwargs)
+
+ @classmethod
+ async def create_async(
+ cls,
+ prompt: str,
+ model: str,
+ api_key: str = None,
+ api_base: str = "https://api.deepinfra.com/v1/inference",
+ proxy: str = None,
+ timeout: int = 180,
+ extra_data: dict = {},
+ **kwargs
+ ) -> ImageResponse:
+ headers = {
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Accept-Language': 'en-US',
+ 'Connection': 'keep-alive',
+ 'Origin': 'https://deepinfra.com',
+ 'Referer': 'https://deepinfra.com/',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-site',
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+ 'X-Deepinfra-Source': 'web-embed',
+ 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ }
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+ async with StreamSession(
+ proxies={"all": proxy},
+ headers=headers,
+ timeout=timeout
+ ) as session:
+ model = cls.get_model(model)
+ data = {"prompt": prompt, **extra_data}
+ data = {"input": data} if model == cls.default_model else data
+ async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
+ await raise_for_status(response)
+ data = await response.json()
+ images = data["output"] if "output" in data else data["images"]
+ if not images:
+ raise RuntimeError(f"Response: {data}")
+ images = images[0] if len(images) == 1 else images
+ return ImageResponse(images, prompt)
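The request shape is a POST to {api_base}/{model} with the prompt either bare or wrapped in "input". A minimal sketch of calling the new provider directly, assuming a valid DeepInfra token; the model name is an example, check /models/featured for current ones:

    import asyncio
    from g4f.Provider.needs_auth.DeepInfraImage import DeepInfraImage

    async def main():
        image = await DeepInfraImage.create_async(
            "a lighthouse at dusk",          # prompt
            model="stability-ai/sdxl",       # example model name
            api_key="DEEPINFRA_TOKEN",       # placeholder token
        )
        print(image)

    asyncio.run(main())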
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index eddd25fa..781aa410 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -6,24 +6,20 @@ import random
import re
from aiohttp import ClientSession, BaseConnector
-
-from ..helper import get_connector
-
try:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
+ import nodriver
+ has_nodriver = True
except ImportError:
- pass
+ has_nodriver = False
from ... import debug
from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
from ..base_provider import AsyncGeneratorProvider, BaseConversation
from ..helper import format_prompt, get_cookies
from ...requests.raise_for_status import raise_for_status
-from ...errors import MissingAuthError, MissingRequirementsError
+from ...requests.aiohttp import get_connector
+from ...errors import MissingAuthError
from ...image import ImageResponse, to_bytes
-from ...webdriver import get_browser, get_driver_cookies
REQUEST_HEADERS = {
"authority": "gemini.google.com",
@@ -54,17 +50,19 @@ class Gemini(AsyncGeneratorProvider):
url = "https://gemini.google.com"
needs_auth = True
working = True
+ default_model = 'gemini'
image_models = ["gemini"]
default_vision_model = "gemini"
+ models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
_cookies: Cookies = None
_snlm0e: str = None
_sid: str = None
@classmethod
async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]:
- try:
- import nodriver as uc
- except ImportError:
+ if not has_nodriver:
+ if debug.logging:
+ print("Skip nodriver login in Gemini provider")
return
try:
from platformdirs import user_config_dir
@@ -73,7 +71,7 @@ class Gemini(AsyncGeneratorProvider):
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await uc.start(
+ browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
)
@@ -90,30 +88,6 @@ class Gemini(AsyncGeneratorProvider):
cls._cookies = cookies
@classmethod
- async def webdriver_login(cls, proxy: str) -> AsyncIterator[str]:
- driver = None
- try:
- driver = get_browser(proxy=proxy)
- try:
- driver.get(f"{cls.url}/app")
- WebDriverWait(driver, 5).until(
- EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))
- )
- except:
- login_url = os.environ.get("G4F_LOGIN_URL")
- if login_url:
- yield f"Please login: [Google Gemini]({login_url})\n\n"
- WebDriverWait(driver, 240).until(
- EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))
- )
- cls._cookies = get_driver_cookies(driver)
- except MissingRequirementsError:
- pass
- finally:
- if driver:
- driver.close()
-
- @classmethod
async def create_async_generator(
cls,
model: str,
@@ -141,9 +115,6 @@ class Gemini(AsyncGeneratorProvider):
if not cls._snlm0e:
async for chunk in cls.nodriver_login(proxy):
yield chunk
- if cls._cookies is None:
- async for chunk in cls.webdriver_login(proxy):
- yield chunk
if not cls._snlm0e:
if cls._cookies is None or "__Secure-1PSID" not in cls._cookies:
raise MissingAuthError('Missing "__Secure-1PSID" cookie')
@@ -209,20 +180,23 @@ class Gemini(AsyncGeneratorProvider):
yield content[last_content_len:]
last_content_len = len(content)
if image_prompt:
- images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
- if response_format == "b64_json":
- yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
- else:
- resolved_images = []
- preview = []
- for image in images:
- async with client.get(image, allow_redirects=False) as fetch:
- image = fetch.headers["location"]
- async with client.get(image, allow_redirects=False) as fetch:
- image = fetch.headers["location"]
- resolved_images.append(image)
- preview.append(image.replace('=s512', '=s200'))
- yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
+ try:
+ images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
+ if response_format == "b64_json":
+ yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
+ else:
+ resolved_images = []
+ preview = []
+ for image in images:
+ async with client.get(image, allow_redirects=False) as fetch:
+ image = fetch.headers["location"]
+ async with client.get(image, allow_redirects=False) as fetch:
+ image = fetch.headers["location"]
+ resolved_images.append(image)
+ preview.append(image.replace('=s512', '=s200'))
+ yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
+ except TypeError:
+ pass
def build_request(
prompt: str,
@@ -305,4 +279,4 @@ class Conversation(BaseConversation):
) -> None:
self.conversation_id = conversation_id
self.response_id = response_id
- self.choice_id = choice_id \ No newline at end of file
+ self.choice_id = choice_id
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
new file mode 100644
index 00000000..a7f1e0aa
--- /dev/null
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -0,0 +1,111 @@
+from __future__ import annotations
+
+import base64
+import json
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, ImageType
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import to_bytes, is_accepted_format
+from ...errors import MissingAuthError
+from ..helper import get_connector
+
+class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Gemini API"
+ url = "https://ai.google.dev"
+ working = True
+ supports_message_history = True
+ needs_auth = True
+ default_model = "gemini-1.5-pro"
+ default_vision_model = default_model
+ models = [default_model, "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ api_key: str = None,
+ api_base: str = "https://generativelanguage.googleapis.com/v1beta",
+ use_auth_header: bool = False,
+ image: ImageType = None,
+ connector: BaseConnector = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ if not api_key:
+ raise MissingAuthError('Add an "api_key"')
+
+ headers = params = None
+ if use_auth_header:
+ headers = {"Authorization": f"Bearer {api_key}"}
+ else:
+ params = {"key": api_key}
+
+ method = "streamGenerateContent" if stream else "generateContent"
+ url = f"{api_base.rstrip('/')}/models/{model}:{method}"
+ async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+ contents = [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}]
+ }
+ for message in messages
+ if message["role"] != "system"
+ ]
+ if image is not None:
+ image = to_bytes(image)
+ contents[-1]["parts"].append({
+ "inline_data": {
+ "mime_type": is_accepted_format(image),
+ "data": base64.b64encode(image).decode()
+ }
+ })
+ data = {
+ "contents": contents,
+ "generationConfig": {
+ "stopSequences": kwargs.get("stop"),
+ "temperature": kwargs.get("temperature"),
+ "maxOutputTokens": kwargs.get("max_tokens"),
+ "topP": kwargs.get("top_p"),
+ "topK": kwargs.get("top_k"),
+ }
+ }
+ system_prompt = "\n".join(
+ message["content"]
+ for message in messages
+ if message["role"] == "system"
+ )
+ if system_prompt:
+ data["system_instruction"] = {"parts": {"text": system_prompt}}
+ async with session.post(url, params=params, json=data) as response:
+ if not response.ok:
+ data = await response.json()
+ data = data[0] if isinstance(data, list) else data
+ raise RuntimeError(f"Response {response.status}: {data['error']['message']}")
+ if stream:
+ lines = []
+ async for chunk in response.content:
+ if chunk == b"[{\n":
+ lines = [b"{\n"]
+ elif chunk == b",\r\n" or chunk == b"]":
+ try:
+ data = b"".join(lines)
+ data = json.loads(data)
+ yield data["candidates"][0]["content"]["parts"][0]["text"]
+ except:
+ data = data.decode(errors="ignore") if isinstance(data, bytes) else data
+ raise RuntimeError(f"Read chunk failed: {data}")
+ lines = []
+ else:
+ lines.append(chunk)
+ else:
+ data = await response.json()
+ candidate = data["candidates"][0]
+ if candidate["finishReason"] == "STOP":
+ yield candidate["content"]["parts"][0]["text"]
+ else:
+ yield candidate["finishReason"] + ' ' + candidate["safetyRatings"] \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index d11f6a82..943fc81a 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,14 +1,33 @@
from __future__ import annotations
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
from ...typing import AsyncResult, Messages
-class Groq(Openai):
+class Groq(OpenaiAPI):
label = "Groq"
url = "https://console.groq.com/playground"
working = True
default_model = "mixtral-8x7b-32768"
- models = ["mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"]
+ models = [
+ "distil-whisper-large-v3-en",
+ "gemma2-9b-it",
+ "gemma-7b-it",
+ "llama3-groq-70b-8192-tool-use-preview",
+ "llama3-groq-8b-8192-tool-use-preview",
+ "llama-3.1-70b-versatile",
+ "llama-3.1-8b-instant",
+ "llama-3.2-1b-preview",
+ "llama-3.2-3b-preview",
+ "llama-3.2-11b-vision-preview",
+ "llama-3.2-90b-vision-preview",
+ "llama-guard-3-8b",
+ "llava-v1.5-7b-4096-preview",
+ "llama3-70b-8192",
+ "llama3-8b-8192",
+ "mixtral-8x7b-32768",
+ "whisper-large-v3",
+ "whisper-large-v3-turbo",
+ ]
model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
@classmethod
@@ -21,4 +40,4 @@ class Groq(Openai):
) -> AsyncResult:
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
new file mode 100644
index 00000000..35270e60
--- /dev/null
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...errors import ModelNotFoundError
+from ...requests import StreamSession, raise_for_status
+
+from ..HuggingChat import HuggingChat
+
+class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://huggingface.co/chat"
+ working = True
+ needs_auth = True
+ supports_message_history = True
+ default_model = HuggingChat.default_model
+ models = HuggingChat.models
+ model_aliases = HuggingChat.model_aliases
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ proxy: str = None,
+ api_base: str = "https://api-inference.huggingface.co",
+ api_key: str = None,
+ max_new_tokens: int = 1024,
+ temperature: float = 0.7,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en',
+ 'cache-control': 'no-cache',
+ 'origin': 'https://huggingface.co',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://huggingface.co/chat/',
+ 'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+ }
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+ params = {
+ "return_full_text": False,
+ "max_new_tokens": max_new_tokens,
+ "temperature": temperature,
+ **kwargs
+ }
+ payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
+ async with StreamSession(
+ headers=headers,
+ proxy=proxy
+ ) as session:
+ async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
+ if response.status == 404:
+ raise ModelNotFoundError(f"Model is not supported: {model}")
+ await raise_for_status(response)
+ if stream:
+ first = True
+ async for line in response.iter_lines():
+ if line.startswith(b"data:"):
+ data = json.loads(line[5:])
+ if not data["token"]["special"]:
+ chunk = data["token"]["text"]
+ if first:
+ first = False
+ chunk = chunk.lstrip()
+ if chunk:
+ yield chunk
+ else:
+ yield (await response.json())[0]["generated_text"].strip()
+
+def format_prompt(messages: Messages) -> str:
+ system_messages = [message["content"] for message in messages if message["role"] == "system"]
+ question = " ".join([messages[-1]["content"], *system_messages])
+ history = "".join([
+ f"<s>[INST]{messages[idx-1]['content']} [/INST] {message['content']}</s>"
+ for idx, message in enumerate(messages)
+ if message["role"] == "assistant"
+ ])
+ return f"{history}<s>[INST] {question} [/INST]" \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py
new file mode 100644
index 00000000..568de701
--- /dev/null
+++ b/g4f/Provider/needs_auth/MetaAI.py
@@ -0,0 +1,239 @@
+from __future__ import annotations
+
+import json
+import uuid
+import random
+import time
+from typing import Dict, List
+
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests import raise_for_status, DEFAULT_HEADERS
+from ...image import ImageResponse, ImagePreview
+from ...errors import ResponseError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, get_connector, format_cookies
+
+class Sources():
+ def __init__(self, link_list: List[Dict[str, str]]) -> None:
+ self.list = link_list
+
+ def __str__(self) -> str:
+ return "\n\n" + ("\n".join([f"[{link['title']}]({link['link']})" for link in self.list]))
+
+class AbraGeoBlockedError(Exception):
+ pass
+
+class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Meta AI"
+ url = "https://www.meta.ai"
+ working = True
+ default_model = ''
+
+ def __init__(self, proxy: str = None, connector: BaseConnector = None):
+ self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)
+ self.cookies: Cookies = None
+ self.access_token: str = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ async for chunk in cls(proxy).prompt(format_prompt(messages)):
+ yield chunk
+
+ async def update_access_token(self, birthday: str = "1999-01-01"):
+ url = "https://www.meta.ai/api/graphql/"
+ payload = {
+ "lsd": self.lsd,
+ "fb_api_caller_class": "RelayModern",
+ "fb_api_req_friendly_name": "useAbraAcceptTOSForTempUserMutation",
+ "variables": json.dumps({
+ "dob": birthday,
+ "icebreaker_type": "TEXT",
+ "__relay_internal__pv__WebPixelRatiorelayprovider": 1,
+ }),
+ "doc_id": "7604648749596940",
+ }
+ headers = {
+ "x-fb-friendly-name": "useAbraAcceptTOSForTempUserMutation",
+ "x-fb-lsd": self.lsd,
+ "x-asbd-id": "129477",
+ "alt-used": "www.meta.ai",
+ "sec-fetch-site": "same-origin"
+ }
+ async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
+ await raise_for_status(response, "Fetch access_token failed")
+ auth_json = await response.json(content_type=None)
+ self.access_token = auth_json["data"]["xab_abra_accept_terms_of_service"]["new_temp_user_auth"]["access_token"]
+
+ async def prompt(self, message: str, cookies: Cookies = None) -> AsyncResult:
+ if self.cookies is None:
+ await self.update_cookies(cookies)
+ if cookies is not None:
+ self.access_token = None
+ if self.access_token is None and cookies is None:
+ await self.update_access_token()
+ if self.access_token is None:
+ url = "https://www.meta.ai/api/graphql/"
+ payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}
+ headers = {'x-fb-lsd': self.lsd}
+ else:
+ url = "https://graph.meta.ai/graphql?locale=user"
+ payload = {"access_token": self.access_token}
+ headers = {}
+ headers = {
+ 'content-type': 'application/x-www-form-urlencoded',
+ 'cookie': format_cookies(self.cookies),
+ 'origin': 'https://www.meta.ai',
+ 'referer': 'https://www.meta.ai/',
+ 'x-asbd-id': '129477',
+ 'x-fb-friendly-name': 'useAbraSendMessageMutation',
+ **headers
+ }
+ payload = {
+ **payload,
+ 'fb_api_caller_class': 'RelayModern',
+ 'fb_api_req_friendly_name': 'useAbraSendMessageMutation',
+ "variables": json.dumps({
+ "message": {"sensitive_string_value": message},
+ "externalConversationId": str(uuid.uuid4()),
+ "offlineThreadingId": generate_offline_threading_id(),
+ "suggestedPromptIndex": None,
+ "flashVideoRecapInput": {"images": []},
+ "flashPreviewInput": None,
+ "promptPrefix": None,
+ "entrypoint": "ABRA__CHAT__TEXT",
+ "icebreaker_type": "TEXT",
+ "__relay_internal__pv__AbraDebugDevOnlyrelayprovider": False,
+ "__relay_internal__pv__WebPixelRatiorelayprovider": 1,
+ }),
+ 'server_timestamps': 'true',
+ 'doc_id': '7783822248314888'
+ }
+ async with self.session.post(url, headers=headers, data=payload) as response:
+ await raise_for_status(response, "Fetch response failed")
+ last_snippet_len = 0
+ fetch_id = None
+ async for line in response.content:
+ if b"<h1>Something Went Wrong</h1>" in line:
+ raise ResponseError("Response: Something Went Wrong")
+ try:
+ json_line = json.loads(line)
+ except json.JSONDecodeError:
+ continue
+ if json_line.get("errors"):
+ raise RuntimeError("\n".join([error.get("message") for error in json_line.get("errors")]))
+ bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {})
+ streaming_state = bot_response_message.get("streaming_state")
+ fetch_id = bot_response_message.get("fetch_id") or fetch_id
+ if streaming_state in ("STREAMING", "OVERALL_DONE"):
+ imagine_card = bot_response_message.get("imagine_card")
+ if imagine_card is not None:
+ imagine_session = imagine_card.get("session")
+ if imagine_session is not None:
+ imagine_medias = imagine_session.get("media_sets", {}).pop().get("imagine_media")
+ if imagine_medias is not None:
+ image_class = ImageResponse if streaming_state == "OVERALL_DONE" else ImagePreview
+ yield image_class([media["uri"] for media in imagine_medias], imagine_medias[0]["prompt"])
+ snippet = bot_response_message["snippet"]
+ new_snippet_len = len(snippet)
+ if new_snippet_len > last_snippet_len:
+ yield snippet[last_snippet_len:]
+ last_snippet_len = new_snippet_len
+ #if last_streamed_response is None:
+ # if attempts > 3:
+ # raise Exception("MetaAI is having issues and was not able to respond (Server Error)")
+ # access_token = await self.get_access_token()
+ # return await self.prompt(message=message, attempts=attempts + 1)
+ if fetch_id is not None:
+ sources = await self.fetch_sources(fetch_id)
+ if sources is not None:
+ yield sources
+
+ async def update_cookies(self, cookies: Cookies = None):
+ async with self.session.get("https://www.meta.ai/", cookies=cookies) as response:
+ await raise_for_status(response, "Fetch home failed")
+ text = await response.text()
+ if "AbraGeoBlockedError" in text:
+ raise AbraGeoBlockedError("Meta AI isn't available yet in your country")
+ if cookies is None:
+ cookies = {
+ "_js_datr": self.extract_value(text, "_js_datr"),
+ "abra_csrf": self.extract_value(text, "abra_csrf"),
+ "datr": self.extract_value(text, "datr"),
+ }
+ self.lsd = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}')
+ self.dtsg = self.extract_value(text, start_str='"DTSGInitialData",[],{"token":"', end_str='"}')
+ self.cookies = cookies
+
+ async def fetch_sources(self, fetch_id: str) -> Sources:
+ if self.access_token is None:
+ url = "https://www.meta.ai/api/graphql/"
+ payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}
+ headers = {'x-fb-lsd': self.lsd}
+ else:
+ url = "https://graph.meta.ai/graphql?locale=user"
+ payload = {"access_token": self.access_token}
+ headers = {}
+ payload = {
+ **payload,
+ "fb_api_caller_class": "RelayModern",
+ "fb_api_req_friendly_name": "AbraSearchPluginDialogQuery",
+ "variables": json.dumps({"abraMessageFetchID": fetch_id}),
+ "server_timestamps": "true",
+ "doc_id": "6946734308765963",
+ }
+ headers = {
+ "authority": "graph.meta.ai",
+ "x-fb-friendly-name": "AbraSearchPluginDialogQuery",
+ **headers
+ }
+ async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
+ await raise_for_status(response, "Fetch sources failed")
+ text = await response.text()
+ if "<h1>Something Went Wrong</h1>" in text:
+ raise ResponseError("Response: Something Went Wrong")
+ try:
+ response_json = json.loads(text)
+ message = response_json["data"]["message"]
+ if message is not None:
+ searchResults = message["searchResults"]
+ if searchResults is not None:
+ return Sources(searchResults["references"])
+ except (KeyError, TypeError, json.JSONDecodeError):
+ raise RuntimeError(f"Response: {text}")
+
+ @staticmethod
+ def extract_value(text: str, key: str = None, start_str = None, end_str = '",') -> str:
+ if start_str is None:
+ start_str = f'{key}":{{"value":"'
+ start = text.find(start_str)
+ if start >= 0:
+ start+= len(start_str)
+ end = text.find(end_str, start)
+ if end >= 0:
+ return text[start:end]
+
+def generate_offline_threading_id() -> str:
+ """
+ Generates an offline threading ID.
+
+ Returns:
+ str: The generated offline threading ID.
+ """
+ # Generate a random 64-bit integer
+ random_value = random.getrandbits(64)
+
+ # Get the current timestamp in milliseconds
+ timestamp = int(time.time() * 1000)
+
+ # Combine timestamp and random value
+ threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
+
+ return str(threading_id)
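A quick check of the bit packing in generate_offline_threading_id: the millisecond timestamp occupies the high bits, and only the low 22 bits of the 64-bit random value survive, so the timestamp stays recoverable:

    timestamp = 1700000000000                      # example epoch milliseconds
    random_low = 0x2AAAAA                          # any 22-bit value
    threading_id = (timestamp << 22) | random_low
    assert threading_id >> 22 == timestamp         # shifting back recovers the timestamp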
diff --git a/g4f/Provider/needs_auth/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py
new file mode 100644
index 00000000..0a586006
--- /dev/null
+++ b/g4f/Provider/needs_auth/MetaAIAccount.py
@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+from ...typing import AsyncResult, Messages, Cookies
+from ..helper import format_prompt, get_cookies
+from .MetaAI import MetaAI
+
+class MetaAIAccount(MetaAI):
+ needs_auth = True
+ parent = "MetaAI"
+ image_models = ["meta"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ cookies: Cookies = None,
+ **kwargs
+ ) -> AsyncResult:
+ cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
+ async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
+ yield chunk
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
deleted file mode 100644
index 7945784a..00000000
--- a/g4f/Provider/needs_auth/OpenRouter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import annotations
-
-import requests
-
-from .Openai import Openai
-from ...typing import AsyncResult, Messages
-
-class OpenRouter(Openai):
- label = "OpenRouter"
- url = "https://openrouter.ai"
- working = True
- default_model = "mistralai/mistral-7b-instruct:free"
-
- @classmethod
- def get_models(cls):
- if not cls.models:
- url = 'https://openrouter.ai/api/v1/models'
- models = requests.get(url).json()["data"]
- cls.models = [model['id'] for model in models]
- return cls.models
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = "https://openrouter.ai/api/v1",
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 9da6bad8..116b5f6f 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -9,13 +9,14 @@ from ...requests import StreamSession, raise_for_status
from ...errors import MissingAuthError, ResponseError
from ...image import to_data_uri
-class Openai(AsyncGeneratorProvider, ProviderModelMixin):
+class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI API"
- url = "https://openai.com"
+ url = "https://platform.openai.com"
working = True
needs_auth = True
supports_message_history = True
supports_system_message = True
+ default_model = ""
@classmethod
async def create_async_generator(
@@ -120,4 +121,4 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
if api_key is not None else {}
),
**({} if headers is None else headers)
- } \ No newline at end of file
+ }
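The rename makes the base class's role explicit: an OpenAI-compatible endpoint is added by subclassing OpenaiAPI and overriding api_base, exactly as Groq, DeepInfra, and ThebApi do elsewhere in this changeset. A sketch with a hypothetical provider:

    from g4f.Provider.needs_auth.OpenaiAPI import OpenaiAPI
    from g4f.typing import AsyncResult, Messages

    class ExampleAPI(OpenaiAPI):  # hypothetical provider, not part of this diff
        label = "Example API"
        url = "https://api.example.com"
        working = True
        default_model = "example-model"

        @classmethod
        def create_async_generator(
            cls,
            model: str,
            messages: Messages,
            api_base: str = "https://api.example.com/v1",
            **kwargs
        ) -> AsyncResult:
            return super().create_async_generator(model, messages, api_base=api_base, **kwargs)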
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index f40ae961..13e15f1d 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -1,37 +1,39 @@
from __future__ import annotations
+import re
import asyncio
import uuid
import json
import base64
import time
+import requests
from aiohttp import ClientWebSocketResponse
from copy import copy
try:
- import webview
- has_webview = True
+ import nodriver
+ from nodriver.cdp.network import get_response_body
+ has_nodriver = True
except ImportError:
- has_webview = False
-
+ has_nodriver = False
try:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
+ from platformdirs import user_config_dir
+ has_platformdirs = True
except ImportError:
- pass
+ has_platformdirs = False
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...webdriver import get_browser
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
-from ...requests import get_args_from_browser, raise_for_status
+from ...requests.raise_for_status import raise_for_status
from ...requests.aiohttp import StreamSession
from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
from ...errors import MissingAuthError, ResponseError
from ...providers.conversation import BaseConversation
from ..helper import format_cookies
-from ..openai.har_file import getArkoseAndAccessToken, NoValidHarFileError
+from ..openai.har_file import get_request_config, NoValidHarFileError
+from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url
from ..openai.proofofwork import generate_proof_token
+from ..openai.new import get_requirements_token
from ... import debug
DEFAULT_HEADERS = {
@@ -55,25 +57,33 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
- supports_gpt_35_turbo = True
+ needs_auth = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
- default_model = None
+ default_model = "auto"
default_vision_model = "gpt-4o"
- models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
- model_aliases = {
- "text-davinci-002-render-sha": "gpt-3.5-turbo",
- "": "gpt-3.5-turbo",
- "gpt-4-turbo-preview": "gpt-4",
- "dall-e": "gpt-4",
- }
+ fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
+ vision_models = fallback_models
+
_api_key: str = None
_headers: dict = None
_cookies: Cookies = None
_expires: int = None
@classmethod
+ def get_models(cls):
+ if not cls.models:
+ try:
+ response = requests.get(f"{cls.url}/backend-anon/models")
+ response.raise_for_status()
+ data = response.json()
+ cls.models = [model.get("slug") for model in data.get("models")]
+ except Exception:
+ cls.models = cls.fallback_models
+ return cls.models
+
+ @classmethod
async def create(
cls,
prompt: str = None,
@@ -196,7 +206,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
async with session.get(url, headers=headers) as response:
cls._update_request_args(session)
if response.status == 401:
- raise MissingAuthError('Add a "api_key" or a .har file' if cls._api_key is None else "Invalid api key")
+ raise MissingAuthError('Add a .har file for OpenaiChat' if cls._api_key is None else "Invalid api key")
await raise_for_status(response)
data = await response.json()
if "categories" in data:
@@ -219,9 +229,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"""
# Create a message object with the user role and the content
messages = [{
- "id": str(uuid.uuid4()),
"author": {"role": message["role"]},
"content": {"content_type": "text", "parts": [message["content"]]},
+ "id": str(uuid.uuid4()),
+ "create_time": int(time.time()),
+ "id": str(uuid.uuid4()),
+ "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}}
} for message in messages]
# Check if there is an image response
@@ -250,7 +263,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return messages
@classmethod
- async def get_generated_image(cls, session: StreamSession, headers: dict, line: dict) -> ImageResponse:
+ async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict) -> ImageResponse:
"""
Retrieves the image response based on the message content.
@@ -269,15 +282,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Raises:
RuntimeError: If there's an error in downloading the image, including issues with the HTTP request or response.
"""
- if "parts" not in line["message"]["content"]:
- return
- first_part = line["message"]["content"]["parts"][0]
- if "asset_pointer" not in first_part or "metadata" not in first_part:
- return
- if first_part["metadata"] is None or first_part["metadata"]["dalle"] is None:
- return
- prompt = first_part["metadata"]["dalle"]["prompt"]
- file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
+ prompt = element["metadata"]["dalle"]["prompt"]
+ file_id = element["asset_pointer"].split("file-service://", 1)[1]
try:
async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
cls._update_request_args(session)
@@ -364,20 +370,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
if cls._expires is not None and cls._expires < time.time():
cls._headers = cls._api_key = None
- arkose_token = None
- proofTokens = None
try:
- arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
- cls._create_request_args(cookies, headers)
- cls._set_api_key(api_key)
+ await get_request_config(proxy)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
+ cls._set_api_key(RequestConfig.access_token)
except NoValidHarFileError as e:
- if cls._api_key is None and cls.needs_auth:
- raise e
- cls._create_request_args()
-
- if cls.default_model is None:
- cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
-
+ await cls.nodriver_auth(proxy)
try:
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
@@ -385,9 +383,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if debug.logging:
print("OpenaiChat: Upload image failed")
print(f"{e.__class__.__name__}: {e}")
-
model = cls.get_model(model)
- model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
if conversation is None:
conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
else:
@@ -400,31 +396,32 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
f"{cls.url}/backend-anon/sentinel/chat-requirements"
if cls._api_key is None else
f"{cls.url}/backend-api/sentinel/chat-requirements",
- json={"p": generate_proof_token(True, user_agent=cls._headers["user-agent"], proofTokens=proofTokens)},
+ json={"p": get_requirements_token(RequestConfig.proof_token)},
headers=cls._headers
) as response:
cls._update_request_args(session)
await raise_for_status(response)
- requirements = await response.json()
- need_arkose = requirements.get("arkose", {}).get("required")
- chat_token = requirements["token"]
-
- if need_arkose and arkose_token is None:
- arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
- cls._create_request_args(cookies, headers)
- cls._set_api_key(api_key)
- if arkose_token is None:
+ chat_requirements = await response.json()
+ need_turnstile = chat_requirements.get("turnstile", {}).get("required", False)
+ need_arkose = chat_requirements.get("arkose", {}).get("required", False)
+ chat_token = chat_requirements.get("token")
+
+ if need_arkose and RequestConfig.arkose_token is None:
+ await get_request_config(proxy)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
+ cls._set_api_key(RequestConfig.access_token)
+ if RequestConfig.arkose_token is None:
raise MissingAuthError("No arkose token found in .har file")
- if "proofofwork" in requirements:
+ if "proofofwork" in chat_requirements:
proofofwork = generate_proof_token(
- **requirements["proofofwork"],
+ **chat_requirements["proofofwork"],
user_agent=cls._headers["user-agent"],
- proofTokens=proofTokens
+ proof_token=RequestConfig.proof_token
)
if debug.logging:
print(
- 'Arkose:', False if not need_arkose else arkose_token[:12]+"...",
+ 'Arkose:', False if not need_arkose else RequestConfig.arkose_token[:12]+"...",
'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
)
ws = None
@@ -433,18 +430,20 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
wss_url = (await response.json()).get("wss_url")
if wss_url:
ws = await session.ws_connect(wss_url)
- websocket_request_id = str(uuid.uuid4())
data = {
"action": action,
- "conversation_mode": {"kind": "primary_assistant"},
- "force_paragen": False,
- "force_rate_limit": False,
- "conversation_id": conversation.conversation_id,
+ "messages": None,
"parent_message_id": conversation.message_id,
"model": model,
+ "paragen_cot_summary_display_override": "allow",
"history_and_training_disabled": history_disabled and not auto_continue and not return_conversation,
- "websocket_request_id": websocket_request_id
+ "conversation_mode": {"kind":"primary_assistant"},
+ "websocket_request_id": str(uuid.uuid4()),
+ "supported_encodings": ["v1"],
+ "supports_buffering": True
}
+ if conversation.conversation_id is not None:
+ data["conversation_id"] = conversation.conversation_id
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
data["messages"] = cls.create_messages(messages, image_request)
@@ -453,10 +452,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"Openai-Sentinel-Chat-Requirements-Token": chat_token,
**cls._headers
}
- if need_arkose:
- headers["Openai-Sentinel-Arkose-Token"] = arkose_token
+ if RequestConfig.arkose_token:
+ headers["Openai-Sentinel-Arkose-Token"] = RequestConfig.arkose_token
if proofofwork is not None:
headers["Openai-Sentinel-Proof-Token"] = proofofwork
+ if need_turnstile and RequestConfig.turnstile_token is not None:
+ headers['openai-sentinel-turnstile-token'] = RequestConfig.turnstile_token
async with session.post(
f"{cls.url}/backend-anon/conversation"
if cls._api_key is None else
@@ -505,7 +506,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
fields: Conversation,
ws = None
) -> AsyncIterator:
- last_message: int = 0
async for message in messages:
if message.startswith(b'{"wss_url":'):
message = json.loads(message)
@@ -522,10 +522,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
async for chunk in cls.iter_messages_line(session, message, fields):
if fields.finish_reason is not None:
break
- elif isinstance(chunk, str):
- if len(chunk) > last_message:
- yield chunk[last_message:]
- last_message = len(chunk)
else:
yield chunk
if fields.finish_reason is not None:
@@ -543,148 +539,99 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
line = json.loads(line[6:])
except:
return
- if "message" not in line:
- return
- if "error" in line and line["error"]:
- raise RuntimeError(line["error"])
- if "message_type" not in line["message"]["metadata"]:
- return
- image_response = await cls.get_generated_image(session, cls._headers, line)
- if image_response is not None:
- yield image_response
- if line["message"]["author"]["role"] != "assistant":
- return
- if line["message"]["content"]["content_type"] != "text":
+ if isinstance(line, dict) and "v" in line:
+ v = line.get("v")
+ if isinstance(v, str):
+ yield v
+ elif isinstance(v, list):
+ for m in v:
+ if m.get("p") == "/message/content/parts/0":
+ yield m.get("v")
+ elif m.get("p") == "/message/metadata":
+ fields.finish_reason = m.get("v", {}).get("finish_details", {}).get("type")
+ break
+ elif isinstance(v, dict):
+ if fields.conversation_id is None:
+ fields.conversation_id = v.get("conversation_id")
+ fields.message_id = v.get("message", {}).get("id")
+ c = v.get("message", {}).get("content", {})
+ if c.get("content_type") == "multimodal_text":
+ generated_images = []
+ for element in c.get("parts"):
+ if element.get("content_type") == "image_asset_pointer":
+ generated_images.append(
+ cls.get_generated_image(session, cls._headers, element)
+ )
+ elif element.get("content_type") == "text":
+ for part in element.get("parts", []):
+ yield part
+ for image_response in await asyncio.gather(*generated_images):
+ yield image_response
return
- if line["message"]["metadata"]["message_type"] not in ("next", "continue", "variant"):
- return
- if line["message"]["recipient"] != "all":
- return
- if fields.conversation_id is None:
- fields.conversation_id = line["conversation_id"]
- fields.message_id = line["message"]["id"]
- if "parts" in line["message"]["content"]:
- yield line["message"]["content"]["parts"][0]
- if "finish_details" in line["message"]["metadata"]:
- fields.finish_reason = line["message"]["metadata"]["finish_details"]["type"]
-
- @classmethod
- async def webview_access_token(cls) -> str:
- window = webview.create_window("OpenAI Chat", cls.url)
- await asyncio.sleep(3)
- prompt_input = None
- while not prompt_input:
- try:
- await asyncio.sleep(1)
- prompt_input = window.dom.get_element("#prompt-textarea")
- except:
- ...
- window.evaluate_js("""
-this._fetch = this.fetch;
-this.fetch = async (url, options) => {
- const response = await this._fetch(url, options);
- if (url == "https://chatgpt.com/backend-api/conversation") {
- this._headers = options.headers;
- return response;
- }
- return response;
-};
-""")
- window.evaluate_js("""
- document.querySelector('.from-token-main-surface-secondary').click();
- """)
- headers = None
- while headers is None:
- headers = window.evaluate_js("this._headers")
- await asyncio.sleep(1)
- headers["User-Agent"] = window.evaluate_js("this.navigator.userAgent")
- cookies = [list(*cookie.items()) for cookie in window.get_cookies()]
- window.destroy()
- cls._cookies = dict([(name, cookie.value) for name, cookie in cookies])
- cls._headers = headers
- cls._expires = int(time.time()) + 60 * 60 * 4
- cls._update_cookie_header()
+ if "error" in line and line.get("error"):
+ raise RuntimeError(line.get("error"))
@classmethod
- async def nodriver_access_token(cls, proxy: str = None):
- try:
- import nodriver as uc
- except ImportError:
+ async def nodriver_auth(cls, proxy: str = None):
+ if not has_nodriver:
return
- try:
- from platformdirs import user_config_dir
+ if has_platformdirs:
user_data_dir = user_config_dir("g4f-nodriver")
- except:
+ else:
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await uc.start(
+ browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
)
- page = await browser.get("https://chatgpt.com/")
- await page.select("[id^=headlessui-menu-button-]", 240)
- api_key = await page.evaluate(
- "(async () => {"
- "let session = await fetch('/api/auth/session');"
- "let data = await session.json();"
- "let accessToken = data['accessToken'];"
- "let expires = new Date(); expires.setTime(expires.getTime() + 60 * 60 * 4 * 1000);"
- "document.cookie = 'access_token=' + accessToken + ';expires=' + expires.toUTCString() + ';path=/';"
- "return accessToken;"
- "})();",
- await_promise=True
- )
- cookies = {}
- for c in await page.browser.cookies.get_all():
- if c.domain.endswith("chatgpt.com"):
- cookies[c.name] = c.value
- user_agent = await page.evaluate("window.navigator.userAgent")
- await page.close()
- cls._create_request_args(cookies, user_agent=user_agent)
- cls._set_api_key(api_key)
-
- @classmethod
- def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> None:
- """
- Browse to obtain an access token.
-
- Args:
- proxy (str): Proxy to use for browsing.
-
- Returns:
- tuple[str, dict]: A tuple containing the access token and cookies.
- """
- driver = get_browser(proxy=proxy)
+ page = browser.main_tab
+ def on_request(event: nodriver.cdp.network.RequestWillBeSent):
+ if event.request.url == start_url or event.request.url.startswith(conversation_url):
+ RequestConfig.access_request_id = event.request_id
+ RequestConfig.headers = event.request.headers
+ elif event.request.url in (backend_url, backend_anon_url):
+ if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
+ RequestConfig.proof_token = json.loads(base64.b64decode(
+ event.request.headers["OpenAI-Sentinel-Proof-Token"].split("gAAAAAB", 1)[-1].encode()
+ ).decode())
+ if "OpenAI-Sentinel-Turnstile-Token" in event.request.headers:
+ RequestConfig.turnstile_token = event.request.headers["OpenAI-Sentinel-Turnstile-Token"]
+ if "Authorization" in event.request.headers:
+ RequestConfig.access_token = event.request.headers["Authorization"].split()[-1]
+ elif event.request.url == arkose_url:
+ RequestConfig.arkose_request = arkReq(
+ arkURL=event.request.url,
+ arkBx=None,
+ arkHeader=event.request.headers,
+ arkBody=event.request.post_data,
+ userAgent=event.request.headers.get("user-agent")
+ )
+ await page.send(nodriver.cdp.network.enable())
+ page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
+ page = await browser.get(cls.url)
try:
- driver.get(f"{cls.url}/")
- WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.ID, "prompt-textarea")))
- access_token = driver.execute_script(
- "let session = await fetch('/api/auth/session');"
- "let data = await session.json();"
- "let accessToken = data['accessToken'];"
- "let expires = new Date(); expires.setTime(expires.getTime() + 60 * 60 * 4 * 1000);"
- "document.cookie = 'access_token=' + accessToken + ';expires=' + expires.toUTCString() + ';path=/';"
- "return accessToken;"
- )
- args = get_args_from_browser(f"{cls.url}/", driver, do_bypass_cloudflare=False)
- cls._headers = args["headers"]
- cls._cookies = args["cookies"]
- cls._update_cookie_header()
- cls._set_api_key(access_token)
- finally:
- driver.close()
-
- @classmethod
- async def fetch_access_token(cls, session: StreamSession, headers: dict):
- async with session.get(
- f"{cls.url}/api/auth/session",
- headers=headers
- ) as response:
- if response.ok:
- data = await response.json()
- if "accessToken" in data:
- return data["accessToken"]
+ if RequestConfig.access_request_id is not None:
+ body = await page.send(get_response_body(RequestConfig.access_request_id))
+ if isinstance(body, tuple) and body:
+ body = body[0]
+ if body:
+ match = re.search(r'"accessToken":"(.*?)"', body)
+ if match:
+ RequestConfig.access_token = match.group(1)
+ except KeyError:
+ pass
+ for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
+ RequestConfig.cookies[c.name] = c.value
+ RequestConfig.user_agent = await page.evaluate("window.navigator.userAgent")
+ await page.select("#prompt-textarea", 240)
+ while True:
+ if RequestConfig.proof_token:
+ break
+ await asyncio.sleep(1)
+ await page.close()
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=RequestConfig.user_agent)
+ cls._set_api_key(RequestConfig.access_token)
@staticmethod
def get_default_headers() -> dict:
@@ -711,7 +658,8 @@ this.fetch = async (url, options) => {
def _set_api_key(cls, api_key: str):
cls._api_key = api_key
cls._expires = int(time.time()) + 60 * 60 * 4
- cls._headers["authorization"] = f"Bearer {api_key}"
+ if api_key:
+ cls._headers["authorization"] = f"Bearer {api_key}"
@classmethod
def _update_cookie_header(cls):
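The subtle step in the new nodriver_auth is recovering the sentinel proof token: the intercepted header value is "gAAAAAB" followed by base64-encoded JSON, so splitting on that marker and decoding yields the payload. A round-trip check of the decoding used above (token contents are illustrative):

    import base64
    import json

    token = "gAAAAAB" + base64.b64encode(json.dumps(["fake", "payload"]).encode()).decode()
    payload = json.loads(base64.b64decode(token.split("gAAAAAB", 1)[-1].encode()).decode())
    assert payload == ["fake", "payload"]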
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 35d8d9d6..85d7cc98 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,9 +1,9 @@
from __future__ import annotations
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
from ...typing import AsyncResult, Messages
-class PerplexityApi(Openai):
+class PerplexityApi(OpenaiAPI):
label = "Perplexity API"
url = "https://www.perplexity.ai"
working = True
@@ -15,7 +15,6 @@ class PerplexityApi(Openai):
"llama-3-sonar-large-32k-online",
"llama-3-8b-instruct",
"llama-3-70b-instruct",
- "mixtral-8x7b-instruct"
]
@classmethod
@@ -28,4 +27,4 @@ class PerplexityApi(Openai):
) -> AsyncResult:
return super().create_async_generator(
model, messages, api_base=api_base, **kwargs
- ) \ No newline at end of file
+ )
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 0c969d27..65fdbef9 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -26,6 +26,7 @@ class Poe(AbstractProvider):
needs_auth = True
supports_gpt_35_turbo = True
supports_stream = True
+ models = models.keys()
@classmethod
def create_completion(
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 07abeda3..b8ec5a97 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -16,6 +16,11 @@ class Raycast(AbstractProvider):
needs_auth = True
working = True
+ models = [
+ "gpt-3.5-turbo",
+ "gpt-4"
+ ]
+
@staticmethod
def create_completion(
model: str,
@@ -25,6 +30,9 @@ class Raycast(AbstractProvider):
**kwargs,
) -> CreateResult:
auth = kwargs.get('auth')
+ if not auth:
+ raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter")
+
headers = {
'Accept': 'application/json',
'Accept-Language': 'en-US,en;q=0.9',
diff --git a/g4f/Provider/needs_auth/Replicate.py b/g4f/Provider/needs_auth/Replicate.py
new file mode 100644
index 00000000..ec993aa4
--- /dev/null
+++ b/g4f/Provider/needs_auth/Replicate.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, filter_none
+from ...typing import AsyncResult, Messages
+from ...requests import raise_for_status
+from ...requests.aiohttp import StreamSession
+from ...errors import ResponseError, MissingAuthError
+
+class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://replicate.com"
+ working = True
+ needs_auth = True
+ default_model = "meta/meta-llama-3-70b-instruct"
+ model_aliases = {
+ "meta-llama/Meta-Llama-3-70B-Instruct": default_model
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_key: str = None,
+ proxy: str = None,
+ timeout: int = 180,
+ system_prompt: str = None,
+ max_new_tokens: int = None,
+ temperature: float = None,
+ top_p: float = None,
+ top_k: float = None,
+ stop: list = None,
+ extra_data: dict = {},
+ headers: dict = {
+ "accept": "application/json",
+ },
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ if cls.needs_auth and api_key is None:
+ raise MissingAuthError("api_key is missing")
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+ api_base = "https://api.replicate.com/v1/models/"
+ else:
+ api_base = "https://replicate.com/api/models/"
+ async with StreamSession(
+ proxy=proxy,
+ headers=headers,
+ timeout=timeout
+ ) as session:
+ data = {
+ "stream": True,
+ "input": {
+ "prompt": format_prompt(messages),
+ **filter_none(
+ system_prompt=system_prompt,
+ max_new_tokens=max_new_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ top_k=top_k,
+ stop_sequences=",".join(stop) if stop else None
+ ),
+ **extra_data
+ },
+ }
+ url = f"{api_base.rstrip('/')}/{model}/predictions"
+ async with session.post(url, json=data) as response:
+ message = "Model not found" if response.status == 404 else None
+ await raise_for_status(response, message)
+ result = await response.json()
+ if "id" not in result:
+ raise ResponseError(f"Invalid response: {result}")
+ async with session.get(result["urls"]["stream"], headers={"Accept": "text/event-stream"}) as response:
+ await raise_for_status(response)
+ event = None
+ async for line in response.iter_lines():
+ if line.startswith(b"event: "):
+ event = line[7:]
+ if event == b"done":
+ break
+ elif event == b"output":
+ if line.startswith(b"data: "):
+ new_text = line[6:].decode()
+ if new_text:
+ yield new_text
+ else:
+ yield "\n"
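A minimal sketch of driving the new Replicate provider directly (the api_key is a placeholder; the model shown is the provider's default, meta/meta-llama-3-70b-instruct):

import asyncio
from g4f.Provider.needs_auth import Replicate

async def main():
    async for chunk in Replicate.create_async_generator(
        model="meta/meta-llama-3-70b-instruct",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        api_key="r8_...",  # placeholder Replicate API token
        temperature=0.7,
    ):
        print(chunk, end="")

asyncio.run(main())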
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index af690063..c7d7d58e 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -38,6 +38,7 @@ class Theb(AbstractProvider):
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
+ models = models.keys()
@classmethod
def create_completion(
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 22fc62ed..2006f7ad 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ...typing import CreateResult, Messages
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
models = {
"theb-ai": "TheB.AI",
@@ -27,7 +27,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}
-class ThebApi(Openai):
+class ThebApi(OpenaiAPI):
label = "TheB.AI API"
url = "https://theb.ai"
working = True
@@ -58,4 +58,4 @@ class ThebApi(Openai):
"top_p": top_p,
}
}
- return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs) \ No newline at end of file
+ return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
diff --git a/g4f/Provider/needs_auth/WhiteRabbitNeo.py b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
new file mode 100644
index 00000000..82275c1c
--- /dev/null
+++ b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_cookies, get_connector, get_random_string
+
+class WhiteRabbitNeo(AsyncGeneratorProvider):
+ url = "https://www.whiterabbitneo.com"
+ working = True
+ supports_message_history = True
+ needs_auth = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ cookies: Cookies = None,
+ connector: BaseConnector = None,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if cookies is None:
+ cookies = get_cookies("www.whiterabbitneo.com")
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Origin": cls.url,
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers"
+ }
+ async with ClientSession(
+ headers=headers,
+ cookies=cookies,
+ connector=get_connector(connector, proxy)
+ ) as session:
+ data = {
+ "messages": messages,
+ "id": get_random_string(6),
+ "enhancePrompt": False,
+ "useFunctions": False
+ }
+ async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode(errors="ignore")
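A usage sketch for WhiteRabbitNeo, assuming a logged-in whiterabbitneo.com session in a local browser so get_cookies() can pick up the auth cookies; note the provider ignores the model argument:

import asyncio
from g4f.Provider.needs_auth import WhiteRabbitNeo

async def main():
    async for chunk in WhiteRabbitNeo.create_async_generator(
        model="",  # unused by this provider
        messages=[{"role": "user", "content": "Explain what nmap does."}],
    ):
        print(chunk, end="")

asyncio.run(main())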
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index b5463b71..0f430ab5 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,11 +1,21 @@
-from .Gemini import Gemini
-from .Raycast import Raycast
-from .Theb import Theb
-from .ThebApi import ThebApi
-from .OpenaiChat import OpenaiChat
-from .Poe import Poe
-from .Openai import Openai
-from .Groq import Groq
-from .OpenRouter import OpenRouter
-from .OpenaiAccount import OpenaiAccount
-from .PerplexityApi import PerplexityApi \ No newline at end of file
+from .gigachat import *
+
+from .BingCreateImages import BingCreateImages
+from .CopilotAccount import CopilotAccount
+from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
+from .Gemini import Gemini
+from .GeminiPro import GeminiPro
+from .Groq import Groq
+from .HuggingFace import HuggingFace
+from .MetaAI import MetaAI
+from .MetaAIAccount import MetaAIAccount
+from .OpenaiAPI import OpenaiAPI
+from .OpenaiChat import OpenaiChat
+from .PerplexityApi import PerplexityApi
+from .Poe import Poe
+from .Raycast import Raycast
+from .Replicate import Replicate
+from .Theb import Theb
+from .ThebApi import ThebApi
+from .WhiteRabbitNeo import WhiteRabbitNeo
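The rewritten __init__ sorts the exports alphabetically and pulls in the new gigachat subpackage via its own __init__, so every authenticated provider is importable from one namespace:

from g4f.Provider.needs_auth import GigaChat, Replicate, WhiteRabbitNeo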
diff --git a/g4f/Provider/needs_auth/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py
new file mode 100644
index 00000000..c9f1c011
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py
@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+import os
+import ssl
+import time
+import uuid
+import json
+
+from aiohttp import ClientSession, TCPConnector, BaseConnector
+
+from ....typing import AsyncResult, Messages
+from ....requests import raise_for_status
+from ....errors import MissingAuthError
+from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...helper import get_connector
+
+access_token = ""
+token_expires_at = 0
+
+class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://developers.sber.ru/gigachat"
+ working = True
+ supports_message_history = True
+ supports_system_message = True
+ supports_stream = True
+ needs_auth = True
+ default_model = "GigaChat:latest"
+ models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ proxy: str = None,
+ api_key: str = None,
+ connector: BaseConnector = None,
+ scope: str = "GIGACHAT_API_PERS",
+ update_interval: float = 0,
+ **kwargs
+ ) -> AsyncResult:
+ global access_token, token_expires_at
+ model = cls.get_model(model)
+ if not api_key:
+ raise MissingAuthError('Missing "api_key"')
+
+ cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt")
+ ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None
+ if connector is None and ssl_context is not None:
+ connector = TCPConnector(ssl_context=ssl_context)
+ async with ClientSession(connector=get_connector(connector, proxy)) as session:
+ if token_expires_at - int(time.time() * 1000) < 60000:
+ async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth",
+ headers={"Authorization": f"Bearer {api_key}",
+ "RqUID": str(uuid.uuid4()),
+ "Content-Type": "application/x-www-form-urlencoded"},
+ data={"scope": scope}) as response:
+ await raise_for_status(response)
+ data = await response.json()
+ access_token = data['access_token']
+ token_expires_at = data['expires_at']
+
+ async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions",
+ headers={"Authorization": f"Bearer {access_token}"},
+ json={
+ "model": model,
+ "messages": messages,
+ "stream": stream,
+ "update_interval": update_interval,
+ **kwargs
+ }) as response:
+ await raise_for_status(response)
+
+ async for line in response.content:
+ if not stream:
+ yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content']
+ return
+
+ if line and line.startswith(b"data:"):
+ line = line[6:-1] # remove "data: " prefix and "\n" suffix
+ if line.strip() == b"[DONE]":
+ return
+ else:
+ msg = json.loads(line.decode("utf-8"))['choices'][0]
+ content = msg['delta']['content']
+
+ if content:
+ yield content
+
+ if 'finish_reason' in msg:
+ return
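A usage sketch for GigaChat; the api_key is the authorization key issued by the Sber developer portal (the value below is a placeholder), and the bundled Russian trusted root certificate is picked up automatically from the package directory:

import asyncio
from g4f.Provider.needs_auth import GigaChat

async def main():
    async for chunk in GigaChat.create_async_generator(
        model="GigaChat:latest",
        messages=[{"role": "user", "content": "Say hello."}],
        api_key="<sber-authorization-key>",  # placeholder credential
    ):
        print(chunk, end="")

asyncio.run(main())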
diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py
new file mode 100644
index 00000000..c9853742
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/__init__.py
@@ -0,0 +1,2 @@
+from .GigaChat import GigaChat
+
diff --git a/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
new file mode 100644
index 00000000..4c143a21
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx
+PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu
+ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg
+Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS
+VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg
+YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n
+qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q
+XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U
+zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX
+YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y
+Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD
+U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD
+4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9
+G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH
+BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX
+ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa
+OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf
+BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS
+BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF
+AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH
+tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq
+W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+
+/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS
+AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj
+C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV
+4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d
+WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ
+D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC
+EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq
+391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4=
+-----END CERTIFICATE----- \ No newline at end of file