Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/Copilot.py                    |   5
-rw-r--r--  g4f/Provider/needs_auth/Gemini.py          |  65
-rw-r--r--  g4f/Provider/needs_auth/GithubCopilot.py   |  93
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py      | 325
-rw-r--r--  g4f/Provider/needs_auth/__init__.py        |   1
-rw-r--r--  g4f/Provider/openai/har_file.py            |  35
-rw-r--r--  g4f/api/__init__.py                        | 142
-rw-r--r--  g4f/client/__init__.py                     |  18
-rw-r--r--  g4f/client/helper.py                       |  30
-rw-r--r--  g4f/gui/client/index.html                  |   5
-rw-r--r--  g4f/gui/client/static/css/style.css        |  37
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js        | 244
-rw-r--r--  g4f/gui/server/api.py                      |  17
-rw-r--r--  g4f/gui/server/backend.py                  |  69
-rw-r--r--  g4f/image.py                               |  10
-rw-r--r--  g4f/providers/asyncio.py                   |  65
-rw-r--r--  g4f/providers/base_provider.py             |  74
-rw-r--r--  g4f/providers/response.py                  |  13
18 files changed, 758 insertions(+), 490 deletions(-)
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index e10a55e8..2f37b1eb 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -73,10 +73,9 @@ class Copilot(AbstractProvider):
else:
access_token = conversation.access_token
debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}")
- debug.log(f"Copilot: Cookies: {';'.join([*cookies])}")
websocket_url = f"{websocket_url}&accessToken={quote(access_token)}"
- headers = {"authorization": f"Bearer {access_token}", "cookie": format_cookies(cookies)}
-
+ headers = {"authorization": f"Bearer {access_token}"}
+
with Session(
timeout=timeout,
proxy=proxy,
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 781aa410..89f6f802 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -4,8 +4,10 @@ import os
import json
import random
import re
+import base64
from aiohttp import ClientSession, BaseConnector
+
try:
import nodriver
has_nodriver = True
@@ -14,12 +16,13 @@ except ImportError:
from ... import debug
from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
-from ..base_provider import AsyncGeneratorProvider, BaseConversation
+from ..base_provider import AsyncGeneratorProvider, BaseConversation, SynthesizeData
from ..helper import format_prompt, get_cookies
from ...requests.raise_for_status import raise_for_status
from ...requests.aiohttp import get_connector
from ...errors import MissingAuthError
from ...image import ImageResponse, to_bytes
REQUEST_HEADERS = {
"authority": "gemini.google.com",
@@ -54,6 +57,7 @@ class Gemini(AsyncGeneratorProvider):
image_models = ["gemini"]
default_vision_model = "gemini"
models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
+ synthesize_content_type = "audio/vnd.wav"
_cookies: Cookies = None
_snlm0e: str = None
_sid: str = None
@@ -106,6 +110,7 @@ class Gemini(AsyncGeneratorProvider):
prompt = format_prompt(messages) if conversation is None else messages[-1]["content"]
cls._cookies = cookies or cls._cookies or get_cookies(".google.com", False, True)
base_connector = get_connector(connector, proxy)
+
async with ClientSession(
headers=REQUEST_HEADERS,
connector=base_connector
@@ -122,6 +127,7 @@ class Gemini(AsyncGeneratorProvider):
if not cls._snlm0e:
raise RuntimeError("Invalid cookies. SNlM0e not found")
+ yield SynthesizeData(cls.__name__, {"text": messages[-1]["content"]})
image_url = await cls.upload_image(base_connector, to_bytes(image), image_name) if image else None
async with ClientSession(
@@ -198,6 +204,39 @@ class Gemini(AsyncGeneratorProvider):
except TypeError:
pass
+ @classmethod
+ async def synthesize(cls, params: dict, proxy: str = None) -> AsyncIterator[bytes]:
+ if "text" not in params:
+ raise ValueError("Missing parameter text")
+ async with ClientSession(
+ cookies=cls._cookies,
+ headers=REQUEST_HEADERS,
+ connector=get_connector(proxy=proxy),
+ ) as session:
+            if not cls._snlm0e and cls._cookies:
+                await cls.fetch_snlm0e(session, cls._cookies)
+ inner_data = json.dumps([None, params["text"], "de-DE", None, 2])
+ async with session.post(
+ "https://gemini.google.com/_/BardChatUi/data/batchexecute",
+ data={
+ "f.req": json.dumps([[["XqA3Ic", inner_data, None, "generic"]]]),
+ "at": cls._snlm0e,
+ },
+ params={
+ "rpcids": "XqA3Ic",
+ "source-path": "/app/2704fb4aafcca926",
+ "bl": "boq_assistant-bard-web-server_20241119.00_p1",
+ "f.sid": "" if cls._sid is None else cls._sid,
+ "hl": "de",
+ "_reqid": random.randint(1111, 9999),
+ "rt": "c"
+ },
+ ) as response:
+ await raise_for_status(response)
+ iter_base64_response = iter_filter_base64(response.content.iter_chunked(1024))
+ async for chunk in iter_base64_decode(iter_base64_response):
+ yield chunk
+
def build_request(
prompt: str,
language: str,
@@ -280,3 +319,27 @@ class Conversation(BaseConversation):
self.conversation_id = conversation_id
self.response_id = response_id
self.choice_id = choice_id
+async def iter_filter_base64(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
+ search_for = b'[["wrb.fr","XqA3Ic","[\\"'
+ end_with = b'\\'
+ is_started = False
+ async for chunk in response_iter:
+ if is_started:
+ if end_with in chunk:
+ yield chunk.split(end_with, 1).pop(0)
+ break
+ else:
+ yield chunk
+ elif search_for in chunk:
+ is_started = True
+ yield chunk.split(search_for, 1).pop()
+ else:
+ raise RuntimeError(f"Response: {chunk}")
+
+async def iter_base64_decode(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
+    buffer = b""
+    async for chunk in response_iter:
+        chunk = buffer + chunk
+        rest = len(chunk) % 4
+        # Slice via len(chunk) - rest so that rest == 0 flushes the whole chunk
+        # instead of buffering it forever (chunk[:-0] would be empty).
+        buffer = chunk[len(chunk) - rest:]
+        yield base64.b64decode(chunk[:len(chunk) - rest])
\ No newline at end of file
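
The two helpers above work as a pair: iter_filter_base64 isolates the base64 payload between the wrb.fr marker and the terminating backslash, and iter_base64_decode realigns arbitrary chunk boundaries to 4-byte base64 groups before decoding. A minimal sketch of the pattern against a synthetic stream (fake_stream is hypothetical; the marker bytes match the constants above):

import asyncio
import base64
from typing import AsyncIterator

from g4f.Provider.needs_auth.Gemini import iter_filter_base64, iter_base64_decode

async def fake_stream(audio_b64: bytes) -> AsyncIterator[bytes]:
    # Synthetic stand-in for response.content.iter_chunked(1024): the wrb.fr
    # marker arrives in the first chunk, the terminating backslash in the last.
    yield b'[["wrb.fr","XqA3Ic","[\\"' + audio_b64[:10]
    yield audio_b64[10:]
    yield b'\\"]]'

async def demo() -> bytes:
    audio_b64 = base64.b64encode(b"RIFF....WAVEfmt ")  # pretend WAV bytes
    out = b""
    async for part in iter_base64_decode(iter_filter_base64(fake_stream(audio_b64))):
        out += part
    return out

assert asyncio.run(demo()) == b"RIFF....WAVEfmt "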
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
new file mode 100644
index 00000000..0c12dfd0
--- /dev/null
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import json
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ...requests import StreamSession
+from ...providers.helper import format_prompt
+from ...cookies import get_cookies
+
+class Conversation(BaseConversation):
+ conversation_id: str
+
+ def __init__(self, conversation_id: str):
+ self.conversation_id = conversation_id
+
+class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://github.com/copilot"
+ working = True
+ needs_auth = True
+ supports_stream = True
+ default_model = "gpt-4o"
+ models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ api_key: str = "X2eRgXPamxGK_TXS6seGGYy541mQuVJdH1CYljrvSPuc38je5J4KK4Aw0y5X2oVRFMjA4B1fo9sdsr4VJcl-VBae7H0Mr4U9GIkFnGx3hSs=",
+ proxy: str = None,
+ cookies: Cookies = None,
+ conversation_id: str = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = cls.default_model
+ if cookies is None:
+ cookies = get_cookies(".github.com")
+ async with StreamSession(
+ proxy=proxy,
+ impersonate="chrome",
+ cookies=cookies,
+ headers={
+ "GitHub-Verified-Fetch": "true",
+ }
+ ) as session:
+ headers = {}
+ if api_key is None:
+ async with session.post("https://github.com/github-copilot/chat/token") as response:
+ await raise_for_status(response, "Get token")
+ api_key = (await response.json()).get("token")
+ headers = {
+ "Authorization": f"GitHub-Bearer {api_key}",
+ }
+ if conversation is not None:
+ conversation_id = conversation.conversation_id
+ if conversation_id is None:
+ async with session.post("https://api.individual.githubcopilot.com/github/chat/threads", headers=headers) as response:
+ await raise_for_status(response)
+ conversation_id = (await response.json()).get("thread_id")
+ if return_conversation:
+ yield Conversation(conversation_id)
+ content = messages[-1]["content"]
+ else:
+ content = format_prompt(messages)
+ json_data = {
+ "content": content,
+ "intent": "conversation",
+ "references":[],
+ "context": [],
+ "currentURL": f"https://github.com/copilot/c/{conversation_id}",
+ "streaming": True,
+ "confirmations": [],
+ "customInstructions": [],
+ "model": model,
+ "mode": "immersive"
+ }
+ async with session.post(
+ f"https://api.individual.githubcopilot.com/github/chat/threads/{conversation_id}/messages",
+ json=json_data,
+ headers=headers
+ ) as response:
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ data = json.loads(line[6:])
+ if data.get("type") == "content":
+ yield data.get("body") \ No newline at end of file
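
For reference, a minimal usage sketch for the new provider. It assumes a logged-in github.com browser session so get_cookies(".github.com") finds valid cookies; passing api_key=None forces a fresh token from the chat/token endpoint instead of the baked-in default:

import asyncio
from g4f.Provider.needs_auth import GithubCopilot

async def main():
    # Streams tokens from a fresh thread; pass conversation=... to continue one.
    async for chunk in GithubCopilot.create_async_generator(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Summarize this diff in one sentence."}],
        api_key=None,  # fetch a fresh GitHub-Bearer token via the cookies
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())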
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 587c0a23..37bdf074 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -7,7 +7,6 @@ import json
import base64
import time
import requests
-from aiohttp import ClientWebSocketResponse
from copy import copy
try:
@@ -16,19 +15,15 @@ try:
has_nodriver = True
except ImportError:
has_nodriver = False
-try:
- from platformdirs import user_config_dir
- has_platformdirs = True
-except ImportError:
- has_platformdirs = False
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
from ...requests.raise_for_status import raise_for_status
-from ...requests.aiohttp import StreamSession
+from ...requests import StreamSession
+from ...requests import get_nodriver
from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
-from ...errors import MissingAuthError, ResponseError
-from ...providers.response import BaseConversation
+from ...errors import MissingAuthError
+from ...providers.response import BaseConversation, FinishReason, SynthesizeData
from ..helper import format_cookies
from ..openai.har_file import get_request_config, NoValidHarFileError
from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url
@@ -63,9 +58,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
default_model = "auto"
default_vision_model = "gpt-4o"
- fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
+ fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
vision_models = fallback_models
image_models = fallback_models
+ synthesize_content_type = "audio/mpeg"
_api_key: str = None
_headers: dict = None
@@ -85,51 +81,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return cls.models
@classmethod
- async def create(
- cls,
- prompt: str = None,
- model: str = "",
- messages: Messages = [],
- action: str = "next",
- **kwargs
- ) -> Response:
- """
- Create a new conversation or continue an existing one
-
- Args:
- prompt: The user input to start or continue the conversation
- model: The name of the model to use for generating responses
- messages: The list of previous messages in the conversation
- history_disabled: A flag indicating if the history and training should be disabled
- action: The type of action to perform, either "next", "continue", or "variant"
- conversation_id: The ID of the existing conversation, if any
- parent_id: The ID of the parent message, if any
- image: The image to include in the user input, if any
- **kwargs: Additional keyword arguments to pass to the generator
-
- Returns:
- A Response object that contains the generator, action, messages, and options
- """
- # Add the user input to the messages list
- if prompt is not None:
- messages.append({
- "role": "user",
- "content": prompt
- })
- generator = cls.create_async_generator(
- model,
- messages,
- return_conversation=True,
- **kwargs
- )
- return Response(
- generator,
- action,
- messages,
- kwargs
- )
-
- @classmethod
async def upload_image(
cls,
session: StreamSession,
@@ -160,7 +111,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
# Post the image data to the service and get the image data
async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
cls._update_request_args(session)
- await raise_for_status(response)
+ await raise_for_status(response, "Create file failed")
image_data = {
**data,
**await response.json(),
@@ -178,7 +129,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"x-ms-blob-type": "BlockBlob"
}
) as response:
- await raise_for_status(response)
+ await raise_for_status(response, "Send file failed")
# Post the file ID to the service and get the download URL
async with session.post(
f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
@@ -186,38 +137,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
headers=headers
) as response:
cls._update_request_args(session)
- await raise_for_status(response)
+ await raise_for_status(response, "Get download url failed")
image_data["download_url"] = (await response.json())["download_url"]
return ImageRequest(image_data)
@classmethod
- async def get_default_model(cls, session: StreamSession, headers: dict):
- """
- Get the default model name from the service
-
- Args:
- session: The StreamSession object to use for requests
- headers: The headers to include in the requests
-
- Returns:
- The default model name as a string
- """
- if not cls.default_model:
- url = f"{cls.url}/backend-anon/models" if cls._api_key is None else f"{cls.url}/backend-api/models"
- async with session.get(url, headers=headers) as response:
- cls._update_request_args(session)
- if response.status == 401:
- raise MissingAuthError('Add a .har file for OpenaiChat' if cls._api_key is None else "Invalid api key")
- await raise_for_status(response)
- data = await response.json()
- if "categories" in data:
- cls.default_model = data["categories"][-1]["default_model"]
- return cls.default_model
- raise ResponseError(data)
- return cls.default_model
-
- @classmethod
- def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
+ def create_messages(cls, messages: Messages, image_request: ImageRequest = None, system_hints: list = None):
"""
Create a list of messages for the user input
@@ -235,7 +160,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"id": str(uuid.uuid4()),
"create_time": int(time.time()),
"id": str(uuid.uuid4()),
- "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}}
+ "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, "system_hints": system_hints},
} for message in messages]
# Check if there is an image response
@@ -264,7 +189,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return messages
@classmethod
- async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict) -> ImageResponse:
+ async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict, prompt: str = None) -> ImageResponse:
"""
Retrieves the image response based on the message content.
@@ -286,6 +211,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
try:
prompt = element["metadata"]["dalle"]["prompt"]
file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ except TypeError:
+ return
except Exception as e:
raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
try:
@@ -298,37 +225,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError(f"Error in downloading image: {e}")
@classmethod
- async def delete_conversation(cls, session: StreamSession, headers: dict, conversation_id: str):
- """
- Deletes a conversation by setting its visibility to False.
-
- This method sends an HTTP PATCH request to update the visibility of a conversation.
- It's used to effectively delete a conversation from being accessed or displayed in the future.
-
- Args:
- session (StreamSession): The StreamSession object used for making HTTP requests.
- headers (dict): HTTP headers to be used for the request.
- conversation_id (str): The unique identifier of the conversation to be deleted.
-
- Raises:
- HTTPError: If the HTTP request fails or returns an unsuccessful status code.
- """
- async with session.patch(
- f"{cls.url}/backend-api/conversation/{conversation_id}",
- json={"is_visible": False},
- headers=headers
- ) as response:
- cls._update_request_args(session)
- ...
-
- @classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 180,
- api_key: str = None,
cookies: Cookies = None,
auto_continue: bool = False,
history_disabled: bool = False,
@@ -340,6 +242,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image_name: str = None,
return_conversation: bool = False,
max_retries: int = 3,
+ web_search: bool = False,
**kwargs
) -> AsyncResult:
"""
@@ -367,19 +270,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Raises:
RuntimeError: If an error occurs during processing.
"""
+ await cls.login(proxy)
+
async with StreamSession(
proxy=proxy,
impersonate="chrome",
timeout=timeout
) as session:
- if cls._expires is not None and cls._expires < time.time():
- cls._headers = cls._api_key = None
- try:
- await get_request_config(proxy)
- cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
- cls._set_api_key(RequestConfig.access_token)
- except NoValidHarFileError as e:
- await cls.nodriver_auth(proxy)
try:
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
@@ -419,12 +316,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if "proofofwork" in chat_requirements:
proofofwork = generate_proof_token(
**chat_requirements["proofofwork"],
- user_agent=cls._headers["user-agent"],
+ user_agent=cls._headers.get("user-agent"),
proof_token=RequestConfig.proof_token
)
[debug.log(text) for text in (
f"Arkose: {'False' if not need_arkose else RequestConfig.arkose_token[:12]+'...'}",
f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
+ f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
)]
data = {
"action": action,
@@ -436,23 +334,25 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"conversation_mode": {"kind":"primary_assistant"},
"websocket_request_id": str(uuid.uuid4()),
"supported_encodings": ["v1"],
- "supports_buffering": True
+ "supports_buffering": True,
+ "system_hints": ["search"] if web_search else None
}
if conversation.conversation_id is not None:
data["conversation_id"] = conversation.conversation_id
debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
- data["messages"] = cls.create_messages(messages, image_request)
+ data["messages"] = cls.create_messages(messages, image_request, ["search"] if web_search else None)
headers = {
+ **cls._headers,
"accept": "text/event-stream",
- "Openai-Sentinel-Chat-Requirements-Token": chat_token,
- **cls._headers
+ "content-type": "application/json",
+ "openai-sentinel-chat-requirements-token": chat_token,
}
if RequestConfig.arkose_token:
- headers["Openai-Sentinel-Arkose-Token"] = RequestConfig.arkose_token
+ headers["openai-sentinel-arkose-token"] = RequestConfig.arkose_token
if proofofwork is not None:
- headers["Openai-Sentinel-Proof-Token"] = proofofwork
+ headers["openai-sentinel-proof-token"] = proofofwork
if need_turnstile and RequestConfig.turnstile_token is not None:
headers['openai-sentinel-turnstile-token'] = RequestConfig.turnstile_token
async with session.post(
@@ -469,31 +369,24 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
await asyncio.sleep(5)
continue
await raise_for_status(response)
- async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, conversation):
- if return_conversation:
- history_disabled = False
- return_conversation = False
- yield conversation
- yield chunk
+ if return_conversation:
+ yield conversation
+ async for line in response.iter_lines():
+ async for chunk in cls.iter_messages_line(session, line, conversation):
+ yield chunk
+ if not history_disabled:
+ yield SynthesizeData(cls.__name__, {
+ "conversation_id": conversation.conversation_id,
+ "message_id": conversation.message_id,
+ "voice": "maple",
+ })
if auto_continue and conversation.finish_reason == "max_tokens":
conversation.finish_reason = None
action = "continue"
await asyncio.sleep(5)
else:
break
- if history_disabled and auto_continue:
- await cls.delete_conversation(session, cls._headers, conversation.conversation_id)
-
- @classmethod
- async def iter_messages_chunk(
- cls,
- messages: AsyncIterator,
- session: StreamSession,
- fields: Conversation,
- ) -> AsyncIterator:
- async for message in messages:
- async for chunk in cls.iter_messages_line(session, message, fields):
- yield chunk
+ yield FinishReason(conversation.finish_reason)
@classmethod
async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation) -> AsyncIterator:
@@ -530,9 +423,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
generated_images = []
for element in c.get("parts"):
if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
- generated_images.append(
- cls.get_generated_image(session, cls._headers, element)
- )
+ image = cls.get_generated_image(session, cls._headers, element)
+ if image is not None:
+ generated_images.append(image)
for image_response in await asyncio.gather(*generated_images):
yield image_response
if m.get("author", {}).get("role") == "assistant":
@@ -542,18 +435,38 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError(line.get("error"))
@classmethod
+ async def synthesize(cls, params: dict) -> AsyncIterator[bytes]:
+ await cls.login()
+ async with StreamSession(
+ impersonate="chrome",
+ timeout=900
+ ) as session:
+ async with session.get(
+ f"{cls.url}/backend-api/synthesize",
+ params=params,
+ headers=cls._headers
+ ) as response:
+ await raise_for_status(response)
+ async for chunk in response.iter_content():
+ yield chunk
+
+ @classmethod
+ async def login(cls, proxy: str = None):
+ if cls._expires is not None and cls._expires < time.time():
+ cls._headers = cls._api_key = None
+ try:
+ await get_request_config(proxy)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
+ cls._set_api_key(RequestConfig.access_token)
+ except NoValidHarFileError:
+ if has_nodriver:
+ await cls.nodriver_auth(proxy)
+ else:
+ raise
+
+ @classmethod
async def nodriver_auth(cls, proxy: str = None):
- if not has_nodriver:
- return
- if has_platformdirs:
- user_data_dir = user_config_dir("g4f-nodriver")
- else:
- user_data_dir = None
- debug.log(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await nodriver.start(
- user_data_dir=user_data_dir,
- browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
- )
+ browser = await get_nodriver(proxy=proxy)
page = browser.main_tab
def on_request(event: nodriver.cdp.network.RequestWillBeSent):
if event.request.url == start_url or event.request.url.startswith(conversation_url):
@@ -592,14 +505,14 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
pass
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
RequestConfig.cookies[c.name] = c.value
- RequestConfig.user_agent = await page.evaluate("window.navigator.userAgent")
+ user_agent = await page.evaluate("window.navigator.userAgent")
await page.select("#prompt-textarea", 240)
while True:
if RequestConfig.proof_token:
break
await asyncio.sleep(1)
await page.close()
- cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=RequestConfig.user_agent)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
cls._set_api_key(RequestConfig.access_token)
@staticmethod
@@ -642,90 +555,4 @@ class Conversation(BaseConversation):
self.conversation_id = conversation_id
self.message_id = message_id
self.finish_reason = finish_reason
- self.is_recipient = False
-
-class Response():
- """
- Class to encapsulate a response from the chat service.
- """
- def __init__(
- self,
- generator: AsyncResult,
- action: str,
- messages: Messages,
- options: dict
- ):
- self._generator = generator
- self.action = action
- self.is_end = False
- self._message = None
- self._messages = messages
- self._options = options
- self._fields = None
-
- async def generator(self) -> AsyncIterator:
- if self._generator is not None:
- self._generator = None
- chunks = []
- async for chunk in self._generator:
- if isinstance(chunk, Conversation):
- self._fields = chunk
- else:
- yield chunk
- chunks.append(str(chunk))
- self._message = "".join(chunks)
- if self._fields is None:
- raise RuntimeError("Missing response fields")
- self.is_end = self._fields.finish_reason == "stop"
-
- def __aiter__(self):
- return self.generator()
-
- async def get_message(self) -> str:
- await self.generator()
- return self._message
-
- async def get_fields(self) -> dict:
- await self.generator()
- return {
- "conversation_id": self._fields.conversation_id,
- "parent_id": self._fields.message_id
- }
-
- async def create_next(self, prompt: str, **kwargs) -> Response:
- return await OpenaiChat.create(
- **self._options,
- prompt=prompt,
- messages=await self.get_messages(),
- action="next",
- **await self.get_fields(),
- **kwargs
- )
-
- async def do_continue(self, **kwargs) -> Response:
- fields = await self.get_fields()
- if self.is_end:
- raise RuntimeError("Can't continue message. Message already finished.")
- return await OpenaiChat.create(
- **self._options,
- messages=await self.get_messages(),
- action="continue",
- **fields,
- **kwargs
- )
-
- async def create_variant(self, **kwargs) -> Response:
- if self.action != "next":
- raise RuntimeError("Can't create variant from continue or variant request.")
- return await OpenaiChat.create(
- **self._options,
- messages=self._messages,
- action="variant",
- **await self.get_fields(),
- **kwargs
- )
-
- async def get_messages(self) -> list:
- messages = self._messages
- messages.append({"role": "assistant", "content": await self.message()})
- return messages
+ self.is_recipient = False \ No newline at end of file
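
With the Response wrapper removed, speech now goes through the emitted SynthesizeData chunks instead. A sketch of the round trip; the .data attribute is an assumption read off the SynthesizeData(cls.__name__, {...}) constructor calls above, and the output file name is arbitrary:

import asyncio
from g4f.Provider.needs_auth import OpenaiChat
from g4f.providers.response import SynthesizeData

async def main():
    params = None
    async for chunk in OpenaiChat.create_async_generator(
        "auto", [{"role": "user", "content": "Tell me a short joke."}]
    ):
        if isinstance(chunk, SynthesizeData):
            params = chunk.data  # conversation_id, message_id, voice
        elif isinstance(chunk, str):
            print(chunk, end="", flush=True)
    if params:
        # synthesize_content_type is "audio/mpeg", hence the .mp3 suffix
        with open("reply.mp3", "wb") as f:
            async for audio in OpenaiChat.synthesize(params):
                f.write(audio)

asyncio.run(main())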
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 1c7fe7c5..f3391706 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -7,6 +7,7 @@ from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .Gemini import Gemini
from .GeminiPro import GeminiPro
+from .GithubCopilot import GithubCopilot
from .Groq import Groq
from .HuggingFace import HuggingFace
from .HuggingFace2 import HuggingFace2
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index 4569e1b7..e863b6ac 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -25,7 +25,6 @@ class NoValidHarFileError(Exception):
pass
class RequestConfig:
- user_agent: str = None
cookies: dict = None
headers: dict = None
access_request_id: str = None
@@ -63,28 +62,30 @@ def readHAR():
continue
for v in harFile['log']['entries']:
v_headers = get_headers(v)
- try:
- if "openai-sentinel-proof-token" in v_headers:
- RequestConfig.proof_token = json.loads(base64.b64decode(
- v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
- ).decode())
- if "openai-sentinel-turnstile-token" in v_headers:
- RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
- except Exception as e:
- debug.log(f"Read proof token: {e}")
if arkose_url == v['request']['url']:
RequestConfig.arkose_request = parseHAREntry(v)
- elif v['request']['url'] == start_url or v['request']['url'].startswith(conversation_url):
+ elif v['request']['url'].startswith(start_url):
try:
match = re.search(r'"accessToken":"(.*?)"', v["response"]["content"]["text"])
if match:
RequestConfig.access_token = match.group(1)
except KeyError:
- continue
- RequestConfig.cookies = {c['name']: c['value'] for c in v['request']['cookies'] if c['name'] != "oai-did"}
- RequestConfig.headers = v_headers
- if RequestConfig.access_token is None:
- raise NoValidHarFileError("No accessToken found in .har files")
+ pass
+ try:
+ if "openai-sentinel-proof-token" in v_headers:
+ RequestConfig.headers = v_headers
+ RequestConfig.proof_token = json.loads(base64.b64decode(
+ v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
+ ).decode())
+ if "openai-sentinel-turnstile-token" in v_headers:
+ RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
+ if "authorization" in v_headers:
+ RequestConfig.access_token = v_headers["authorization"].split(" ")[1]
+ RequestConfig.cookies = {c['name']: c['value'] for c in v['request']['cookies']}
+ except Exception as e:
+ debug.log(f"Error on read headers: {e}")
+ if RequestConfig.proof_token is None:
+ raise NoValidHarFileError("No proof_token found in .har files")
def get_headers(entry) -> dict:
return {h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')}
@@ -149,7 +150,7 @@ def getN() -> str:
return base64.b64encode(timestamp.encode()).decode()
async def get_request_config(proxy: str) -> RequestConfig:
- if RequestConfig.access_token is None:
+ if RequestConfig.proof_token is None:
readHAR()
if RequestConfig.arkose_request is not None:
RequestConfig.arkose_token = await sendRequest(genArkReq(RequestConfig.arkose_request), proxy)
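
Since readHAR now fails only when no openai-sentinel-proof-token turns up, a capture of any authenticated chatgpt.com request suffices. A quick sanity check for a .har export (har_path is a placeholder):

import json

har_path = "chatgpt.com.har"  # placeholder: a HAR export from the browser's network tab
with open(har_path, encoding="utf-8") as f:
    entries = json.load(f)["log"]["entries"]

# readHAR keys on this exact header name when scanning entries.
has_proof = any(
    header["name"].lower() == "openai-sentinel-proof-token"
    for entry in entries
    for header in entry["request"]["headers"]
)
print("usable for OpenaiChat:", has_proof)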
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 21e69388..628d7512 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -5,8 +5,10 @@ import json
import uvicorn
import secrets
import os
+import shutil
-from fastapi import FastAPI, Response, Request
+import os.path
+from fastapi import FastAPI, Response, Request, UploadFile
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
from fastapi.exceptions import RequestValidationError
from fastapi.security import APIKeyHeader
@@ -16,16 +18,18 @@ from fastapi.encoders import jsonable_encoder
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import FileResponse
from pydantic import BaseModel
-from typing import Union, Optional
+from typing import Union, Optional, List
import g4f
import g4f.debug
-from g4f.client import AsyncClient, ChatCompletion
+from g4f.client import AsyncClient, ChatCompletion, convert_to_provider
from g4f.providers.response import BaseConversation
from g4f.client.helper import filter_none
from g4f.image import is_accepted_format, images_dir
from g4f.typing import Messages
-from g4f.cookies import read_cookie_files
+from g4f.errors import ProviderNotFoundError
+from g4f.cookies import read_cookie_files, get_cookies_dir
+from g4f.Provider import ProviderType, ProviderUtils, __providers__
logger = logging.getLogger(__name__)
@@ -77,6 +81,18 @@ class ImageGenerationConfig(BaseModel):
api_key: Optional[str] = None
proxy: Optional[str] = None
+class ProviderResponseModel(BaseModel):
+ id: str
+ object: str = "provider"
+ created: int
+ owned_by: Optional[str]
+
+class ModelResponseModel(BaseModel):
+ id: str
+ object: str = "model"
+ created: int
+ owned_by: Optional[str]
+
class AppConfig:
ignored_providers: Optional[list[str]] = None
g4f_api_key: Optional[str] = None
@@ -108,7 +124,7 @@ class Api:
def register_authorization(self):
@self.app.middleware("http")
async def authorization(request: Request, call_next):
- if self.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions", "/v1/images/generate"]:
+ if self.g4f_api_key and request.url.path not in ("/", "/v1"):
try:
user_g4f_api_key = await self.get_g4f_api_key(request)
except HTTPException as e:
@@ -122,9 +138,7 @@ class Api:
status_code=HTTP_403_FORBIDDEN,
content=jsonable_encoder({"detail": "Invalid G4F API key"}),
)
-
- response = await call_next(request)
- return response
+ return await call_next(request)
def register_validation_exception_handler(self):
@self.app.exception_handler(RequestValidationError)
@@ -152,25 +166,26 @@ class Api:
return HTMLResponse('g4f API: Go to '
'<a href="/v1/models">models</a>, '
'<a href="/v1/chat/completions">chat/completions</a>, or '
- '<a href="/v1/images/generate">images/generate</a>.')
+ '<a href="/v1/images/generate">images/generate</a> <br><br>'
+ 'Open Swagger UI at: '
+ '<a href="/docs">/docs</a>')
@self.app.get("/v1/models")
- async def models():
+ async def models() -> list[ModelResponseModel]:
model_list = dict(
(model, g4f.models.ModelUtils.convert[model])
for model in g4f.Model.__all__()
)
- model_list = [{
+ return [{
'id': model_id,
'object': 'model',
'created': 0,
'owned_by': model.base_provider
} for model_id, model in model_list.items()]
- return JSONResponse(model_list)
@self.app.get("/v1/models/{model_name}")
async def model_info(model_name: str):
- try:
+ if model_name in g4f.models.ModelUtils.convert:
model_info = g4f.models.ModelUtils.convert[model_name]
return JSONResponse({
'id': model_name,
@@ -178,8 +193,7 @@ class Api:
'created': 0,
'owned_by': model_info.base_provider
})
- except:
- return JSONResponse({"error": "The model does not exist."})
+ return JSONResponse({"error": "The model does not exist."}, 404)
@self.app.post("/v1/chat/completions")
async def chat_completions(config: ChatCompletionsConfig, request: Request = None, provider: str = None):
@@ -274,12 +288,68 @@ class Api:
logger.exception(e)
return Response(content=format_exception(e, config, True), status_code=500, media_type="application/json")
- @self.app.post("/v1/completions")
- async def completions():
- return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
+ @self.app.get("/v1/providers")
+ async def providers() -> list[ProviderResponseModel]:
+ return [{
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ } for provider in __providers__ if provider.working]
+
+ @self.app.get("/v1/providers/{provider}")
+ async def providers_info(provider: str) -> ProviderResponseModel:
+ if provider not in ProviderUtils.convert:
+ return JSONResponse({"error": "The provider does not exist."}, 404)
+ provider: ProviderType = ProviderUtils.convert[provider]
+ def safe_get_models(provider: ProviderType) -> list[str]:
+ try:
+ return provider.get_models() if hasattr(provider, "get_models") else []
+            except Exception:
+ return []
+ return {
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ 'models': safe_get_models(provider),
+ 'image_models': getattr(provider, "image_models", []) or [],
+ 'vision_models': [model for model in [getattr(provider, "default_vision_model", None)] if model],
+ 'params': [*provider.get_parameters()] if hasattr(provider, "get_parameters") else []
+ }
+
+ @self.app.post("/v1/upload_cookies")
+ def upload_cookies(files: List[UploadFile]):
+ response_data = []
+ for file in files:
+ try:
+                if file and (file.filename.endswith(".json") or file.filename.endswith(".har")):
+ filename = os.path.basename(file.filename)
+ with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
+ shutil.copyfileobj(file.file, f)
+ response_data.append({"filename": filename})
+ finally:
+ file.file.close()
+ return response_data
+
+ @self.app.get("/v1/synthesize/{provider}")
+ async def synthesize(request: Request, provider: str):
+ try:
+ provider_handler = convert_to_provider(provider)
+ except ProviderNotFoundError:
+ return Response("Provider not found", 404)
+ if not hasattr(provider_handler, "synthesize"):
+ return Response("Provider doesn't support synthesize", 500)
+ if len(request.query_params) == 0:
+ return Response("Missing query params", 500)
+ response_data = provider_handler.synthesize({**request.query_params})
+ content_type = getattr(provider_handler, "synthesize_content_type", "application/octet-stream")
+ return StreamingResponse(response_data, media_type=content_type)
@self.app.get("/images/{filename}")
- async def get_image(filename):
+ async def get_image(filename) -> FileResponse:
target = os.path.join(images_dir, filename)
if not os.path.isfile(target):
@@ -290,6 +360,40 @@ class Api:
return FileResponse(target, media_type=content_type)
+ @self.app.get("/providers")
+ async def providers():
+ model_list = [{
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ } for provider in __providers__ if provider.working]
+ return JSONResponse(model_list)
+
+ @self.app.get("/providers/{provider}")
+ async def providers_info(provider: str):
+ if provider not in ProviderUtils.convert:
+            return JSONResponse({"error": "The provider does not exist."}, 404)
+ provider: ProviderType = ProviderUtils.convert[provider]
+ def safe_get_models(provider: ProviderType) -> list[str]:
+ try:
+ return provider.get_models() if hasattr(provider, "get_models") else []
+            except Exception:
+ return []
+ provider_info = {
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ 'models': safe_get_models(provider),
+ 'image_models': getattr(provider, "image_models", []) or [],
+ 'vision_models': [model for model in [getattr(provider, "default_vision_model", None)] if model],
+ 'params': [*provider.get_parameters()] if hasattr(provider, "get_parameters") else []
+ }
+ return JSONResponse(provider_info)
+
def format_exception(e: Exception, config: Union[ChatCompletionsConfig, ImageGenerationConfig], image: bool = False) -> str:
last_provider = {} if not image else g4f.get_last_provider(True)
provider = (AppConfig.image_provider if image else AppConfig.provider) if config.provider is None else config.provider
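
Client-side, the new synthesize endpoint streams raw audio. A sketch with requests, assuming the default local API port (1337); query params are forwarded verbatim to the provider's synthesize():

import requests

resp = requests.get(
    "http://localhost:1337/v1/synthesize/Gemini",
    params={"text": "Hallo, wie geht es dir?"},  # sample text; Gemini synthesizes with hl=de
    stream=True,
)
resp.raise_for_status()
# Gemini declares synthesize_content_type = "audio/vnd.wav"
with open("speech.wav", "wb") as f:
    for chunk in resp.iter_content(chunk_size=8192):
        f.write(chunk)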
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index dea19a60..f6a0f5e8 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -6,23 +6,22 @@ import random
import string
import asyncio
import base64
-import logging
from typing import Union, AsyncIterator, Iterator, Coroutine, Optional
from ..providers.base_provider import AsyncGeneratorProvider
from ..image import ImageResponse, copy_images, images_dir
from ..typing import Messages, Image, ImageType
from ..providers.types import ProviderType
-from ..providers.response import ResponseType, FinishReason, BaseConversation
+from ..providers.response import ResponseType, FinishReason, BaseConversation, SynthesizeData
from ..errors import NoImageResponseError, ModelNotFoundError
from ..providers.retry_provider import IterListProvider
-from ..providers.base_provider import get_running_loop
+from ..providers.asyncio import get_running_loop, to_sync_generator, async_generator_to_list
from ..Provider.needs_auth.BingCreateImages import BingCreateImages
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .image_models import ImageModels
from .types import IterResponse, ImageProvider, Client as BaseClient
from .service import get_model_and_provider, get_last_provider, convert_to_provider
-from .helper import find_stop, filter_json, filter_none, safe_aclose, to_sync_iter, to_async_iterator
+from .helper import find_stop, filter_json, filter_none, safe_aclose, to_async_iterator
ChatCompletionResponseType = Iterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
AsyncChatCompletionResponseType = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
@@ -50,8 +49,7 @@ def iter_response(
idx = 0
if hasattr(response, '__aiter__'):
- # It's an async iterator, wrap it into a sync iterator
- response = to_sync_iter(response)
+ response = to_sync_generator(response)
for chunk in response:
if isinstance(chunk, FinishReason):
@@ -60,6 +58,8 @@ def iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
+ elif isinstance(chunk, SynthesizeData):
+ continue
chunk = str(chunk)
content += chunk
@@ -121,6 +121,8 @@ async def async_iter_response(
elif isinstance(chunk, BaseConversation):
yield chunk
continue
+ elif isinstance(chunk, SynthesizeData):
+ continue
chunk = str(chunk)
content += chunk
@@ -227,10 +229,10 @@ class Completions:
response = asyncio.run(response)
if stream and hasattr(response, '__aiter__'):
# It's an async generator, wrap it into a sync iterator
- response = to_sync_iter(response)
+ response = to_sync_generator(response)
elif hasattr(response, '__aiter__'):
# If response is an async generator, collect it into a list
- response = list(to_sync_iter(response))
+ response = asyncio.run(async_generator_to_list(response))
response = iter_response(response, stream, response_format, max_tokens, stop)
response = iter_append_model_and_provider(response)
if stream:
diff --git a/g4f/client/helper.py b/g4f/client/helper.py
index 93588c07..909cc132 100644
--- a/g4f/client/helper.py
+++ b/g4f/client/helper.py
@@ -1,10 +1,7 @@
from __future__ import annotations
import re
-import queue
-import threading
import logging
-import asyncio
from typing import AsyncIterator, Iterator, AsyncGenerator, Optional
@@ -53,33 +50,6 @@ async def safe_aclose(generator: AsyncGenerator) -> None:
except Exception as e:
logging.warning(f"Error while closing generator: {e}")
-# Helper function to convert an async generator to a synchronous iterator
-def to_sync_iter(async_gen: AsyncIterator) -> Iterator:
- q = queue.Queue()
- loop = asyncio.new_event_loop()
- done = object()
-
- def _run():
- asyncio.set_event_loop(loop)
-
- async def iterate():
- try:
- async for item in async_gen:
- q.put(item)
- finally:
- q.put(done)
-
- loop.run_until_complete(iterate())
- loop.close()
-
- threading.Thread(target=_run).start()
-
- while True:
- item = q.get()
- if item is done:
- break
- yield item
-
# Helper function to convert a synchronous iterator to an async iterator
async def to_async_iterator(iterator: Iterator) -> AsyncIterator:
for item in iterator:
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 6c2ad8b6..8cbcd578 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -191,6 +191,9 @@
<button class="slide-systemPrompt">
<i class="fa-solid fa-angles-up"></i>
</button>
+ <div class="media_player">
+ <i class="fa-regular fa-x"></i>
+ </div>
<div class="toolbar">
<div id="input-count" class="">
<button class="hide-input">
@@ -224,7 +227,7 @@
<i class="fa-solid fa-camera"></i>
</label>
<label class="file-label" for="file">
- <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
+ <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .har, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
<i class="fa-solid fa-paperclip"></i>
</label>
<label class="micro-label" for="micro">
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index c4b61d87..57b75bae 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -259,7 +259,6 @@ body {
flex-direction: column;
gap: var(--section-gap);
padding: var(--inner-gap) var(--section-gap);
- padding-bottom: 0;
}
.message.print {
@@ -271,7 +270,11 @@ body {
}
.message.regenerate {
- opacity: 0.75;
+ background-color: var(--colour-6);
+}
+
+.white .message.regenerate {
+ background-color: var(--colour-4);
}
.message:last-child {
@@ -407,6 +410,7 @@ body {
.message .count .fa-clipboard.clicked,
.message .count .fa-print.clicked,
+.message .count .fa-rotate.clicked,
.message .count .fa-volume-high.active {
color: var(--accent);
}
@@ -430,6 +434,28 @@ body {
font-size: 12px;
}
+.media_player {
+ display: none;
+}
+
+.media_player audio {
+ right: 28px;
+ position: absolute;
+ top: -4px;
+ z-index: 900;
+}
+
+.media_player.show {
+ display: block;
+}
+
+.media_player .fa-x {
+ position: absolute;
+ right: 8px;
+ top: 8px;
+ z-index: 1000;
+}
+
.count_total {
font-size: 12px;
padding-left: 25px;
@@ -1159,7 +1185,10 @@ a:-webkit-any-link {
.message .user {
display: none;
}
- .message.regenerate {
- opacity: 1;
+ body {
+ height: auto;
+ }
+ .box {
+ backdrop-filter: none;
}
}
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 0136f9c4..fd9cc50e 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -28,6 +28,7 @@ let message_storage = {};
let controller_storage = {};
let content_storage = {};
let error_storage = {};
+let synthesize_storage = {};
messageInput.addEventListener("blur", () => {
window.scrollTo(0, 0);
@@ -43,17 +44,18 @@ appStorage = window.localStorage || {
removeItem: (key) => delete self[key],
length: 0
}
-
-const markdown = window.markdownit();
-const markdown_render = (content) => {
- return markdown.render(content
- .replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
- .replaceAll(/<img data-prompt="[^>]+">/gm, "")
- )
- .replaceAll("<a href=", '<a target="_blank" href=')
- .replaceAll('<code>', '<code class="language-plaintext">')
+let markdown_render = () => null;
+if (window.markdownit) {
+ const markdown = window.markdownit();
+ markdown_render = (content) => {
+ return markdown.render(content
+ .replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
+ .replaceAll(/<img data-prompt="[^>]+">/gm, "")
+ )
+ .replaceAll("<a href=", '<a target="_blank" href=')
+ .replaceAll('<code>', '<code class="language-plaintext">')
+ }
}
-
function filter_message(text) {
return text.replaceAll(
/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, ""
@@ -134,6 +136,24 @@ const register_message_buttons = async () => {
if (!("click" in el.dataset)) {
el.dataset.click = "true";
el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ let audio;
+ if (message_el.dataset.synthesize_url) {
+ el.classList.add("active");
+ setTimeout(()=>el.classList.remove("active"), 2000);
+ const media_player = document.querySelector(".media_player");
+ if (!media_player.classList.contains("show")) {
+ media_player.classList.add("show");
+ audio = new Audio(message_el.dataset.synthesize_url);
+ audio.controls = true;
+ media_player.appendChild(audio);
+ } else {
+ audio = media_player.querySelector("audio");
+ audio.src = message_el.dataset.synthesize_url;
+ }
+ audio.play();
+ return;
+ }
let playlist = [];
function play_next() {
const next = playlist.shift();
@@ -155,8 +175,7 @@ const register_message_buttons = async () => {
el.dataset.running = true;
el.classList.add("blink")
el.classList.add("active")
- const content_el = el.parentElement.parentElement;
- const message_el = content_el.parentElement;
+
let speechText = await get_message(window.conversation_id, message_el.dataset.index);
speechText = speechText.replaceAll(/([^0-9])\./gm, "$1.;");
@@ -215,8 +234,8 @@ const register_message_buttons = async () => {
const message_el = el.parentElement.parentElement.parentElement;
el.classList.add("clicked");
setTimeout(() => el.classList.remove("clicked"), 1000);
- await ask_gpt(message_el.dataset.index, get_message_id());
- })
+ await ask_gpt(get_message_id(), message_el.dataset.index);
+ });
}
});
document.querySelectorAll(".message .fa-whatsapp").forEach(async (el) => {
@@ -301,25 +320,29 @@ const handle_ask = async () => {
<i class="fa-regular fa-clipboard"></i>
<a><i class="fa-brands fa-whatsapp"></i></a>
<i class="fa-solid fa-print"></i>
+ <i class="fa-solid fa-rotate"></i>
</div>
</div>
</div>
`;
highlight(message_box);
- stop_generating.classList.remove("stop_generating-hidden");
- await ask_gpt(-1, message_id);
+ await ask_gpt(message_id);
};
-async function remove_cancel_button() {
+async function safe_remove_cancel_button() {
+ for (let key in controller_storage) {
+ if (!controller_storage[key].signal.aborted) {
+ return;
+ }
+ }
stop_generating.classList.add("stop_generating-hidden");
}
regenerate.addEventListener("click", async () => {
regenerate.classList.add("regenerate-hidden");
setTimeout(()=>regenerate.classList.remove("regenerate-hidden"), 3000);
- stop_generating.classList.remove("stop_generating-hidden");
await hide_message(window.conversation_id);
- await ask_gpt(-1, get_message_id());
+ await ask_gpt(get_message_id());
});
stop_generating.addEventListener("click", async () => {
@@ -337,21 +360,28 @@ stop_generating.addEventListener("click", async () => {
}
}
}
- await load_conversation(window.conversation_id);
+ await load_conversation(window.conversation_id, false);
+});
+
+document.querySelector(".media_player .fa-x").addEventListener("click", ()=>{
+ const media_player = document.querySelector(".media_player");
+ media_player.classList.remove("show");
+ const audio = document.querySelector(".media_player audio");
+ media_player.removeChild(audio);
});
const prepare_messages = (messages, message_index = -1) => {
+ if (message_index >= 0) {
+ messages = messages.filter((_, index) => message_index >= index);
+ }
+
// Removes none user messages at end
- if (message_index == -1) {
- let last_message;
- while (last_message = messages.pop()) {
- if (last_message["role"] == "user") {
- messages.push(last_message);
- break;
- }
+ let last_message;
+ while (last_message = messages.pop()) {
+ if (last_message["role"] == "user") {
+ messages.push(last_message);
+ break;
}
- } else if (message_index >= 0) {
- messages = messages.filter((_, index) => message_index >= index);
}
let new_messages = [];
@@ -377,9 +407,11 @@ const prepare_messages = (messages, message_index = -1) => {
// Remove generated images from history
new_message.content = filter_message(new_message.content);
delete new_message.provider;
+ delete new_message.synthesize;
new_messages.push(new_message)
}
});
+
return new_messages;
}
@@ -427,6 +459,8 @@ async function add_message_chunk(message, message_id) {
let p = document.createElement("p");
p.innerText = message.log;
log_storage.appendChild(p);
+ } else if (message.type == "synthesize") {
+ synthesize_storage[message_id] = message.synthesize;
}
let scroll_down = ()=>{
if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
@@ -434,8 +468,10 @@ async function add_message_chunk(message, message_id) {
message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
}
}
- setTimeout(scroll_down, 200);
- setTimeout(scroll_down, 1000);
+ if (!content_map.container.classList.contains("regenerate")) {
+ scroll_down();
+ setTimeout(scroll_down, 200);
+ }
}
cameraInput?.addEventListener("click", (e) => {
@@ -452,45 +488,58 @@ imageInput?.addEventListener("click", (e) => {
}
});
-const ask_gpt = async (message_index = -1, message_id) => {
+const ask_gpt = async (message_id, message_index = -1) => {
let messages = await get_messages(window.conversation_id);
- let total_messages = messages.length;
messages = prepare_messages(messages, message_index);
- message_index = total_messages
message_storage[message_id] = "";
- stop_generating.classList.remove(".stop_generating-hidden");
+ stop_generating.classList.remove("stop_generating-hidden");
- message_box.scrollTop = message_box.scrollHeight;
- window.scrollTo(0, 0);
+ if (message_index == -1) {
+ await scroll_to_bottom();
+ }
let count_total = message_box.querySelector('.count_total');
count_total ? count_total.parentElement.removeChild(count_total) : null;
- message_box.innerHTML += `
- <div class="message" data-index="${message_index}">
- <div class="assistant">
- ${gpt_image}
- <i class="fa-solid fa-xmark"></i>
- <i class="fa-regular fa-phone-arrow-down-left"></i>
- </div>
- <div class="content" id="gpt_${message_id}">
- <div class="provider"></div>
- <div class="content_inner"><span class="cursor"></span></div>
- <div class="count"></div>
- </div>
+ const message_el = document.createElement("div");
+ message_el.classList.add("message");
+ if (message_index != -1) {
+ message_el.classList.add("regenerate");
+ }
+ message_el.innerHTML += `
+ <div class="assistant">
+ ${gpt_image}
+ <i class="fa-solid fa-xmark"></i>
+ <i class="fa-regular fa-phone-arrow-down-left"></i>
+ </div>
+ <div class="content" id="gpt_${message_id}">
+ <div class="provider"></div>
+ <div class="content_inner"><span class="cursor"></span></div>
+ <div class="count"></div>
</div>
`;
+ if (message_index == -1) {
+ message_box.appendChild(message_el);
+ } else {
+        const parent_message = message_box.querySelector(`.message[data-index="${message_index}"]`);
+ if (!parent_message) {
+ return;
+ }
+ parent_message.after(message_el);
+ }
controller_storage[message_id] = new AbortController();
let content_el = document.getElementById(`gpt_${message_id}`)
let content_map = content_storage[message_id] = {
+ container: message_el,
content: content_el,
inner: content_el.querySelector('.content_inner'),
count: content_el.querySelector('.count'),
}
-
- await scroll_to_bottom();
+ if (message_index == -1) {
+ await scroll_to_bottom();
+ }
try {
const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput;
const file = input && input.files.length > 0 ? input.files[0] : null;
@@ -527,14 +576,23 @@ const ask_gpt = async (message_index = -1, message_id) => {
delete controller_storage[message_id];
if (!error_storage[message_id] && message_storage[message_id]) {
const message_provider = message_id in provider_storage ? provider_storage[message_id] : null;
- await add_message(window.conversation_id, "assistant", message_storage[message_id], message_provider);
- await safe_load_conversation(window.conversation_id);
+ await add_message(
+ window.conversation_id,
+ "assistant",
+ message_storage[message_id],
+ message_provider,
+ message_index,
+ synthesize_storage[message_id]
+ );
+ await safe_load_conversation(window.conversation_id, message_index == -1);
} else {
- let cursorDiv = message_box.querySelector(".cursor");
+ let cursorDiv = message_el.querySelector(".cursor");
if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
}
- await scroll_to_bottom();
- await remove_cancel_button();
+ if (message_index == -1) {
+ await scroll_to_bottom();
+ }
+ await safe_remove_cancel_button();
await register_message_buttons();
await load_conversations();
regenerate.classList.remove("regenerate-hidden");
@@ -687,8 +745,17 @@ const load_conversation = async (conversation_id, scroll=true) => {
${item.provider.model ? ' with ' + item.provider.model : ''}
</div>
` : "";
+ let synthesize_params = {text: item.content}
+ let synthesize_provider = "Gemini";
+ if (item.synthesize) {
+ synthesize_params = item.synthesize.data
+ synthesize_provider = item.synthesize.provider;
+ }
+ synthesize_params = (new URLSearchParams(synthesize_params)).toString();
+ let synthesize_url = `/backend-api/v2/synthesize/${synthesize_provider}?${synthesize_params}`;
+
elements += `
- <div class="message${item.regenerate ? " regenerate": ""}" data-index="${i}">
+ <div class="message${item.regenerate ? " regenerate": ""}" data-index="${i}" data-synthesize_url="${synthesize_url}">
<div class="${item.role}">
${item.role == "assistant" ? gpt_image : user_image}
<i class="fa-solid fa-xmark"></i>
@@ -706,6 +773,7 @@ const load_conversation = async (conversation_id, scroll=true) => {
<i class="fa-regular fa-clipboard"></i>
<a><i class="fa-brands fa-whatsapp"></i></a>
<i class="fa-solid fa-print"></i>
+ <i class="fa-solid fa-rotate"></i>
</div>
</div>
</div>
@@ -830,14 +898,35 @@ const get_message = async (conversation_id, index) => {
return messages[index]["content"];
};
-const add_message = async (conversation_id, role, content, provider) => {
+const add_message = async (
+ conversation_id, role, content,
+ provider = null,
+ message_index = -1,
+ synthesize_data = null
+) => {
const conversation = await get_conversation(conversation_id);
if (!conversation) return;
- conversation.items.push({
+ const new_message = {
role: role,
content: content,
- provider: provider
- });
+ provider: provider,
+ };
+ if (synthesize_data) {
+ new_message.synthesize = synthesize_data;
+ }
+ if (message_index == -1) {
+ conversation.items.push(new_message);
+ } else {
+ const new_messages = [];
+ conversation.items.forEach((item, index)=>{
+ new_messages.push(item);
+ if (index == message_index) {
+ new_message.regenerate = true;
+ new_messages.push(new_message);
+ }
+ });
+ conversation.items = new_messages;
+ }
await save_conversation(conversation_id, conversation);
return conversation.items.length - 1;
};
@@ -1267,17 +1356,25 @@ fileInput.addEventListener('click', async (event) => {
delete fileInput.dataset.text;
});
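+// Upload the chosen cookie export (.har or .json) to the backend, which
+// stores it in the g4f cookies directory.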
+async function upload_cookies() {
+ const file = fileInput.files[0];
+ const formData = new FormData();
+ formData.append('file', file);
+    const response = await fetch("/backend-api/v2/upload_cookies", {
+ method: 'POST',
+ body: formData,
+ });
+ if (response.status == 200) {
+ inputCount.innerText = `${file.name} was uploaded successfully`;
+ }
+ fileInput.value = "";
+}
+
fileInput.addEventListener('change', async (event) => {
if (fileInput.files.length) {
- type = fileInput.files[0].type;
- if (type && type.indexOf('/')) {
- type = type.split('/').pop().replace('x-', '')
- type = type.replace('plain', 'plaintext')
- .replace('shellscript', 'sh')
- .replace('svg+xml', 'svg')
- .replace('vnd.trolltech.linguist', 'ts')
- } else {
- type = fileInput.files[0].name.split('.').pop()
+ type = fileInput.files[0].name.split('.').pop()
+ if (type == "har") {
+ return await upload_cookies();
}
fileInput.dataset.type = type
const reader = new FileReader();
@@ -1286,14 +1383,19 @@ fileInput.addEventListener('change', async (event) => {
if (type == "json") {
const data = JSON.parse(fileInput.dataset.text);
if ("g4f" in data.options) {
+ let count = 0;
Object.keys(data).forEach(key => {
if (key != "options" && !localStorage.getItem(key)) {
appStorage.setItem(key, JSON.stringify(data[key]));
- }
+ count += 1;
+ }
});
delete fileInput.dataset.text;
await load_conversations();
fileInput.value = "";
+                inputCount.innerText = `${count} conversations were imported successfully`;
+ } else {
+ await upload_cookies();
}
}
});
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 00eb7182..ecf7bc54 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -13,7 +13,7 @@ from g4f.errors import VersionNotFoundError
from g4f.image import ImagePreview, ImageResponse, copy_images, ensure_images_dir, images_dir
from g4f.Provider import ProviderType, __providers__, __map__
from g4f.providers.base_provider import ProviderModelMixin
-from g4f.providers.response import BaseConversation, FinishReason
+from g4f.providers.response import BaseConversation, FinishReason, SynthesizeData
from g4f.client.service import convert_to_provider
from g4f import debug
@@ -140,13 +140,12 @@ class Api:
}
def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str, download_images: bool = True) -> Iterator:
- if debug.logging:
- debug.logs = []
- print_callback = debug.log_handler
- def log_handler(text: str):
- debug.logs.append(text)
- print_callback(text)
- debug.log_handler = log_handler
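+        # Always capture provider debug logs for this request so they can be
+        # streamed to the client, not only when debug.logging is enabled.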
+ debug.logs = []
+ print_callback = debug.log_handler
+ def log_handler(text: str):
+ debug.logs.append(text)
+ print_callback(text)
+ debug.log_handler = log_handler
try:
result = ChatCompletion.create(**kwargs)
first = True
@@ -177,6 +176,8 @@ class Api:
images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies")))
images = ImageResponse(images, chunk.alt)
yield self._format_json("content", str(images))
+ elif isinstance(chunk, SynthesizeData):
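+                    # Emit synthesize metadata as its own event type; the GUI
+                    # turns it into an audio request URL.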
+ yield self._format_json("synthesize", chunk.to_json())
elif not isinstance(chunk, FinishReason):
yield self._format_json("content", str(chunk))
if debug.logs:
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 917d779e..3dcae546 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,8 +1,28 @@
import json
+import flask
+import os
+import logging
+import asyncio
from flask import request, Flask
+from typing import Generator
+from werkzeug.utils import secure_filename
+
from g4f.image import is_allowed_extension, to_image
+from g4f.client.service import convert_to_provider
+from g4f.providers.asyncio import to_sync_generator
+from g4f.errors import ProviderNotFoundError
+from g4f.cookies import get_cookies_dir
from .api import Api
+logger = logging.getLogger(__name__)
+
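+# Consume the first item eagerly so a failing provider raises here, before
+# Flask starts streaming; assumes the generator yields at least one item.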
+def safe_iter_generator(generator: Generator) -> Generator:
+ start = next(generator)
+ def iter_generator():
+ yield start
+ yield from generator
+ return iter_generator()
+
class Backend_Api(Api):
"""
Handles various endpoints in a Flask application for backend operations.
@@ -47,8 +67,12 @@ class Backend_Api(Api):
'function': self.handle_conversation,
'methods': ['POST']
},
- '/backend-api/v2/error': {
- 'function': self.handle_error,
+ '/backend-api/v2/synthesize/<provider>': {
+ 'function': self.handle_synthesize,
+ 'methods': ['GET']
+ },
+ '/backend-api/v2/upload_cookies': {
+ 'function': self.upload_cookies,
'methods': ['POST']
},
'/images/<path:name>': {
@@ -57,15 +81,17 @@ class Backend_Api(Api):
}
}
- def handle_error(self):
- """
- Initialize the backend API with the given Flask application.
-
- Args:
- app (Flask): Flask application instance to attach routes to.
- """
- print(request.json)
- return 'ok', 200
+ def upload_cookies(self):
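+        # Persist an uploaded .json or .har cookie export into the g4f
+        # cookies directory under a sanitized filename.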
+ file = None
+ if "file" in request.files:
+ file = request.files['file']
+ if file.filename == '':
+ return 'No selected file', 400
+        if file and (file.filename.endswith(".json") or file.filename.endswith(".har")):
+ filename = secure_filename(file.filename)
+ file.save(os.path.join(get_cookies_dir(), filename))
+ return "File saved", 200
+        return 'Unsupported file type', 400
def handle_conversation(self):
"""
@@ -98,11 +124,30 @@ class Backend_Api(Api):
mimetype='text/event-stream'
)
+ def handle_synthesize(self, provider: str):
+ try:
+ provider_handler = convert_to_provider(provider)
+ except ProviderNotFoundError:
+ return "Provider not found", 404
+ if not hasattr(provider_handler, "synthesize"):
+ return "Provider doesn't support synthesize", 500
+ response_data = provider_handler.synthesize({**request.args})
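+        # synthesize() may return a coroutine, an async generator or a sync
+        # generator; normalize all of them into something Flask can stream.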
+ if asyncio.iscoroutinefunction(provider_handler.synthesize):
+ response_data = asyncio.run(response_data)
+ else:
+ if hasattr(response_data, "__aiter__"):
+ response_data = to_sync_generator(response_data)
+ response_data = safe_iter_generator(response_data)
+ content_type = getattr(provider_handler, "synthesize_content_type", "application/octet-stream")
+ response = flask.Response(response_data, content_type=content_type)
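+        # Audio for a given set of params does not change, so let clients
+        # cache it for a week (604800 seconds).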
+ response.headers['Cache-Control'] = "max-age=604800"
+ return response
+
def get_provider_models(self, provider: str):
api_key = None if request.authorization is None else request.authorization.token
models = super().get_provider_models(provider, api_key)
if models is None:
- return 404, "Provider not found"
+ return "Provider not found", 404
return models
def _format_json(self, response_type: str, content) -> str:
diff --git a/g4f/image.py b/g4f/image.py
index 114dcc13..e9abcb6e 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -33,10 +33,14 @@ EXTENSIONS_MAP: dict[str, str] = {
# Define the directory for generated images
images_dir = "./generated_images"
-def fix_url(url:str) -> str:
+def fix_url(url: str) -> str:
""" replace ' ' by '+' (to be markdown compliant)"""
return url.replace(" ","+")
+def fix_title(title: str) -> str:
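+    # Strip newlines and double quotes so the alt text cannot break the
+    # enclosing markdown image syntax.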
+ if title:
+ return title.replace("\n", "").replace('"', '')
+
def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
Converts the input image to a PIL Image object.
@@ -226,12 +230,12 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
str: The formatted markdown string.
"""
if isinstance(images, str):
- result = f"[![{alt}]({fix_url(preview.replace('{image}', images) if preview else images)})]({fix_url(images)})"
+ result = f"[![{fix_title(alt)}]({fix_url(preview.replace('{image}', images) if preview else images)})]({fix_url(images)})"
else:
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
result = "\n".join(
- f"[![#{idx+1} {alt}]({fix_url(preview[idx])})]({fix_url(image)})"
+ f"[![#{idx+1} {fix_title(alt)}]({fix_url(preview[idx])})]({fix_url(image)})"
for idx, image in enumerate(images)
)
start_flag = "<!-- generated images start -->\n"
diff --git a/g4f/providers/asyncio.py b/g4f/providers/asyncio.py
new file mode 100644
index 00000000..cf0ce1a0
--- /dev/null
+++ b/g4f/providers/asyncio.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import asyncio
+from asyncio import AbstractEventLoop, runners
+from typing import Union, Callable, AsyncGenerator, Generator
+
+from ..errors import NestAsyncioError
+
+try:
+ import nest_asyncio
+ has_nest_asyncio = True
+except ImportError:
+ has_nest_asyncio = False
+try:
+ import uvloop
+ has_uvloop = True
+except ImportError:
+ has_uvloop = False
+
+def get_running_loop(check_nested: bool) -> Union[AbstractEventLoop, None]:
+ try:
+ loop = asyncio.get_running_loop()
+        # Do not patch the uvloop loop because it's incompatible.
+ if has_uvloop:
+ if isinstance(loop, uvloop.Loop):
+ return loop
+ if not hasattr(loop.__class__, "_nest_patched"):
+ if has_nest_asyncio:
+ nest_asyncio.apply(loop)
+ elif check_nested:
+ raise NestAsyncioError('Install "nest_asyncio" package | pip install -U nest_asyncio')
+ return loop
+ except RuntimeError:
+ pass
+
+# Fix for RuntimeError: async generator ignored GeneratorExit
+async def await_callback(callback: Callable):
+ return await callback()
+
+async def async_generator_to_list(generator: AsyncGenerator) -> list:
+ return [item async for item in generator]
+
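+# Drive an async generator from synchronous code, creating a private event
+# loop (and tearing it down afterwards) only when none is already running.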
+def to_sync_generator(generator: AsyncGenerator) -> Generator:
+ loop = get_running_loop(check_nested=False)
+ new_loop = False
+ if loop is None:
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ new_loop = True
+ gen = generator.__aiter__()
+ try:
+ while True:
+ yield loop.run_until_complete(await_callback(gen.__anext__))
+ except StopAsyncIteration:
+ pass
+ finally:
+ if new_loop:
+ try:
+ runners._cancel_all_tasks(loop)
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ if hasattr(loop, "shutdown_default_executor"):
+ loop.run_until_complete(loop.shutdown_default_executor())
+ finally:
+ asyncio.set_event_loop(None)
+            loop.close()
\ No newline at end of file
diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py
index b6df48e8..80a9e09d 100644
--- a/g4f/providers/base_provider.py
+++ b/g4f/providers/base_provider.py
@@ -7,30 +7,14 @@ from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import abstractmethod
from inspect import signature, Parameter
-from typing import Callable, Union
from ..typing import CreateResult, AsyncResult, Messages
from .types import BaseProvider
-from .response import FinishReason, BaseConversation
-from ..errors import NestAsyncioError, ModelNotSupportedError
+from .asyncio import get_running_loop, to_sync_generator
+from .response import FinishReason, BaseConversation, SynthesizeData
+from ..errors import ModelNotSupportedError
from .. import debug
-if sys.version_info < (3, 10):
- NoneType = type(None)
-else:
- from types import NoneType
-
-try:
- import nest_asyncio
- has_nest_asyncio = True
-except ImportError:
- has_nest_asyncio = False
-try:
- import uvloop
- has_uvloop = True
-except ImportError:
- has_uvloop = False
-
# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
if sys.platform == 'win32':
try:
@@ -41,26 +25,6 @@ if sys.platform == 'win32':
except ImportError:
pass
-def get_running_loop(check_nested: bool) -> Union[AbstractEventLoop, None]:
- try:
- loop = asyncio.get_running_loop()
- # Do not patch uvloop loop because its incompatible.
- if has_uvloop:
- if isinstance(loop, uvloop.Loop):
- return loop
- if not hasattr(loop.__class__, "_nest_patched"):
- if has_nest_asyncio:
- nest_asyncio.apply(loop)
- elif check_nested:
- raise NestAsyncioError('Install "nest_asyncio" package | pip install -U nest_asyncio')
- return loop
- except RuntimeError:
- pass
-
-# Fix for RuntimeError: async generator ignored GeneratorExit
-async def await_callback(callback: Callable):
- return await callback()
-
class AbstractProvider(BaseProvider):
"""
Abstract class for providing asynchronous functionality to derived classes.
@@ -102,11 +66,12 @@ class AbstractProvider(BaseProvider):
@classmethod
def get_parameters(cls) -> dict[str, Parameter]:
- return signature(
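+        # Hide internal parameters (kwargs, model, messages) and drop "stream"
+        # for providers that do not support streaming.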
+ return {name: parameter for name, parameter in signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
- ).parameters
+ ).parameters.items() if name not in ["kwargs", "model", "messages"]
+ and (name != "stream" or cls.supports_stream)}
@classmethod
@property
@@ -126,8 +91,6 @@ class AbstractProvider(BaseProvider):
args = ""
for name, param in cls.get_parameters().items():
- if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
- continue
args += f"\n {name}"
args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
default_value = f'"{param.default}"' if isinstance(param.default, str) else param.default
@@ -136,7 +99,6 @@ class AbstractProvider(BaseProvider):
return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
-
class AsyncProvider(AbstractProvider):
"""
Provides asynchronous functionality for creating completions.
@@ -218,25 +180,9 @@ class AsyncGeneratorProvider(AsyncProvider):
Returns:
CreateResult: The result of the streaming completion creation.
"""
- loop = get_running_loop(check_nested=False)
- new_loop = False
- if loop is None:
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- new_loop = True
-
- generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
- gen = generator.__aiter__()
-
- try:
- while True:
- yield loop.run_until_complete(await_callback(gen.__anext__))
- except StopAsyncIteration:
- pass
- finally:
- if new_loop:
- loop.close()
- asyncio.set_event_loop(None)
+ return to_sync_generator(
+ cls.create_async_generator(model, messages, stream=stream, **kwargs)
+ )
@classmethod
async def create_async(
@@ -259,7 +205,7 @@ class AsyncGeneratorProvider(AsyncProvider):
"""
return "".join([
str(chunk) async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
- if not isinstance(chunk, (Exception, FinishReason, BaseConversation))
+ if not isinstance(chunk, (Exception, FinishReason, BaseConversation, SynthesizeData))
])
@staticmethod
diff --git a/g4f/providers/response.py b/g4f/providers/response.py
index a4d1467a..3fddbf4f 100644
--- a/g4f/providers/response.py
+++ b/g4f/providers/response.py
@@ -23,4 +23,17 @@ class Sources(ResponseType):
class BaseConversation(ResponseType):
def __str__(self) -> str:
+ return ""
+
+class SynthesizeData(ResponseType):
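+    """Non-text chunk carrying the provider name and query parameters the GUI
+    uses to request audio from /backend-api/v2/synthesize/<provider>."""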
+ def __init__(self, provider: str, data: dict):
+ self.provider = provider
+ self.data = data
+
+ def to_json(self) -> dict:
+ return {
+ **self.__dict__
+ }
+
+ def __str__(self) -> str:
return "" \ No newline at end of file