Diffstat (limited to '')
-rw-r--r-- .github/workflows/publish-workflow.yaml 22
-rw-r--r-- README.md 12
-rw-r--r-- docker/Dockerfile-slim 45
-rw-r--r-- docs/async_client.md 11
-rw-r--r-- docs/client.md 5
-rw-r--r-- etc/tool/openapi.py 11
-rw-r--r-- g4f/Provider/Airforce.py 22
-rw-r--r-- g4f/Provider/Copilot.py 58
-rw-r--r-- g4f/Provider/PollinationsAI.py 3
-rw-r--r-- g4f/Provider/needs_auth/Gemini.py 86
-rw-r--r-- g4f/Provider/needs_auth/GithubCopilot.py 93
-rw-r--r-- g4f/Provider/needs_auth/OpenaiChat.py 27
-rw-r--r-- g4f/Provider/needs_auth/__init__.py 1
-rw-r--r-- g4f/api/__init__.py 331
-rw-r--r-- g4f/cli.py 10
-rw-r--r-- g4f/client/__init__.py 92
-rw-r--r-- g4f/client/helper.py 3
-rw-r--r-- g4f/client/stubs.py 238
-rw-r--r-- g4f/gui/__init__.py 28
-rw-r--r-- g4f/gui/client/index.html 3
-rw-r--r-- g4f/gui/client/static/css/style.css 23
-rw-r--r-- g4f/gui/client/static/js/chat.v1.js 66
-rw-r--r-- g4f/gui/server/api.py 53
-rw-r--r-- g4f/gui/server/backend.py 47
-rw-r--r-- g4f/gui/server/internet.py 1
-rw-r--r-- g4f/gui/server/website.py 7
-rw-r--r-- g4f/image.py 10
-rw-r--r-- g4f/providers/base_provider.py 7
-rw-r--r-- g4f/requests/raise_for_status.py 2
-rw-r--r-- requirements-slim.txt 3
-rw-r--r-- requirements.txt 3
-rw-r--r-- setup.py 5
32 files changed, 894 insertions, 434 deletions
diff --git a/.github/workflows/publish-workflow.yaml b/.github/workflows/publish-workflow.yaml
index 67d9dc53..ebddb1f3 100644
--- a/.github/workflows/publish-workflow.yaml
+++ b/.github/workflows/publish-workflow.yaml
@@ -6,6 +6,26 @@ on:
- '**'
jobs:
+ openapi:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.13"
+ cache: 'pip'
+ - name: Install requirements
+ run: |
+ pip install fastapi uvicorn python-multipart
+ pip install -r requirements-min.txt
+ - name: Generate openapi.json
+ run: |
+ python -m etc.tool.openapi
+ - uses: actions/upload-artifact@v4
+ with:
+ name: openapi
+ path: openapi.json
publish:
runs-on: ubuntu-latest
steps:
@@ -42,7 +62,7 @@ jobs:
with:
context: .
file: docker/Dockerfile-slim
- platforms: linux/amd64,linux/arm64
+ platforms: linux/amd64,linux/arm64,linux/arm/v7
push: true
tags: |
hlohaus789/g4f:latest-slim
diff --git a/README.md b/README.md
index 4f61d791..f882b13c 100644
--- a/README.md
+++ b/README.md
@@ -105,23 +105,23 @@ docker run \
hlohaus789/g4f:latest
```
-Start the GUI without a browser requirement and in debug mode.
-There's no need to update the Docker image every time.
-Simply remove the g4f package from the image and install the Python package:
+To run the slim docker image, use this command:
+
```bash
docker run \
- -p 8080:8080 \
+ -p 1337:1337 \
-v ${PWD}/har_and_cookies:/app/har_and_cookies \
-v ${PWD}/generated_images:/app/generated_images \
hlohaus789/g4f:latest-slim \
rm -r -f /app/g4f/ \
&& pip install -U g4f[slim] \
- && python -m g4f.cli gui -d
+ && python -m g4f.cli api --gui --debug
```
+This command also updates the `g4f` package at startup and installs any newly required dependencies.
3. **Access the Client:**
- - To use the included client, navigate to: [http://localhost:8080/chat/](http://localhost:8080/chat/)
+ - To use the included client, navigate to: [http://localhost:8080/chat/](http://localhost:8080/chat/) or [http://localhost:1337/chat/](http://localhost:1337/chat/)
- Or set the API base for your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
4. **(Optional) Provider Login:**
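Editor's note: since the interface is OpenAI-compatible, the API base above works with any OpenAI-style client. A minimal sketch, assuming the `openai` Python package is installed and the container above is running:

```python
from openai import OpenAI

# Point an OpenAI-compatible client at the local g4f API server.
client = OpenAI(
    base_url="http://localhost:1337/v1",
    api_key="secret",  # any non-empty string works unless a g4f API key is configured
)

response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```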
diff --git a/docker/Dockerfile-slim b/docker/Dockerfile-slim
index 6c0afc64..04238144 100644
--- a/docker/Dockerfile-slim
+++ b/docker/Dockerfile-slim
@@ -1,9 +1,8 @@
-FROM python:bookworm
+FROM python:slim-bookworm
ARG G4F_VERSION
ARG G4F_USER=g4f
ARG G4F_USER_ID=1000
-ARG PYDANTIC_VERSION=1.8.1
ENV G4F_VERSION $G4F_VERSION
ENV G4F_USER $G4F_USER
@@ -12,57 +11,29 @@ ENV G4F_DIR /app
RUN apt-get update && apt-get upgrade -y \
&& apt-get install -y git \
- && apt-get install --quiet --yes --no-install-recommends \
- build-essential \
# Add user and user group
&& groupadd -g $G4F_USER_ID $G4F_USER \
&& useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
&& mkdir -p /var/log/supervisor \
&& chown "${G4F_USER_ID}:${G4F_USER_ID}" /var/log/supervisor \
- && echo "${G4F_USER}:${G4F_USER}" | chpasswd
+ && echo "${G4F_USER}:${G4F_USER}" | chpasswd \
+ && python -m pip install --upgrade pip \
+ && apt-get clean \
+ && rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/*
USER $G4F_USER_ID
WORKDIR $G4F_DIR
ENV HOME /home/$G4F_USER
-ENV PATH "${HOME}/.local/bin:${HOME}/.cargo/bin:${PATH}"
+ENV PATH "${HOME}/.local/bin:${PATH}"
# Create app dir and copy the project's requirements file into it
RUN mkdir -p $G4F_DIR
+COPY requirements-min.txt $G4F_DIR
COPY requirements-slim.txt $G4F_DIR
-# Install rust toolchain
-RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
-
# Upgrade pip for the latest features and install the project's Python dependencies.
-RUN python -m pip install --upgrade pip \
- && pip install --no-cache-dir \
- Cython==0.29.22 \
- setuptools \
- # Install PyDantic
- && pip install \
- -vvv \
- --no-cache-dir \
- --no-binary pydantic \
- --global-option=build_ext \
- --global-option=-j8 \
- pydantic==${PYDANTIC_VERSION} \
- && pip install --no-cache-dir -r requirements-slim.txt \
- # Remove build packages
- && pip uninstall --yes \
- Cython \
- setuptools
-
-USER root
-
-# Clean up build deps
-RUN rustup self uninstall -y \
- && apt-get purge --auto-remove --yes \
- build-essential \
- && apt-get clean \
- && rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
-USER $G4F_USER_ID
+RUN cat requirements-slim.txt | xargs -n 1 pip install --no-cache-dir || true
# Copy the entire package into the container.
ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f \ No newline at end of file
diff --git a/docs/async_client.md b/docs/async_client.md
index fe6f46ff..e501aefa 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -154,13 +154,14 @@ import asyncio
from g4f.client import AsyncClient
async def main():
- client = AsyncClient()
-
+ client = AsyncClient(
+ provider=g4f.Provider.CopilotAccount
+ )
+
image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
-
+
response = await client.chat.completions.create(
model=g4f.models.default,
- provider=g4f.Provider.Bing,
messages=[
{
"role": "user",
@@ -169,7 +170,7 @@ async def main():
],
image=image
)
-
+
print(response.choices[0].message.content)
asyncio.run(main())
diff --git a/docs/client.md b/docs/client.md
index da45d7fd..c318bee3 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -265,7 +265,9 @@ from g4f.client import Client
image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
# Or: image = open("docs/cat.jpeg", "rb")
-client = Client()
+client = Client(
+ provider=g4f.Provider.CopilotAccount
+)
response = client.chat.completions.create(
model=g4f.models.default,
@@ -275,7 +277,6 @@ response = client.chat.completions.create(
"content": "What are on this image?"
}
],
- provider=g4f.Provider.Bing,
image=image
# Add any other necessary parameters
)
diff --git a/etc/tool/openapi.py b/etc/tool/openapi.py
new file mode 100644
index 00000000..83359e4e
--- /dev/null
+++ b/etc/tool/openapi.py
@@ -0,0 +1,11 @@
+import json
+
+from g4f.api import create_app
+
+app = create_app()
+
+with open("openapi.json", "w") as f:
+ data = json.dumps(app.openapi())
+ f.write(data)
+
+print(f"openapi.json - {round(len(data)/1024, 2)} kbytes") \ No newline at end of file
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index f5bcfefa..5200d6f7 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -7,6 +7,7 @@ import re
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+from urllib.parse import quote
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -36,7 +37,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "gpt-4o-mini"
default_image_model = "flux"
- additional_models_imagine = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"]
+ additional_models_imagine = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "flux-1.1-pro"]
@classmethod
def get_models(cls):
@@ -86,7 +87,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
### imagine ###
"sdxl": "stable-diffusion-xl-base",
"sdxl": "stable-diffusion-xl-lightning",
- "flux-pro": "Flux-1.1-Pro",
+ "flux-pro": "flux-1.1-pro",
}
@classmethod
@@ -95,14 +96,18 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ prompt: str = None,
seed: int = None,
size: str = "1:1", # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1"
stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
+
if model in cls.image_models:
- return cls._generate_image(model, messages, proxy, seed, size)
+ if prompt is None:
+ prompt = messages[-1]['content']
+ return cls._generate_image(model, prompt, proxy, seed, size)
else:
return cls._generate_text(model, messages, proxy, stream, **kwargs)
@@ -110,7 +115,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
async def _generate_image(
cls,
model: str,
- messages: Messages,
+ prompt: str,
proxy: str = None,
seed: int = None,
size: str = "1:1",
@@ -125,7 +130,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
}
if seed is None:
seed = random.randint(0, 100000)
- prompt = messages[-1]['content']
async with StreamSession(headers=headers, proxy=proxy) as session:
params = {
@@ -140,12 +144,8 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
if 'application/json' in content_type:
raise RuntimeError(await response.json().get("error", {}).get("message"))
- elif 'image' in content_type:
- image_data = b""
- async for chunk in response.iter_content():
- if chunk:
- image_data += chunk
- image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={prompt}&size={size}&seed={seed}"
+ elif content_type.startswith("image/"):
+ image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={quote(prompt)}&size={size}&seed={seed}"
yield ImageResponse(images=image_url, alt=prompt)
@classmethod
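Editor's note: with this change `_generate_image` receives the prompt directly, percent-encodes it via `quote`, and yields the imagine endpoint URL instead of buffering the image bytes. A minimal sketch of exercising this path through the client layer; the `flux-pro` alias now resolves to `flux-1.1-pro`:

```python
from g4f.client import Client
from g4f.Provider import Airforce

client = Client()
response = client.images.generate(
    model="flux-pro",  # mapped to "flux-1.1-pro" by the alias table above
    prompt="a lighthouse at dusk",
    provider=Airforce,
)
print(response.data[0].url)  # endpoint URL with the prompt percent-encoded
```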
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index e10a55e8..5721f377 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import os
import json
import asyncio
from http.cookiejar import CookieJar
@@ -21,9 +22,11 @@ from .helper import format_prompt
from ..typing import CreateResult, Messages, ImageType
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
-from ..providers.helper import format_cookies
+from ..providers.asyncio import get_running_loop
+from ..Provider.openai.har_file import NoValidHarFileError, get_headers
from ..requests import get_nodriver
from ..image import ImageResponse, to_bytes, is_accepted_format
+from ..cookies import get_cookies_dir
from .. import debug
class Conversation(BaseConversation):
@@ -69,14 +72,21 @@ class Copilot(AbstractProvider):
cookies = conversation.cookie_jar if conversation is not None else None
if cls.needs_auth or image is not None:
if conversation is None or conversation.access_token is None:
- access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
+ try:
+ access_token, cookies = readHAR()
+ except NoValidHarFileError as h:
+ debug.log(f"Copilot: {h}")
+ try:
+ get_running_loop(check_nested=True)
+ access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
+ except MissingRequirementsError:
+ raise h
else:
access_token = conversation.access_token
debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}")
- debug.log(f"Copilot: Cookies: {';'.join([*cookies])}")
websocket_url = f"{websocket_url}&accessToken={quote(access_token)}"
- headers = {"authorization": f"Bearer {access_token}", "cookie": format_cookies(cookies)}
-
+ headers = {"authorization": f"Bearer {access_token}"}
+
with Session(
timeout=timeout,
proxy=proxy,
@@ -160,7 +170,9 @@ class Copilot(AbstractProvider):
for (var i = 0; i < localStorage.length; i++) {
try {
item = JSON.parse(localStorage.getItem(localStorage.key(i)));
- if (item.credentialType == "AccessToken") {
+ if (item.credentialType == "AccessToken"
+ && item.expiresOn > Math.floor(Date.now() / 1000)
+ && item.target.includes("ChatAI")) {
return item.secret;
}
} catch(e) {}
@@ -173,4 +185,36 @@ class Copilot(AbstractProvider):
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
cookies[c.name] = c.value
await page.close()
- return access_token, cookies \ No newline at end of file
+ return access_token, cookies
+
+def readHAR():
+ harPath = []
+ for root, _, files in os.walk(get_cookies_dir()):
+ for file in files:
+ if file.endswith(".har"):
+ harPath.append(os.path.join(root, file))
+ if not harPath:
+ raise NoValidHarFileError("No .har file found")
+ api_key = None
+ cookies = None
+ for path in harPath:
+ with open(path, 'rb') as file:
+ try:
+ harFile = json.loads(file.read())
+ except json.JSONDecodeError:
+ # Error: not a HAR file!
+ continue
+ for v in harFile['log']['entries']:
+ v_headers = get_headers(v)
+ if v['request']['url'].startswith(Copilot.url):
+ try:
+ if "authorization" in v_headers:
+ api_key = v_headers["authorization"].split(maxsplit=1).pop()
+ except Exception as e:
+ debug.log(f"Error on read headers: {e}")
+ if v['request']['cookies']:
+ cookies = {c['name']: c['value'] for c in v['request']['cookies']}
+ if api_key is None:
+ raise NoValidHarFileError("No access token found in .har files")
+
+ return api_key, cookies \ No newline at end of file
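Editor's note: `readHAR` scans the cookies directory for browser captures and pulls the bearer token out of matching requests, so a HAR recording can replace the nodriver login. A minimal sketch, assuming a `.har` file captured on copilot.microsoft.com is already in place:

```python
from g4f.cookies import get_cookies_dir
from g4f.client import Client
from g4f.Provider import Copilot

# readHAR() walks this directory for *.har files containing an access token
print("Put your HAR capture in:", get_cookies_dir())

client = Client(provider=Copilot)
response = client.chat.completions.create(
    messages=[{"role": "user", "content": "Hi"}],
    model="",  # the provider falls back to its default model
)
print(response.choices[0].message.content)
```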
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index a30f896d..e82222b1 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -46,8 +46,7 @@ class PollinationsAI(OpenaiAPI):
seed: str = None,
**kwargs
) -> AsyncResult:
- if model:
- model = cls.get_model(model)
+ model = cls.get_model(model)
if model in cls.image_models:
if prompt is None:
prompt = messages[-1]["content"]
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 781aa410..e7c9de23 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -4,8 +4,10 @@ import os
import json
import random
import re
+import base64
from aiohttp import ClientSession, BaseConnector
+
try:
import nodriver
has_nodriver = True
@@ -14,12 +16,14 @@ except ImportError:
from ... import debug
from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
-from ..base_provider import AsyncGeneratorProvider, BaseConversation
+from ..base_provider import AsyncGeneratorProvider, BaseConversation, SynthesizeData
from ..helper import format_prompt, get_cookies
from ...requests.raise_for_status import raise_for_status
from ...requests.aiohttp import get_connector
+from ...requests import get_nodriver
from ...errors import MissingAuthError
from ...image import ImageResponse, to_bytes
+from ... import debug
REQUEST_HEADERS = {
"authority": "gemini.google.com",
@@ -54,6 +58,7 @@ class Gemini(AsyncGeneratorProvider):
image_models = ["gemini"]
default_vision_model = "gemini"
models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
+ synthesize_content_type = "audio/vnd.wav"
_cookies: Cookies = None
_snlm0e: str = None
_sid: str = None
@@ -64,17 +69,7 @@ class Gemini(AsyncGeneratorProvider):
if debug.logging:
print("Skip nodriver login in Gemini provider")
return
- try:
- from platformdirs import user_config_dir
- user_data_dir = user_config_dir("g4f-nodriver")
- except:
- user_data_dir = None
- if debug.logging:
- print(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await nodriver.start(
- user_data_dir=user_data_dir,
- browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
- )
+ browser = await get_nodriver(proxy=proxy)
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield f"Please login: [Google Gemini]({login_url})\n\n"
@@ -106,6 +101,7 @@ class Gemini(AsyncGeneratorProvider):
prompt = format_prompt(messages) if conversation is None else messages[-1]["content"]
cls._cookies = cookies or cls._cookies or get_cookies(".google.com", False, True)
base_connector = get_connector(connector, proxy)
+
async with ClientSession(
headers=REQUEST_HEADERS,
connector=base_connector
@@ -113,8 +109,11 @@ class Gemini(AsyncGeneratorProvider):
if not cls._snlm0e:
await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
if not cls._snlm0e:
- async for chunk in cls.nodriver_login(proxy):
- yield chunk
+ try:
+ async for chunk in cls.nodriver_login(proxy):
+ yield chunk
+ except Exception as e:
+ raise MissingAuthError('Missing "__Secure-1PSID" cookie', e)
if not cls._snlm0e:
if cls._cookies is None or "__Secure-1PSID" not in cls._cookies:
raise MissingAuthError('Missing "__Secure-1PSID" cookie')
@@ -122,6 +121,7 @@ class Gemini(AsyncGeneratorProvider):
if not cls._snlm0e:
raise RuntimeError("Invalid cookies. SNlM0e not found")
+ yield SynthesizeData(cls.__name__, {"text": messages[-1]["content"]})
image_url = await cls.upload_image(base_connector, to_bytes(image), image_name) if image else None
async with ClientSession(
@@ -198,6 +198,39 @@ class Gemini(AsyncGeneratorProvider):
except TypeError:
pass
+ @classmethod
+ async def synthesize(cls, params: dict, proxy: str = None) -> AsyncIterator[bytes]:
+ if "text" not in params:
+ raise ValueError("Missing parameter text")
+ async with ClientSession(
+ cookies=cls._cookies,
+ headers=REQUEST_HEADERS,
+ connector=get_connector(proxy=proxy),
+ ) as session:
+ if not cls._snlm0e:
+ await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
+ inner_data = json.dumps([None, params["text"], "de-DE", None, 2])
+ async with session.post(
+ "https://gemini.google.com/_/BardChatUi/data/batchexecute",
+ data={
+ "f.req": json.dumps([[["XqA3Ic", inner_data, None, "generic"]]]),
+ "at": cls._snlm0e,
+ },
+ params={
+ "rpcids": "XqA3Ic",
+ "source-path": "/app/2704fb4aafcca926",
+ "bl": "boq_assistant-bard-web-server_20241119.00_p1",
+ "f.sid": "" if cls._sid is None else cls._sid,
+ "hl": "de",
+ "_reqid": random.randint(1111, 9999),
+ "rt": "c"
+ },
+ ) as response:
+ await raise_for_status(response)
+ iter_base64_response = iter_filter_base64(response.content.iter_chunked(1024))
+ async for chunk in iter_base64_decode(iter_base64_response):
+ yield chunk
+
def build_request(
prompt: str,
language: str,
@@ -280,3 +313,28 @@ class Conversation(BaseConversation):
self.conversation_id = conversation_id
self.response_id = response_id
self.choice_id = choice_id
+
+async def iter_filter_base64(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
+ search_for = b'[["wrb.fr","XqA3Ic","[\\"'
+ end_with = b'\\'
+ is_started = False
+ async for chunk in response_iter:
+ if is_started:
+ if end_with in chunk:
+ yield chunk.split(end_with, 1).pop(0)
+ break
+ else:
+ yield chunk
+ elif search_for in chunk:
+ is_started = True
+ yield chunk.split(search_for, 1).pop()
+ else:
+ raise RuntimeError(f"Response: {chunk}")
+
+async def iter_base64_decode(response_iter: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
+ buffer = b""
+ async for chunk in response_iter:
+ chunk = buffer + chunk
+ rest = len(chunk) % 4
+ buffer = chunk[-rest:] if rest else b""
+ yield base64.b64decode(chunk[:-rest] if rest else chunk) \ No newline at end of file
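Editor's note: the new `synthesize` classmethod streams WAV audio from the batchexecute endpoint, and the `SynthesizeData` chunk yielded above is how the GUI requests it; `iter_filter_base64` cuts the base64 payload out of the framed response and `iter_base64_decode` decodes it on 4-byte boundaries. A minimal direct-call sketch, assuming valid Google cookies are available:

```python
import asyncio
from g4f.Provider.needs_auth import Gemini

async def main():
    # synthesize() yields decoded audio bytes; content type is "audio/vnd.wav"
    with open("speech.wav", "wb") as f:
        async for chunk in Gemini.synthesize({"text": "Hello from Gemini"}):
            f.write(chunk)

asyncio.run(main())
```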
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
new file mode 100644
index 00000000..3eb66b5e
--- /dev/null
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import json
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ...requests import StreamSession
+from ...providers.helper import format_prompt
+from ...cookies import get_cookies
+
+class Conversation(BaseConversation):
+ conversation_id: str
+
+ def __init__(self, conversation_id: str):
+ self.conversation_id = conversation_id
+
+class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://copilot.microsoft.com"
+ working = True
+ needs_auth = True
+ supports_stream = True
+ default_model = "gpt-4o"
+ models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ api_key: str = None,
+ proxy: str = None,
+ cookies: Cookies = None,
+ conversation_id: str = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = cls.default_model
+ if cookies is None:
+ cookies = get_cookies(".github.com")
+ async with StreamSession(
+ proxy=proxy,
+ impersonate="chrome",
+ cookies=cookies,
+ headers={
+ "GitHub-Verified-Fetch": "true",
+ }
+ ) as session:
+ headers = {}
+ if api_key is None:
+ async with session.post("https://github.com/github-copilot/chat/token") as response:
+ await raise_for_status(response, "Get token")
+ api_key = (await response.json()).get("token")
+ headers = {
+ "Authorization": f"GitHub-Bearer {api_key}",
+ }
+ if conversation is not None:
+ conversation_id = conversation.conversation_id
+ if conversation_id is None:
+ async with session.post("https://api.individual.githubcopilot.com/github/chat/threads", headers=headers) as response:
+ await raise_for_status(response)
+ conversation_id = (await response.json()).get("thread_id")
+ if return_conversation:
+ yield Conversation(conversation_id)
+ content = messages[-1]["content"]
+ else:
+ content = format_prompt(messages)
+ json_data = {
+ "content": content,
+ "intent": "conversation",
+ "references":[],
+ "context": [],
+ "currentURL": f"https://github.com/copilot/c/{conversation_id}",
+ "streaming": True,
+ "confirmations": [],
+ "customInstructions": [],
+ "model": model,
+ "mode": "immersive"
+ }
+ async with session.post(
+ f"https://api.individual.githubcopilot.com/github/chat/threads/{conversation_id}/messages",
+ json=json_data,
+ headers=headers
+ ) as response:
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ data = json.loads(line[6:])
+ if data.get("type") == "content":
+ yield data.get("body") \ No newline at end of file
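Editor's note: a minimal usage sketch for the new provider; it assumes you are logged in to github.com in a local browser so `get_cookies(".github.com")` can pick up the session used to fetch the chat token:

```python
import asyncio
from g4f.Provider.needs_auth import GithubCopilot

async def main():
    async for chunk in GithubCopilot.create_async_generator(
        model="gpt-4o",  # also: "o1-mini", "o1-preview", "claude-3.5-sonnet"
        messages=[{"role": "user", "content": "Explain Python decorators briefly."}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())
```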
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index f50b9f9d..37bdf074 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -61,6 +61,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
vision_models = fallback_models
image_models = fallback_models
+ synthesize_content_type = "audio/mpeg"
_api_key: str = None
_headers: dict = None
@@ -110,7 +111,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
# Post the image data to the service and get the image data
async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
cls._update_request_args(session)
- await raise_for_status(response)
+ await raise_for_status(response, "Create file failed")
image_data = {
**data,
**await response.json(),
@@ -128,7 +129,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"x-ms-blob-type": "BlockBlob"
}
) as response:
- await raise_for_status(response)
+ await raise_for_status(response, "Send file failed")
# Post the file ID to the service and get the download URL
async with session.post(
f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
@@ -136,12 +137,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
headers=headers
) as response:
cls._update_request_args(session)
- await raise_for_status(response)
+ await raise_for_status(response, "Get download url failed")
image_data["download_url"] = (await response.json())["download_url"]
return ImageRequest(image_data)
@classmethod
- def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
+ def create_messages(cls, messages: Messages, image_request: ImageRequest = None, system_hints: list = None):
"""
Create a list of messages for the user input
@@ -159,7 +160,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"id": str(uuid.uuid4()),
"create_time": int(time.time()),
"id": str(uuid.uuid4()),
- "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}}
+ "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, "system_hints": system_hints},
} for message in messages]
# Check if there is an image response
@@ -188,7 +189,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return messages
@classmethod
- async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict) -> ImageResponse:
+ async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict, prompt: str = None) -> ImageResponse:
"""
Retrieves the image response based on the message content.
@@ -210,6 +211,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
try:
prompt = element["metadata"]["dalle"]["prompt"]
file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ except TypeError:
+ return
except Exception as e:
raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
try:
@@ -239,6 +242,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image_name: str = None,
return_conversation: bool = False,
max_retries: int = 3,
+ web_search: bool = False,
**kwargs
) -> AsyncResult:
"""
@@ -330,14 +334,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"conversation_mode": {"kind":"primary_assistant"},
"websocket_request_id": str(uuid.uuid4()),
"supported_encodings": ["v1"],
- "supports_buffering": True
+ "supports_buffering": True,
+ "system_hints": ["search"] if web_search else None
}
if conversation.conversation_id is not None:
data["conversation_id"] = conversation.conversation_id
debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
- data["messages"] = cls.create_messages(messages, image_request)
+ data["messages"] = cls.create_messages(messages, image_request, ["search"] if web_search else None)
headers = {
**cls._headers,
"accept": "text/event-stream",
@@ -418,9 +423,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
generated_images = []
for element in c.get("parts"):
if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
- generated_images.append(
- cls.get_generated_image(session, cls._headers, element)
- )
+ image = cls.get_generated_image(session, cls._headers, element)
+ if image is not None:
+ generated_images.append(image)
for image_response in await asyncio.gather(*generated_images):
yield image_response
if m.get("author", {}).get("role") == "assistant":
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 1c7fe7c5..f3391706 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -7,6 +7,7 @@ from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .Gemini import Gemini
from .GeminiPro import GeminiPro
+from .GithubCopilot import GithubCopilot
from .Groq import Groq
from .HuggingFace import HuggingFace
from .HuggingFace2 import HuggingFace2
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 21e69388..01c75dae 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -5,30 +5,51 @@ import json
import uvicorn
import secrets
import os
+import shutil
-from fastapi import FastAPI, Response, Request
+import os.path
+from fastapi import FastAPI, Response, Request, UploadFile, Depends
+from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
from fastapi.exceptions import RequestValidationError
from fastapi.security import APIKeyHeader
from starlette.exceptions import HTTPException
-from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY, HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
+from starlette.status import (
+ HTTP_200_OK,
+ HTTP_422_UNPROCESSABLE_ENTITY,
+ HTTP_404_NOT_FOUND,
+ HTTP_401_UNAUTHORIZED,
+ HTTP_403_FORBIDDEN,
+ HTTP_500_INTERNAL_SERVER_ERROR,
+)
from fastapi.encoders import jsonable_encoder
+from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import FileResponse
-from pydantic import BaseModel
-from typing import Union, Optional
+from pydantic import BaseModel, Field
+from typing import Union, Optional, List
+try:
+ from typing import Annotated
+except ImportError:
+ class Annotated:
+ pass
import g4f
import g4f.debug
-from g4f.client import AsyncClient, ChatCompletion
+from g4f.client import AsyncClient, ChatCompletion, ImagesResponse, convert_to_provider
from g4f.providers.response import BaseConversation
from g4f.client.helper import filter_none
-from g4f.image import is_accepted_format, images_dir
+from g4f.image import is_accepted_format, is_data_uri_an_image, images_dir
from g4f.typing import Messages
-from g4f.cookies import read_cookie_files
+from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError
+from g4f.cookies import read_cookie_files, get_cookies_dir
+from g4f.Provider import ProviderType, ProviderUtils, __providers__
+from g4f.gui import get_gui_app
logger = logging.getLogger(__name__)
+DEFAULT_PORT = 1337
+
def create_app(g4f_api_key: str = None):
app = FastAPI()
@@ -42,10 +63,23 @@ def create_app(g4f_api_key: str = None):
)
api = Api(app, g4f_api_key=g4f_api_key)
+
+ if AppConfig.gui:
+ @app.get("/")
+ async def home():
+ return HTMLResponse(f'g4f v-{g4f.version.utils.current_version}:<br><br>'
+ 'Start to chat: <a href="/chat/">/chat/</a><br>'
+ 'Open Swagger UI at: '
+ '<a href="/docs">/docs</a>')
+
api.register_routes()
api.register_authorization()
api.register_validation_exception_handler()
+ if AppConfig.gui:
+ gui_app = WSGIMiddleware(get_gui_app())
+ app.mount("/", gui_app)
+
# Read cookie files if not ignored
if not AppConfig.ignore_cookie_files:
read_cookie_files()
@@ -57,17 +91,19 @@ def create_app_debug(g4f_api_key: str = None):
return create_app(g4f_api_key)
class ChatCompletionsConfig(BaseModel):
- messages: Messages
- model: str
+ messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
+ model: str = Field(default="")
provider: Optional[str] = None
stream: bool = False
+ image: Optional[str] = None
+ image_name: Optional[str] = None
temperature: Optional[float] = None
max_tokens: Optional[int] = None
stop: Union[list[str], str, None] = None
api_key: Optional[str] = None
web_search: Optional[bool] = None
proxy: Optional[str] = None
- conversation_id: str = None
+ conversation_id: Optional[str] = None
class ImageGenerationConfig(BaseModel):
prompt: str
@@ -77,6 +113,52 @@ class ImageGenerationConfig(BaseModel):
api_key: Optional[str] = None
proxy: Optional[str] = None
+class ProviderResponseModel(BaseModel):
+ id: str
+ object: str = "provider"
+ created: int
+ url: Optional[str]
+ label: Optional[str]
+
+class ProviderResponseDetailModel(ProviderResponseModel):
+ models: list[str]
+ image_models: list[str]
+ vision_models: list[str]
+ params: list[str]
+
+class ModelResponseModel(BaseModel):
+ id: str
+ object: str = "model"
+ created: int
+ owned_by: Optional[str]
+
+class ErrorResponseMessageModel(BaseModel):
+ message: str
+
+class ErrorResponseModel(BaseModel):
+ error: ErrorResponseMessageModel
+ model: Optional[str] = None
+ provider: Optional[str] = None
+
+class FileResponseModel(BaseModel):
+ filename: str
+
+class ErrorResponse(Response):
+ media_type = "application/json"
+
+ @classmethod
+ def from_exception(cls, exception: Exception,
+ config: Union[ChatCompletionsConfig, ImageGenerationConfig] = None,
+ status_code: int = HTTP_500_INTERNAL_SERVER_ERROR):
+ return cls(format_exception(exception, config), status_code)
+
+ @classmethod
+ def from_message(cls, message: str, status_code: int = HTTP_500_INTERNAL_SERVER_ERROR):
+ return cls(format_exception(message), status_code)
+
+ def render(self, content) -> bytes:
+ return str(content).encode(errors="ignore")
+
class AppConfig:
ignored_providers: Optional[list[str]] = None
g4f_api_key: Optional[str] = None
@@ -85,6 +167,7 @@ class AppConfig:
provider: str = None
image_provider: str = None
proxy: str = None
+ gui: bool = False
@classmethod
def set_config(cls, **data):
@@ -105,26 +188,20 @@ class Api:
self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key")
self.conversations: dict[str, dict[str, BaseConversation]] = {}
+ security = HTTPBearer(auto_error=False)
+
def register_authorization(self):
@self.app.middleware("http")
async def authorization(request: Request, call_next):
- if self.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions", "/v1/images/generate"]:
+ if self.g4f_api_key and request.url.path not in ("/", "/v1"):
try:
user_g4f_api_key = await self.get_g4f_api_key(request)
except HTTPException as e:
if e.status_code == 403:
- return JSONResponse(
- status_code=HTTP_401_UNAUTHORIZED,
- content=jsonable_encoder({"detail": "G4F API key required"}),
- )
+ return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
if not secrets.compare_digest(self.g4f_api_key, user_g4f_api_key):
- return JSONResponse(
- status_code=HTTP_403_FORBIDDEN,
- content=jsonable_encoder({"detail": "Invalid G4F API key"}),
- )
-
- response = await call_next(request)
- return response
+ return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
+ return await call_next(request)
def register_validation_exception_handler(self):
@self.app.exception_handler(RequestValidationError)
@@ -152,25 +229,31 @@ class Api:
return HTMLResponse('g4f API: Go to '
'<a href="/v1/models">models</a>, '
'<a href="/v1/chat/completions">chat/completions</a>, or '
- '<a href="/v1/images/generate">images/generate</a>.')
+ '<a href="/v1/images/generate">images/generate</a> <br><br>'
+ 'Open Swagger UI at: '
+ '<a href="/docs">/docs</a>')
- @self.app.get("/v1/models")
+ @self.app.get("/v1/models", responses={
+ HTTP_200_OK: {"model": List[ModelResponseModel]},
+ })
async def models():
model_list = dict(
(model, g4f.models.ModelUtils.convert[model])
for model in g4f.Model.__all__()
)
- model_list = [{
+ return [{
'id': model_id,
'object': 'model',
'created': 0,
'owned_by': model.base_provider
} for model_id, model in model_list.items()]
- return JSONResponse(model_list)
- @self.app.get("/v1/models/{model_name}")
- async def model_info(model_name: str):
- try:
+ @self.app.get("/v1/models/{model_name}", responses={
+ HTTP_200_OK: {"model": ModelResponseModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ })
+ async def model_info(model_name: str) -> ModelResponseModel:
+ if model_name in g4f.models.ModelUtils.convert:
model_info = g4f.models.ModelUtils.convert[model_name]
return JSONResponse({
'id': model_name,
@@ -178,21 +261,26 @@ class Api:
'created': 0,
'owned_by': model_info.base_provider
})
- except:
- return JSONResponse({"error": "The model does not exist."})
-
- @self.app.post("/v1/chat/completions")
- async def chat_completions(config: ChatCompletionsConfig, request: Request = None, provider: str = None):
+ return ErrorResponse.from_message("The model does not exist.", HTTP_404_NOT_FOUND)
+
+ @self.app.post("/v1/chat/completions", responses={
+ HTTP_200_OK: {"model": ChatCompletion},
+ HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
+ HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
+ })
+ async def chat_completions(
+ config: ChatCompletionsConfig,
+ credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None,
+ provider: str = None
+ ):
try:
config.provider = provider if config.provider is None else config.provider
if config.provider is None:
config.provider = AppConfig.provider
- if config.api_key is None and request is not None:
- auth_header = request.headers.get("Authorization")
- if auth_header is not None:
- api_key = auth_header.split(None, 1)[-1]
- if api_key and api_key != "Bearer":
- config.api_key = api_key
+ if credentials is not None:
+ config.api_key = credentials.credentials
conversation = return_conversation = None
if config.conversation_id is not None and config.provider is not None:
@@ -201,6 +289,12 @@ class Api:
if config.provider in self.conversations[config.conversation_id]:
conversation = self.conversations[config.conversation_id][config.provider]
+ if config.image is not None:
+ try:
+ is_data_uri_an_image(config.image)
+ except ValueError as e:
+ return ErrorResponse.from_message(f"The image you send must be a data URI. Example: data:image/webp;base64,...", status_code=HTTP_422_UNPROCESSABLE_ENTITY)
+
# Create the completion response
response = self.client.chat.completions.create(
**filter_none(
@@ -208,7 +302,7 @@ class Api:
"model": AppConfig.model,
"provider": AppConfig.provider,
"proxy": AppConfig.proxy,
- **config.dict(exclude_none=True),
+ **config.model_dump(exclude_none=True),
**{
"conversation_id": None,
"return_conversation": return_conversation,
@@ -220,8 +314,7 @@ class Api:
)
if not config.stream:
- response: ChatCompletion = await response
- return JSONResponse(response.to_json())
+ return await response
async def streaming():
try:
@@ -232,7 +325,7 @@ class Api:
self.conversations[config.conversation_id] = {}
self.conversations[config.conversation_id][config.provider] = chunk
else:
- yield f"data: {json.dumps(chunk.to_json())}\n\n"
+ yield f"data: {chunk.json()}\n\n"
except GeneratorExit:
pass
except Exception as e:
@@ -242,19 +335,32 @@ class Api:
return StreamingResponse(streaming(), media_type="text/event-stream")
+ except (ModelNotFoundError, ProviderNotFoundError) as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_404_NOT_FOUND)
+ except MissingAuthError as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_401_UNAUTHORIZED)
except Exception as e:
logger.exception(e)
- return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
-
- @self.app.post("/v1/images/generate")
- @self.app.post("/v1/images/generations")
- async def generate_image(config: ImageGenerationConfig, request: Request):
- if config.api_key is None:
- auth_header = request.headers.get("Authorization")
- if auth_header is not None:
- api_key = auth_header.split(None, 1)[-1]
- if api_key and api_key != "Bearer":
- config.api_key = api_key
+ return ErrorResponse.from_exception(e, config, HTTP_500_INTERNAL_SERVER_ERROR)
+
+ responses = {
+ HTTP_200_OK: {"model": ImagesResponse},
+ HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
+ }
+
+ @self.app.post("/v1/images/generate", responses=responses)
+ @self.app.post("/v1/images/generations", responses=responses)
+ async def generate_image(
+ request: Request,
+ config: ImageGenerationConfig,
+ credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None
+ ):
+ if credentials is not None:
+ config.api_key = credentials.credentials
try:
response = await self.client.images.generate(
prompt=config.prompt,
@@ -269,16 +375,92 @@ class Api:
for image in response.data:
if hasattr(image, "url") and image.url.startswith("/"):
image.url = f"{request.base_url}{image.url.lstrip('/')}"
- return JSONResponse(response.to_json())
+ return response
+ except (ModelNotFoundError, ProviderNotFoundError) as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_404_NOT_FOUND)
+ except MissingAuthError as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_401_UNAUTHORIZED)
except Exception as e:
logger.exception(e)
- return Response(content=format_exception(e, config, True), status_code=500, media_type="application/json")
-
- @self.app.post("/v1/completions")
- async def completions():
- return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
-
- @self.app.get("/images/{filename}")
+ return ErrorResponse.from_exception(e, config, HTTP_500_INTERNAL_SERVER_ERROR)
+
+ @self.app.get("/v1/providers", responses={
+ HTTP_200_OK: {"model": List[ProviderResponseModel]},
+ })
+ async def providers():
+ return [{
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ } for provider in __providers__ if provider.working]
+
+ @self.app.get("/v1/providers/{provider}", responses={
+ HTTP_200_OK: {"model": ProviderResponseDetailModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ })
+ async def providers_info(provider: str):
+ if provider not in ProviderUtils.convert:
+ return ErrorResponse.from_message("The provider does not exist.", 404)
+ provider: ProviderType = ProviderUtils.convert[provider]
+ def safe_get_models(provider: ProviderType) -> list[str]:
+ try:
+ return provider.get_models() if hasattr(provider, "get_models") else []
+ except:
+ return []
+ return {
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ 'models': safe_get_models(provider),
+ 'image_models': getattr(provider, "image_models", []) or [],
+ 'vision_models': [model for model in [getattr(provider, "default_vision_model", None)] if model],
+ 'params': [*provider.get_parameters()] if hasattr(provider, "get_parameters") else []
+ }
+
+ @self.app.post("/v1/upload_cookies", responses={
+ HTTP_200_OK: {"model": List[FileResponseModel]},
+ })
+ def upload_cookies(files: List[UploadFile]):
+ response_data = []
+ for file in files:
+ try:
+ if file and (file.filename.endswith(".json") or file.filename.endswith(".har")):
+ filename = os.path.basename(file.filename)
+ with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
+ shutil.copyfileobj(file.file, f)
+ response_data.append({"filename": filename})
+ finally:
+ file.file.close()
+ return response_data
+
+ @self.app.get("/v1/synthesize/{provider}", responses={
+ HTTP_200_OK: {"content": {"audio/*": {}}},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
+ })
+ async def synthesize(request: Request, provider: str):
+ try:
+ provider_handler = convert_to_provider(provider)
+ except ProviderNotFoundError as e:
+ return ErrorResponse.from_exception(e, status_code=HTTP_404_NOT_FOUND)
+ if not hasattr(provider_handler, "synthesize"):
+ return ErrorResponse.from_message("Provider doesn't support synthesize", HTTP_404_NOT_FOUND)
+ if len(request.query_params) == 0:
+ return ErrorResponse.from_message("Missing query params", HTTP_422_UNPROCESSABLE_ENTITY)
+ response_data = provider_handler.synthesize({**request.query_params})
+ content_type = getattr(provider_handler, "synthesize_content_type", "application/octet-stream")
+ return StreamingResponse(response_data, media_type=content_type)
+
+ @self.app.get("/images/{filename}", response_class=FileResponse, responses={
+ HTTP_200_OK: {"content": {"image/*": {}}},
+ HTTP_404_NOT_FOUND: {}
+ })
async def get_image(filename):
target = os.path.join(images_dir, filename)
@@ -290,12 +472,21 @@ class Api:
return FileResponse(target, media_type=content_type)
-def format_exception(e: Exception, config: Union[ChatCompletionsConfig, ImageGenerationConfig], image: bool = False) -> str:
+def format_exception(e: Union[Exception, str], config: Union[ChatCompletionsConfig, ImageGenerationConfig] = None, image: bool = False) -> str:
last_provider = {} if not image else g4f.get_last_provider(True)
- provider = (AppConfig.image_provider if image else AppConfig.provider) if config.provider is None else config.provider
- model = AppConfig.model if config.model is None else config.model
+ provider = (AppConfig.image_provider if image else AppConfig.provider)
+ model = AppConfig.model
+ if config is not None:
+ if config.provider is not None:
+ provider = config.provider
+ if config.model is not None:
+ model = config.model
+ if isinstance(e, str):
+ message = e
+ else:
+ message = f"{e.__class__.__name__}: {e}"
return json.dumps({
- "error": {"message": f"{e.__class__.__name__}: {e}"},
+ "error": {"message": message},
"model": last_provider.get("model") if model is None else model,
**filter_none(
provider=last_provider.get("name") if provider is None else provider
@@ -304,7 +495,7 @@ def format_exception(e: Exception, config: Union[ChatCompletionsConfig, ImageGen
def run_api(
host: str = '0.0.0.0',
- port: int = 1337,
+ port: int = None,
bind: str = None,
debug: bool = False,
workers: int = None,
@@ -316,6 +507,8 @@ def run_api(
use_colors = debug
if bind is not None:
host, port = bind.split(":")
+ if port is None:
+ port = DEFAULT_PORT
uvicorn.run(
f"g4f.api:create_app{'_debug' if debug else ''}",
host=host,
@@ -324,4 +517,4 @@ def run_api(
use_colors=use_colors,
factory=True,
reload=reload
- ) \ No newline at end of file
+ )
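Editor's note: a quick client-side tour of the new routes; a sketch using the `requests` package against the new default port:

```python
import requests

base = "http://localhost:1337"

# New provider listing routes
providers = requests.get(f"{base}/v1/providers").json()
print([p["id"] for p in providers][:5])
print(requests.get(f"{base}/v1/providers/Gemini").json()["models"])

# Upload cookie/HAR files into the cookies directory
with open("cookies.json", "rb") as f:
    print(requests.post(f"{base}/v1/upload_cookies", files={"files": f}).json())

# Stream synthesized audio from a provider that supports it
with requests.get(f"{base}/v1/synthesize/Gemini", params={"text": "Hello"}, stream=True) as r:
    with open("speech.wav", "wb") as out:
        for chunk in r.iter_content(1024):
            out.write(chunk)
```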
diff --git a/g4f/cli.py b/g4f/cli.py
index 04bfb6ad..a8a038a2 100644
--- a/g4f/cli.py
+++ b/g4f/cli.py
@@ -10,8 +10,10 @@ def main():
parser = argparse.ArgumentParser(description="Run gpt4free")
subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
api_parser = subparsers.add_parser("api")
- api_parser.add_argument("--bind", default="0.0.0.0:1337", help="The bind string.")
- api_parser.add_argument("--debug", action="store_true", help="Enable verbose logging.")
+ api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
+ api_parser.add_argument("--port", default=None, help="Change the port of the server.")
+ api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
+ api_parser.add_argument("--gui", "-g", default=False, action="store_true", help="Add gui to the api.")
api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)")
@@ -48,11 +50,13 @@ def run_api_args(args):
provider=args.provider,
image_provider=args.image_provider,
proxy=args.proxy,
- model=args.model
+ model=args.model,
+ gui=args.gui,
)
g4f.cookies.browsers = [g4f.cookies[browser] for browser in args.cookie_browsers]
run_api(
bind=args.bind,
+ port=args.port,
debug=args.debug,
workers=args.workers,
use_colors=not args.disable_colors,
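Editor's note: the same configuration is reachable programmatically; a minimal sketch, assuming the defaults from this diff:

```python
from g4f.api import run_api, AppConfig

AppConfig.set_config(gui=True)  # same effect as the new --gui/-g flag
run_api(debug=True)             # port is now optional and falls back to DEFAULT_PORT (1337)
```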
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index f6a0f5e8..86a81049 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -16,7 +16,7 @@ from ..providers.response import ResponseType, FinishReason, BaseConversation, S
from ..errors import NoImageResponseError, ModelNotFoundError
from ..providers.retry_provider import IterListProvider
from ..providers.asyncio import get_running_loop, to_sync_generator, async_generator_to_list
-from ..Provider.needs_auth.BingCreateImages import BingCreateImages
+from ..Provider.needs_auth import BingCreateImages, OpenaiAccount
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .image_models import ImageModels
from .types import IterResponse, ImageProvider, Client as BaseClient
@@ -73,7 +73,7 @@ def iter_response(
finish_reason = "stop"
if stream:
- yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
+ yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
if finish_reason is not None:
break
@@ -83,12 +83,12 @@ def iter_response(
finish_reason = "stop" if finish_reason is None else finish_reason
if stream:
- yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
+ yield ChatCompletionChunk.model_construct(None, finish_reason, completion_id, int(time.time()))
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = filter_json(content)
- yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
+ yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()))
# Synchronous iter_append_model_and_provider function
def iter_append_model_and_provider(response: ChatCompletionResponseType) -> ChatCompletionResponseType:
@@ -137,7 +137,7 @@ async def async_iter_response(
finish_reason = "stop"
if stream:
- yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
+ yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
if finish_reason is not None:
break
@@ -145,15 +145,14 @@ async def async_iter_response(
finish_reason = "stop" if finish_reason is None else finish_reason
if stream:
- yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
+ yield ChatCompletionChunk.model_construct(None, finish_reason, completion_id, int(time.time()))
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
content = filter_json(content)
- yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
+ yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()))
finally:
- if hasattr(response, 'aclose'):
- await safe_aclose(response)
+ await safe_aclose(response)
async def async_iter_append_model_and_provider(
response: AsyncChatCompletionResponseType
@@ -167,8 +166,7 @@ async def async_iter_append_model_and_provider(
chunk.provider = last_provider.get("name")
yield chunk
finally:
- if hasattr(response, 'aclose'):
- await safe_aclose(response)
+ await safe_aclose(response)
class Client(BaseClient):
def __init__(
@@ -266,33 +264,39 @@ class Images:
"""
return asyncio.run(self.async_generate(prompt, model, provider, response_format, proxy, **kwargs))
- async def async_generate(
- self,
- prompt: str,
- model: Optional[str] = None,
- provider: Optional[ProviderType] = None,
- response_format: Optional[str] = "url",
- proxy: Optional[str] = None,
- **kwargs
- ) -> ImagesResponse:
+ async def get_provider_handler(self, model: Optional[str], provider: Optional[ImageProvider], default: ImageProvider) -> ImageProvider:
if provider is None:
- provider_handler = self.models.get(model, provider or self.provider or BingCreateImages)
+ provider_handler = self.provider
+ if provider_handler is None:
+ provider_handler = self.models.get(model, default)
elif isinstance(provider, str):
provider_handler = convert_to_provider(provider)
else:
provider_handler = provider
if provider_handler is None:
- raise ModelNotFoundError(f"Unknown model: {model}")
+ return default
if isinstance(provider_handler, IterListProvider):
if provider_handler.providers:
provider_handler = provider_handler.providers[0]
else:
raise ModelNotFoundError(f"IterListProvider for model {model} has no providers")
+ return provider_handler
+
+ async def async_generate(
+ self,
+ prompt: str,
+ model: Optional[str] = None,
+ provider: Optional[ProviderType] = None,
+ response_format: Optional[str] = "url",
+ proxy: Optional[str] = None,
+ **kwargs
+ ) -> ImagesResponse:
+ provider_handler = await self.get_provider_handler(model, provider, BingCreateImages)
if proxy is None:
proxy = self.client.proxy
response = None
- if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+ if hasattr(provider_handler, "create_async_generator"):
messages = [{"role": "user", "content": f"Generate a image: {prompt}"}]
async for item in provider_handler.create_async_generator(model, messages, prompt=prompt, **kwargs):
if isinstance(item, ImageResponse):
@@ -313,7 +317,7 @@ class Images:
response = item
break
else:
- raise ValueError(f"Provider {provider} does not support image generation")
+ raise ValueError(f"Provider {getattr(provider_handler, '__name__')} does not support image generation")
if isinstance(response, ImageResponse):
return await self._process_image_response(
response,
@@ -322,6 +326,8 @@ class Images:
model,
getattr(provider_handler, "__name__", None)
)
+ if response is None:
+ raise NoImageResponseError(f"No image response from {getattr(provider_handler, '__name__')}")
raise NoImageResponseError(f"Unexpected response type: {type(response)}")
def create_variation(
@@ -345,32 +351,26 @@ class Images:
proxy: Optional[str] = None,
**kwargs
) -> ImagesResponse:
- if provider is None:
- provider = self.models.get(model, provider or self.provider or BingCreateImages)
- if provider is None:
- raise ModelNotFoundError(f"Unknown model: {model}")
- if isinstance(provider, str):
- provider = convert_to_provider(provider)
+ provider_handler = await self.get_provider_handler(model, provider, OpenaiAccount)
if proxy is None:
proxy = self.client.proxy
- if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+ if hasattr(provider_handler, "create_async_generator"):
messages = [{"role": "user", "content": "create a variation of this image"}]
generator = None
try:
- generator = provider.create_async_generator(model, messages, image=image, response_format=response_format, proxy=proxy, **kwargs)
+ generator = provider_handler.create_async_generator(model, messages, image=image, response_format=response_format, proxy=proxy, **kwargs)
async for chunk in generator:
if isinstance(chunk, ImageResponse):
response = chunk
break
finally:
- if generator and hasattr(generator, 'aclose'):
- await safe_aclose(generator)
- elif hasattr(provider, 'create_variation'):
- if asyncio.iscoroutinefunction(provider.create_variation):
- response = await provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
+ await safe_aclose(generator)
+ elif hasattr(provider_handler, 'create_variation'):
+ if asyncio.iscoroutinefunction(provider_handler.create_variation):
+ response = await provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
else:
- response = provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
+ response = provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
else:
raise NoImageResponseError(f"Provider {provider} does not support image variation")
@@ -378,6 +378,8 @@ class Images:
response = ImageResponse([response])
if isinstance(response, ImageResponse):
return self._process_image_response(response, response_format, proxy, model, getattr(provider, "__name__", None))
+ if response is None:
+ raise NoImageResponseError(f"No image response from {getattr(provider, '__name__')}")
raise NoImageResponseError(f"Unexpected response type: {type(response)}")
async def _process_image_response(
@@ -394,13 +396,13 @@ class Images:
if response_format == "b64_json":
with open(os.path.join(images_dir, os.path.basename(image_file)), "rb") as file:
image_data = base64.b64encode(file.read()).decode()
- return Image(url=image_file, b64_json=image_data, revised_prompt=response.alt)
- return Image(url=image_file, revised_prompt=response.alt)
+ return Image.model_construct(url=image_file, b64_json=image_data, revised_prompt=response.alt)
+ return Image.model_construct(url=image_file, revised_prompt=response.alt)
images = await asyncio.gather(*[process_image_item(image) for image in images])
else:
- images = [Image(url=image, revised_prompt=response.alt) for image in response.get_list()]
+ images = [Image.model_construct(url=image, revised_prompt=response.alt) for image in response.get_list()]
last_provider = get_last_provider(True)
- return ImagesResponse(
+ return ImagesResponse.model_construct(
images,
model=last_provider.get("model") if model is None else model,
provider=last_provider.get("name") if provider is None else provider
@@ -454,7 +456,11 @@ class AsyncCompletions:
)
stop = [stop] if isinstance(stop, str) else stop
- response = provider.create_completion(
+ if hasattr(provider, "create_async_generator"):
+ create_handler = provider.create_async_generator
+ else:
+ create_handler = provider.create_completion
+ response = create_handler(
model,
messages,
stream=stream,
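
Note: the change above routes AsyncCompletions.create through whichever entry point the provider exposes. A minimal standalone sketch of that dispatch, with `provider` as an illustrative placeholder rather than a g4f object:

    # Prefer the async-generator entry point when a provider offers one,
    # otherwise fall back to the synchronous completion API.
    def pick_create_handler(provider):
        if hasattr(provider, "create_async_generator"):
            return provider.create_async_generator
        return provider.create_completion
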
diff --git a/g4f/client/helper.py b/g4f/client/helper.py
index 909cc132..7b022e8c 100644
--- a/g4f/client/helper.py
+++ b/g4f/client/helper.py
@@ -46,7 +46,8 @@ def filter_none(**kwargs) -> dict:
async def safe_aclose(generator: AsyncGenerator) -> None:
try:
- await generator.aclose()
+ if generator and hasattr(generator, 'aclose'):
+ await generator.aclose()
except Exception as e:
logging.warning(f"Error while closing generator: {e}")
diff --git a/g4f/client/stubs.py b/g4f/client/stubs.py
index b38c9f6c..7367ac75 100644
--- a/g4f/client/stubs.py
+++ b/g4f/client/stubs.py
@@ -1,130 +1,150 @@
from __future__ import annotations
-from typing import Union
+from typing import Optional, List, Dict
from time import time
-class Model():
- ...
+from .helper import filter_none
+
+try:
+ from pydantic import BaseModel, Field
+except ImportError:
+ class BaseModel():
+ @classmethod
+ def model_construct(cls, **data):
+ new = cls()
+ for key, value in data.items():
+ setattr(new, key, value)
+ return new
+ class Field():
+ def __init__(self, **config):
+ pass
+
+class ChatCompletionChunk(BaseModel):
+ id: str
+ object: str
+ created: int
+ model: str
+ provider: Optional[str]
+ choices: List[ChatCompletionDeltaChoice]
-class ChatCompletion(Model):
- def __init__(
- self,
+ @classmethod
+ def model_construct(
+ cls,
content: str,
finish_reason: str,
completion_id: str = None,
created: int = None
):
- self.id: str = f"chatcmpl-{completion_id}" if completion_id else None
- self.object: str = "chat.completion"
- self.created: int = created
- self.model: str = None
- self.provider: str = None
- self.choices = [ChatCompletionChoice(ChatCompletionMessage(content), finish_reason)]
- self.usage: dict[str, int] = {
- "prompt_tokens": 0, #prompt_tokens,
- "completion_tokens": 0, #completion_tokens,
- "total_tokens": 0, #prompt_tokens + completion_tokens,
- }
-
- def to_json(self):
- return {
- **self.__dict__,
- "choices": [choice.to_json() for choice in self.choices]
- }
-
-class ChatCompletionChunk(Model):
- def __init__(
- self,
+ return super().model_construct(
+ id=f"chatcmpl-{completion_id}" if completion_id else None,
+ object="chat.completion.cunk",
+ created=created,
+ model=None,
+ provider=None,
+ choices=[ChatCompletionDeltaChoice.model_construct(
+ ChatCompletionDelta.model_construct(content),
+ finish_reason
+ )]
+ )
+
+class ChatCompletionMessage(BaseModel):
+ role: str
+ content: str
+
+ @classmethod
+ def model_construct(cls, content: str):
+ return super().model_construct(role="assistant", content=content)
+
+class ChatCompletionChoice(BaseModel):
+ index: int
+ message: ChatCompletionMessage
+ finish_reason: str
+
+ @classmethod
+ def model_construct(cls, message: ChatCompletionMessage, finish_reason: str):
+ return super().model_construct(index=0, message=message, finish_reason=finish_reason)
+
+class ChatCompletion(BaseModel):
+ id: str
+ object: str
+ created: int
+ model: str
+ provider: Optional[str]
+ choices: List[ChatCompletionChoice]
+ usage: Dict[str, int] = Field(examples=[{
+ "prompt_tokens": 0, #prompt_tokens,
+ "completion_tokens": 0, #completion_tokens,
+ "total_tokens": 0, #prompt_tokens + completion_tokens,
+ }])
+
+ @classmethod
+ def model_construct(
+ cls,
content: str,
finish_reason: str,
completion_id: str = None,
created: int = None
):
- self.id: str = f"chatcmpl-{completion_id}" if completion_id else None
- self.object: str = "chat.completion.chunk"
- self.created: int = created
- self.model: str = None
- self.provider: str = None
- self.choices = [ChatCompletionDeltaChoice(ChatCompletionDelta(content), finish_reason)]
-
- def to_json(self):
- return {
- **self.__dict__,
- "choices": [choice.to_json() for choice in self.choices]
- }
-
-class ChatCompletionMessage(Model):
- def __init__(self, content: Union[str, None]):
- self.role = "assistant"
- self.content = content
-
- def to_json(self):
- return self.__dict__
-
-class ChatCompletionChoice(Model):
- def __init__(self, message: ChatCompletionMessage, finish_reason: str):
- self.index = 0
- self.message = message
- self.finish_reason = finish_reason
-
- def to_json(self):
- return {
- **self.__dict__,
- "message": self.message.to_json()
- }
-
-class ChatCompletionDelta(Model):
- content: Union[str, None] = None
-
- def __init__(self, content: Union[str, None]):
- if content is not None:
- self.content = content
- self.role = "assistant"
-
- def to_json(self):
- return self.__dict__
-
-class ChatCompletionDeltaChoice(Model):
- def __init__(self, delta: ChatCompletionDelta, finish_reason: Union[str, None]):
- self.index = 0
- self.delta = delta
- self.finish_reason = finish_reason
-
- def to_json(self):
- return {
- **self.__dict__,
- "delta": self.delta.to_json()
- }
-
-class Image(Model):
- def __init__(self, url: str = None, b64_json: str = None, revised_prompt: str = None) -> None:
- if url is not None:
- self.url = url
- if b64_json is not None:
- self.b64_json = b64_json
- if revised_prompt is not None:
- self.revised_prompt = revised_prompt
-
- def to_json(self):
- return self.__dict__
-
-class ImagesResponse(Model):
- data: list[Image]
+ return super().model_construct(
+ id=f"chatcmpl-{completion_id}" if completion_id else None,
+ object="chat.completion",
+ created=created,
+ model=None,
+ provider=None,
+ choices=[ChatCompletionChoice.model_construct(
+ ChatCompletionMessage.model_construct(content),
+ finish_reason
+ )],
+ usage={
+ "prompt_tokens": 0, #prompt_tokens,
+ "completion_tokens": 0, #completion_tokens,
+ "total_tokens": 0, #prompt_tokens + completion_tokens,
+ }
+ )
+
+class ChatCompletionDelta(BaseModel):
+ role: str
+ content: str
+
+ @classmethod
+ def model_construct(cls, content: Optional[str]):
+ return super().model_construct(role="assistant", content=content)
+
+class ChatCompletionDeltaChoice(BaseModel):
+ index: int
+ delta: ChatCompletionDelta
+ finish_reason: Optional[str]
+
+ @classmethod
+ def model_construct(cls, delta: ChatCompletionDelta, finish_reason: Optional[str]):
+ return super().model_construct(index=0, delta=delta, finish_reason=finish_reason)
+
+class Image(BaseModel):
+ url: Optional[str]
+ b64_json: Optional[str]
+ revised_prompt: Optional[str]
+
+ @classmethod
+ def model_construct(cls, url: str = None, b64_json: str = None, revised_prompt: str = None):
+ return super().model_construct(**filter_none(
+ url=url,
+ b64_json=b64_json,
+ revised_prompt=revised_prompt
+ ))
+
+class ImagesResponse(BaseModel):
+ data: List[Image]
model: str
provider: str
created: int
- def __init__(self, data: list[Image], created: int = None, model: str = None, provider: str = None) -> None:
- self.data = data
+ @classmethod
+ def model_construct(cls, data: List[Image], created: int = None, model: str = None, provider: str = None):
if created is None:
created = int(time())
- self.model = model
- if provider is not None:
- self.provider = provider
- self.created = created
-
- def to_json(self):
- return {
- **self.__dict__,
- "data": [image.to_json() for image in self.data]
- }
\ No newline at end of file
+ return super().model_construct(
+ data=data,
+ model=model,
+ provider=provider,
+ created=created
+ )
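
Note: the rewritten stubs build every response object through `model_construct`, which skips pydantic validation and also works with the plain fallback class when pydantic is not installed. A self-contained sketch of the pattern:

    from typing import Optional

    try:
        from pydantic import BaseModel
    except ImportError:
        # Fallback mirroring the one above: assign fields without validation.
        class BaseModel:
            @classmethod
            def model_construct(cls, **data):
                new = cls()
                for key, value in data.items():
                    setattr(new, key, value)
                return new

    class Image(BaseModel):
        url: Optional[str] = None
        b64_json: Optional[str] = None

    image = Image.model_construct(url="img.png")  # no validation either way
    print(image.url)
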
diff --git a/g4f/gui/__init__.py b/g4f/gui/__init__.py
index 930a2aa0..140711fa 100644
--- a/g4f/gui/__init__.py
+++ b/g4f/gui/__init__.py
@@ -8,22 +8,13 @@ try:
except ImportError as e:
import_error = e
-def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> None:
- if import_error is not None:
- raise MissingRequirementsError(f'Install "gui" requirements | pip install -U g4f[gui]\n{import_error}')
-
- config = {
- 'host' : host,
- 'port' : port,
- 'debug': debug
- }
-
+def get_gui_app():
site = Website(app)
for route in site.routes:
app.add_url_rule(
route,
- view_func = site.routes[route]['function'],
- methods = site.routes[route]['methods'],
+ view_func=site.routes[route]['function'],
+ methods=site.routes[route]['methods'],
)
backend_api = Backend_Api(app)
@@ -33,6 +24,19 @@ def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> Non
view_func = backend_api.routes[route]['function'],
methods = backend_api.routes[route]['methods'],
)
+ return app
+
+def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> None:
+ if import_error is not None:
+ raise MissingRequirementsError(f'Install "gui" requirements | pip install -U g4f[gui]\n{import_error}')
+
+ config = {
+ 'host' : host,
+ 'port' : port,
+ 'debug': debug
+ }
+
+ get_gui_app()
print(f"Running on port {config['port']}")
app.run(**config)
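
Note: splitting app assembly into `get_gui_app()` makes the Flask GUI embeddable in another server instead of only runnable via `app.run`. A hedged example, assuming FastAPI with its WSGI bridge is installed; the mount path is arbitrary:

    from fastapi import FastAPI
    from fastapi.middleware.wsgi import WSGIMiddleware
    from g4f.gui import get_gui_app

    api = FastAPI()
    api.mount("/gui", WSGIMiddleware(get_gui_app()))  # serve GUI and API together
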
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 116509d8..8cbcd578 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -191,6 +191,9 @@
<button class="slide-systemPrompt">
<i class="fa-solid fa-angles-up"></i>
</button>
+ <div class="media_player">
+ <i class="fa-regular fa-x"></i>
+ </div>
<div class="toolbar">
<div id="input-count" class="">
<button class="hide-input">
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index b7ec00b9..856f9fb5 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -177,6 +177,10 @@ body {
filter: blur(calc(0.5 * 70vw)) opacity(var(--opacity));
}
+body.white .gradient{
+ display: none;
+}
+
.conversations {
display: flex;
flex-direction: column;
@@ -434,15 +438,28 @@ body {
font-size: 12px;
}
-.message audio {
+.media_player {
display: none;
- max-width: 400px;
}
-.message audio.show {
+.media_player audio {
+ right: 28px;
+ position: absolute;
+ top: -4px;
+ z-index: 900;
+}
+
+.media_player.show {
display: block;
}
+.media_player .fa-x {
+ position: absolute;
+ right: 8px;
+ top: 8px;
+ z-index: 1000;
+}
+
.count_total {
font-size: 12px;
padding-left: 25px;
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 8127fb9d..fd9cc50e 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -44,17 +44,18 @@ appStorage = window.localStorage || {
removeItem: (key) => delete self[key],
length: 0
}
-
-const markdown = window.markdownit();
-const markdown_render = (content) => {
- return markdown.render(content
- .replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
- .replaceAll(/<img data-prompt="[^>]+">/gm, "")
- )
- .replaceAll("<a href=", '<a target="_blank" href=')
- .replaceAll('<code>', '<code class="language-plaintext">')
+let markdown_render = () => null;
+if (window.markdownit) {
+ const markdown = window.markdownit();
+ markdown_render = (content) => {
+ return markdown.render(content
+ .replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
+ .replaceAll(/<img data-prompt="[^>]+">/gm, "")
+ )
+ .replaceAll("<a href=", '<a target="_blank" href=')
+ .replaceAll('<code>', '<code class="language-plaintext">')
+ }
}
-
function filter_message(text) {
return text.replaceAll(
/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, ""
@@ -135,10 +136,21 @@ const register_message_buttons = async () => {
if (!("click" in el.dataset)) {
el.dataset.click = "true";
el.addEventListener("click", async () => {
- const content_el = el.parentElement.parentElement;
- const audio = content_el.querySelector("audio");
- if (audio) {
- audio.classList.add("show");
+ const message_el = el.parentElement.parentElement.parentElement;
+ let audio;
+ if (message_el.dataset.synthesize_url) {
+ el.classList.add("active");
+ setTimeout(()=>el.classList.remove("active"), 2000);
+ const media_player = document.querySelector(".media_player");
+ if (!media_player.classList.contains("show")) {
+ media_player.classList.add("show");
+ audio = new Audio(message_el.dataset.synthesize_url);
+ audio.controls = true;
+ media_player.appendChild(audio);
+ } else {
+ audio = media_player.querySelector("audio");
+ audio.src = message_el.dataset.synthesize_url;
+ }
audio.play();
return;
}
@@ -163,7 +175,7 @@ const register_message_buttons = async () => {
el.dataset.running = true;
el.classList.add("blink")
el.classList.add("active")
- const message_el = content_el.parentElement;
+
let speechText = await get_message(window.conversation_id, message_el.dataset.index);
speechText = speechText.replaceAll(/([^0-9])\./gm, "$1.;");
@@ -351,6 +363,13 @@ stop_generating.addEventListener("click", async () => {
await load_conversation(window.conversation_id, false);
});
+document.querySelector(".media_player .fa-x").addEventListener("click", ()=>{
+ const media_player = document.querySelector(".media_player");
+ media_player.classList.remove("show");
+ const audio = document.querySelector(".media_player audio");
+ media_player.removeChild(audio);
+});
+
const prepare_messages = (messages, message_index = -1) => {
if (message_index >= 0) {
messages = messages.filter((_, index) => message_index >= index);
@@ -726,17 +745,17 @@ const load_conversation = async (conversation_id, scroll=true) => {
${item.provider.model ? ' with ' + item.provider.model : ''}
</div>
` : "";
- let audio = "";
+ let synthesize_params = {text: item.content}
+ let synthesize_provider = "Gemini";
if (item.synthesize) {
- const synthesize_params = (new URLSearchParams(item.synthesize.data)).toString();
- audio = `
- <audio controls preload="none">
- <source src="/backend-api/v2/synthesize/${item.synthesize.provider}?${synthesize_params}" type="audio/mpeg">
- </audio>
- `;
+ synthesize_params = item.synthesize.data
+ synthesize_provider = item.synthesize.provider;
}
+ synthesize_params = (new URLSearchParams(synthesize_params)).toString();
+ let synthesize_url = `/backend-api/v2/synthesize/${synthesize_provider}?${synthesize_params}`;
+
elements += `
- <div class="message${item.regenerate ? " regenerate": ""}" data-index="${i}">
+ <div class="message${item.regenerate ? " regenerate": ""}" data-index="${i}" data-synthesize_url="${synthesize_url}">
<div class="${item.role}">
${item.role == "assistant" ? gpt_image : user_image}
<i class="fa-solid fa-xmark"></i>
@@ -748,7 +767,6 @@ const load_conversation = async (conversation_id, scroll=true) => {
<div class="content">
${provider}
<div class="content_inner">${markdown_render(item.content)}</div>
- ${audio}
<div class="count">
${count_words_and_tokens(item.content, next_provider?.model)}
<i class="fa-solid fa-volume-high"></i>
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 0c32bea5..0701210d 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -22,11 +22,11 @@ conversations: dict[dict[str, BaseConversation]] = {}
class Api:
@staticmethod
- def get_models() -> list[str]:
+ def get_models():
return models._all_models
@staticmethod
- def get_provider_models(provider: str, api_key: str = None) -> list[dict]:
+ def get_provider_models(provider: str, api_key: str = None):
if provider in __map__:
provider: ProviderType = __map__[provider]
if issubclass(provider, ProviderModelMixin):
@@ -46,39 +46,7 @@ class Api:
return []
@staticmethod
- def get_image_models() -> list[dict]:
- image_models = []
- index = []
- for provider in __providers__:
- if hasattr(provider, "image_models"):
- if hasattr(provider, "get_models"):
- provider.get_models()
- parent = provider
- if hasattr(provider, "parent"):
- parent = __map__[provider.parent]
- if parent.__name__ not in index:
- for model in provider.image_models:
- image_models.append({
- "provider": parent.__name__,
- "url": parent.url,
- "label": parent.label if hasattr(parent, "label") else None,
- "image_model": model,
- "vision_model": getattr(parent, "default_vision_model", None)
- })
- index.append(parent.__name__)
- elif hasattr(provider, "default_vision_model") and provider.__name__ not in index:
- image_models.append({
- "provider": provider.__name__,
- "url": provider.url,
- "label": provider.label if hasattr(provider, "label") else None,
- "image_model": None,
- "vision_model": provider.default_vision_model
- })
- index.append(provider.__name__)
- return image_models
-
- @staticmethod
- def get_providers() -> list[str]:
+ def get_providers() -> dict[str, str]:
return {
provider.__name__: (provider.label if hasattr(provider, "label") else provider.__name__)
+ (" (Image Generation)" if getattr(provider, "image_models", None) else "")
@@ -90,7 +58,7 @@ class Api:
}
@staticmethod
- def get_version():
+ def get_version() -> dict:
try:
current_version = version.utils.current_version
except VersionNotFoundError:
@@ -140,13 +108,12 @@ class Api:
}
def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str, download_images: bool = True) -> Iterator:
- if debug.logging:
- debug.logs = []
- print_callback = debug.log_handler
- def log_handler(text: str):
- debug.logs.append(text)
- print_callback(text)
- debug.log_handler = log_handler
+ debug.logs = []
+ print_callback = debug.log_handler
+ def log_handler(text: str):
+ debug.logs.append(text)
+ print_callback(text)
+ debug.log_handler = log_handler
try:
result = ChatCompletion.create(**kwargs)
first = True
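
Note: _create_response_stream now tees every debug line into debug.logs unconditionally while still forwarding to the previous handler. The pattern in isolation:

    # Wrap the current handler so each message is recorded and forwarded.
    logs = []

    def tee(previous_handler):
        def handler(text: str):
            logs.append(text)
            previous_handler(text)
        return handler

    log_handler = print             # stand-in for debug.log_handler
    log_handler = tee(log_handler)
    log_handler("request started")  # printed and kept in `logs`
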
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 102c5685..3c87b0e2 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,7 +1,9 @@
import json
import flask
import os
-from flask import request, Flask
+import logging
+import asyncio
+from flask import Flask, request, jsonify
from typing import Generator
from werkzeug.utils import secure_filename
@@ -12,6 +14,8 @@ from g4f.errors import ProviderNotFoundError
from g4f.cookies import get_cookies_dir
from .api import Api
+logger = logging.getLogger(__name__)
+
def safe_iter_generator(generator: Generator) -> Generator:
start = next(generator)
def iter_generator():
@@ -38,17 +42,26 @@ class Backend_Api(Api):
app (Flask): Flask application instance to attach routes to.
"""
self.app: Flask = app
+
+ def jsonify_models(**kwargs):
+ response = self.get_models(**kwargs)
+ if isinstance(response, list):
+ return jsonify(response)
+ return response
+
+ def jsonify_provider_models(**kwargs):
+ response = self.get_provider_models(**kwargs)
+ if isinstance(response, list):
+ return jsonify(response)
+ return response
+
self.routes = {
'/backend-api/v2/models': {
- 'function': self.get_models,
+ 'function': jsonify_models,
'methods': ['GET']
},
'/backend-api/v2/models/<provider>': {
- 'function': self.get_provider_models,
- 'methods': ['GET']
- },
- '/backend-api/v2/image_models': {
- 'function': self.get_image_models,
+ 'function': jsonify_provider_models,
'methods': ['GET']
},
'/backend-api/v2/providers': {
@@ -127,15 +140,17 @@ class Backend_Api(Api):
return "Provider not found", 404
if not hasattr(provider_handler, "synthesize"):
return "Provider doesn't support synthesize", 500
- try:
- response_generator = provider_handler.synthesize({**request.args})
- if hasattr(response_generator, "__aiter__"):
- response_generator = to_sync_generator(response_generator)
- response = flask.Response(safe_iter_generator(response_generator), content_type="audio/mpeg")
- response.headers['Cache-Control'] = "max-age=604800"
- return response
- except Exception as e:
- return f"{e.__class__.__name__}: {e}", 500
+ response_data = provider_handler.synthesize({**request.args})
+ if asyncio.iscoroutinefunction(provider_handler.synthesize):
+ response_data = asyncio.run(response_data)
+ else:
+ if hasattr(response_data, "__aiter__"):
+ response_data = to_sync_generator(response_data)
+ response_data = safe_iter_generator(response_data)
+ content_type = getattr(provider_handler, "synthesize_content_type", "application/octet-stream")
+ response = flask.Response(response_data, content_type=content_type)
+ response.headers['Cache-Control'] = "max-age=604800"
+ return response
def get_provider_models(self, provider: str):
api_key = None if request.authorization is None else request.authorization.token
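
Note: the rewritten synthesize route calls the provider first and then decides how to consume the result: a coroutine is run to completion, an async generator is bridged to a sync one, and anything else streams as-is. A minimal sketch of that dispatch with illustrative placeholders:

    import asyncio

    def run_synthesize(fn, params):
        result = fn(params)
        if asyncio.iscoroutinefunction(fn):
            return asyncio.run(result)  # a coroutine: run it to completion
        return result                   # a sync generator or plain value

    print(run_synthesize(lambda params: b"audio-bytes", {"text": "hi"}))
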
diff --git a/g4f/gui/server/internet.py b/g4f/gui/server/internet.py
index bafa3af7..96496f65 100644
--- a/g4f/gui/server/internet.py
+++ b/g4f/gui/server/internet.py
@@ -107,7 +107,6 @@ async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text
safesearch="moderate",
timelimit="y",
max_results=n_results,
- backend="html"
):
results.append(SearchResultEntry(
result["title"],
diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py
index 3cabcdf3..456056b1 100644
--- a/g4f/gui/server/website.py
+++ b/g4f/gui/server/website.py
@@ -1,11 +1,12 @@
import uuid
from flask import render_template, redirect
+def redirect_home():
+ return redirect('/chat')
+
class Website:
def __init__(self, app) -> None:
self.app = app
- def redirect_home():
- return redirect('/chat')
self.routes = {
'/': {
'function': redirect_home,
@@ -35,7 +36,7 @@ class Website:
def _chat(self, conversation_id):
if '-' not in conversation_id:
- return redirect('/chat')
+ return redirect_home()
return render_template('index.html', chat_id=conversation_id)
def _index(self):
diff --git a/g4f/image.py b/g4f/image.py
index 114dcc13..e9abcb6e 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -33,10 +33,14 @@ EXTENSIONS_MAP: dict[str, str] = {
# Define the directory for generated images
images_dir = "./generated_images"
-def fix_url(url:str) -> str:
+def fix_url(url: str) -> str:
""" replace ' ' by '+' (to be markdown compliant)"""
return url.replace(" ","+")
+def fix_title(title: str) -> str:
+ if title:
+ return title.replace("\n", "").replace('"', '')
+
def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
Converts the input image to a PIL Image object.
@@ -226,12 +230,12 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
str: The formatted markdown string.
"""
if isinstance(images, str):
- result = f"[![{alt}]({fix_url(preview.replace('{image}', images) if preview else images)})]({fix_url(images)})"
+ result = f"[![{fix_title(alt)}]({fix_url(preview.replace('{image}', images) if preview else images)})]({fix_url(images)})"
else:
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
result = "\n".join(
- f"[![#{idx+1} {alt}]({fix_url(preview[idx])})]({fix_url(image)})"
+ f"[![#{idx+1} {fix_title(alt)}]({fix_url(preview[idx])})]({fix_url(image)})"
for idx, image in enumerate(images)
)
start_flag = "<!-- generated images start -->\n"
diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py
index e2c2f46a..80a9e09d 100644
--- a/g4f/providers/base_provider.py
+++ b/g4f/providers/base_provider.py
@@ -66,11 +66,12 @@ class AbstractProvider(BaseProvider):
@classmethod
def get_parameters(cls) -> dict[str, Parameter]:
- return signature(
+ return {name: parameter for name, parameter in signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
- ).parameters
+ ).parameters.items() if name not in ["kwargs", "model", "messages"]
+ and (name != "stream" or cls.supports_stream)}
@classmethod
@property
@@ -90,8 +91,6 @@ class AbstractProvider(BaseProvider):
args = ""
for name, param in cls.get_parameters().items():
- if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
- continue
args += f"\n {name}"
args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
default_value = f'"{param.default}"' if isinstance(param.default, str) else param.default
diff --git a/g4f/requests/raise_for_status.py b/g4f/requests/raise_for_status.py
index 8625f552..0cd09a2a 100644
--- a/g4f/requests/raise_for_status.py
+++ b/g4f/requests/raise_for_status.py
@@ -11,7 +11,7 @@ class CloudflareError(ResponseStatusError):
...
def is_cloudflare(text: str) -> bool:
- if "Generated by cloudfront" in text:
+ if "Generated by cloudfront" in text or '<p id="cf-spinner-please-wait">' in text:
return True
elif "<title>Attention Required! | Cloudflare</title>" in text or 'id="cf-cloudflare-status"' in text:
return True
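
Note: the extra fingerprint catches Cloudflare's interstitial spinner page in addition to the CloudFront block message. The check, reduced to the two new branches for illustration:

    def looks_blocked(text: str) -> bool:
        return ("Generated by cloudfront" in text
                or '<p id="cf-spinner-please-wait">' in text)

    print(looks_blocked('<p id="cf-spinner-please-wait">Just a moment</p>'))  # True
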
diff --git a/requirements-slim.txt b/requirements-slim.txt
index b9cbceba..2377faa3 100644
--- a/requirements-slim.txt
+++ b/requirements-slim.txt
@@ -13,4 +13,5 @@ flask
brotli
beautifulsoup4
aiohttp_socks
-cryptography
\ No newline at end of file
+cryptography
+python-multipart
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 83130838..11c34d59 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,4 +18,5 @@ aiohttp_socks
pywebview
plyer
cryptography
-nodriver
\ No newline at end of file
+nodriver
+python-multipart
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 12581be9..9dd8d36a 100644
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,7 @@ EXTRA_REQUIRE = {
"fastapi", # api
"uvicorn", # api
"nodriver",
+ "python-multipart",
],
'slim': [
"curl_cffi>=0.6.2",
@@ -43,6 +44,7 @@ EXTRA_REQUIRE = {
"werkzeug", "flask", # gui
"fastapi", # api
"uvicorn", # api
+ "python-multipart",
],
"image": [
"pillow",
@@ -64,12 +66,13 @@ EXTRA_REQUIRE = {
"api": [
"loguru", "fastapi",
"uvicorn",
+ "python-multipart",
],
"gui": [
"werkzeug", "flask",
"beautifulsoup4", "pillow",
"duckduckgo-search>=5.0",
- "browser_cookie3"
+ "browser_cookie3",
],
"search": [
"beautifulsoup4", "pillow",