 .gitignore                                 |   3
 README.md                                  |  71
 docs/async_client.md                       |  85
 etc/examples/api.py                        |   2
 etc/examples/image_api.py                  |   9
 g4f/Provider/BingCreateImages.py           |  40
 g4f/Provider/Blackbox.py                   |  10
 g4f/Provider/Cohere.py                     |   2
 g4f/Provider/DeepInfra.py                  |   5
 g4f/Provider/DuckDuckGo.py                 |   7
 g4f/Provider/Liaobots.py                   |  79
 g4f/Provider/Llama.py                      |   2
 g4f/Provider/PerplexityLabs.py             |   7
 g4f/Provider/Reka.py                       |  26
 g4f/Provider/Replicate.py                  |   1
 g4f/Provider/You.py                        |  26
 g4f/Provider/__init__.py                   |   2
 g4f/Provider/base_provider.py              |   1
 g4f/Provider/bing/create_images.py         |  30
 g4f/Provider/needs_auth/Gemini.py          | 162
 g4f/Provider/needs_auth/OpenaiChat.py      | 179
 g4f/Provider/needs_auth/PerplexityApi.py   |  31
 g4f/Provider/needs_auth/__init__.py        |   3
 g4f/Provider/openai/har_file.py            |  33
 g4f/Provider/openai/proofofwork.py         |  44
 g4f/Provider/you/har_file.py               |  34
 g4f/api/__init__.py                        |  71
 g4f/client/async_client.py                 | 109
 g4f/client/client.py                       |  11
 g4f/client/service.py                      |   6
 g4f/client/stubs.py                        |  25
 g4f/cookies.py                             |  37
 g4f/gui/client/index.html                  |  45
 g4f/gui/client/static/css/dracula.min.css  |   7
 g4f/gui/client/static/css/style.css        | 219
 g4f/gui/client/static/js/chat.v1.js        | 118
 g4f/gui/server/api.py                      |  46
 g4f/gui/server/backend.py                  |   8
 g4f/image.py                               |  23
 g4f/models.py                              | 159
 g4f/providers/base_provider.py             |   9
 g4f/providers/retry_provider.py            | 195
 g4f/typing.py                              |   2
 generated_images/.gitkeep                  |   0
 projects/windows/copy.sh                   |   2
 projects/windows/main.py                   |   9
 projects/windows/main.spec                 |   4
 47 files changed, 1215 insertions(+), 784 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3803a710..8d678a0f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,4 +64,5 @@ dist.py
x.txt
bench.py
to-reverse.txt
-g4f/Provider/OpenaiChat2.py \ No newline at end of file
+g4f/Provider/OpenaiChat2.py
+generated_images/ \ No newline at end of file
diff --git a/README.md b/README.md
index 73916af6..bf8c398f 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,13 @@
<a href="https://trendshift.io/repositories/1692" target="_blank"><img src="https://trendshift.io/api/badge/repositories/1692" alt="xtekky%2Fgpt4free | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
-Written by [@xtekky](https://github.com/hlohaus) & maintained by [@hlohaus](https://github.com/hlohaus)
+The **ETA** for v3 of g4f, when I, [@xtekky](https://github.com/xtekky), will pick this project back up and improve it, is **`29` days** (written Tue 28 May). Join [t.me/g4f_channel](https://t.me/g4f_channel) in the meantime to stay updated.
+
+_____
+
+
+Written by [@xtekky](https://github.com/xtekky) & maintained by [@hlohaus](https://github.com/hlohaus)
+
<div id="top"></div>
@@ -24,6 +30,7 @@ docker pull hlohaus789/g4f
## πŸ†• What's New
+- Added `gpt-4o`: simply pass `gpt-4o` as the model in `chat.completions.create` (see the sketch below this list).
- Installation Guide for Windows (.exe): πŸ’» [#installation-guide-for-windows](#installation-guide-for-windows-exe)
- Join our Telegram Channel: πŸ“¨ [telegram.me/g4f_channel](https://telegram.me/g4f_channel)
- Join our Discord Group: πŸ’¬ [discord.gg/XfybzPXPH5](https://discord.gg/XfybzPXPH5)
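As a quick illustration of the new model entry above, a minimal `gpt-4o` request through the client API might look like this (a sketch; the provider is left to g4f's defaults):

```python
from g4f.client import Client

# Sketch: request the newly added gpt-4o model.
client = Client()
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(response.choices[0].message.content)
```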
@@ -91,7 +98,12 @@ As per the survey, here is a list of improvements to come
```sh
docker pull hlohaus789/g4f
-docker run -p 8080:8080 -p 1337:1337 -p 7900:7900 --shm-size="2g" -v ${PWD}/har_and_cookies:/app/har_and_cookies hlohaus789/g4f:latest
+docker run \
+ -p 8080:8080 -p 1337:1337 -p 7900:7900 \
+ --shm-size="2g" \
+ -v ${PWD}/har_and_cookies:/app/har_and_cookies \
+ -v ${PWD}/generated_images:/app/generated_images \
+ hlohaus789/g4f:latest
```
3. **Access the Client:**
@@ -103,12 +115,10 @@ docker run -p 8080:8080 -p 1337:1337 -p 7900:7900 --shm-size="2g" -v ${PWD}/har_
#### Installation Guide for Windows (.exe)
To ensure the seamless operation of our application, please follow the instructions below. These steps are designed to guide you through the installation process on Windows operating systems.
-##### Prerequisites
-1. **WebView2 Runtime**: Our application requires the *WebView2 Runtime* to be installed on your system. If you do not have it installed, please download and install it from the [Microsoft Developer Website](https://developer.microsoft.com/en-us/microsoft-edge/webview2/). If you already have *WebView2 Runtime* installed but are encountering issues, navigate to *Installed Windows Apps*, select *WebView2*, and opt for the repair option.
-##### Installation Steps
-2. **Download the Application**: Visit our [latest releases page](https://github.com/xtekky/gpt4free/releases/latest) and download the most recent version of the application, named `g4f.webview.*.exe`.
-3. **File Placement**: Once downloaded, transfer the `.exe` file from your downloads folder to a directory of your choice on your system, and then execute it to run the app.
-##### Post-Installation Adjustment
+### Installation Steps
+1. **Download the Application**: Visit our [releases page](https://github.com/xtekky/gpt4free/releases/tag/0.3.1.7) and download the most recent version of the application, named `g4f.exe.zip`.
+2. **File Placement**: After downloading, locate the `.zip` file in your Downloads folder. Unpack it to a directory of your choice on your system, then execute the `g4f.exe` file to run the app.
+3. **Open GUI**: The app starts a web server with the GUI. Open your favorite browser and navigate to `http://localhost:8080/chat/` to access the application interface.
4. **Firewall Configuration (Hotfix)**: Upon installation, it may be necessary to adjust your Windows Firewall settings to allow the application to operate correctly. To do this, access your Windows Firewall settings and allow the application.
By following these steps, you should be able to successfully install and run the application on your Windows system. If you encounter any issues during the installation process, please refer to our Issue Tracker or contact us on Discord for assistance.
@@ -235,19 +245,39 @@ set_cookies(".google.com", {
})
```
-Alternatively, you can place your .har and cookie files in the `/har_and_cookies` directory. To export a cookie file, use the EditThisCookie extension available on the Chrome Web Store: [EditThisCookie Extension](https://chromewebstore.google.com/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg).
+#### Using .har and Cookie Files
-You can also create .har files to capture cookies. If you need further assistance, refer to the next section.
+You can place `.har` and cookie files in the default `./har_and_cookies` directory. To export a cookie file, use the [EditThisCookie Extension](https://chromewebstore.google.com/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg) available on the Chrome Web Store.
-```bash
-python -m g4f.cli api --debug
+#### Creating .har Files to Capture Cookies
+
+To capture cookies, you can also create `.har` files. For more details, refer to the next section.
+
+#### Changing the Cookies Directory and Loading Cookie Files in Python
+
+You can change the cookies directory and load cookie files in your Python environment. To set the cookies directory relative to your Python file, use the following code:
+
+```python
+import os.path
+from g4f.cookies import set_cookies_dir, read_cookie_files
+
+import g4f.debug
+g4f.debug.logging = True
+
+cookies_dir = os.path.join(os.path.dirname(__file__), "har_and_cookies")
+set_cookies_dir(cookies_dir)
+read_cookie_files(cookies_dir)
```
+
+### Debug Mode
+
+If you enable debug mode, you will see logs similar to the following:
+
```
Read .har file: ./har_and_cookies/you.com.har
Cookies added: 10 from .you.com
Read cookie file: ./har_and_cookies/google.json
Cookies added: 16 from .google.com
-Starting server... [g4f v-0.0.0] (debug)
```
#### .HAR File for OpenaiChat Provider
@@ -301,7 +331,7 @@ set G4F_PROXY=http://host:port
While we wait for gpt-5, here is a list of new models that are at least better than gpt-3.5-turbo. **Some are better than gpt-4**. Expect this list to grow.
| Website | Provider | parameters | better than |
-| ------ | ------- | ------ | ------ |
+| ------ | ------- | ------ | ------ |
| [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview |
| [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 |
| [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 |
@@ -417,19 +447,6 @@ While we wait for gpt-5, here is a list of new models that are at least better t
| Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl| llava-v1.6-34b | [replicate.com](https://replicate.com) |
| You.com | `g4f.Provider.You` | dall-e-3| βœ”οΈ | [you.com](https://you.com) |
-```python
-import requests
-from g4f.client import Client
-
-client = Client()
-image = requests.get("https://change_me.jpg", stream=True).raw
-response = client.chat.completions.create(
- "",
- messages=[{"role": "user", "content": "what is in this picture?"}],
- image=image
-)
-print(response.choices[0].message.content)
-```
## πŸ”— Powered by gpt4free
diff --git a/docs/async_client.md b/docs/async_client.md
index 4827d11b..003cfb20 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -9,21 +9,52 @@ Designed to maintain compatibility with the existing OpenAI API, the G4F AsyncCl
The G4F AsyncClient API offers several key features:
- **Custom Providers:** The G4F Client API allows you to use custom providers. This feature enhances the flexibility of the API, enabling it to cater to a wide range of use cases.
-
- **ChatCompletion Interface:** The G4F package provides an interface for interacting with chat models through the ChatCompletion class. This class provides methods for creating both streaming and non-streaming responses.
-
- **Streaming Responses:** The ChatCompletion.create method can return the response incrementally, as chunks are received, when the stream parameter is set to True.
-
- **Non-Streaming Responses:** The ChatCompletion.create method can also generate non-streaming responses.
+- **Image Generation and Vision Models:** The G4F Client API also supports image generation and vision models, expanding its utility beyond text-based interactions.
-- **Image Generation and Vision Models:** The G4F Client API also supports image generation and vision models, expanding its utility beyond text-based interactions.
+## Initializing the Client
+
+To utilize the G4F `AsyncClient`, you need to create a new instance. Below is an example showcasing how to initialize the client with custom providers:
+
+```python
+from g4f.client import AsyncClient
+from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
+
+client = AsyncClient(
+ provider=OpenaiChat,
+ image_provider=Gemini,
+ ...
+)
+```
+
+In this example:
+- `provider` specifies the primary provider for generating text completions.
+- `image_provider` specifies the provider for image-related functionalities.
+
+## Configuration
+
+You can configure the `AsyncClient` with additional settings, such as an API key for your provider and a proxy for all outgoing requests:
+
+```python
+from g4f.client import AsyncClient
+
+client = AsyncClient(
+ api_key="your_api_key_here",
+ proxies="http://user:pass@host",
+ ...
+)
+```
+- `api_key`: Your API key for the provider.
+- `proxies`: The proxy configuration for routing requests.
## Using AsyncClient
-### Text Completions:
+### Text Completions
-You can use the ChatCompletions endpoint to generate text completions as follows:
+You can use the `ChatCompletions` endpoint to generate text completions. Here’s how you can do it:
```python
response = await client.chat.completions.create(
@@ -34,7 +65,9 @@ response = await client.chat.completions.create(
print(response.choices[0].message.content)
```
-Streaming completions are also supported:
+### Streaming Completions
+
+The `AsyncClient` also supports streaming completions. This allows you to process the response incrementally as it is generated:
```python
stream = client.chat.completions.create(
@@ -48,6 +81,33 @@ async for chunk in stream:
print(chunk.choices[0].delta.content or "", end="")
```
+In this example:
+- `stream=True` enables streaming of the response.
+
+### Example: Using a Vision Model
+
+The following code snippet demonstrates how to use a vision model to analyze an image and generate a description based on the content of the image. This example shows how to fetch an image, send it to the model, and then process the response.
+
+```python
+import requests
+from g4f.client import AsyncClient
+from g4f.Provider import Bing
+
+client = AsyncClient(
+ provider=Bing
+)
+
+image = requests.get("https://my_website/image.jpg", stream=True).raw
+# Or: image = open("local_path/image.jpg", "rb")
+
+response = await client.chat.completions.create(
+ "",
+ messages=[{"role": "user", "content": "what is in this picture?"}],
+ image=image
+)
+print(response.choices[0].message.content)
+```
+
### Image Generation:
You can generate images using a specified prompt:
@@ -62,6 +122,17 @@ response = await client.images.generate(
image_url = response.data[0].url
```
+#### Base64 as the response format
+
+```python
+response = await client.images.generate(
+ prompt="a cool cat",
+ response_format="b64_json"
+)
+
+base64_text = response.data[0].b64_json
+```
+
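The `b64_json` payload above can be written straight to disk; a minimal decode step might look like this (a sketch, assuming `base64_text` holds a plain base64-encoded image and guessing a `.png` extension):

```python
import base64

# Sketch: decode the base64 payload from the example above into an image file.
with open("generated_image.png", "wb") as f:
    f.write(base64.b64decode(base64_text))
```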
### Example usage with asyncio.gather
Start two tasks at the same time:
diff --git a/etc/examples/api.py b/etc/examples/api.py
index d4d03a77..1ab9b51b 100644
--- a/etc/examples/api.py
+++ b/etc/examples/api.py
@@ -3,7 +3,7 @@ import json
url = "http://localhost:1337/v1/chat/completions"
body = {
"model": "",
- "provider": "MetaAI",
+ "provider": "",
"stream": True,
"messages": [
{"role": "assistant", "content": "What can you do? Who are you?"}
diff --git a/etc/examples/image_api.py b/etc/examples/image_api.py
new file mode 100644
index 00000000..dbae22ed
--- /dev/null
+++ b/etc/examples/image_api.py
@@ -0,0 +1,9 @@
+import requests
+url = "http://localhost:1337/v1/images/generations"
+body = {
+ "prompt": "heaven for dogs",
+ "provider": "OpenaiAccount",
+ "response_format": "b64_json",
+}
+data = requests.post(url, json=body, stream=True).json()
+print(data) \ No newline at end of file
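Assuming the endpoint answers with an OpenAI-style body of the form `{"data": [{"b64_json": ...}]}` (an assumption based on the `response_format` requested above), the script could be extended to persist the generated images:

```python
import base64

# Hypothetical follow-up: persist every generated image.
# Assumes an OpenAI-style payload: {"data": [{"b64_json": "..."}]}.
for index, item in enumerate(data.get("data", [])):
    with open(f"generated_{index}.png", "wb") as f:
        f.write(base64.b64decode(item["b64_json"]))
```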
diff --git a/g4f/Provider/BingCreateImages.py b/g4f/Provider/BingCreateImages.py
index 79dc5665..7a206c8f 100644
--- a/g4f/Provider/BingCreateImages.py
+++ b/g4f/Provider/BingCreateImages.py
@@ -1,18 +1,14 @@
from __future__ import annotations
-import asyncio
-import os
-from typing import Iterator, Union
-
from ..cookies import get_cookies
from ..image import ImageResponse
-from ..errors import MissingRequirementsError, MissingAuthError
+from ..errors import MissingAuthError
from ..typing import AsyncResult, Messages, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .bing.create_images import create_images, create_session, get_cookies_from_browser
+from .bing.create_images import create_images, create_session
class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Microsoft Designer"
+ label = "Microsoft Designer in Bing"
parent = "Bing"
url = "https://www.bing.com/images/create"
working = True
@@ -38,30 +34,9 @@ class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
session = BingCreateImages(cookies, proxy, api_key)
- yield await session.create_async(messages[-1]["content"])
-
- def create(self, prompt: str) -> Iterator[Union[ImageResponse, str]]:
- """
- Generator for creating imagecompletion based on a prompt.
-
- Args:
- prompt (str): Prompt to generate images.
-
- Yields:
- Generator[str, None, None]: The final output as markdown formatted string with images.
- """
- cookies = self.cookies or get_cookies(".bing.com", False)
- if cookies is None or "_U" not in cookies:
- login_url = os.environ.get("G4F_LOGIN_URL")
- if login_url:
- yield f"Please login: [Bing]({login_url})\n\n"
- try:
- self.cookies = get_cookies_from_browser(self.proxy)
- except MissingRequirementsError as e:
- raise MissingAuthError(f'Missing "_U" cookie. {e}')
- yield asyncio.run(self.create_async(prompt))
+ yield await session.generate(messages[-1]["content"])
- async def create_async(self, prompt: str) -> ImageResponse:
+ async def generate(self, prompt: str) -> ImageResponse:
"""
Asynchronously creates a markdown formatted string with images based on the prompt.
@@ -74,7 +49,6 @@ class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
cookies = self.cookies or get_cookies(".bing.com", False)
if cookies is None or "_U" not in cookies:
raise MissingAuthError('Missing "_U" cookie')
- proxy = self.proxy or os.environ.get("G4F_PROXY")
- async with create_session(cookies, proxy) as session:
- images = await create_images(session, prompt, proxy)
+ async with create_session(cookies, self.proxy) as session:
+ images = await create_images(session, prompt)
return ImageResponse(images, prompt, {"preview": "{image}?w=200&h=200"} if len(images) > 1 else {}) \ No newline at end of file
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 6e43a7a4..6e1e3949 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -4,7 +4,8 @@ import uuid
import secrets
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
+from ..typing import AsyncResult, Messages, ImageType
+from ..image import to_data_uri
from .base_provider import AsyncGeneratorProvider
class Blackbox(AsyncGeneratorProvider):
@@ -17,8 +18,15 @@ class Blackbox(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
+ image: ImageType = None,
+ image_name: str = None,
**kwargs
) -> AsyncResult:
+ if image is not None:
+ messages[-1]["data"] = {
+ "fileText": image_name,
+ "imageBase64": to_data_uri(image)
+ }
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Accept": "*/*",
diff --git a/g4f/Provider/Cohere.py b/g4f/Provider/Cohere.py
index 4f9fd30a..eac04ab4 100644
--- a/g4f/Provider/Cohere.py
+++ b/g4f/Provider/Cohere.py
@@ -9,7 +9,7 @@ from .helper import format_prompt
class Cohere(AbstractProvider):
url = "https://cohereforai-c4ai-command-r-plus.hf.space"
- working = True
+ working = False
supports_gpt_35_turbo = False
supports_gpt_4 = False
supports_stream = True
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 9691539e..f3e31962 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -8,11 +8,10 @@ class DeepInfra(Openai):
label = "DeepInfra"
url = "https://deepinfra.com"
working = True
- needs_auth = False
- has_auth = True
+ needs_auth = True
supports_stream = True
supports_message_history = True
- default_model = "meta-llama/Meta-Llama-3-70b-instruct"
+ default_model = "meta-llama/Meta-Llama-3-70B-Instruct"
default_vision_model = "llava-hf/llava-1.5-7b-hf"
model_aliases = {
'dbrx-instruct': 'databricks/dbrx-instruct',
diff --git a/g4f/Provider/DuckDuckGo.py b/g4f/Provider/DuckDuckGo.py
index 2fa0612a..9379660b 100644
--- a/g4f/Provider/DuckDuckGo.py
+++ b/g4f/Provider/DuckDuckGo.py
@@ -16,8 +16,11 @@ class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
default_model = "gpt-3.5-turbo-0125"
- models = ["gpt-3.5-turbo-0125", "claude-instant-1.2"]
- model_aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0125"}
+ models = ["gpt-3.5-turbo-0125", "claude-3-haiku-20240307"]
+ model_aliases = {
+ "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+ "claude-3-haiku": "claude-3-haiku-20240307"
+ }
status_url = "https://duckduckgo.com/duckchat/v1/status"
chat_url = "https://duckduckgo.com/duckchat/v1/chat"
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index deb7899c..75ecf300 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -10,6 +10,15 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
+ "gpt-4o": {
+ "context": "8K",
+ "id": "gpt-4o-free",
+ "maxLength": 31200,
+ "model": "ChatGPT",
+ "name": "GPT-4o-free",
+ "provider": "OpenAI",
+ "tokenLimit": 7800,
+ },
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5-Turbo",
@@ -95,7 +104,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = {
"claude-v2": "claude-2"
}
- _auth_code = None
+ _auth_code = ""
_cookie_jar = None
@classmethod
@@ -120,7 +129,13 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
cookie_jar=cls._cookie_jar,
connector=get_connector(connector, proxy, True)
) as session:
- cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
+ data = {
+ "conversationId": str(uuid.uuid4()),
+ "model": models[cls.get_model(model)],
+ "messages": messages,
+ "key": "",
+ "prompt": kwargs.get("system_message", "You are a helpful assistant."),
+ }
if not cls._auth_code:
async with session.post(
"https://liaobots.work/recaptcha/api/login",
@@ -128,31 +143,49 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
verify_ssl=False
) as response:
await raise_for_status(response)
+ try:
async with session.post(
"https://liaobots.work/api/user",
- json={"authcode": ""},
+ json={"authcode": cls._auth_code},
verify_ssl=False
) as response:
await raise_for_status(response)
cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
cls._cookie_jar = session.cookie_jar
-
- data = {
- "conversationId": str(uuid.uuid4()),
- "model": models[cls.get_model(model)],
- "messages": messages,
- "key": "",
- "prompt": kwargs.get("system_message", "You are a helpful assistant."),
- }
- async with session.post(
- "https://liaobots.work/api/chat",
- json=data,
- headers={"x-auth-code": cls._auth_code},
- verify_ssl=False
- ) as response:
- await raise_for_status(response)
- async for chunk in response.content.iter_any():
- if b"<html coupert-item=" in chunk:
- raise RuntimeError("Invalid session")
- if chunk:
- yield chunk.decode(errors="ignore")
+ async with session.post(
+ "https://liaobots.work/api/chat",
+ json=data,
+ headers={"x-auth-code": cls._auth_code},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ if b"<html coupert-item=" in chunk:
+ raise RuntimeError("Invalid session")
+ if chunk:
+ yield chunk.decode(errors="ignore")
+ except:
+ async with session.post(
+ "https://liaobots.work/api/user",
+ json={"authcode": "pTIQr4FTnVRfr"},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
+ cls._cookie_jar = session.cookie_jar
+ async with session.post(
+ "https://liaobots.work/api/chat",
+ json=data,
+ headers={"x-auth-code": cls._auth_code},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ if b"<html coupert-item=" in chunk:
+ raise RuntimeError("Invalid session")
+ if chunk:
+ yield chunk.decode(errors="ignore")
diff --git a/g4f/Provider/Llama.py b/g4f/Provider/Llama.py
index f2c78b36..235c0994 100644
--- a/g4f/Provider/Llama.py
+++ b/g4f/Provider/Llama.py
@@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Llama(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.llama2.ai"
- working = True
+ working = False
supports_message_history = True
default_model = "meta/meta-llama-3-70b-instruct"
models = [
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 91ba63f2..4a2cc9e5 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -21,11 +21,14 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"related"
]
model_aliases = {
- "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
+ "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
+ "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
"mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
"codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
"llava-v1.5-7b": "llava-v1.5-7b-wrapper",
- 'databricks/dbrx-instruct': "dbrx-instruct"
+ "databricks/dbrx-instruct": "dbrx-instruct",
+ "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
+ "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
}
@classmethod
diff --git a/g4f/Provider/Reka.py b/g4f/Provider/Reka.py
index 09164b07..2306149e 100644
--- a/g4f/Provider/Reka.py
+++ b/g4f/Provider/Reka.py
@@ -9,6 +9,7 @@ from ..image import to_bytes
class Reka(AbstractProvider):
url = "https://chat.reka.ai/"
working = True
+ needs_auth = True
supports_stream = True
default_vision_model = "reka"
cookies = {}
@@ -20,13 +21,12 @@ class Reka(AbstractProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- timeout: int = 180,
api_key: str = None,
image: ImageType = None,
**kwargs
) -> CreateResult:
cls.proxy = proxy
-
+
if not api_key:
cls.cookies = get_cookies("chat.reka.ai")
if not cls.cookies:
@@ -34,19 +34,19 @@ class Reka(AbstractProvider):
elif "appSession" not in cls.cookies:
raise ValueError("No appSession found in cookies for chat.reka.ai, log in or provide bearer_auth")
api_key = cls.get_access_token(cls)
-
+
conversation = []
for message in messages:
conversation.append({
"type": "human",
"text": message["content"],
})
-
+
if image:
image_url = cls.upload_image(cls, api_key, image)
conversation[-1]["image_url"] = image_url
conversation[-1]["media_type"] = "image"
-
+
headers = {
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
@@ -64,7 +64,7 @@ class Reka(AbstractProvider):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
}
-
+
json_data = {
'conversation_history': conversation,
'stream': True,
@@ -73,7 +73,7 @@ class Reka(AbstractProvider):
'model_name': 'reka-core',
'random_seed': int(time.time() * 1000),
}
-
+
tokens = ''
response = requests.post('https://chat.reka.ai/api/chat',
@@ -82,11 +82,11 @@ class Reka(AbstractProvider):
for completion in response.iter_lines():
if b'data' in completion:
token_data = json.loads(completion.decode('utf-8')[5:])['text']
-
+
yield (token_data.replace(tokens, ''))
-
+
tokens = token_data
-
+
def upload_image(cls, access_token, image: ImageType) -> str:
boundary_token = os.urandom(8).hex()
@@ -120,7 +120,7 @@ class Reka(AbstractProvider):
cookies=cls.cookies, headers=headers, proxies=cls.proxy, data=data.encode('latin-1'))
return response.json()['media_url']
-
+
def get_access_token(cls):
headers = {
'accept': '*/*',
@@ -141,8 +141,8 @@ class Reka(AbstractProvider):
try:
response = requests.get('https://chat.reka.ai/bff/auth/access_token',
cookies=cls.cookies, headers=headers, proxies=cls.proxy)
-
+
return response.json()['accessToken']
-
+
except Exception as e:
raise ValueError(f"Failed to get access token: {e}, refresh your cookies / log in into chat.reka.ai") \ No newline at end of file
diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/Replicate.py
index 89777cf2..7ff8ad65 100644
--- a/g4f/Provider/Replicate.py
+++ b/g4f/Provider/Replicate.py
@@ -10,6 +10,7 @@ from ..errors import ResponseError, MissingAuthError
class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
working = True
+ needs_auth = True
default_model = "meta/meta-llama-3-70b-instruct"
model_aliases = {
"meta-llama/Meta-Llama-3-70B-Instruct": default_model
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index e7958f71..162d6adb 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -8,7 +8,7 @@ import uuid
from ..typing import AsyncResult, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-from ..image import ImageResponse, ImagePreview, to_bytes, is_accepted_format
+from ..image import ImageResponse, ImagePreview, EXTENSIONS_MAP, to_bytes, is_accepted_format
from ..requests import StreamSession, FormData, raise_for_status
from .you.har_file import get_telemetry_ids
from .. import debug
@@ -24,6 +24,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
+ "gpt-4o",
"gpt-4",
"gpt-4-turbo",
"claude-instant",
@@ -42,7 +43,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
*image_models
]
model_aliases = {
- "claude-v2": "claude-2"
+ "claude-v2": "claude-2",
}
_cookies = None
_cookies_used = 0
@@ -93,17 +94,22 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
"q": format_prompt(messages),
"domain": "youchat",
"selectedChatMode": chat_mode,
+ "conversationTurnId": str(uuid.uuid4()),
+ "chatId": str(uuid.uuid4()),
}
params = {
"userFiles": upload,
"selectedChatMode": chat_mode,
}
if chat_mode == "custom":
- params["selectedAIModel"] = model.replace("-", "_")
+ if debug.logging:
+ print(f"You model: {model}")
+ params["selectedAiModel"] = model.replace("-", "_")
+
async with (session.post if chat_mode == "default" else session.get)(
f"{cls.url}/api/streamingSearch",
- data=data,
- params=params,
+ data=data if chat_mode == "default" else None,
+ params=params if chat_mode == "default" else data,
headers=headers,
cookies=cookies
) as response:
@@ -114,9 +120,9 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
elif line.startswith(b'data: '):
if event in ["youChatUpdate", "youChatToken"]:
data = json.loads(line[6:])
- if event == "youChatToken" and event in data:
+ if event == "youChatToken" and event in data and data[event]:
yield data[event]
- elif event == "youChatUpdate" and "t" in data and data["t"] is not None:
+ elif event == "youChatUpdate" and "t" in data and data["t"]:
if chat_mode == "create":
match = re.search(r"!\[(.+?)\]\((.+?)\)", data["t"])
if match:
@@ -138,7 +144,9 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
await raise_for_status(response)
upload_nonce = await response.text()
data = FormData()
- data.add_field('file', file, content_type=is_accepted_format(file), filename=filename)
+ content_type = is_accepted_format(file)
+ filename = f"image.{EXTENSIONS_MAP[content_type]}" if filename is None else filename
+ data.add_field('file', file, content_type=content_type, filename=filename)
async with client.post(
f"{cls.url}/api/upload",
data=data,
@@ -212,4 +220,4 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
'stytch_session_jwt': session["session_jwt"],
'ydc_stytch_session': session["session_token"],
'ydc_stytch_session_jwt': session["session_jwt"],
- }
+ } \ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index d0c0d8b6..e60e1310 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from ..providers.types import BaseProvider, ProviderType
-from ..providers.retry_provider import RetryProvider, IterProvider
+from ..providers.retry_provider import RetryProvider, IterListProvider
from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
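The `IterProvider` to `IterListProvider` rename is internal; user-facing retry chains are still assembled with `RetryProvider`, roughly like this (a sketch; the listed providers are only illustrative):

```python
import g4f
from g4f.Provider import RetryProvider, Phind, FreeChatgpt, Liaobots

# Sketch: try each provider in order until one returns a response.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=RetryProvider([Phind, FreeChatgpt, Liaobots], shuffle=False),
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```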
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 8f368747..667f6964 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,3 +1,4 @@
from ..providers.base_provider import *
from ..providers.types import FinishReason, Streaming
+from ..providers.conversation import BaseConversation
from .helper import get_cookies, format_prompt \ No newline at end of file
diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py
index 44303c21..7a08ddfe 100644
--- a/g4f/Provider/bing/create_images.py
+++ b/g4f/Provider/bing/create_images.py
@@ -1,7 +1,3 @@
-"""
-This module provides functionalities for creating and managing images using Bing's service.
-It includes functions for user login, session creation, image creation, and processing.
-"""
from __future__ import annotations
import asyncio
@@ -17,9 +13,7 @@ try:
except ImportError:
has_requirements = False
-from ...providers.create_images import CreateImagesProvider
from ..helper import get_connector
-from ...providers.types import ProviderType
from ...errors import MissingRequirementsError, RateLimitError
from ...webdriver import WebDriver, get_driver_cookies, get_browser
@@ -101,7 +95,7 @@ def create_session(cookies: Dict[str, str], proxy: str = None, connector: BaseCo
headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
return ClientSession(headers=headers, connector=get_connector(connector, proxy))
-async def create_images(session: ClientSession, prompt: str, proxy: str = None, timeout: int = TIMEOUT_IMAGE_CREATION) -> List[str]:
+async def create_images(session: ClientSession, prompt: str, timeout: int = TIMEOUT_IMAGE_CREATION) -> List[str]:
"""
Creates images based on a given prompt using Bing's service.
@@ -132,7 +126,7 @@ async def create_images(session: ClientSession, prompt: str, proxy: str = None,
raise RuntimeError(f"Create images failed: {error}")
if response.status != 302:
url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE"
- async with session.post(url, allow_redirects=False, proxy=proxy, timeout=timeout) as response:
+ async with session.post(url, allow_redirects=False, timeout=timeout) as response:
if response.status != 302:
raise RuntimeError(f"Create images failed. Code: {response.status}")
@@ -185,22 +179,4 @@ def read_images(html_content: str) -> List[str]:
raise RuntimeError("Bad images found")
if not images:
raise RuntimeError("No images found")
- return images
-
-def patch_provider(provider: ProviderType) -> CreateImagesProvider:
- """
- Patches a provider to include image creation capabilities.
-
- Args:
- provider (ProviderType): The provider to be patched.
-
- Returns:
- CreateImagesProvider: The patched provider with image creation capabilities.
- """
- from ..BingCreateImages import BingCreateImages
- service = BingCreateImages()
- return CreateImagesProvider(
- provider,
- service.create,
- service.create_async
- ) \ No newline at end of file
+ return images \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 02bb0f5a..eddd25fa 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -17,12 +17,12 @@ except ImportError:
pass
from ... import debug
-from ...typing import Messages, Cookies, ImageType, AsyncResult
-from ..base_provider import AsyncGeneratorProvider
+from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
+from ..base_provider import AsyncGeneratorProvider, BaseConversation
from ..helper import format_prompt, get_cookies
from ...requests.raise_for_status import raise_for_status
from ...errors import MissingAuthError, MissingRequirementsError
-from ...image import to_bytes, ImageResponse
+from ...image import ImageResponse, to_bytes
from ...webdriver import get_browser, get_driver_cookies
REQUEST_HEADERS = {
@@ -32,7 +32,7 @@ REQUEST_HEADERS = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'x-same-domain': '1',
}
-REQUEST_BL_PARAM = "boq_assistant-bard-web-server_20240421.18_p0"
+REQUEST_BL_PARAM = "boq_assistant-bard-web-server_20240519.16_p0"
REQUEST_URL = "https://gemini.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate"
UPLOAD_IMAGE_URL = "https://content-push.googleapis.com/upload/"
UPLOAD_IMAGE_HEADERS = {
@@ -57,9 +57,11 @@ class Gemini(AsyncGeneratorProvider):
image_models = ["gemini"]
default_vision_model = "gemini"
_cookies: Cookies = None
+ _snlm0e: str = None
+ _sid: str = None
@classmethod
- async def nodriver_login(cls) -> Cookies:
+ async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]:
try:
import nodriver as uc
except ImportError:
@@ -71,7 +73,13 @@ class Gemini(AsyncGeneratorProvider):
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await uc.start(user_data_dir=user_data_dir)
+ browser = await uc.start(
+ user_data_dir=user_data_dir,
+ browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
+ )
+ login_url = os.environ.get("G4F_LOGIN_URL")
+ if login_url:
+ yield f"Please login: [Google Gemini]({login_url})\n\n"
page = await browser.get(f"{cls.url}/app")
await page.select("div.ql-editor.textarea", 240)
cookies = {}
@@ -79,10 +87,10 @@ class Gemini(AsyncGeneratorProvider):
if c.domain.endswith(".google.com"):
cookies[c.name] = c.value
await page.close()
- return cookies
+ cls._cookies = cookies
@classmethod
- async def webdriver_login(cls, proxy: str):
+ async def webdriver_login(cls, proxy: str) -> AsyncIterator[str]:
driver = None
try:
driver = get_browser(proxy=proxy)
@@ -111,40 +119,40 @@ class Gemini(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
- api_key: str = None,
cookies: Cookies = None,
connector: BaseConnector = None,
image: ImageType = None,
image_name: str = None,
+ response_format: str = None,
+ return_conversation: bool = False,
+ conversation: Conversation = None,
+ language: str = "en",
**kwargs
) -> AsyncResult:
- prompt = format_prompt(messages)
- if api_key is not None:
- if cookies is None:
- cookies = {}
- cookies["__Secure-1PSID"] = api_key
+ prompt = format_prompt(messages) if conversation is None else messages[-1]["content"]
cls._cookies = cookies or cls._cookies or get_cookies(".google.com", False, True)
base_connector = get_connector(connector, proxy)
async with ClientSession(
headers=REQUEST_HEADERS,
connector=base_connector
) as session:
- snlm0e = await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
- if not snlm0e:
- cls._cookies = await cls.nodriver_login();
+ if not cls._snlm0e:
+ await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
+ if not cls._snlm0e:
+ async for chunk in cls.nodriver_login(proxy):
+ yield chunk
if cls._cookies is None:
async for chunk in cls.webdriver_login(proxy):
yield chunk
-
- if not snlm0e:
- if "__Secure-1PSID" not in cls._cookies:
+ if not cls._snlm0e:
+ if cls._cookies is None or "__Secure-1PSID" not in cls._cookies:
raise MissingAuthError('Missing "__Secure-1PSID" cookie')
- snlm0e = await cls.fetch_snlm0e(session, cls._cookies)
- if not snlm0e:
+ await cls.fetch_snlm0e(session, cls._cookies)
+ if not cls._snlm0e:
raise RuntimeError("Invalid cookies. SNlM0e not found")
image_url = await cls.upload_image(base_connector, to_bytes(image), image_name) if image else None
-
+
async with ClientSession(
cookies=cls._cookies,
headers=REQUEST_HEADERS,
@@ -152,13 +160,17 @@ class Gemini(AsyncGeneratorProvider):
) as client:
params = {
'bl': REQUEST_BL_PARAM,
+ 'hl': language,
'_reqid': random.randint(1111, 9999),
- 'rt': 'c'
+ 'rt': 'c',
+ "f.sid": cls._sid,
}
data = {
- 'at': snlm0e,
+ 'at': cls._snlm0e,
'f.req': json.dumps([None, json.dumps(cls.build_request(
prompt,
+ language=language,
+ conversation=conversation,
image_url=image_url,
image_name=image_name
))])
@@ -169,37 +181,53 @@ class Gemini(AsyncGeneratorProvider):
params=params,
) as response:
await raise_for_status(response)
- response = await response.text()
- response_part = json.loads(json.loads(response.splitlines()[-5])[0][2])
- if response_part[4] is None:
- response_part = json.loads(json.loads(response.splitlines()[-7])[0][2])
-
- content = response_part[4][0][1][0]
- image_prompt = None
- match = re.search(r'\[Imagen of (.*?)\]', content)
- if match:
- image_prompt = match.group(1)
- content = content.replace(match.group(0), '')
-
- yield content
+ image_prompt = response_part = None
+ last_content_len = 0
+ async for line in response.content:
+ try:
+ try:
+ line = json.loads(line)
+ except ValueError:
+ continue
+ if not isinstance(line, list):
+ continue
+ if len(line[0]) < 3 or not line[0][2]:
+ continue
+ response_part = json.loads(line[0][2])
+ if not response_part[4]:
+ continue
+ if return_conversation:
+ yield Conversation(response_part[1][0], response_part[1][1], response_part[4][0][0])
+ content = response_part[4][0][1][0]
+ except (ValueError, KeyError, TypeError, IndexError) as e:
+ print(f"{cls.__name__}:{e.__class__.__name__}:{e}")
+ continue
+ match = re.search(r'\[Imagen of (.*?)\]', content)
+ if match:
+ image_prompt = match.group(1)
+ content = content.replace(match.group(0), '')
+ yield content[last_content_len:]
+ last_content_len = len(content)
if image_prompt:
images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
- resolved_images = []
- preview = []
- for image in images:
- async with client.get(image, allow_redirects=False) as fetch:
- image = fetch.headers["location"]
- async with client.get(image, allow_redirects=False) as fetch:
- image = fetch.headers["location"]
- resolved_images.append(image)
- preview.append(image.replace('=s512', '=s200'))
- yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
+ if response_format == "b64_json":
+ yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
+ else:
+ resolved_images = []
+ preview = []
+ for image in images:
+ async with client.get(image, allow_redirects=False) as fetch:
+ image = fetch.headers["location"]
+ async with client.get(image, allow_redirects=False) as fetch:
+ image = fetch.headers["location"]
+ resolved_images.append(image)
+ preview.append(image.replace('=s512', '=s200'))
+ yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
def build_request(
prompt: str,
- conversation_id: str = "",
- response_id: str = "",
- choice_id: str = "",
+ language: str,
+ conversation: Conversation = None,
image_url: str = None,
image_name: str = None,
tools: list[list[str]] = []
@@ -207,8 +235,15 @@ class Gemini(AsyncGeneratorProvider):
image_list = [[[image_url, 1], image_name]] if image_url else []
return [
[prompt, 0, None, image_list, None, None, 0],
- ["en"],
- [conversation_id, response_id, choice_id, None, None, []],
+ [language],
+ [
+ None if conversation is None else conversation.conversation_id,
+ None if conversation is None else conversation.response_id,
+ None if conversation is None else conversation.choice_id,
+ None,
+ None,
+ []
+ ],
None,
None,
None,
@@ -225,7 +260,7 @@ class Gemini(AsyncGeneratorProvider):
headers=UPLOAD_IMAGE_HEADERS,
connector=connector
) as session:
- async with session.options(UPLOAD_IMAGE_URL) as reponse:
+ async with session.options(UPLOAD_IMAGE_URL) as response:
await raise_for_status(response)
headers = {
@@ -254,7 +289,20 @@ class Gemini(AsyncGeneratorProvider):
async def fetch_snlm0e(cls, session: ClientSession, cookies: Cookies):
async with session.get(cls.url, cookies=cookies) as response:
await raise_for_status(response)
- text = await response.text()
- match = re.search(r'SNlM0e\":\"(.*?)\"', text)
+ response_text = await response.text()
+ match = re.search(r'SNlM0e\":\"(.*?)\"', response_text)
if match:
- return match.group(1) \ No newline at end of file
+ cls._snlm0e = match.group(1)
+ sid_match = re.search(r'"FdrFJe":"([\d-]+)"', response_text)
+ if sid_match:
+ cls._sid = sid_match.group(1)
+
+class Conversation(BaseConversation):
+ def __init__(self,
+ conversation_id: str = "",
+ response_id: str = "",
+ choice_id: str = ""
+ ) -> None:
+ self.conversation_id = conversation_id
+ self.response_id = response_id
+ self.choice_id = choice_id \ No newline at end of file
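A possible way to wire up the new `Conversation` class with the `return_conversation` flag for a multi-turn chat (a sketch; it assumes valid Google cookies are already available to the provider and that non-text chunks are simply printed):

```python
import asyncio
from g4f.Provider import Gemini
from g4f.providers.conversation import BaseConversation

async def main():
    conversation = None
    async for chunk in Gemini.create_async_generator(
        model="",
        messages=[{"role": "user", "content": "Remember the number 7."}],
        return_conversation=True,
    ):
        if isinstance(chunk, BaseConversation):
            conversation = chunk  # carries conversation_id, response_id, choice_id
        else:
            print(chunk, end="")
    # Second turn: with a conversation set, only the last message is sent.
    async for chunk in Gemini.create_async_generator(
        model="",
        messages=[{"role": "user", "content": "Which number did I mention?"}],
        conversation=conversation,
    ):
        print(chunk, end="")

asyncio.run(main())
```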
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 056a3702..f40ae961 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -26,7 +26,7 @@ from ...webdriver import get_browser
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
from ...requests import get_args_from_browser, raise_for_status
from ...requests.aiohttp import StreamSession
-from ...image import to_image, to_bytes, ImageResponse, ImageRequest
+from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
from ...errors import MissingAuthError, ResponseError
from ...providers.conversation import BaseConversation
from ..helper import format_cookies
@@ -38,7 +38,7 @@ DEFAULT_HEADERS = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br, zstd",
"accept-language": "en-US,en;q=0.5",
- "referer": "https://chat.openai.com/",
+ "referer": "https://chatgpt.com/",
"sec-ch-ua": "\"Brave\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Windows\"",
@@ -53,15 +53,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"""A class for creating and managing conversations with OpenAI chat service"""
label = "OpenAI ChatGPT"
- url = "https://chat.openai.com"
+ url = "https://chatgpt.com"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
default_model = None
- default_vision_model = "gpt-4-vision"
- models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo"]
+ default_vision_model = "gpt-4o"
+ models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
model_aliases = {
"text-davinci-002-render-sha": "gpt-3.5-turbo",
"": "gpt-3.5-turbo",
@@ -138,23 +138,22 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
An ImageRequest object that contains the download URL, file name, and other data
"""
# Convert the image to a PIL Image object and get the extension
- image = to_image(image)
- extension = image.format.lower()
- # Convert the image to a bytes object and get the size
data_bytes = to_bytes(image)
+ image = to_image(data_bytes)
+ extension = image.format.lower()
data = {
- "file_name": image_name if image_name else f"{image.width}x{image.height}.{extension}",
+ "file_name": "" if image_name is None else image_name,
"file_size": len(data_bytes),
"use_case": "multimodal"
}
# Post the image data to the service and get the image data
async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
- cls._update_request_args()
+ cls._update_request_args(session)
await raise_for_status(response)
image_data = {
**data,
**await response.json(),
- "mime_type": f"image/{extension}",
+ "mime_type": is_accepted_format(data_bytes),
"extension": extension,
"height": image.height,
"width": image.width
@@ -275,7 +274,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
first_part = line["message"]["content"]["parts"][0]
if "asset_pointer" not in first_part or "metadata" not in first_part:
return
- if first_part["metadata"] is None:
+ if first_part["metadata"] is None or first_part["metadata"]["dalle"] is None:
return
prompt = first_part["metadata"]["dalle"]["prompt"]
file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
@@ -330,6 +329,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image: ImageType = None,
image_name: str = None,
return_conversation: bool = False,
+ max_retries: int = 3,
**kwargs
) -> AsyncResult:
"""
@@ -364,84 +364,24 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
if cls._expires is not None and cls._expires < time.time():
cls._headers = cls._api_key = None
- if cls._headers is None or cookies is not None:
- cls._create_request_args(cookies)
- api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
- if api_key is not None:
+ arkose_token = None
+ proofTokens = None
+ try:
+ arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
+ cls._create_request_args(cookies, headers)
cls._set_api_key(api_key)
+ except NoValidHarFileError as e:
+ if cls._api_key is None and cls.needs_auth:
+ raise e
+ cls._create_request_args()
- if cls.default_model is None and (not cls.needs_auth or cls._api_key is not None):
- if cls._api_key is None:
- cls._create_request_args(cookies)
- async with session.get(
- f"{cls.url}/",
- headers=DEFAULT_HEADERS
- ) as response:
- cls._update_request_args(session)
- await raise_for_status(response)
- try:
- if not model:
- cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
- else:
- cls.default_model = cls.get_model(model)
- except MissingAuthError:
- pass
- except Exception as e:
- api_key = cls._api_key = None
- cls._create_request_args()
- if debug.logging:
- print("OpenaiChat: Load default model failed")
- print(f"{e.__class__.__name__}: {e}")
-
- arkose_token = None
if cls.default_model is None:
- error = None
- try:
- arkose_token, api_key, cookies, headers = await getArkoseAndAccessToken(proxy)
- cls._create_request_args(cookies, headers)
- cls._set_api_key(api_key)
- except NoValidHarFileError as e:
- error = e
- if cls._api_key is None:
- await cls.nodriver_access_token()
- if cls._api_key is None and cls.needs_auth:
- raise error
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
- async with session.post(
- f"{cls.url}/backend-anon/sentinel/chat-requirements"
- if cls._api_key is None else
- f"{cls.url}/backend-api/sentinel/chat-requirements",
- json={"conversation_mode_kind": "primary_assistant"},
- headers=cls._headers
- ) as response:
- cls._update_request_args(session)
- await raise_for_status(response)
- data = await response.json()
- blob = data["arkose"]["dx"]
- need_arkose = data["arkose"]["required"]
- chat_token = data["token"]
- proofofwork = ""
- if "proofofwork" in data:
- proofofwork = generate_proof_token(**data["proofofwork"], user_agent=cls._headers["user-agent"])
-
- if need_arkose and arkose_token is None:
- arkose_token, api_key, cookies, headers = await getArkoseAndAccessToken(proxy)
- cls._create_request_args(cookies, headers)
- cls._set_api_key(api_key)
- if arkose_token is None:
- raise MissingAuthError("No arkose token found in .har file")
-
- if debug.logging:
- print(
- 'Arkose:', False if not need_arkose else arkose_token[:12]+"...",
- 'Turnstile:', data["turnstile"]["required"],
- 'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
- )
-
try:
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
+ image_request = None
if debug.logging:
print("OpenaiChat: Upload image failed")
print(f"{e.__class__.__name__}: {e}")
@@ -456,6 +396,43 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
auto_continue = False
conversation.finish_reason = None
while conversation.finish_reason is None:
+ async with session.post(
+ f"{cls.url}/backend-anon/sentinel/chat-requirements"
+ if cls._api_key is None else
+ f"{cls.url}/backend-api/sentinel/chat-requirements",
+ json={"p": generate_proof_token(True, user_agent=cls._headers["user-agent"], proofTokens=proofTokens)},
+ headers=cls._headers
+ ) as response:
+ cls._update_request_args(session)
+ await raise_for_status(response)
+ requirements = await response.json()
+ need_arkose = requirements.get("arkose", {}).get("required")
+ chat_token = requirements["token"]
+
+ if need_arkose and arkose_token is None:
+ arkose_token, api_key, cookies, headers, proofTokens = await getArkoseAndAccessToken(proxy)
+ cls._create_request_args(cookies, headers)
+ cls._set_api_key(api_key)
+ if arkose_token is None:
+ raise MissingAuthError("No arkose token found in .har file")
+
+ if "proofofwork" in requirements:
+ proofofwork = generate_proof_token(
+ **requirements["proofofwork"],
+ user_agent=cls._headers["user-agent"],
+ proofTokens=proofTokens
+ )
+ if debug.logging:
+ print(
+ 'Arkose:', False if not need_arkose else arkose_token[:12]+"...",
+ 'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
+ )
+ ws = None
+ if need_arkose:
+ async with session.post(f"{cls.url}/backend-api/register-websocket", headers=cls._headers) as response:
+ wss_url = (await response.json()).get("wss_url")
+ if wss_url:
+ ws = await session.ws_connect(wss_url)
websocket_request_id = str(uuid.uuid4())
data = {
"action": action,
@@ -481,14 +458,21 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if proofofwork is not None:
headers["Openai-Sentinel-Proof-Token"] = proofofwork
async with session.post(
- f"{cls.url}/backend-anon/conversation" if cls._api_key is None else
+ f"{cls.url}/backend-anon/conversation"
+ if cls._api_key is None else
f"{cls.url}/backend-api/conversation",
json=data,
headers=headers
) as response:
cls._update_request_args(session)
+ if response.status == 403 and max_retries > 0:
+ max_retries -= 1
+ if debug.logging:
+ print(f"Retry: Error {response.status}: {await response.text()}")
+ await asyncio.sleep(5)
+ continue
await raise_for_status(response)
- async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, conversation):
+ async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, conversation, ws):
if return_conversation:
history_disabled = False
return_conversation = False
@@ -518,13 +502,14 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls,
messages: AsyncIterator,
session: StreamSession,
- fields: Conversation
+ fields: Conversation,
+ ws = None
) -> AsyncIterator:
last_message: int = 0
async for message in messages:
if message.startswith(b'{"wss_url":'):
message = json.loads(message)
- ws = await session.ws_connect(message["wss_url"])
+ ws = await session.ws_connect(message["wss_url"]) if ws is None else ws
try:
async for chunk in cls.iter_messages_chunk(
cls.iter_messages_ws(ws, message["conversation_id"], hasattr(ws, "recv")),
@@ -564,12 +549,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError(line["error"])
if "message_type" not in line["message"]["metadata"]:
return
- try:
- image_response = await cls.get_generated_image(session, cls._headers, line)
- if image_response is not None:
- yield image_response
- except Exception as e:
- yield e
+ image_response = await cls.get_generated_image(session, cls._headers, line)
+ if image_response is not None:
+ yield image_response
if line["message"]["author"]["role"] != "assistant":
return
if line["message"]["content"]["content_type"] != "text":
@@ -601,7 +583,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
this._fetch = this.fetch;
this.fetch = async (url, options) => {
const response = await this._fetch(url, options);
- if (url == "https://chat.openai.com/backend-api/conversation") {
+ if (url == "https://chatgpt.com/backend-api/conversation") {
this._headers = options.headers;
return response;
}
@@ -624,7 +606,7 @@ this.fetch = async (url, options) => {
cls._update_cookie_header()
@classmethod
- async def nodriver_access_token(cls):
+ async def nodriver_access_token(cls, proxy: str = None):
try:
import nodriver as uc
except ImportError:
@@ -636,8 +618,11 @@ this.fetch = async (url, options) => {
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
- browser = await uc.start(user_data_dir=user_data_dir)
- page = await browser.get("https://chat.openai.com/")
+ browser = await uc.start(
+ user_data_dir=user_data_dir,
+ browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
+ )
+ page = await browser.get("https://chatgpt.com/")
await page.select("[id^=headlessui-menu-button-]", 240)
api_key = await page.evaluate(
"(async () => {"
@@ -652,7 +637,7 @@ this.fetch = async (url, options) => {
)
cookies = {}
for c in await page.browser.cookies.get_all():
- if c.domain.endswith("chat.openai.com"):
+ if c.domain.endswith("chatgpt.com"):
cookies[c.name] = c.value
user_agent = await page.evaluate("window.navigator.userAgent")
await page.close()
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
new file mode 100644
index 00000000..35d8d9d6
--- /dev/null
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from .Openai import Openai
+from ...typing import AsyncResult, Messages
+
+class PerplexityApi(Openai):
+ label = "Perplexity API"
+ url = "https://www.perplexity.ai"
+ working = True
+ default_model = "llama-3-sonar-large-32k-online"
+ models = [
+ "llama-3-sonar-small-32k-chat",
+ "llama-3-sonar-small-32k-online",
+ "llama-3-sonar-large-32k-chat",
+ "llama-3-sonar-large-32k-online",
+ "llama-3-8b-instruct",
+ "llama-3-70b-instruct",
+ "mixtral-8x7b-instruct"
+ ]
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api.perplexity.ai",
+ **kwargs
+ ) -> AsyncResult:
+ return super().create_async_generator(
+ model, messages, api_base=api_base, **kwargs
+ ) \ No newline at end of file
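A possible way to exercise the new provider through the client; a sketch, assuming a valid Perplexity API key (the key below is a placeholder):

```python
from g4f.client import Client
from g4f.Provider.needs_auth import PerplexityApi

client = Client(provider=PerplexityApi)
response = client.chat.completions.create(
    model="llama-3-sonar-large-32k-online",
    messages=[{"role": "user", "content": "Hello"}],
    api_key="pplx-...",  # placeholder key
)
print(response.choices[0].message.content)
```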
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 805d9fca..b5463b71 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -7,4 +7,5 @@ from .Poe import Poe
from .Openai import Openai
from .Groq import Groq
from .OpenRouter import OpenRouter
-from .OpenaiAccount import OpenaiAccount \ No newline at end of file
+from .OpenaiAccount import OpenaiAccount
+from .PerplexityApi import PerplexityApi \ No newline at end of file
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index 6a34c97a..2287d6a6 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -12,6 +12,8 @@ from copy import deepcopy
from .crypt import decrypt, encrypt
from ...requests import StreamSession
+from ...cookies import get_cookies_dir
+from ... import debug
class NoValidHarFileError(Exception):
...
@@ -26,24 +28,23 @@ class arkReq:
self.userAgent = userAgent
arkPreURL = "https://tcr9i.chat.openai.com/fc/gt2/public_key/35536E1E-65B4-4D96-9D97-6ADB7EFF8147"
-sessionUrl = "https://chat.openai.com/api/auth/session"
+sessionUrl = "https://chatgpt.com/api/auth/session"
chatArk: arkReq = None
accessToken: str = None
cookies: dict = None
headers: dict = None
+proofTokens: list = []
def readHAR():
- dirPath = "./"
+ global proofTokens
harPath = []
chatArks = []
accessToken = None
cookies = {}
- for root, dirs, files in os.walk(dirPath):
+ for root, dirs, files in os.walk(get_cookies_dir()):
for file in files:
if file.endswith(".har"):
harPath.append(os.path.join(root, file))
- if harPath:
- break
if not harPath:
raise NoValidHarFileError("No .har file found")
for path in harPath:
@@ -54,6 +55,15 @@ def readHAR():
# Error: not a HAR file!
continue
for v in harFile['log']['entries']:
+ v_headers = get_headers(v)
+ try:
+ if "openai-sentinel-proof-token" in v_headers:
+ proofTokens.append(json.loads(base64.b64decode(
+ v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
+ ).decode()))
+ except Exception as e:
+ if debug.logging:
+ print(f"Read proof token: {e}")
if arkPreURL in v['request']['url']:
chatArks.append(parseHAREntry(v))
elif v['request']['url'] == sessionUrl:
@@ -61,8 +71,8 @@ def readHAR():
accessToken = json.loads(v["response"]["content"]["text"]).get("accessToken")
except KeyError:
continue
- cookies = {c['name']: c['value'] for c in v['request']['cookies']}
- headers = get_headers(v)
+ cookies = {c['name']: c['value'] for c in v['request']['cookies'] if c['name'] != "oai-did"}
+ headers = v_headers
if not accessToken:
raise NoValidHarFileError("No accessToken found in .har files")
if not chatArks:
@@ -101,7 +111,8 @@ def genArkReq(chatArk: arkReq) -> arkReq:
async def sendRequest(tmpArk: arkReq, proxy: str = None):
async with StreamSession(headers=tmpArk.arkHeader, cookies=tmpArk.arkCookies, proxies={"https": proxy}) as session:
async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
- arkose = (await response.json()).get("token")
+ data = await response.json()
+ arkose = data.get("token")
if "sup=1|rid=" not in arkose:
raise RuntimeError("No valid arkose token generated")
return arkose
@@ -131,10 +142,10 @@ def getN() -> str:
return base64.b64encode(timestamp.encode()).decode()
async def getArkoseAndAccessToken(proxy: str) -> tuple[str, str, dict, dict, list]:
- global chatArk, accessToken, cookies, headers
+ global chatArk, accessToken, cookies, headers, proofTokens
if chatArk is None or accessToken is None:
chatArk, accessToken, cookies, headers = readHAR()
if chatArk is None:
- return None, accessToken, cookies, headers
+ return None, accessToken, cookies, headers, proofTokens
newReq = genArkReq(chatArk)
- return await sendRequest(newReq, proxy), accessToken, cookies, headers
+ return await sendRequest(newReq, proxy), accessToken, cookies, headers, proofTokens
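Since `getArkoseAndAccessToken` now returns five values, existing callers must unpack the extra `proofTokens` list; a minimal sketch:

```python
from g4f.Provider.openai.har_file import getArkoseAndAccessToken

async def fetch_tokens(proxy: str = None):
    # The fifth element is new: proof-of-work configs harvested from .har files.
    arkose, access_token, cookies, headers, proof_tokens = await getArkoseAndAccessToken(proxy)
    return arkose, proof_tokens
```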
diff --git a/g4f/Provider/openai/proofofwork.py b/g4f/Provider/openai/proofofwork.py
index e44ef6f7..baf8a0ea 100644
--- a/g4f/Provider/openai/proofofwork.py
+++ b/g4f/Provider/openai/proofofwork.py
@@ -2,29 +2,32 @@ import random
import hashlib
import json
import base64
-from datetime import datetime, timedelta, timezone
+from datetime import datetime, timezone
-def generate_proof_token(required: bool, seed: str, difficulty: str, user_agent: str):
+
+def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proofTokens: list = None):
if not required:
return
- cores = [8, 12, 16, 24]
- screens = [3000, 4000, 6000]
-
- core = random.choice(cores)
- screen = random.choice(screens)
-
- # Get current UTC time
- now_utc = datetime.now(timezone.utc)
- # Convert UTC time to Eastern Time
- now_et = now_utc.astimezone(timezone(timedelta(hours=-5)))
-
- parse_time = now_et.strftime('%a, %d %b %Y %H:%M:%S GMT')
-
- config = [core + screen, parse_time, 4294705152, 0, user_agent]
-
- diff_len = len(difficulty) // 2
-
+ if proofTokens:
+ config = proofTokens[-1]
+ else:
+ screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])
+ # Get current UTC time
+ now_utc = datetime.now(timezone.utc)
+ parse_time = now_utc.strftime('%a, %d %b %Y %H:%M:%S GMT')
+ config = [
+ screen, parse_time,
+ None, 0, user_agent,
+ "https://tcr9i.chat.openai.com/v2/35536E1E-65B4-4D96-9D97-6ADB7EFF8147/api.js",
+ "dpl=1440a687921de39ff5ee56b92807faaadce73f13","en","en-US",
+ None,
+ "pluginsβˆ’[object PluginArray]",
+ random.choice(["_reactListeningcfilawjnerp", "_reactListening9ne2dfo1i47", "_reactListening410nzwhan2a"]),
+ random.choice(["alert", "ontransitionend", "onprogress"])
+ ]
+
+ diff_len = len(difficulty)
for i in range(100000):
config[3] = i
json_data = json.dumps(config)
@@ -32,8 +35,7 @@ def generate_proof_token(required: bool, seed: str, difficulty: str, user_agent:
hash_value = hashlib.sha3_512((seed + base).encode()).digest()
if hash_value.hex()[:diff_len] <= difficulty:
- result = "gAAAAAB" + base
- return result
+ return "gAAAAAB" + base
fallback_base = base64.b64encode(f'"{seed}"'.encode()).decode()
return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallback_base
diff --git a/g4f/Provider/you/har_file.py b/g4f/Provider/you/har_file.py
index 8089d686..71d741fd 100644
--- a/g4f/Provider/you/har_file.py
+++ b/g4f/Provider/you/har_file.py
@@ -4,11 +4,15 @@ import json
import os
import os.path
import random
+import logging
from ...requests import StreamSession, raise_for_status
+from ...cookies import get_cookies_dir
from ...errors import MissingRequirementsError
from ... import debug
+logging.basicConfig(level=logging.ERROR)
+
class NoValidHarFileError(Exception):
...
@@ -25,10 +29,9 @@ public_token = "public-token-live-507a52ad-7e69-496b-aee0-1c9863c7c819"
chatArks: list = None
def readHAR():
- dirPath = "./"
harPath = []
chatArks = []
- for root, dirs, files in os.walk(dirPath):
+ for root, dirs, files in os.walk(get_cookies_dir()):
for file in files:
if file.endswith(".har"):
harPath.append(os.path.join(root, file))
@@ -78,28 +81,35 @@ async def get_telemetry_ids(proxy: str = None) -> list:
return [await create_telemetry_id(proxy)]
except NoValidHarFileError as e:
if debug.logging:
- print(e)
- if debug.logging:
- print('Getting telemetry_id for you.com with nodriver')
+ logging.error(e)
+
try:
from nodriver import start
except ImportError:
raise MissingRequirementsError('Add .har file from you.com or install "nodriver" package | pip install -U nodriver')
- page = None
+ if debug.logging:
+ logging.error('Getting telemetry_id for you.com with nodriver')
+
+ browser = page = None
try:
- browser = await start()
+ browser = await start(
+ browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
+ )
page = await browser.get("https://you.com")
-
while not await page.evaluate('"GetTelemetryID" in this'):
await page.sleep(1)
-
async def get_telemetry_id():
return await page.evaluate(
f'this.GetTelemetryID("{public_token}", "{telemetry_url}");',
await_promise=True
)
-
return [await get_telemetry_id()]
finally:
- if page is not None:
- await page.close() \ No newline at end of file
+ try:
+ if page is not None:
+ await page.close()
+ if browser is not None:
+ await browser.stop()
+ except Exception as e:
+ if debug.logging:
+ logging.error(e)
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 9772f8d7..acb27e9c 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -47,6 +47,14 @@ class ChatCompletionsForm(BaseModel):
web_search: Optional[bool] = None
proxy: Optional[str] = None
+class ImagesGenerateForm(BaseModel):
+ model: Optional[str] = None
+ provider: Optional[str] = None
+ prompt: str
+ response_format: Optional[str] = None
+ api_key: Optional[str] = None
+ proxy: Optional[str] = None
+
class AppConfig():
list_ignored_providers: Optional[list[str]] = None
g4f_api_key: Optional[str] = None
@@ -120,7 +128,10 @@ class Api:
'created': 0,
'owned_by': model.base_provider
} for model_id, model in model_list.items()]
- return JSONResponse(model_list)
+ return JSONResponse({
+ "object": "list",
+ "data": model_list,
+ })
@self.app.get("/v1/models/{model_name}")
async def model_info(model_name: str):
@@ -146,37 +157,53 @@ class Api:
if auth_header and auth_header != "Bearer":
config.api_key = auth_header
response = self.client.chat.completions.create(
- **{
- **AppConfig.defaults,
- **config.dict(exclude_none=True),
- },
-
+ **{
+ **AppConfig.defaults,
+ **config.dict(exclude_none=True),
+ },
ignored=AppConfig.list_ignored_providers
)
+ if not config.stream:
+ return JSONResponse((await response).to_json())
+
+ async def streaming():
+ try:
+ async for chunk in response:
+ yield f"data: {json.dumps(chunk.to_json())}\n\n"
+ except GeneratorExit:
+ pass
+ except Exception as e:
+ logging.exception(e)
+ yield f'data: {format_exception(e, config)}\n\n'
+ yield "data: [DONE]\n\n"
+ return StreamingResponse(streaming(), media_type="text/event-stream")
+
except Exception as e:
logging.exception(e)
return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
- if not config.stream:
- return JSONResponse((await response).to_json())
-
- async def streaming():
- try:
- async for chunk in response:
- yield f"data: {json.dumps(chunk.to_json())}\n\n"
- except GeneratorExit:
- pass
- except Exception as e:
- logging.exception(e)
- yield f'data: {format_exception(e, config)}\n\n'
- yield "data: [DONE]\n\n"
-
- return StreamingResponse(streaming(), media_type="text/event-stream")
-
@self.app.post("/v1/completions")
async def completions():
return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
+ @self.app.post("/v1/images/generations")
+ async def images_generate(config: ImagesGenerateForm, request: Request = None, provider: str = None):
+ try:
+ config.provider = provider if config.provider is None else config.provider
+ if config.api_key is None and request is not None:
+ auth_header = request.headers.get("Authorization")
+ if auth_header is not None:
+ auth_header = auth_header.split(None, 1)[-1]
+ if auth_header and auth_header != "Bearer":
+ config.api_key = auth_header
+ response = self.client.images.generate(
+ **config.dict(exclude_none=True),
+ )
+ return JSONResponse((await response).to_json())
+ except Exception as e:
+ logging.exception(e)
+ return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
+
def format_exception(e: Exception, config: ChatCompletionsForm) -> str:
last_provider = g4f.get_last_provider(True)
return json.dumps({
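The new endpoint mirrors the OpenAI images API shape. A hedged call against a locally running g4f API server; host, port, provider, and key are assumptions:

```python
import requests

resp = requests.post(
    "http://localhost:1337/v1/images/generations",  # default API port; adjust as needed
    json={"prompt": "a red bicycle", "provider": "BingCreateImages"},
    headers={"Authorization": "Bearer <your-api-key-or-cookie>"},
)
print(resp.json()["data"][0]["url"])
```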
diff --git a/g4f/client/async_client.py b/g4f/client/async_client.py
index 8e1ee33c..2fe4640b 100644
--- a/g4f/client/async_client.py
+++ b/g4f/client/async_client.py
@@ -3,6 +3,9 @@ from __future__ import annotations
import time
import random
import string
+import asyncio
+import base64
+from aiohttp import ClientSession, BaseConnector
from .types import Client as BaseClient
from .types import ProviderType, FinishReason
@@ -11,10 +14,12 @@ from .types import AsyncIterResponse, ImageProvider
from .image_models import ImageModels
from .helper import filter_json, find_stop, filter_none, cast_iter_async
from .service import get_last_provider, get_model_and_provider
-from ..typing import Union, Iterator, Messages, AsyncIterator, ImageType
-from ..errors import NoImageResponseError
-from ..image import ImageResponse as ImageProviderResponse
-from ..providers.base_provider import AsyncGeneratorProvider
+from ..Provider import ProviderUtils
+from ..typing import Union, Messages, AsyncIterator, ImageType
+from ..errors import NoImageResponseError, ProviderNotFoundError
+from ..requests.aiohttp import get_connector
+from ..providers.conversation import BaseConversation
+from ..image import ImageResponse as ImageProviderResponse, ImageDataResponse
try:
anext
@@ -38,6 +43,9 @@ async def iter_response(
if isinstance(chunk, FinishReason):
finish_reason = chunk.reason
break
+ elif isinstance(chunk, BaseConversation):
+ yield chunk
+ continue
content += str(chunk)
count += 1
if max_tokens is not None and count >= max_tokens:
@@ -88,7 +96,7 @@ def create_response(
api_key: str = None,
**kwargs
):
- has_asnyc = isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider)
+    has_async = hasattr(provider, "create_async_generator")
if has_async:
create = provider.create_async_generator
else:
@@ -157,20 +165,37 @@ class Chat():
def __init__(self, client: AsyncClient, provider: ProviderType = None):
self.completions = Completions(client, provider)
-async def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
+async def iter_image_response(
+ response: AsyncIterator,
+ response_format: str = None,
+ connector: BaseConnector = None,
+ proxy: str = None
+) -> Union[ImagesResponse, None]:
async for chunk in response:
if isinstance(chunk, ImageProviderResponse):
- return ImagesResponse([Image(image) for image in chunk.get_list()])
+ if response_format == "b64_json":
+ async with ClientSession(
+ connector=get_connector(connector, proxy),
+ cookies=chunk.options.get("cookies")
+ ) as session:
+ async def fetch_image(image):
+ async with session.get(image) as response:
+ return base64.b64encode(await response.content.read()).decode()
+ images = await asyncio.gather(*[fetch_image(image) for image in chunk.get_list()])
+ return ImagesResponse([Image(None, image, chunk.alt) for image in images], int(time.time()))
+ return ImagesResponse([Image(image, None, chunk.alt) for image in chunk.get_list()], int(time.time()))
+ elif isinstance(chunk, ImageDataResponse):
+ return ImagesResponse([Image(None, image, chunk.alt) for image in chunk.get_list()], int(time.time()))
-def create_image(client: AsyncClient, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
- prompt = f"create a image with: {prompt}"
- if provider.__name__ == "You":
+def create_image(provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator:
+ if isinstance(provider, type) and provider.__name__ == "You":
kwargs["chat_mode"] = "create"
+ else:
+ prompt = f"create a image with: {prompt}"
return provider.create_async_generator(
model,
[{"role": "user", "content": prompt}],
stream=True,
- proxy=client.get_proxy(),
**kwargs
)
@@ -180,31 +205,71 @@ class Images():
self.provider: ImageProvider = provider
self.models: ImageModels = ImageModels(client)
- async def generate(self, prompt, model: str = "", **kwargs) -> ImagesResponse:
- provider = self.models.get(model, self.provider)
- if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
- response = create_image(self.client, provider, prompt, **kwargs)
+ def get_provider(self, model: str, provider: ProviderType = None):
+ if isinstance(provider, str):
+ if provider in ProviderUtils.convert:
+ provider = ProviderUtils.convert[provider]
+ else:
+ raise ProviderNotFoundError(f'Provider not found: {provider}')
+ else:
+ provider = self.models.get(model, self.provider)
+ return provider
+
+ async def generate(
+ self,
+ prompt,
+ model: str = "",
+ provider: ProviderType = None,
+ response_format: str = None,
+ connector: BaseConnector = None,
+ proxy: str = None,
+ **kwargs
+ ) -> ImagesResponse:
+ provider = self.get_provider(model, provider)
+ if hasattr(provider, "create_async_generator"):
+ response = create_image(
+ provider,
+ prompt,
+ **filter_none(
+ response_format=response_format,
+ connector=connector,
+ proxy=self.client.get_proxy() if proxy is None else proxy,
+ ),
+ **kwargs
+ )
else:
response = await provider.create_async(prompt)
return ImagesResponse([Image(image) for image in response.get_list()])
- image = await iter_image_response(response)
+ image = await iter_image_response(response, response_format, connector, proxy)
if image is None:
raise NoImageResponseError()
return image
- async def create_variation(self, image: ImageType, model: str = None, **kwargs):
- provider = self.models.get(model, self.provider)
+ async def create_variation(
+ self,
+ image: ImageType,
+        model: str = None,
+        provider: ProviderType = None,
+ response_format: str = None,
+ connector: BaseConnector = None,
+ proxy: str = None,
+ **kwargs
+ ):
+ provider = self.get_provider(model, provider)
result = None
- if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+ if hasattr(provider, "create_async_generator"):
response = provider.create_async_generator(
"",
[{"role": "user", "content": "create a image like this"}],
- True,
+ stream=True,
image=image,
- proxy=self.client.get_proxy(),
+ **filter_none(
+ response_format=response_format,
+ connector=connector,
+ proxy=self.client.get_proxy() if proxy is None else proxy,
+ ),
**kwargs
)
- result = iter_image_response(response)
+        result = await iter_image_response(response, response_format, connector, proxy)
if result is None:
raise NoImageResponseError()
return result
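With `response_format="b64_json"` the async client now downloads each image URL and inlines it as base64, and providers can be selected by name. A sketch (provider choice and prompt are illustrative):

```python
import asyncio

from g4f.client.async_client import AsyncClient

async def main():
    client = AsyncClient()
    result = await client.images.generate(
        "a watercolor fox",
        provider="You",              # resolved via ProviderUtils.convert
        response_format="b64_json",  # fetch and base64-encode the image
    )
    print(result.data[0].b64_json[:32])

asyncio.run(main())
```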
diff --git a/g4f/client/client.py b/g4f/client/client.py
index 1b090981..63bae4fe 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -6,6 +6,7 @@ import string
from ..typing import Union, Iterator, Messages, ImageType
from ..providers.types import BaseProvider, ProviderType, FinishReason
+from ..providers.conversation import BaseConversation
from ..image import ImageResponse as ImageProviderResponse
from ..errors import NoImageResponseError
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
@@ -29,6 +30,9 @@ def iter_response(
if isinstance(chunk, FinishReason):
finish_reason = chunk.reason
break
+ elif isinstance(chunk, BaseConversation):
+ yield chunk
+ continue
content += str(chunk)
if max_tokens is not None and idx + 1 >= max_tokens:
finish_reason = "length"
@@ -125,9 +129,12 @@ def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
return ImagesResponse([Image(image) for image in chunk.get_list()])
def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator:
- prompt = f"create a image with: {prompt}"
- if provider.__name__ == "You":
+
+
+ if isinstance(provider, type) and provider.__name__ == "You":
kwargs["chat_mode"] = "create"
+ else:
+ prompt = f"create a image with: {prompt}"
return provider.create_completion(
model,
[{"role": "user", "content": prompt}],
diff --git a/g4f/client/service.py b/g4f/client/service.py
index dd6bf4b6..5fdb150c 100644
--- a/g4f/client/service.py
+++ b/g4f/client/service.py
@@ -4,7 +4,7 @@ from typing import Union
from .. import debug, version
from ..errors import ProviderNotFoundError, ModelNotFoundError, ProviderNotWorkingError, StreamNotSupportedError
-from ..models import Model, ModelUtils
+from ..models import Model, ModelUtils, default
from ..Provider import ProviderUtils
from ..providers.types import BaseRetryProvider, ProviderType
from ..providers.retry_provider import IterProvider
@@ -60,7 +60,9 @@ def get_model_and_provider(model : Union[Model, str],
model = ModelUtils.convert[model]
if not provider:
- if isinstance(model, str):
+ if not model:
+ model = default
+ elif isinstance(model, str):
raise ModelNotFoundError(f'Model not found: {model}')
provider = model.best_provider
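The fallback means an empty model with no provider now resolves to the `default` model instead of raising `ModelNotFoundError`; for example:

```python
from g4f.client import Client

client = Client()
# No model and no provider: get_model_and_provider falls back to models.default.
response = client.chat.completions.create(
    model="",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```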
diff --git a/g4f/client/stubs.py b/g4f/client/stubs.py
index 2441e287..8cf2bcba 100644
--- a/g4f/client/stubs.py
+++ b/g4f/client/stubs.py
@@ -78,12 +78,14 @@ class ChatCompletionDelta(Model):
def __init__(self, content: Union[str, None]):
if content is not None:
self.content = content
+ self.role = "assistant"
def to_json(self):
return self.__dict__
class ChatCompletionDeltaChoice(Model):
def __init__(self, delta: ChatCompletionDelta, finish_reason: Union[str, None]):
+ self.index = 0
self.delta = delta
self.finish_reason = finish_reason
@@ -94,13 +96,24 @@ class ChatCompletionDeltaChoice(Model):
}
class Image(Model):
- url: str
+ def __init__(self, url: str = None, b64_json: str = None, revised_prompt: str = None) -> None:
+ if url is not None:
+ self.url = url
+ if b64_json is not None:
+ self.b64_json = b64_json
+ if revised_prompt is not None:
+ self.revised_prompt = revised_prompt
- def __init__(self, url: str) -> None:
- self.url = url
+ def to_json(self):
+ return self.__dict__
class ImagesResponse(Model):
- data: list[Image]
-
- def __init__(self, data: list) -> None:
+ def __init__(self, data: list[Image], created: int = 0) -> None:
self.data = data
+ self.created = created
+
+ def to_json(self):
+ return {
+ **self.__dict__,
+ "data": [image.to_json() for image in self.data]
+ } \ No newline at end of file
diff --git a/g4f/cookies.py b/g4f/cookies.py
index 9dfe0ca5..0a25c41e 100644
--- a/g4f/cookies.py
+++ b/g4f/cookies.py
@@ -23,8 +23,9 @@ from .typing import Dict, Cookies
from .errors import MissingRequirementsError
from . import debug
-# Global variable to store cookies
-_cookies: Dict[str, Cookies] = {}
+class CookiesConfig():
+ cookies: Dict[str, Cookies] = {}
+ cookies_dir: str = "./har_and_cookies"
DOMAINS = [
".bing.com",
@@ -48,20 +49,18 @@ def get_cookies(domain_name: str = '', raise_requirements_error: bool = True, si
Returns:
Dict[str, str]: A dictionary of cookie names and values.
"""
- global _cookies
- if domain_name in _cookies:
- return _cookies[domain_name]
+ if domain_name in CookiesConfig.cookies:
+ return CookiesConfig.cookies[domain_name]
cookies = load_cookies_from_browsers(domain_name, raise_requirements_error, single_browser)
- _cookies[domain_name] = cookies
+ CookiesConfig.cookies[domain_name] = cookies
return cookies
def set_cookies(domain_name: str, cookies: Cookies = None) -> None:
- global _cookies
if cookies:
- _cookies[domain_name] = cookies
- elif domain_name in _cookies:
- _cookies.pop(domain_name)
+ CookiesConfig.cookies[domain_name] = cookies
+ elif domain_name in CookiesConfig.cookies:
+ CookiesConfig.cookies.pop(domain_name)
def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool = True, single_browser: bool = False) -> Cookies:
"""
@@ -96,7 +95,13 @@ def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool
print(f"Error reading cookies from {cookie_fn.__name__} for {domain_name}: {e}")
return cookies
-def read_cookie_files(dirPath: str = "./har_and_cookies"):
+def set_cookies_dir(dir: str) -> None:
+ CookiesConfig.cookies_dir = dir
+
+def get_cookies_dir() -> str:
+ return CookiesConfig.cookies_dir
+
+def read_cookie_files(dirPath: str = None):
def get_domain(v: dict) -> str:
host = [h["value"] for h in v['request']['headers'] if h["name"].lower() in ("host", ":authority")]
if not host:
@@ -106,16 +111,16 @@ def read_cookie_files(dirPath: str = "./har_and_cookies"):
if d in host:
return d
- global _cookies
harFiles = []
cookieFiles = []
- for root, dirs, files in os.walk(dirPath):
+ for root, dirs, files in os.walk(CookiesConfig.cookies_dir if dirPath is None else dirPath):
for file in files:
if file.endswith(".har"):
harFiles.append(os.path.join(root, file))
elif file.endswith(".json"):
cookieFiles.append(os.path.join(root, file))
- _cookies = {}
+
+ CookiesConfig.cookies = {}
for path in harFiles:
with open(path, 'rb') as file:
try:
@@ -134,7 +139,7 @@ def read_cookie_files(dirPath: str = "./har_and_cookies"):
for c in v['request']['cookies']:
v_cookies[c['name']] = c['value']
if len(v_cookies) > 0:
- _cookies[domain] = v_cookies
+ CookiesConfig.cookies[domain] = v_cookies
new_cookies[domain] = len(v_cookies)
if debug.logging:
for domain, new_values in new_cookies.items():
@@ -159,7 +164,7 @@ def read_cookie_files(dirPath: str = "./har_and_cookies"):
for domain, new_values in new_cookies.items():
if debug.logging:
print(f"Cookies added: {len(new_values)} from {domain}")
- _cookies[domain] = new_values
+ CookiesConfig.cookies[domain] = new_values
def _g4f(domain_name: str) -> list:
"""
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 66bcaaab..a2f883d9 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -19,8 +19,7 @@
<script src="/static/js/highlightjs-copy.min.js"></script>
<script src="/static/js/chat.v1.js" defer></script>
<script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
- <link rel="stylesheet"
- href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">
+ <link rel="stylesheet" href="/static/css/dracula.min.css">
<script>
MathJax = {
chtml: {
@@ -33,10 +32,10 @@
<script type="module" src="https://cdn.jsdelivr.net/npm/mistral-tokenizer-js" async>
import mistralTokenizer from "mistral-tokenizer-js"
</script>
- <script type="module" src="https://belladoreai.github.io/llama-tokenizer-js/llama-tokenizer.js" async>
+ <script type="module" src="https://cdn.jsdelivr.net/gh/belladoreai/llama-tokenizer-js@master/llama-tokenizer.js" async>
import llamaTokenizer from "llama-tokenizer-js"
</script>
- <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
+ <script src="https://cdn.jsdelivr.net/npm/gpt-tokenizer/dist/cl100k_base.js" async></script>
<script src="/static/js/text_to_speech/index.js" async></script>
<!--
<script src="/static/js/whisper-web/index.js" async></script>
@@ -94,22 +93,27 @@
<div class="paper">
<h3>Settings</h3>
<div class="field">
- <span class="label">Web Access</span>
+ <span class="label">Enable Dark Mode</span>
+ <input type="checkbox" id="darkMode" checked />
+ <label for="darkMode" class="toogle" title=""></label>
+ </div>
+ <div class="field">
+ <span class="label">Web Access with DuckDuckGo</span>
<input type="checkbox" id="switch" />
<label for="switch" class="toogle" title="Add the pages of the first 5 search results to the query."></label>
</div>
<div class="field">
- <span class="label">Disable History</span>
+ <span class="label">Disable Conversation History</span>
<input type="checkbox" id="history" />
<label for="history" class="toogle" title="To improve the reaction time or if you have trouble with large conversations."></label>
</div>
<div class="field">
- <span class="label">Hide System prompt</span>
+ <span class="label">Hide System-prompt</span>
<input type="checkbox" id="hide-systemPrompt" />
<label for="hide-systemPrompt" class="toogle" title="For more space on phones"></label>
</div>
<div class="field">
- <span class="label">Auto continue</span>
+ <span class="label">Auto continue in ChatGPT</span>
<input id="auto_continue" type="checkbox" name="auto_continue" checked/>
<label for="auto_continue" class="toogle" title="Continue large responses in OpenaiChat"></label>
</div>
@@ -122,8 +126,8 @@
<input type="text" id="recognition-language" value="" placeholder="navigator.language"/>
</div>
<div class="field box">
- <label for="Bing-api_key" class="label" title="">Bing:</label>
- <textarea id="Bing-api_key" name="Bing[api_key]" class="BingCreateImages-api_key" placeholder="&quot;_U&quot; cookie"></textarea>
+ <label for="BingCreateImages-api_key" class="label" title="">Microsoft Designer in Bing:</label>
+ <textarea id="BingCreateImages-api_key" name="BingCreateImages[api_key]" placeholder="&quot;_U&quot; cookie"></textarea>
</div>
<div class="field box">
<label for="DeepInfra-api_key" class="label" title="">DeepInfra:</label>
@@ -146,14 +150,14 @@
<textarea id="Openai-api_key" name="Openai[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
- <label for="OpenaiAccount-api_key" class="label" title="">OpenAI ChatGPT:</label>
- <textarea id="OpenaiAccount-api_key" name="OpenaiAccount[api_key]" class="OpenaiChat-api_key" placeholder="access_key"></textarea>
- </div>
- <div class="field box">
<label for="OpenRouter-api_key" class="label" title="">OpenRouter:</label>
<textarea id="OpenRouter-api_key" name="OpenRouter[api_key]" placeholder="api_key"></textarea>
</div>
<div class="field box">
+ <label for="PerplexityApi-api_key" class="label" title="">Perplexity API:</label>
+ <textarea id="PerplexityApi-api_key" name="PerplexityApi[api_key]" placeholder="api_key"></textarea>
+ </div>
+ <div class="field box">
<label for="Replicate-api_key" class="label" title="">Replicate:</label>
<textarea id="Replicate-api_key" name="Replicate[api_key]" class="ReplicateImage-api_key" placeholder="api_key"></textarea>
</div>
@@ -170,11 +174,17 @@
</div>
</div>
<div class="conversation">
- <textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
+ <textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
<div id="messages" class="box"></div>
+ <button class="slide-systemPrompt">
+ <i class="fa-solid fa-angles-up"></i>
+ </button>
<div class="toolbar">
<div id="input-count" class="">
- &nbsp;
+ <button class="hide-input">
+ <i class="fa-solid fa-angles-down"></i>
+ </button>
+ <span class="text"></span>
</div>
<div class="stop_generating stop_generating-hidden">
<button id="cancelButton">
@@ -244,8 +254,5 @@
<div class="mobile-sidebar">
<i class="fa-solid fa-bars"></i>
</div>
- <script>
- </script>
</body>
-
</html>
diff --git a/g4f/gui/client/static/css/dracula.min.css b/g4f/gui/client/static/css/dracula.min.css
new file mode 100644
index 00000000..729bbbfb
--- /dev/null
+++ b/g4f/gui/client/static/css/dracula.min.css
@@ -0,0 +1,7 @@
+/*!
+ Theme: Dracula
+ Author: Mike Barkmin (http://github.com/mikebarkmin) based on Dracula Theme (http://github.com/dracula)
+ License: ~ MIT (or more permissive) [via base16-schemes-source]
+ Maintainer: @highlightjs/core-team
+ Version: 2021.09.0
+*/pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}.hljs{color:#e9e9f4;background:#282936}.hljs ::selection,.hljs::selection{background-color:#4d4f68;color:#e9e9f4}.hljs-comment{color:#626483}.hljs-tag{color:#62d6e8}.hljs-operator,.hljs-punctuation,.hljs-subst{color:#e9e9f4}.hljs-operator{opacity:.7}.hljs-bullet,.hljs-deletion,.hljs-name,.hljs-selector-tag,.hljs-template-variable,.hljs-variable{color:#ea51b2}.hljs-attr,.hljs-link,.hljs-literal,.hljs-number,.hljs-symbol,.hljs-variable.constant_{color:#b45bcf}.hljs-class .hljs-title,.hljs-title,.hljs-title.class_{color:#00f769}.hljs-strong{font-weight:700;color:#00f769}.hljs-addition,.hljs-code,.hljs-string,.hljs-title.class_.inherited__{color:#ebff87}.hljs-built_in,.hljs-doctag,.hljs-keyword.hljs-atrule,.hljs-quote,.hljs-regexp{color:#a1efe4}.hljs-attribute,.hljs-function .hljs-title,.hljs-section,.hljs-title.function_,.ruby .hljs-property{color:#62d6e8}.diff .hljs-meta,.hljs-keyword,.hljs-template-tag,.hljs-type{color:#b45bcf}.hljs-emphasis{color:#b45bcf;font-style:italic}.hljs-meta,.hljs-meta .hljs-keyword,.hljs-meta .hljs-string{color:#00f769}.hljs-meta .hljs-keyword,.hljs-meta-keyword{font-weight:700} \ No newline at end of file
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 979f9f96..f3a4708d 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -55,11 +55,14 @@
--colour-6: #242424;
--accent: #8b3dff;
+ --gradient: var(--accent);
--blur-bg: #16101b66;
--blur-border: #84719040;
--user-input: #ac87bb;
--conversations: #c7a2ff;
--conversations-hover: #c7a2ff4d;
+ --scrollbar: var(--colour-3);
+ --scrollbar-thumb: var(--blur-bg);
}
:root {
@@ -168,7 +171,7 @@ body {
z-index: -1;
border-radius: calc(0.5 * var(--size));
background-color: var(--accent);
- background: radial-gradient(circle at center, var(--accent), var(--accent));
+ background: radial-gradient(circle at center, var(--gradient), var(--gradient));
width: 70vw;
height: 70vw;
top: 50%;
@@ -262,6 +265,14 @@ body {
padding-bottom: 0;
}
+.message.print {
+ height: 100%;
+ position: absolute;
+ background-color: #fff;
+ z-index: 100;
+ top: 0;
+}
+
.message.regenerate {
opacity: 0.75;
}
@@ -336,14 +347,14 @@ body {
flex-wrap: wrap;
}
-.message .content,
-.message .content a:link,
-.message .content a:visited{
+.message .content_inner,
+.message .content_inner a:link,
+.message .content_inner a:visited{
font-size: 15px;
line-height: 1.3;
color: var(--colour-3);
}
-.message .content pre{
+.message .content_inner pre{
white-space: pre-wrap;
}
@@ -380,24 +391,25 @@ body {
cursor: pointer;
}
-.message .count .fa-clipboard,
-.message .count .fa-volume-high {
- z-index: 1000;
- cursor: pointer;
-}
-
.message .user .fa-xmark {
color: var(--colour-1);
}
-.message .count .fa-clipboard {
- color: var(--colour-3);
+.message .count .fa-clipboard,
+.message .count .fa-volume-high,
+.message .count .fa-rotate,
+.message .count .fa-print {
+ z-index: 1000;
+ cursor: pointer;
}
-.message .count .fa-clipboard.clicked {
- color: var(--accent);
+.message .count .fa-clipboard,
+.message .count .fa-whatsapp {
+ color: var(--colour-3);
}
+.message .count .fa-clipboard.clicked,
+.message .count .fa-print.clicked,
.message .count .fa-volume-high.active {
color: var(--accent);
}
@@ -456,7 +468,11 @@ body {
#input-count {
width: fit-content;
font-size: 12px;
- padding: 6px var(--inner-gap);
+ padding: 6px 6px;
+}
+
+#input-count .text {
+ padding: 0 4px;
}
.stop_generating, .toolbar .regenerate {
@@ -490,6 +506,13 @@ body {
animation: show_popup 0.4s;
}
+.toolbar .hide-input {
+ background: transparent;
+ border: none;
+ color: var(--colour-3);
+ cursor: pointer;
+}
+
@keyframes show_popup {
from {
opacity: 0;
@@ -675,8 +698,15 @@ select {
resize: vertical;
}
-#systemPrompt {
- padding-left: 35px;
+.slide-systemPrompt {
+ position: absolute;
+ top: 0;
+ padding: var(--inner-gap) 10px;
+ border: none;
+ background: transparent;
+ cursor: pointer;
+ height: 49px;
+ color: var(--colour-3);
}
@media only screen and (min-width: 40em) {
@@ -769,7 +799,7 @@ select {
}
50% {
- background: white;
+ background: var(--colour-3);
}
100% {
@@ -976,120 +1006,23 @@ a:-webkit-any-link {
width: 1px;
}
-.color-picker>fieldset {
- border: 0;
- display: flex;
- width: fit-content;
- background: var(--colour-1);
- margin-inline: auto;
- border-radius: 8px;
- -webkit-backdrop-filter: blur(20px);
- backdrop-filter: blur(20px);
- cursor: pointer;
- background-color: var(--blur-bg);
- border: 1px solid var(--blur-border);
- color: var(--colour-3);
- display: block;
- position: relative;
- overflow: hidden;
- outline: none;
- padding: 6px 16px;
-}
-
-.color-picker input[type="radio"]:checked {
- background-color: var(--radio-color);
-}
-
-.color-picker input[type="radio"]#light {
- --radio-color: gray;
-}
-
-.color-picker input[type="radio"]#pink {
- --radio-color: white;
-}
-
-.color-picker input[type="radio"]#blue {
- --radio-color: blue;
-}
-
-.color-picker input[type="radio"]#green {
- --radio-color: green;
-}
-
-.color-picker input[type="radio"]#dark {
- --radio-color: #232323;
-}
-
-.pink {
- --colour-1: #ffffff;
- --colour-2: #000000;
- --colour-3: #000000;
- --colour-4: #000000;
- --colour-5: #000000;
- --colour-6: #000000;
-
- --accent: #ffffff;
- --blur-bg: #98989866;
- --blur-border: #00000040;
- --user-input: #000000;
- --conversations: #000000;
-}
-
-.blue {
- --colour-1: hsl(209 50% 90%);
- --clr-card-bg: hsl(209 50% 100%);
- --colour-3: hsl(209 50% 15%);
- --conversations: hsl(209 50% 25%);
-}
-
-.green {
- --colour-1: hsl(109 50% 90%);
- --clr-card-bg: hsl(109 50% 100%);
- --colour-3: hsl(109 50% 15%);
- --conversations: hsl(109 50% 25%);
-}
-
-.dark {
- --colour-1: hsl(209 50% 10%);
- --clr-card-bg: hsl(209 50% 5%);
- --colour-3: hsl(209 50% 90%);
- --conversations: hsl(209 50% 80%);
-}
-
-:root:has(#pink:checked) {
+.white {
+ --blur-bg: transparent;
+ --accent: #007bff;
+ --gradient: #ccc;
+ --conversations: #0062cc;
--colour-1: #ffffff;
- --colour-2: #000000;
- --colour-3: #000000;
- --colour-4: #000000;
- --colour-5: #000000;
- --colour-6: #000000;
-
- --accent: #ffffff;
- --blur-bg: #98989866;
- --blur-border: #00000040;
- --user-input: #000000;
- --conversations: #000000;
+ --colour-3: #212529;
+ --scrollbar: var(--colour-1);
+ --scrollbar-thumb: var(--gradient);
}
-:root:has(#blue:checked) {
- --colour-1: hsl(209 50% 90%);
- --clr-card-bg: hsl(209 50% 100%);
- --colour-3: hsl(209 50% 15%);
- --conversations: hsl(209 50% 25%);
-}
-
-:root:has(#green:checked) {
- --colour-1: hsl(109 50% 90%);
- --clr-card-bg: hsl(109 50% 100%);
- --colour-3: hsl(109 50% 15%);
- --conversations: hsl(109 50% 25%);
+.white .message .assistant .fa-xmark {
+ color: var(--colour-1);
}
-:root:has(#dark:checked) {
- --colour-1: hsl(209 50% 10%);
- --clr-card-bg: hsl(209 50% 5%);
- --colour-3: hsl(209 50% 90%);
- --conversations: hsl(209 50% 80%);
+.white .message .user .fa-xmark {
+ color: var(--colour-3);
}
#send-button {
@@ -1159,15 +1092,12 @@ a:-webkit-any-link {
white-space:nowrap;
}
-::-webkit-scrollbar {
- width: 10px;
-}
::-webkit-scrollbar-track {
- background: var(--colour-3);
+ background: var(--scrollbar);
}
::-webkit-scrollbar-thumb {
- background: var(--blur-bg);
- border-radius: 5px;
+ background: var(--scrollbar-thumb);
+ border-radius: 2px;
}
::-webkit-scrollbar-thumb:hover {
background: var(--accent);
@@ -1187,10 +1117,6 @@ a:-webkit-any-link {
max-height: 200px;
}
-#message-input::-webkit-scrollbar {
- width: 5px;
-}
-
.hidden {
display: none;
}
@@ -1203,4 +1129,21 @@ a:-webkit-any-link {
50% {
opacity: 0;
}
+}
+
+@media print {
+ #systemPrompt:placeholder-shown,
+ .conversations,
+ .conversation .user-input,
+ .conversation .buttons,
+ .conversation .toolbar,
+ .conversation .slide-systemPrompt,
+ .message .count i,
+ .message .assistant,
+ .message .user {
+ display: none;
+ }
+ .message.regenerate {
+ opacity: 1;
+ }
} \ No newline at end of file
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 23605ed4..9790b261 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -11,7 +11,7 @@ const imageInput = document.getElementById("image");
const cameraInput = document.getElementById("camera");
const fileInput = document.getElementById("file");
const microLabel = document.querySelector(".micro-label");
-const inputCount = document.getElementById("input-count");
+const inputCount = document.getElementById("input-count").querySelector(".text");
const providerSelect = document.getElementById("provider");
const modelSelect = document.getElementById("model");
const modelProvider = document.getElementById("model2");
@@ -41,9 +41,7 @@ appStorage = window.localStorage || {
length: 0
}
-const markdown = window.markdownit({
- html: true,
-});
+const markdown = window.markdownit();
const markdown_render = (content) => {
return markdown.render(content
.replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
@@ -109,8 +107,9 @@ const register_message_buttons = async () => {
let playlist = [];
function play_next() {
const next = playlist.shift();
- if (next)
+ if (next && el.dataset.do_play) {
next.play();
+ }
}
if (el.dataset.stopped) {
el.classList.remove("blink")
@@ -179,6 +178,40 @@ const register_message_buttons = async () => {
});
}
});
+ document.querySelectorAll(".message .fa-rotate").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ el.classList.add("clicked");
+ setTimeout(() => el.classList.remove("clicked"), 1000);
+ prompt_lock = true;
+ await hide_message(window.conversation_id, message_el.dataset.index);
+ window.token = message_id();
+ await ask_gpt(message_el.dataset.index);
+ })
+ }
+ });
+ document.querySelectorAll(".message .fa-whatsapp").forEach(async (el) => {
+ if (!el.parentElement.href) {
+ const text = el.parentElement.parentElement.parentElement.innerText;
+ el.parentElement.href = `https://wa.me/?text=${encodeURIComponent(text)}`;
+ }
+ });
+ document.querySelectorAll(".message .fa-print").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ el.classList.add("clicked");
+ message_box.scrollTop = 0;
+ message_el.classList.add("print");
+ setTimeout(() => el.classList.remove("clicked"), 1000);
+ setTimeout(() => message_el.classList.remove("print"), 1000);
+ window.print()
+ })
+ }
+ });
}
const delete_conversations = async () => {
@@ -240,6 +273,8 @@ const handle_ask = async () => {
${count_words_and_tokens(message, get_selected_model())}
<i class="fa-solid fa-volume-high"></i>
<i class="fa-regular fa-clipboard"></i>
+ <a><i class="fa-brands fa-whatsapp"></i></a>
+ <i class="fa-solid fa-print"></i>
</div>
</div>
</div>
@@ -257,9 +292,9 @@ const remove_cancel_button = async () => {
}, 300);
};
-const prepare_messages = (messages, filter_last_message=true) => {
+const prepare_messages = (messages, message_index = -1) => {
// Remove non-user messages at the end
- if (filter_last_message) {
+ if (message_index == -1) {
let last_message;
while (last_message = messages.pop()) {
if (last_message["role"] == "user") {
@@ -267,14 +302,16 @@ const prepare_messages = (messages, filter_last_message=true) => {
break;
}
}
+ } else if (message_index >= 0) {
+ messages = messages.filter((_, index) => message_index >= index);
}
// Remove history, if it's selected
if (document.getElementById('history')?.checked) {
- if (filter_last_message) {
- messages = [messages.pop()];
- } else {
+ if (message_index == null) {
messages = [messages.pop(), messages.pop()];
+ } else {
+ messages = [messages.pop()];
}
}
@@ -287,7 +324,7 @@ const prepare_messages = (messages, filter_last_message=true) => {
}
messages.forEach((new_message) => {
// Include only non-regenerated messages
- if (!new_message.regenerate) {
+ if (new_message && !new_message.regenerate) {
// Remove generated images from history
new_message.content = filter_message(new_message.content);
delete new_message.provider;
@@ -361,11 +398,11 @@ imageInput?.addEventListener("click", (e) => {
}
});
-const ask_gpt = async () => {
+const ask_gpt = async (message_index = -1) => {
regenerate.classList.add(`regenerate-hidden`);
messages = await get_messages(window.conversation_id);
total_messages = messages.length;
- messages = prepare_messages(messages);
+ messages = prepare_messages(messages, message_index);
stop_generating.classList.remove(`stop_generating-hidden`);
@@ -528,6 +565,7 @@ const hide_option = async (conversation_id) => {
const span_el = document.createElement("span");
span_el.innerText = input_el.value;
span_el.classList.add("convo-title");
+ span_el.onclick = () => set_conversation(conversation_id);
left_el.removeChild(input_el);
left_el.appendChild(span_el);
}
@@ -609,6 +647,8 @@ const load_conversation = async (conversation_id, scroll=true) => {
${count_words_and_tokens(item.content, next_provider?.model)}
<i class="fa-solid fa-volume-high"></i>
<i class="fa-regular fa-clipboard"></i>
+ <a><i class="fa-brands fa-whatsapp"></i></a>
+ <i class="fa-solid fa-print"></i>
</div>
</div>
</div>
@@ -616,7 +656,7 @@ const load_conversation = async (conversation_id, scroll=true) => {
}
if (window.GPTTokenizer_cl100k_base) {
- const filtered = prepare_messages(messages, false);
+ const filtered = prepare_messages(messages, null);
if (filtered.length > 0) {
last_model = last_model?.startsWith("gpt-4") ? "gpt-4" : "gpt-3.5-turbo"
let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length
@@ -683,15 +723,15 @@ async function save_system_message() {
await save_conversation(window.conversation_id, conversation);
}
}
-
-const hide_last_message = async (conversation_id) => {
+const hide_message = async (conversation_id, message_index = -1) => {
const conversation = await get_conversation(conversation_id)
- const last_message = conversation.items.pop();
+ message_index = message_index == -1 ? conversation.items.length - 1 : message_index
+ const last_message = message_index in conversation.items ? conversation.items[message_index] : null;
if (last_message !== null) {
if (last_message["role"] == "assistant") {
last_message["regenerate"] = true;
}
- conversation.items.push(last_message);
+ conversation.items[message_index] = last_message;
}
await save_conversation(conversation_id, conversation);
};
@@ -790,11 +830,22 @@ document.getElementById("cancelButton").addEventListener("click", async () => {
document.getElementById("regenerateButton").addEventListener("click", async () => {
prompt_lock = true;
- await hide_last_message(window.conversation_id);
+ await hide_message(window.conversation_id);
window.token = message_id();
await ask_gpt();
});
+const hide_input = document.querySelector(".toolbar .hide-input");
+hide_input.addEventListener("click", async (e) => {
+ const icon = hide_input.querySelector("i");
+ const func = icon.classList.contains("fa-angles-down") ? "add" : "remove";
+ const remv = icon.classList.contains("fa-angles-down") ? "remove" : "add";
+ icon.classList[func]("fa-angles-up");
+ icon.classList[remv]("fa-angles-down");
+ document.querySelector(".conversation .user-input").classList[func]("hidden");
+ document.querySelector(".conversation .buttons").classList[func]("hidden");
+});
+
const uuid = () => {
return `xxxxxxxx-xxxx-4xxx-yxxx-${Date.now().toString(16)}`.replace(
/[xy]/g,
@@ -998,7 +1049,7 @@ const count_input = async () => {
if (countFocus.value) {
inputCount.innerText = count_words_and_tokens(countFocus.value, get_selected_model());
} else {
- inputCount.innerHTML = "&nbsp;"
+ inputCount.innerText = "";
}
}, 100);
};
@@ -1042,6 +1093,8 @@ async function on_api() {
messageInput.addEventListener("keydown", async (evt) => {
if (prompt_lock) return;
+ // If not mobile
+ if (!window.matchMedia("(pointer:coarse)").matches)
if (evt.keyCode === 13 && !evt.shiftKey) {
evt.preventDefault();
console.log("pressed enter");
@@ -1079,8 +1132,11 @@ async function on_api() {
await load_settings_storage()
const hide_systemPrompt = document.getElementById("hide-systemPrompt")
+ const slide_systemPrompt_icon = document.querySelector(".slide-systemPrompt i");
if (hide_systemPrompt.checked) {
systemPrompt.classList.add("hidden");
+ slide_systemPrompt_icon.classList.remove("fa-angles-up");
+ slide_systemPrompt_icon.classList.add("fa-angles-down");
}
hide_systemPrompt.addEventListener('change', async (event) => {
if (event.target.checked) {
@@ -1089,6 +1145,13 @@ async function on_api() {
systemPrompt.classList.remove("hidden");
}
});
+ document.querySelector(".slide-systemPrompt")?.addEventListener("click", () => {
+ hide_systemPrompt.click();
+ let checked = hide_systemPrompt.checked;
+ systemPrompt.classList[checked ? "add": "remove"]("hidden");
+ slide_systemPrompt_icon.classList[checked ? "remove": "add"]("fa-angles-up");
+ slide_systemPrompt_icon.classList[checked ? "add": "remove"]("fa-angles-down");
+ });
const messageInputHeight = document.getElementById("message-input-height");
if (messageInputHeight) {
if (messageInputHeight.value) {
@@ -1098,6 +1161,19 @@ async function on_api() {
messageInput.style.maxHeight = `${messageInputHeight.value}px`;
});
}
+ const darkMode = document.getElementById("darkMode");
+ if (darkMode) {
+ if (!darkMode.checked) {
+ document.body.classList.add("white");
+ }
+ darkMode.addEventListener('change', async (event) => {
+ if (event.target.checked) {
+ document.body.classList.remove("white");
+ } else {
+ document.body.classList.add("white");
+ }
+ });
+ }
}
async function load_version() {
@@ -1244,6 +1320,7 @@ async function load_provider_models(providerIndex=null) {
if (!providerIndex) {
providerIndex = providerSelect.selectedIndex;
}
+ modelProvider.innerHTML = '';
const provider = providerSelect.options[providerIndex].value;
if (!provider) {
modelProvider.classList.add("hidden");
@@ -1251,7 +1328,6 @@ async function load_provider_models(providerIndex=null) {
return;
}
const models = await api('models', provider);
- modelProvider.innerHTML = '';
if (models.length > 0) {
modelSelect.classList.add("hidden");
modelProvider.classList.remove("hidden");
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 020b2090..3da0fe17 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -1,18 +1,27 @@
from __future__ import annotations
import logging
-import json
-from typing import Iterator
+import os
+import os.path
+import uuid
+import asyncio
+import time
+from aiohttp import ClientSession
+from typing import Iterator, Optional
+from flask import send_from_directory
from g4f import version, models
from g4f import get_last_provider, ChatCompletion
from g4f.errors import VersionNotFoundError
-from g4f.image import ImagePreview
+from g4f.typing import Cookies
+from g4f.image import ImagePreview, ImageResponse, is_accepted_format
+from g4f.requests.aiohttp import get_connector
from g4f.Provider import ProviderType, __providers__, __map__
from g4f.providers.base_provider import ProviderModelMixin, FinishReason
from g4f.providers.conversation import BaseConversation
conversations: dict[dict[str, BaseConversation]] = {}
+images_dir = "./generated_images"
class Api():
@@ -110,14 +119,8 @@ class Api():
"latest_version": version.utils.latest_version,
}
- def generate_title(self):
- """
- Generates and returns a title based on the request data.
-
- Returns:
- dict: A dictionary with the generated title.
- """
- return {'title': ''}
+ def serve_images(self, name):
+ return send_from_directory(os.path.abspath(images_dir), name)
def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
"""
@@ -185,6 +188,27 @@ class Api():
yield self._format_json("message", get_error_message(chunk))
elif isinstance(chunk, ImagePreview):
yield self._format_json("preview", chunk.to_string())
+ elif isinstance(chunk, ImageResponse):
+ async def copy_images(images: list[str], cookies: Optional[Cookies] = None):
+ async with ClientSession(
+ connector=get_connector(None, os.environ.get("G4F_PROXY")),
+ cookies=cookies
+ ) as session:
+ async def copy_image(image):
+ async with session.get(image) as response:
+ target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+ with open(target, "wb") as f:
+ async for chunk in response.content.iter_any():
+ f.write(chunk)
+ with open(target, "rb") as f:
+ extension = is_accepted_format(f.read(12)).split("/")[-1]
+ extension = "jpg" if extension == "jpeg" else extension
+ new_target = f"{target}.{extension}"
+ os.rename(target, new_target)
+ return f"/images/{os.path.basename(new_target)}"
+ return await asyncio.gather(*[copy_image(image) for image in images])
+ images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies")))
+ yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
elif not isinstance(chunk, FinishReason):
yield self._format_json("content", str(chunk))
except Exception as e:
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index d9e31c0e..dc1b1080 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -47,13 +47,13 @@ class Backend_Api(Api):
'function': self.handle_conversation,
'methods': ['POST']
},
- '/backend-api/v2/gen.set.summarize:title': {
- 'function': self.generate_title,
- 'methods': ['POST']
- },
'/backend-api/v2/error': {
'function': self.handle_error,
'methods': ['POST']
+ },
+ '/images/<path:name>': {
+ 'function': self.serve_images,
+ 'methods': ['GET']
}
}
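Generated images are now served by the GUI itself under `/images/<name>`. Fetching one back might look like this; the host, port, and filename are placeholders:

```python
import requests

img = requests.get("http://localhost:8080/images/1716900000_0b1c2d3e.jpg")
with open("local_copy.jpg", "wb") as f:
    f.write(img.content)
```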
diff --git a/g4f/image.py b/g4f/image.py
index 270b59ad..6561b83a 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -16,6 +16,13 @@ from .errors import MissingRequirementsError
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
+EXTENSIONS_MAP: dict[str, str] = {
+ "image/png": "png",
+ "image/jpeg": "jpg",
+ "image/gif": "gif",
+ "image/webp": "webp",
+}
+
def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
Converts the input image to a PIL Image object.
@@ -210,8 +217,8 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
result = "\n".join(
- #f"[![#{idx+1} {alt}]({preview[idx]})]({image})"
- f'[<img src="{preview[idx]}" width="200" alt="#{idx+1} {alt}">]({image})'
+ f"[![#{idx+1} {alt}]({preview[idx]})]({image})"
+ #f'[<img src="{preview[idx]}" width="200" alt="#{idx+1} {alt}">]({image})'
for idx, image in enumerate(images)
)
start_flag = "<!-- generated images start -->\n"
@@ -275,6 +282,18 @@ class ImagePreview(ImageResponse):
def to_string(self):
return super().__str__()
+class ImageDataResponse():
+ def __init__(
+ self,
+ images: Union[str, list],
+ alt: str,
+ ):
+ self.images = images
+ self.alt = alt
+
+ def get_list(self) -> list[str]:
+ return [self.images] if isinstance(self.images, str) else self.images
+
class ImageRequest:
def __init__(
self,
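`ImageDataResponse` complements `ImageResponse` for providers that return inline image data rather than URLs; a sketch with a placeholder data URI:

```python
from g4f.image import ImageDataResponse

resp = ImageDataResponse(images="data:image/png;base64,...", alt="example")
print(resp.get_list())  # always a list, even when a single image was passed
```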
diff --git a/g4f/models.py b/g4f/models.py
index e3da0363..40de22ba 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -2,27 +2,27 @@ from __future__ import annotations
from dataclasses import dataclass
-from .Provider import RetryProvider, ProviderType
+from .Provider import IterListProvider, ProviderType
from .Provider import (
Aichatos,
Bing,
Blackbox,
- Chatgpt4Online,
ChatgptAi,
ChatgptNext,
- Cohere,
Cnote,
DeepInfra,
+ DuckDuckGo,
+ Ecosia,
Feedough,
FreeGpt,
Gemini,
- GeminiProChat,
+ GeminiPro,
GigaChat,
HuggingChat,
HuggingFace,
Koala,
Liaobots,
- Llama,
+ MetaAI,
OpenaiChat,
PerplexityLabs,
Replicate,
@@ -32,7 +32,6 @@ from .Provider import (
Reka
)
-
@dataclass(unsafe_hash=True)
class Model:
"""
@@ -55,12 +54,12 @@ class Model:
default = Model(
name = "",
base_provider = "",
- best_provider = RetryProvider([
+ best_provider = IterListProvider([
Bing,
ChatgptAi,
You,
- Chatgpt4Online,
- OpenaiChat
+ OpenaiChat,
+ Ecosia,
])
)
@@ -68,11 +67,14 @@ default = Model(
gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
- best_provider = RetryProvider([
+ best_provider = IterListProvider([
FreeGpt,
You,
ChatgptNext,
OpenaiChat,
+ Koala,
+ Ecosia,
+ DuckDuckGo,
])
)
@@ -80,7 +82,7 @@ gpt_35_long = Model(
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
- best_provider = RetryProvider([
+ best_provider = IterListProvider([
FreeGpt,
You,
ChatgptNext,
@@ -95,11 +97,19 @@ gpt_35_turbo = Model(
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
- best_provider = RetryProvider([
+ best_provider = IterListProvider([
Bing, Liaobots,
])
)
+gpt_4o = Model(
+ name = 'gpt-4o',
+ base_provider = 'openai',
+ best_provider = IterListProvider([
+ You, Liaobots
+ ])
+)
+
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'openai',
@@ -112,46 +122,22 @@ gigachat = Model(
best_provider = GigaChat
)
-gigachat_plus = Model(
- name = 'GigaChat-Plus',
- base_provider = 'gigachat',
- best_provider = GigaChat
-)
-
-gigachat_pro = Model(
- name = 'GigaChat-Pro',
- base_provider = 'gigachat',
- best_provider = GigaChat
-)
-
-llama2_7b = Model(
- name = "meta-llama/Llama-2-7b-chat-hf",
- base_provider = 'meta',
- best_provider = RetryProvider([Llama, DeepInfra])
-)
-
-llama2_13b = Model(
- name = "meta-llama/Llama-2-13b-chat-hf",
- base_provider = 'meta',
- best_provider = RetryProvider([Llama, DeepInfra])
-)
-
-llama2_70b = Model(
- name = "meta-llama/Llama-2-70b-chat-hf",
+meta = Model(
+ name = "meta",
base_provider = "meta",
- best_provider = RetryProvider([Llama, DeepInfra])
+ best_provider = MetaAI
)
llama3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
- best_provider = RetryProvider([Llama, DeepInfra, Replicate])
+ best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
llama3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
base_provider = "meta",
- best_provider = RetryProvider([Llama, DeepInfra])
+ best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
codellama_34b_instruct = Model(
@@ -163,61 +149,30 @@ codellama_34b_instruct = Model(
codellama_70b_instruct = Model(
name = "codellama/CodeLlama-70b-Instruct-hf",
base_provider = "meta",
- best_provider = RetryProvider([DeepInfra, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra, PerplexityLabs])
)
# Mistral
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = RetryProvider([DeepInfra, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
)
mistral_7b = Model(
name = "mistralai/Mistral-7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = RetryProvider([HuggingChat, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([HuggingChat, HuggingFace, PerplexityLabs])
)
mistral_7b_v02 = Model(
name = "mistralai/Mistral-7B-Instruct-v0.2",
base_provider = "huggingface",
- best_provider = DeepInfra
-)
-
-mixtral_8x22b = Model(
- name = "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
- base_provider = "huggingface",
- best_provider = DeepInfra
-)
-
-# Misc models
-dolphin_mixtral_8x7b = Model(
- name = "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
- base_provider = "huggingface",
- best_provider = DeepInfra
-)
-
-lzlv_70b = Model(
- name = "lizpreciatior/lzlv_70b_fp16_hf",
- base_provider = "huggingface",
- best_provider = DeepInfra
-)
-
-airoboros_70b = Model(
- name = "deepinfra/airoboros-70b",
- base_provider = "huggingface",
- best_provider = DeepInfra
-)
-
-openchat_35 = Model(
- name = "openchat/openchat_3.5",
- base_provider = "huggingface",
- best_provider = DeepInfra
+ best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
)
# Bard
-gemini = bard = palm = Model(
+gemini = Model(
name = 'gemini',
base_provider = 'google',
best_provider = Gemini
@@ -226,7 +181,7 @@ gemini = bard = palm = Model(
claude_v2 = Model(
name = 'claude-v2',
base_provider = 'anthropic',
- best_provider = RetryProvider([Vercel])
+ best_provider = IterListProvider([Vercel])
)
claude_3_opus = Model(
@@ -241,6 +196,12 @@ claude_3_sonnet = Model(
best_provider = You
)
+claude_3_haiku = Model(
+ name = 'claude-3-haiku',
+ base_provider = 'anthropic',
+ best_provider = DuckDuckGo
+)
+
gpt_35_turbo_16k = Model(
name = 'gpt-3.5-turbo-16k',
base_provider = 'openai',
@@ -280,7 +241,7 @@ gpt_4_32k_0613 = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'google',
- best_provider = RetryProvider([GeminiProChat, You])
+ best_provider = IterListProvider([GeminiPro, You])
)
pi = Model(
@@ -292,13 +253,13 @@ pi = Model(
dbrx_instruct = Model(
name = 'databricks/dbrx-instruct',
base_provider = 'mistral',
- best_provider = RetryProvider([DeepInfra, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra, PerplexityLabs])
)
command_r_plus = Model(
name = 'CohereForAI/c4ai-command-r-plus',
base_provider = 'mistral',
- best_provider = RetryProvider([HuggingChat, Cohere])
+ best_provider = IterListProvider([HuggingChat])
)
blackbox = Model(
@@ -326,62 +287,48 @@ class ModelUtils:
'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
-
'gpt-3.5-long': gpt_35_long,
-
+
# gpt-4
+ 'gpt-4o' : gpt_4o,
'gpt-4' : gpt_4,
'gpt-4-0613' : gpt_4_0613,
'gpt-4-32k' : gpt_4_32k,
'gpt-4-32k-0613' : gpt_4_32k_0613,
'gpt-4-turbo' : gpt_4_turbo,
- # Llama
- 'llama2-7b' : llama2_7b,
- 'llama2-13b': llama2_13b,
- 'llama2-70b': llama2_70b,
-
- 'llama3-8b' : llama3_8b_instruct, # alias
+ "meta-ai": meta,
+ 'llama3-8b': llama3_8b_instruct, # alias
'llama3-70b': llama3_70b_instruct, # alias
'llama3-8b-instruct' : llama3_8b_instruct,
'llama3-70b-instruct': llama3_70b_instruct,
-
+
'codellama-34b-instruct': codellama_34b_instruct,
'codellama-70b-instruct': codellama_70b_instruct,
- # GigaChat
- 'gigachat' : gigachat,
- 'gigachat_plus': gigachat_plus,
- 'gigachat_pro' : gigachat_pro,
-
# Mistral Opensource
'mixtral-8x7b': mixtral_8x7b,
'mistral-7b': mistral_7b,
'mistral-7b-v02': mistral_7b_v02,
- 'mixtral-8x22b': mixtral_8x22b,
- 'dolphin-mixtral-8x7b': dolphin_mixtral_8x7b,
-
+
# google gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
-
+
# anthropic
'claude-v2': claude_v2,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
-
+ 'claude-3-haiku': claude_3_haiku,
+
# reka core
- 'reka-core': reka_core,
'reka': reka_core,
- 'Reka Core': reka_core,
-
+
# other
'blackbox': blackbox,
'command-r+': command_r_plus,
'dbrx-instruct': dbrx_instruct,
- 'lzlv-70b': lzlv_70b,
- 'airoboros-70b': airoboros_70b,
- 'openchat_3.5': openchat_35,
+ 'gigachat': gigachat,
'pi': pi
}
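As noted in the changelog, `gpt-4o` is now addressable by name; a hedged usage sketch follows (the provider actually chosen at runtime depends on `IterListProvider`'s shuffling):

```python
from g4f.client import Client

client = Client()
# "gpt-4o" resolves via ModelUtils.convert to the new gpt_4o model,
# whose best_provider is IterListProvider([You, Liaobots]).
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```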
diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py
index 5624c9e1..a03dcbba 100644
--- a/g4f/providers/base_provider.py
+++ b/g4f/providers/base_provider.py
@@ -30,6 +30,13 @@ if sys.platform == 'win32':
def get_running_loop(check_nested: bool) -> Union[AbstractEventLoop, None]:
try:
loop = asyncio.get_running_loop()
+ # Do not patch the uvloop event loop because it's incompatible.
+ try:
+ import uvloop
+ if isinstance(loop, uvloop.Loop):
+ return loop
+ except (ImportError, ModuleNotFoundError):
+ pass
if check_nested and not hasattr(loop.__class__, "_nest_patched"):
try:
import nest_asyncio
@@ -290,4 +297,4 @@ class ProviderModelMixin:
elif model not in cls.get_models() and cls.models:
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
debug.last_model = model
- return model \ No newline at end of file
+ return model
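The uvloop guard above exists because `nest_asyncio` cannot patch uvloop's loop implementation; in isolation, the detection logic looks roughly like this (uvloop is an optional dependency, hence the import guard):

```python
import asyncio

def is_uvloop_loop(loop: asyncio.AbstractEventLoop) -> bool:
    """Return True if the running loop comes from uvloop (optional dependency)."""
    try:
        import uvloop
    except ImportError:
        return False
    return isinstance(loop, uvloop.Loop)
```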
diff --git a/g4f/providers/retry_provider.py b/g4f/providers/retry_provider.py
index d64e8471..0061bcc1 100644
--- a/g4f/providers/retry_provider.py
+++ b/g4f/providers/retry_provider.py
@@ -3,18 +3,16 @@ from __future__ import annotations
import asyncio
import random
-from ..typing import Type, List, CreateResult, Messages, Iterator
-from .types import BaseProvider, BaseRetryProvider
+from ..typing import Type, List, CreateResult, Messages, Iterator, AsyncResult
+from .types import BaseProvider, BaseRetryProvider, ProviderType
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError
-class RetryProvider(BaseRetryProvider):
+class IterListProvider(BaseRetryProvider):
def __init__(
self,
providers: List[Type[BaseProvider]],
- shuffle: bool = True,
- single_provider_retry: bool = False,
- max_retries: int = 3,
+ shuffle: bool = True
) -> None:
"""
Initialize the BaseRetryProvider.
@@ -26,8 +24,6 @@ class RetryProvider(BaseRetryProvider):
"""
self.providers = providers
self.shuffle = shuffle
- self.single_provider_retry = single_provider_retry
- self.max_retries = max_retries
self.working = True
self.last_provider: Type[BaseProvider] = None
@@ -49,15 +45,145 @@ class RetryProvider(BaseRetryProvider):
Raises:
Exception: Any exception encountered during the completion process.
"""
- providers = [p for p in self.providers if stream and p.supports_stream] if stream else self.providers
+ exceptions = {}
+ started: bool = False
+
+ for provider in self.get_providers(stream):
+ self.last_provider = provider
+ try:
+ if debug.logging:
+ print(f"Using {provider.__name__} provider")
+ for token in provider.create_completion(model, messages, stream, **kwargs):
+ yield token
+ started = True
+ if started:
+ return
+ except Exception as e:
+ exceptions[provider.__name__] = e
+ if debug.logging:
+ print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
+ if started:
+ raise e
+
+ raise_exceptions(exceptions)
+
+ async def create_async(
+ self,
+ model: str,
+ messages: Messages,
+ **kwargs,
+ ) -> str:
+ """
+ Asynchronously create a completion using available providers.
+ Args:
+ model (str): The model to be used for completion.
+ messages (Messages): The messages to be used for generating completion.
+ Returns:
+ str: The result of the asynchronous completion.
+ Raises:
+ Exception: Any exception encountered during the asynchronous completion process.
+ """
+ exceptions = {}
+
+ for provider in self.get_providers(False):
+ self.last_provider = provider
+ try:
+ if debug.logging:
+ print(f"Using {provider.__name__} provider")
+ return await asyncio.wait_for(
+ provider.create_async(model, messages, **kwargs),
+ timeout=kwargs.get("timeout", 60),
+ )
+ except Exception as e:
+ exceptions[provider.__name__] = e
+ if debug.logging:
+ print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
+
+ raise_exceptions(exceptions)
+
+ def get_providers(self, stream: bool) -> list[ProviderType]:
+ providers = [p for p in self.providers if p.supports_stream] if stream else self.providers
if self.shuffle:
random.shuffle(providers)
+ return providers
+ async def create_async_generator(
+ self,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ **kwargs
+ ) -> AsyncResult:
exceptions = {}
started: bool = False
- if self.single_provider_retry and len(providers) == 1:
- provider = providers[0]
+ for provider in self.get_providers(stream):
+ self.last_provider = provider
+ try:
+ if debug.logging:
+ print(f"Using {provider.__name__} provider")
+ if not stream:
+ yield await provider.create_async(model, messages, **kwargs)
+ elif hasattr(provider, "create_async_generator"):
+ async for token in provider.create_async_generator(model, messages, stream=stream, **kwargs):
+ yield token
+ else:
+ for token in provider.create_completion(model, messages, stream, **kwargs):
+ yield token
+ started = True
+ if started:
+ return
+ except Exception as e:
+ exceptions[provider.__name__] = e
+ if debug.logging:
+ print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
+ if started:
+ raise e
+
+ raise_exceptions(exceptions)
+
+class RetryProvider(IterListProvider):
+ def __init__(
+ self,
+ providers: List[Type[BaseProvider]],
+ shuffle: bool = True,
+ single_provider_retry: bool = False,
+ max_retries: int = 3,
+ ) -> None:
+ """
+ Initialize the RetryProvider.
+ Args:
+ providers (List[Type[BaseProvider]]): List of providers to use.
+ shuffle (bool): Whether to shuffle the providers list.
+ single_provider_retry (bool): Whether to retry a single provider if it fails.
+ max_retries (int): Maximum number of retries for a single provider.
+ """
+ super().__init__(providers, shuffle)
+ self.single_provider_retry = single_provider_retry
+ self.max_retries = max_retries
+
+ def create_completion(
+ self,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ **kwargs,
+ ) -> CreateResult:
+ """
+ Create a completion using available providers, with an option to stream the response.
+ Args:
+ model (str): The model to be used for completion.
+ messages (Messages): The messages to be used for generating completion.
+ stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to False.
+ Yields:
+ CreateResult: Tokens or results from the completion.
+ Raises:
+ Exception: Any exception encountered during the completion process.
+ """
+ if self.single_provider_retry:
+ exceptions = {}
+ started: bool = False
+ provider = self.providers[0]
self.last_provider = provider
for attempt in range(self.max_retries):
try:
@@ -65,7 +191,7 @@ class RetryProvider(BaseRetryProvider):
print(f"Using {provider.__name__} provider (attempt {attempt + 1})")
for token in provider.create_completion(model, messages, stream, **kwargs):
yield token
- started = True
+ started = True
if started:
return
except Exception as e:
@@ -74,25 +200,9 @@ class RetryProvider(BaseRetryProvider):
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
raise e
+ raise_exceptions(exceptions)
else:
- for provider in providers:
- self.last_provider = provider
- try:
- if debug.logging:
- print(f"Using {provider.__name__} provider")
- for token in provider.create_completion(model, messages, stream, **kwargs):
- yield token
- started = True
- if started:
- return
- except Exception as e:
- exceptions[provider.__name__] = e
- if debug.logging:
- print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
- if started:
- raise e
-
- raise_exceptions(exceptions)
+ yield from super().create_completion(model, messages, stream, **kwargs)
async def create_async(
self,
@@ -110,14 +220,10 @@ class RetryProvider(BaseRetryProvider):
Raises:
Exception: Any exception encountered during the asynchronous completion process.
"""
- providers = self.providers
- if self.shuffle:
- random.shuffle(providers)
-
exceptions = {}
- if self.single_provider_retry and len(providers) == 1:
- provider = providers[0]
+ if self.single_provider_retry:
+ provider = self.providers[0]
self.last_provider = provider
for attempt in range(self.max_retries):
try:
@@ -131,22 +237,9 @@ class RetryProvider(BaseRetryProvider):
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
+ raise_exceptions(exceptions)
else:
- for provider in providers:
- self.last_provider = provider
- try:
- if debug.logging:
- print(f"Using {provider.__name__} provider")
- return await asyncio.wait_for(
- provider.create_async(model, messages, **kwargs),
- timeout=kwargs.get("timeout", 60),
- )
- except Exception as e:
- exceptions[provider.__name__] = e
- if debug.logging:
- print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
-
- raise_exceptions(exceptions)
+ return await super().create_async(model, messages, **kwargs)
class IterProvider(BaseRetryProvider):
__name__ = "IterProvider"
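The refactor splits the old class in two: `IterListProvider` walks the provider list once, while `RetryProvider` layers the previous single-provider retry loop on top of it. A sketch of the two modes (the provider choices are illustrative):

```python
from g4f.providers.retry_provider import IterListProvider, RetryProvider
from g4f.Provider import You, FreeGpt

# Try each provider once, in shuffled order, until one yields tokens.
fallback = IterListProvider([You, FreeGpt])

# Retry the single listed provider up to max_retries times before failing.
stubborn = RetryProvider([You], single_provider_retry=True, max_retries=3)
```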
diff --git a/g4f/typing.py b/g4f/typing.py
index 5d1bc959..710fde4b 100644
--- a/g4f/typing.py
+++ b/g4f/typing.py
@@ -14,7 +14,7 @@ else:
SHA256 = NewType('sha_256_hash', str)
CreateResult = Iterator[str]
AsyncResult = AsyncIterator[str]
-Messages = List[Dict[str, str]]
+Messages = List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]]
Cookies = Dict[str, str]
ImageType = Union[str, bytes, IO, Image, None]
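The widened `Messages` alias now admits structured content parts (e.g. for vision inputs) alongside plain strings; an example value that satisfies the new alias, with an OpenAI-style part shape assumed:

```python
from g4f.typing import Messages

messages: Messages = [
    {"role": "user", "content": "Describe this image:"},
    # Structured content parts are now permitted by the alias:
    {"role": "user", "content": [
        {"type": "text", "text": "What is in this picture?"},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    ]},
]
```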
diff --git a/generated_images/.gitkeep b/generated_images/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/generated_images/.gitkeep
diff --git a/projects/windows/copy.sh b/projects/windows/copy.sh
index 58faadd5..257790ef 100755
--- a/projects/windows/copy.sh
+++ b/projects/windows/copy.sh
@@ -1,5 +1,5 @@
cp -r * /var/win/shared/
-cp -r windows/* /var/win/shared/
+cp -r projects/windows/* /var/win/shared/
cp setup.py /var/win/shared/
cp README.md /var/win/shared/
#git clone https://github.com/pyinstaller/pyinstaller/ /var/win/shared/pyinstaller \ No newline at end of file
diff --git a/projects/windows/main.py b/projects/windows/main.py
index 11a3f902..05042715 100644
--- a/projects/windows/main.py
+++ b/projects/windows/main.py
@@ -8,15 +8,12 @@ ssl.create_default_context = partial(
cafile=certifi.where()
)
-from g4f.gui.webview import run_webview
-from g4f.gui.run import gui_parser
+from g4f.gui.run import run_gui_args, gui_parser
import g4f.debug
g4f.debug.version_check = False
-g4f.debug.version = "0.2.8.0"
+g4f.debug.version = "0.3.1.7"
if __name__ == "__main__":
parser = gui_parser()
args = parser.parse_args()
- if args.debug:
- g4f.debug.logging = True
- run_webview(args.debug) \ No newline at end of file
+ run_gui_args(args) \ No newline at end of file
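The Windows entry point now defers flag handling to `run_gui_args`, which presumably also covers the old `--debug` switch; a hedged launch sketch (the available flags come from `gui_parser`):

```python
from g4f.gui.run import gui_parser, run_gui_args

# Parse CLI flags (e.g. --debug) and start the GUI accordingly.
args = gui_parser().parse_args()
run_gui_args(args)
```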
diff --git a/projects/windows/main.spec b/projects/windows/main.spec
index 35f6d3ac..31cc2bd9 100644
--- a/projects/windows/main.spec
+++ b/projects/windows/main.spec
@@ -6,7 +6,7 @@ a = Analysis(
pathex=[],
binaries=[],
datas=[],
- hiddenimports=['plyer.platforms.linux.filechooser', 'plyer.platforms.win.filechooser'],
+ hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
@@ -42,4 +42,4 @@ coll = COLLECT(
upx=True,
upx_exclude=[],
name='g4f',
-) \ No newline at end of file
+)