summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--README.md333
-rw-r--r--g4f/Provider/AI365VIP.py67
-rw-r--r--g4f/Provider/Allyfy.py71
-rw-r--r--g4f/Provider/Blackbox.py37
-rw-r--r--g4f/Provider/ChatGot.py75
-rw-r--r--g4f/Provider/Chatgpt4Online.py109
-rw-r--r--g4f/Provider/Chatgpt4o.py83
-rw-r--r--g4f/Provider/ChatgptFree.py2
-rw-r--r--g4f/Provider/DDG.py118
-rw-r--r--g4f/Provider/Feedough.py82
-rw-r--r--g4f/Provider/FlowGpt.py4
-rw-r--r--g4f/Provider/FreeChatgpt.py97
-rw-r--r--g4f/Provider/FreeGpt.py66
-rw-r--r--g4f/Provider/FreeNetfly.py107
-rw-r--r--g4f/Provider/GeminiProChat.py11
-rw-r--r--g4f/Provider/HuggingChat.py134
-rw-r--r--g4f/Provider/HuggingFace.py15
-rw-r--r--g4f/Provider/Koala.py36
-rw-r--r--g4f/Provider/Liaobots.py169
-rw-r--r--g4f/Provider/LiteIcoding.py97
-rw-r--r--g4f/Provider/MagickPenAsk.py51
-rw-r--r--g4f/Provider/MagickPenChat.py50
-rw-r--r--g4f/Provider/Marsyoo.py64
-rw-r--r--g4f/Provider/MetaAI.py7
-rw-r--r--g4f/Provider/PerplexityLabs.py28
-rw-r--r--g4f/Provider/Pi.py3
-rw-r--r--g4f/Provider/ReplicateHome.py (renamed from g4f/Provider/ReplicateImage.py)86
-rw-r--r--g4f/Provider/Rocks.py56
-rw-r--r--g4f/Provider/TeachAnything.py62
-rw-r--r--g4f/Provider/You.py20
-rw-r--r--g4f/Provider/__init__.py26
-rw-r--r--g4f/Provider/deprecated/Yqcloud.py2
-rw-r--r--g4f/Provider/needs_auth/Openai.py3
-rw-r--r--g4f/Provider/needs_auth/OpenaiChat.py5
-rw-r--r--g4f/Provider/not_working/Aichatos.py (renamed from g4f/Provider/Aichatos.py)8
-rw-r--r--g4f/Provider/not_working/ChatForAi.py (renamed from g4f/Provider/ChatForAi.py)10
-rw-r--r--g4f/Provider/not_working/ChatgptAi.py (renamed from g4f/Provider/ChatgptAi.py)12
-rw-r--r--g4f/Provider/not_working/ChatgptNext.py (renamed from g4f/Provider/ChatgptNext.py)8
-rw-r--r--g4f/Provider/not_working/ChatgptX.py (renamed from g4f/Provider/ChatgptX.py)10
-rw-r--r--g4f/Provider/not_working/Cnote.py (renamed from g4f/Provider/Cnote.py)8
-rw-r--r--g4f/Provider/not_working/Feedough.py78
-rw-r--r--g4f/Provider/not_working/__init__.py9
-rw-r--r--g4f/gui/client/index.html4
-rw-r--r--g4f/models.py523
44 files changed, 2153 insertions, 693 deletions
diff --git a/README.md b/README.md
index d1a5f0a9..a9a0cbaa 100644
--- a/README.md
+++ b/README.md
@@ -2,28 +2,23 @@
<a href="https://trendshift.io/repositories/1692" target="_blank"><img src="https://trendshift.io/api/badge/repositories/1692" alt="xtekky%2Fgpt4free | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
-The **ETA** till (v3 for g4f) where I, [@xtekky](https://github.com/xtekky) will pick this project back up and improve it is **`29` days** (written Tue 28 May), join [t.me/g4f_channel](https://t.me/g4f_channel) in the meanwhile to stay updated.
-
-_____
-
+---
Written by [@xtekky](https://github.com/xtekky) & maintained by [@hlohaus](https://github.com/hlohaus)
-
<div id="top"></div>
-> By using this repository or any code related to it, you agree to the [legal notice](https://github.com/xtekky/gpt4free/blob/main/LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
+> By using this repository or any code related to it, you agree to the [legal notice](https://github.com/xtekky/gpt4free/blob/main/LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
-> [!Warning]
-*"gpt4free"* serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balance and flow control.
+> [!Warning] > _"gpt4free"_ serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balance and flow control.
-> [!Note]
-<sup><strong>Lastet version:</strong></sup> [![PyPI version](https://img.shields.io/pypi/v/g4f?color=blue)](https://pypi.org/project/g4f) [![Docker version](https://img.shields.io/docker/v/hlohaus789/g4f?label=docker&color=blue)](https://hub.docker.com/r/hlohaus789/g4f)
-> <sup><strong>Stats:</strong></sup> [![Downloads](https://static.pepy.tech/badge/g4f)](https://pepy.tech/project/g4f) [![Downloads](https://static.pepy.tech/badge/g4f/month)](https://pepy.tech/project/g4f)
+> [!Note] > <sup><strong>Latest version:</strong></sup> [![PyPI version](https://img.shields.io/pypi/v/g4f?color=blue)](https://pypi.org/project/g4f) [![Docker version](https://img.shields.io/docker/v/hlohaus789/g4f?label=docker&color=blue)](https://hub.docker.com/r/hlohaus789/g4f)
+> <sup><strong>Stats:</strong></sup> [![Downloads](https://static.pepy.tech/badge/g4f)](https://pepy.tech/project/g4f) [![Downloads](https://static.pepy.tech/badge/g4f/month)](https://pepy.tech/project/g4f)
```sh
pip install -U g4f
```
+
```sh
docker pull hlohaus789/g4f
```
@@ -37,12 +32,15 @@ docker pull hlohaus789/g4f
- `g4f` now supports 100% local inference: 🧠 [local-docs](https://g4f.mintlify.app/docs/core/usage/local)
## đŸ”ģ Site Takedown
+
Is your site on this repository and you want to take it down? Send an email to takedown@g4f.ai with proof it is yours and it will be removed as fast as possible. To prevent reproduction please secure your API. 😉
-## 🚀 Feedback and Todo
+## 🚀 Feedback and Todo
+
You can always leave some feedback here: https://forms.gle/FeWV9RLEedfdkmFN6
As per the survey, here is a list of improvements to come
+
- [x] Update the repository to include the new openai library syntax (ex: `Openai()` class) | completed, use `g4f.client.Client`
- [ ] Golang implementation
- [ ] 🚧 Improve Documentation (in /docs & Guides, Howtos, & Do video tutorials)
@@ -58,28 +56,28 @@ As per the survey, here is a list of improvements to come
- [🆕 What's New](#-whats-new)
- [📚 Table of Contents](#-table-of-contents)
- [🛠ī¸ Getting Started](#-getting-started)
- + [Docker Container Guide](#docker-container-guide)
- + [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe)
- + [Use python](#use-python)
- - [Prerequisites](#prerequisites)
- - [Install using PyPI package:](#install-using-pypi-package)
- - [Install from source:](#install-from-source)
- - [Install using Docker:](#install-using-docker)
+ - [Docker Container Guide](#docker-container-guide)
+ - [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe)
+ - [Use python](#use-python)
+ - [Prerequisites](#prerequisites)
+ - [Install using PyPI package:](#install-using-pypi-package)
+ - [Install from source:](#install-from-source)
+ - [Install using Docker:](#install-using-docker)
- [💡 Usage](#-usage)
- * [Text Generation](#text-generation)
- * [Image Generation](#image-generation)
- * [Web UI](#web-ui)
- * [Interference API](#interference-api)
- * [Configuration](#configuration)
+ - [Text Generation](#text-generation)
+ - [Image Generation](#image-generation)
+ - [Web UI](#web-ui)
+ - [Interference API](#interference-api)
+ - [Configuration](#configuration)
- [🚀 Providers and Models](#-providers-and-models)
- * [GPT-4](#gpt-4)
- * [GPT-3.5](#gpt-35)
- * [Other](#other)
- * [Models](#models)
+ - [GPT-4](#gpt-4)
+ - [GPT-3.5](#gpt-35)
+ - [Other](#other)
+ - [Models](#models)
- [🔗 Powered by gpt4free](#-powered-by-gpt4free)
- [🤝 Contribute](#-contribute)
- + [How do i create a new Provider?](#guide-how-do-i-create-a-new-provider)
- + [How can AI help me with writing code?](#guide-how-can-ai-help-me-with-writing-code)
+  - [How do I create a new Provider?](#guide-how-do-i-create-a-new-provider)
+ - [How can AI help me with writing code?](#guide-how-can-ai-help-me-with-writing-code)
- [🙌 Contributors](#-contributors)
- [Šī¸ Copyright](#-copyright)
- [⭐ Star History](#-star-history)
@@ -106,7 +104,8 @@ docker run \
hlohaus789/g4f:latest
```
-3. **Access the Client:**
+3. **Access the Client:**
+
- To use the included client, navigate to: [http://localhost:8080/chat/](http://localhost:8080/chat/)
- Or set the API base for your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
@@ -114,8 +113,11 @@ docker run \
If required, you can access the container's desktop here: http://localhost:7900/?autoconnect=1&resize=scale&password=secret for provider login purposes.
#### Installation Guide for Windows (.exe)
+
To ensure the seamless operation of our application, please follow the instructions below. These steps are designed to guide you through the installation process on Windows operating systems.
+
### Installation Steps
+
1. **Download the Application**: Visit our [releases page](https://github.com/xtekky/gpt4free/releases/tag/0.3.1.7) and download the most recent version of the application, named `g4f.exe.zip`.
2. **File Placement**: After downloading, locate the `.zip` file in your Downloads folder. Unpack it to a directory of your choice on your system, then execute the `g4f.exe` file to run the app.
3. **Open GUI**: The app starts a web server with the GUI. Open your favorite browser and navigate to `http://localhost:8080/chat/` to access the application interface.
@@ -124,11 +126,13 @@ To ensure the seamless operation of our application, please follow the instructi
By following these steps, you should be able to successfully install and run the application on your Windows system. If you encounter any issues during the installation process, please refer to our Issue Tracker or try to get contact over Discord for assistance.
Run the **Webview UI** on other Platforms:
+
- [/docs/guides/webview](https://github.com/xtekky/gpt4free/blob/main/docs/webview.md)
##### Use your smartphone:
Run the Web UI on Your Smartphone:
+
- [/docs/guides/phone](https://github.com/xtekky/gpt4free/blob/main/docs/guides/phone.md)
#### Use python
@@ -152,13 +156,11 @@ Use partial requirements: [/docs/requirements](https://github.com/xtekky/gpt4fre
How do I load the project using git and installing the project requirements?
Read this tutorial and follow it step by step: [/docs/git](https://github.com/xtekky/gpt4free/blob/main/docs/git.md)
-
##### Install using Docker:
How do I build and run composer image from source?
Use docker-compose: [/docs/docker](https://github.com/xtekky/gpt4free/blob/main/docs/docker.md)
-
## 💡 Usage
#### Text Generation
@@ -193,7 +195,6 @@ response = client.images.generate(
image_url = response.data[0].url
```
-
[![Image with cat](/docs/cat.jpeg)](https://github.com/xtekky/gpt4free/blob/main/docs/client.md)
**Full Documentation for Python API**
@@ -210,7 +211,9 @@ To start the web interface, type the following codes in python:
from g4f.gui import run_gui
run_gui()
```
+
or execute the following command:
+
```bash
python -m g4f.cli gui -port 8080 -debug
```
@@ -229,7 +232,7 @@ Access with: http://localhost:1337/v1
Cookies are essential for using Meta AI and Microsoft Designer to create images.
Additionally, cookies are required for the Google Gemini and WhiteRabbitNeo Provider.
-From Bing, ensure you have the "_U" cookie, and from Google, all cookies starting with "__Secure-1PSID" are needed.
+From Bing, ensure you have the "\_U" cookie, and from Google, all cookies starting with "\_\_Secure-1PSID" are needed.
You can pass these cookies directly to the create function or set them using the `set_cookies` method before running G4F:
@@ -304,11 +307,13 @@ Note: Ensure that your .har file is stored securely, as it may contain sensitive
If you want to hide or change your IP address for the providers, you can set a proxy globally via an environment variable:
- On macOS and Linux:
+
```bash
export G4F_PROXY="http://host:port"
```
- On Windows:
+
```bash
set G4F_PROXY=http://host:port
```
@@ -317,135 +322,134 @@ set G4F_PROXY=http://host:port
### GPT-4
-| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
-| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
-| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt.com](https://chatgpt.com) | `g4f.Provider.OpenaiChat` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌+✔ī¸ |
-| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [you.com](https://you.com) | `g4f.Provider.You` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
+| -------------------------------------- | ------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ----- |
+| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt.com](https://chatgpt.com) | `g4f.Provider.OpenaiChat` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌+✔ī¸ |
+| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [you.com](https://you.com) | `g4f.Provider.You` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
## Best OpenSource Models
+
While we wait for gpt-5, here is a list of new models that are at least better than gpt-3.5-turbo. **Some are better than gpt-4**. Expect this list to grow.
-| Website | Provider | parameters | better than |
-| ------ | ------- | ------ | ------ |
-| [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview |
-| [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 |
-| [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 |
-| [claude-3-sonnet](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0314 |
-| [reka-core](https://chat.reka.ai/) | `g4f.Provider.Reka` | 21B | gpt-4-vision |
-| [dbrx-instruct](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm) | `g4f.Provider.DeepInfra` | 132B / 36B active| gpt-3.5-turbo |
-| [mixtral-8x22b](https://huggingface.co/mistral-community/Mixtral-8x22B-v0.1) | `g4f.Provider.DeepInfra` | 176B / 44b active | gpt-3.5-turbo |
+| Website | Provider | parameters | better than |
+| ---------------------------------------------------------------------------------------- | ----------------------------------- | ----------------- | ------------------ |
+| [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview |
+| [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 |
+| [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 |
+| [claude-3-sonnet](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0314 |
+| [reka-core](https://chat.reka.ai/) | `g4f.Provider.Reka` | 21B | gpt-4-vision |
+| [dbrx-instruct](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm) | `g4f.Provider.DeepInfra` | 132B / 36B active | gpt-3.5-turbo |
+| [mixtral-8x22b](https://huggingface.co/mistral-community/Mixtral-8x22B-v0.1) | `g4f.Provider.DeepInfra` | 176B / 44b active | gpt-3.5-turbo |
### GPT-3.5
-| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
-| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
-| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat10.aichatos.xyz](https://chat10.aichatos.xyz) | `g4f.Provider.Aichatos` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [f1.cnote.top](https://f1.cnote.top) | `g4f.Provider.Cnote` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DuckDuckGo` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
+| ---------------------------------------------------------- | ----------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ---- |
+| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat10.aichatos.xyz](https://chat10.aichatos.xyz) | `g4f.Provider.Aichatos` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DDG` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
### Other
-| Website | Provider | Stream | Status | Auth |
-| ------ | ------- | ------ | ------ | ---- |
-| [openchat.team](https://openchat.team) | `g4f.Provider.Aura`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [blackbox.ai](https://www.blackbox.ai) | `g4f.Provider.Blackbox`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [cohereforai-c4ai-command-r-plus.hf.space](https://cohereforai-c4ai-command-r-plus.hf.space) | `g4f.Provider.Cohere`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [developers.sber.ru](https://developers.sber.ru/gigachat) | `g4f.Provider.GigaChat`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [console.groq.com](https://console.groq.com/playground) | `g4f.Provider.Groq`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [meta.ai](https://www.meta.ai) | `g4f.Provider.MetaAI`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [openrouter.ai](https://openrouter.ai) | `g4f.Provider.OpenRouter`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [replicate.com](https://replicate.com) | `g4f.Provider.Replicate`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [whiterabbitneo.com](https://www.whiterabbitneo.com) | `g4f.Provider.WhiteRabbitNeo`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard`| ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔ī¸ |
+| Website | Provider | Stream | Status | Auth |
+| -------------------------------------------------------------------------------------------- | ----------------------------- | ------ | ---------------------------------------------------------- | ---- |
+| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [blackbox.ai](https://www.blackbox.ai) | `g4f.Provider.Blackbox` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [cohereforai-c4ai-command-r-plus.hf.space](https://cohereforai-c4ai-command-r-plus.hf.space) | `g4f.Provider.Cohere` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [developers.sber.ru](https://developers.sber.ru/gigachat) | `g4f.Provider.GigaChat` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [console.groq.com](https://console.groq.com/playground) | `g4f.Provider.Groq` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [meta.ai](https://www.meta.ai) | `g4f.Provider.MetaAI` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [openrouter.ai](https://openrouter.ai) | `g4f.Provider.OpenRouter` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [replicate.com](https://replicate.com) | `g4f.Provider.Replicate` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [whiterabbitneo.com](https://www.whiterabbitneo.com) | `g4f.Provider.WhiteRabbitNeo` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔ī¸ |
### Models
-| Model | Base Provider | Provider | Website |
-| ----- | ------------- | -------- | ------- |
-| gpt-3.5-turbo | OpenAI | 8+ Providers | [openai.com](https://openai.com/) |
-| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) |
-| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
-| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Meta-Llama-3-8b-instruct | Meta | 1+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Meta-Llama-3-70b-instruct | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-34b-Instruct-hf | Meta | g4f.Provider.HuggingChat | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
-| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
-| Mistral-7B-Instruct-v0.2 | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| zephyr-orpo-141b-A35b-v0.1 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
-| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
-| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
-| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
-| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
-| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
-| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
-| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
+| Model | Base Provider | Provider | Website |
+| -------------------------- | ------------- | ------------------------ | ----------------------------------------------- |
+| gpt-3.5-turbo | OpenAI | 8+ Providers | [openai.com](https://openai.com/) |
+| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) |
+| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
+| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Meta-Llama-3-8b-instruct | Meta | 1+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Meta-Llama-3-70b-instruct | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-34b-Instruct-hf | Meta | g4f.Provider.HuggingChat | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
+| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
+| Mistral-7B-Instruct-v0.2 | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| zephyr-orpo-141b-A35b-v0.1 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
+| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
+| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
+| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
+| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
+| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
### Image and Vision Models
-| Label | Provider | Image Model | Vision Model | Website |
-| ----- | -------- | ----------- | ------------ | ------- |
-| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e-3 | gpt-4-vision | [bing.com](https://bing.com/chat) |
-| DeepInfra | `g4f.Provider.DeepInfra` | stability-ai/sdxl | llava-1.5-7b-hf | [deepinfra.com](https://deepinfra.com) |
-| Gemini | `g4f.Provider.Gemini` | ✔ī¸ | ✔ī¸ | [gemini.google.com](https://gemini.google.com) |
-| Gemini API | `g4f.Provider.GeminiPro` | ❌ | gemini-1.5-pro | [ai.google.dev](https://ai.google.dev) |
-| Meta AI | `g4f.Provider.MetaAI` | ✔ī¸ | ❌ | [meta.ai](https://www.meta.ai) |
-| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e-3 | gpt-4-vision | [chatgpt.com](https://chatgpt.com) |
-| Reka | `g4f.Provider.Reka` | ❌ | ✔ī¸ | [chat.reka.ai](https://chat.reka.ai/) |
-| Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl| llava-v1.6-34b | [replicate.com](https://replicate.com) |
-| You.com | `g4f.Provider.You` | dall-e-3| ✔ī¸ | [you.com](https://you.com) |
-
+| Label | Provider | Image Model | Vision Model | Website |
+| ------------------------- | ------------------------- | ----------------- | --------------- | ---------------------------------------------- |
+| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e-3 | gpt-4-vision | [bing.com](https://bing.com/chat) |
+| DeepInfra | `g4f.Provider.DeepInfra` | stability-ai/sdxl | llava-1.5-7b-hf | [deepinfra.com](https://deepinfra.com) |
+| Gemini | `g4f.Provider.Gemini` | ✔ī¸ | ✔ī¸ | [gemini.google.com](https://gemini.google.com) |
+| Gemini API | `g4f.Provider.GeminiPro` | ❌ | gemini-1.5-pro | [ai.google.dev](https://ai.google.dev) |
+| Meta AI | `g4f.Provider.MetaAI` | ✔ī¸ | ❌ | [meta.ai](https://www.meta.ai) |
+| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e-3 | gpt-4-vision | [chatgpt.com](https://chatgpt.com) |
+| Reka | `g4f.Provider.Reka` | ❌ | ✔ī¸ | [chat.reka.ai](https://chat.reka.ai/) |
+| Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl | llava-v1.6-34b | [replicate.com](https://replicate.com) |
+| You.com | `g4f.Provider.You` | dall-e-3 | ✔ī¸ | [you.com](https://you.com) |
## 🔗 Powered by gpt4free
@@ -848,6 +852,33 @@ While we wait for gpt-5, here is a list of new models that are at least better t
</a>
</td>
</tr>
+ <tr>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js">
+ <b>GPT4js</b>
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/stargazers">
+ <img alt="Stars" src="https://img.shields.io/github/stars/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/network/members">
+ <img alt="Forks" src="https://img.shields.io/github/forks/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/issues">
+ <img alt="Issues" src="https://img.shields.io/github/issues/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/pulls">
+ <img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ </tr>
</tbody>
</table>
@@ -857,11 +888,11 @@ We welcome contributions from the community. Whether you're adding new providers
###### Guide: How do i create a new Provider?
- - Read: [/docs/guides/create_provider](https://github.com/xtekky/gpt4free/blob/main/docs/guides/create_provider.md)
+- Read: [/docs/guides/create_provider](https://github.com/xtekky/gpt4free/blob/main/docs/guides/create_provider.md)
###### Guide: How can AI help me with writing code?
- - Read: [/docs/guides/help_me](https://github.com/xtekky/gpt4free/blob/main/docs/guides/help_me.md)
+- Read: [/docs/guides/help_me](https://github.com/xtekky/gpt4free/blob/main/docs/guides/help_me.md)
## 🙌 Contributors
@@ -911,7 +942,7 @@ A list of all contributors is available [here](https://github.com/xtekky/gpt4fre
- The [`MetaAI.py`](https://github.com/xtekky/gpt4free/blob/main/g4f/Provider/MetaAI.py) file contains code from [meta-ai-api](https://github.com/Strvm/meta-ai-api) by [@Strvm](https://github.com/Strvm)
- The [`proofofwork.py`](https://github.com/xtekky/gpt4free/blob/main/g4f/Provider/openai/proofofwork.py) has input from [missuo/FreeGPT35](https://github.com/missuo/FreeGPT35)
-*Having input implies that the AI's code generation utilized it as one of many sources.*
+_Having input implies that the AI's code generation utilized it as one of many sources._
## Šī¸ Copyright
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/AI365VIP.py
new file mode 100644
index 00000000..fc6ad237
--- /dev/null
+++ b/g4f/Provider/AI365VIP.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chat.ai365vip.com"
+ api_endpoint = "/api/chat"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4o',
+ 'claude-3-haiku-20240307',
+ ]
+ model_aliases = {
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://chat.ai365vip.com",
+ "priority": "u=1, i",
+ "referer": "https://chat.ai365vip.com/en",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": {
+ "id": model,
+ "name": {
+ "gpt-3.5-turbo": "GPT-3.5",
+ "claude-3-haiku-20240307": "claude-3-haiku",
+ "gpt-4o": "GPT-4O"
+ }.get(model, model),
+ },
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "prompt": "You are a helpful assistant.",
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
new file mode 100644
index 00000000..8733b1ec
--- /dev/null
+++ b/g4f/Provider/Allyfy.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class Allyfy(AsyncGeneratorProvider):
+ url = "https://chatbot.allyfy.chat"
+ api_endpoint = "/api/v1/message/stream/super/chat"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json;charset=utf-8",
+ "dnt": "1",
+ "origin": "https://www.allyfy.chat",
+ "priority": "u=1, i",
+ "referer": "https://www.allyfy.chat/",
+ "referrer": "https://www.allyfy.chat",
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [{"content": prompt, "role": "user"}],
+ "content": prompt,
+ "baseInfo": {
+ "clientId": "q08kdrde1115003lyedfoir6af0yy531",
+ "pid": "38281",
+ "channelId": "100000",
+ "locale": "en-US",
+ "localZone": 180,
+ "packageName": "com.cch.allyfy.webh",
+ }
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_response = []
+ async for line in response.content:
+ line = line.decode().strip()
+ if line.startswith("data:"):
+ data_content = line[5:]
+ if data_content == "[DONE]":
+ break
+ try:
+ json_data = json.loads(data_content)
+ if "content" in json_data:
+ full_response.append(json_data["content"])
+ except json.JSONDecodeError:
+ continue
+ yield "".join(full_response)
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 6e1e3949..a86471f2 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -2,31 +2,35 @@ from __future__ import annotations
import uuid
import secrets
-from aiohttp import ClientSession
+import re
+from aiohttp import ClientSession, ClientResponse
+from typing import AsyncGenerator, Optional
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-class Blackbox(AsyncGeneratorProvider):
+class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.blackbox.ai"
working = True
+ default_model = 'blackbox'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: str = None,
- image: ImageType = None,
- image_name: str = None,
+ proxy: Optional[str] = None,
+ image: Optional[ImageType] = None,
+ image_name: Optional[str] = None,
**kwargs
- ) -> AsyncResult:
+ ) -> AsyncGenerator[str, None]:
if image is not None:
messages[-1]["data"] = {
- "fileText": image_name,
+ "fileText": image_name,
"imageBase64": to_data_uri(image)
}
+
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Accept": "*/*",
@@ -40,9 +44,11 @@ class Blackbox(AsyncGeneratorProvider):
"Alt-Used": "www.blackbox.ai",
"Connection": "keep-alive",
}
+
async with ClientSession(headers=headers) as session:
random_id = secrets.token_hex(16)
random_user_id = str(uuid.uuid4())
+
data = {
"messages": messages,
"id": random_id,
@@ -55,10 +61,17 @@ class Blackbox(AsyncGeneratorProvider):
"playgroundMode": False,
"webSearchMode": False,
"userSystemPrompt": "",
- "githubToken": None
+ "githubToken": None,
+ "maxTokens": None
}
- async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+
+ async with session.post(
+ f"{cls.url}/api/chat", json=data, proxy=proxy
+ ) as response: # type: ClientResponse
response.raise_for_status()
- async for chunk in response.content:
+ async for chunk in response.content.iter_any():
if chunk:
- yield chunk.decode()
+ # Decode the chunk and clean up unwanted prefixes using a regex
+ decoded_chunk = chunk.decode()
+ cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk)
+ yield cleaned_chunk
diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/ChatGot.py
new file mode 100644
index 00000000..55e8d0b6
--- /dev/null
+++ b/g4f/Provider/ChatGot.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.chatgot.one/"
+ working = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(time: int, text: str, secret: str = ""):
+ message = f"{time}:{text}:{secret}"
+ return sha256(message.encode()).hexdigest()
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index ff9a2c8f..f62ef8af 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -1,72 +1,77 @@
from __future__ import annotations
-import re
import json
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import format_prompt
+
class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
- supports_message_history = True
- supports_gpt_35_turbo = True
- working = True
- _wpnonce = None
- _context_id = None
-
+ api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+ working = True
+ supports_gpt_4 = True
+
+ async def get_nonce():
+ async with ClientSession() as session:
+ async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
+ return (await response.json())["restNonce"]
+
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
- webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
- args = get_args_from_browser(f"{cls.url}/chat/", webdriver, proxy=proxy)
- async with ClientSession(**args) as session:
- if not cls._wpnonce:
- async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
- response.raise_for_status()
- response = await response.text()
- result = re.search(r'restNonce&quot;:&quot;(.*?)&quot;', response)
- if result:
- cls._wpnonce = result.group(1)
- else:
- raise RuntimeError("No nonce found")
- result = re.search(r'contextId&quot;:(.*?),', response)
- if result:
- cls._context_id = result.group(1)
- else:
- raise RuntimeError("No contextId found")
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ "x-wp-nonce": await cls.get_nonce(),
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
- "botId":"default",
- "customId":None,
- "session":"N/A",
- "chatId":get_random_string(11),
- "contextId":cls._context_id,
- "messages":messages[:-1],
- "newMessage":messages[-1]["content"],
- "newImageId":None,
- "stream":True
+ "botId": "default",
+ "newMessage": prompt,
+ "stream": True,
}
- async with session.post(
- f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
- json=data,
- proxy=proxy,
- headers={"x-wp-nonce": cls._wpnonce}
- ) as response:
+
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
- async for line in response.content:
- if line.startswith(b"data: "):
- line = json.loads(line[6:])
- if "type" not in line:
- raise RuntimeError(f"Response: {line}")
- elif line["type"] == "live":
- yield line["data"]
- elif line["type"] == "end":
- break
+ full_response = ""
+
+ async for chunk in response.content.iter_any():
+ if chunk:
+ try:
+ # Extract the JSON object from the chunk
+ for line in chunk.decode().splitlines():
+ if line.startswith("data: "):
+ json_data = json.loads(line[6:])
+ if json_data["type"] == "live":
+ full_response += json_data["data"]
+ elif json_data["type"] == "end":
+ final_data = json.loads(json_data["data"])
+ full_response = final_data["reply"]
+ break
+ except json.JSONDecodeError:
+ continue
+
+ yield full_response
+
diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/Chatgpt4o.py
new file mode 100644
index 00000000..f3dc8a15
--- /dev/null
+++ b/g4f/Provider/Chatgpt4o.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+import re
+from ..requests import StreamSession, raise_for_status
+from ..typing import Messages
+from .base_provider import AsyncProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Chatgpt4o(AsyncProvider, ProviderModelMixin):
+ url = "https://chatgpt4o.one"
+ supports_gpt_4 = True
+ working = True
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o'
+
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> str:
+ headers = {
+ 'authority': 'chatgpt4o.one',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgpt4o.one',
+ 'referer': 'https://chatgpt4o.one',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
+ if not cls._post_id or not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+ response_text = await response.text()
+
+ post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
+ nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
+
+ if not post_id_match:
+ raise RuntimeError("No post ID found")
+ cls._post_id = post_id_match.group(1)
+
+ if not nonce_match:
+ raise RuntimeError("No nonce found")
+ cls._nonce = nonce_match.group(1)
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ response_json = await response.json()
+ if "data" not in response_json:
+ raise RuntimeError("Unexpected response structure: 'data' field missing")
+ return response_json["data"]
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index 22064f1b..b1e00a22 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
class ChatgptFree(AsyncProvider):
url = "https://chatgptfree.ai"
supports_gpt_35_turbo = True
- working = False
+ working = True
_post_id = None
_nonce = None
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
new file mode 100644
index 00000000..6146994b
--- /dev/null
+++ b/g4f/Provider/DDG.py
@@ -0,0 +1,118 @@
+from __future__ import annotations
+
+import json
+import aiohttp
+import asyncio
+from typing import Optional
+import base64
+
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_connector
+from ..typing import AsyncResult, Messages
+from ..requests.raise_for_status import raise_for_status
+from ..providers.conversation import BaseConversation
+
+class DDG(AsyncGeneratorProvider, ProviderModelMixin):
+ url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9haWNoYXQ=").decode("utf-8")
+ working = True
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+
+ default_model = "gpt-4o-mini"
+ models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
+ model_aliases = {
+ "gpt-4": "gpt-4o-mini",
+ "gpt-4o": "gpt-4o-mini",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+ "mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ }
+
+ # Obfuscated URLs and headers
+ status_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode("utf-8")
+ chat_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9jaGF0").decode("utf-8")
+ referer = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS8=").decode("utf-8")
+ origin = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbQ==").decode("utf-8")
+
+ user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
+ headers = {
+ 'User-Agent': user_agent,
+ 'Accept': 'text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Accept-Encoding': 'gzip, deflate, br, zstd',
+ 'Referer': referer,
+ 'Content-Type': 'application/json',
+ 'Origin': origin,
+ 'Connection': 'keep-alive',
+ 'Cookie': 'dcm=3',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Pragma': 'no-cache',
+ 'TE': 'trailers'
+ }
+
+ @classmethod
+ async def get_vqd(cls, session: aiohttp.ClientSession) -> Optional[str]:
+ try:
+ async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response:
+ await raise_for_status(response)
+ return response.headers.get("x-vqd-4")
+ except Exception as e:
+ print(f"Error getting VQD: {e}")
+ return None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: aiohttp.BaseConnector = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ async with aiohttp.ClientSession(headers=cls.headers, connector=get_connector(connector, proxy)) as session:
+ vqd_4 = None
+ if conversation is not None and len(messages) > 1:
+ vqd_4 = conversation.vqd_4
+ messages = [*conversation.messages, messages[-2], messages[-1]]
+ else:
+ for _ in range(3): # Try up to 3 times to get a valid VQD
+ vqd_4 = await cls.get_vqd(session)
+ if vqd_4:
+ break
+ await asyncio.sleep(1) # Wait a bit before retrying
+
+ if not vqd_4:
+ raise Exception("Failed to obtain a valid VQD token")
+
+ messages = [messages[-1]] # Only use the last message for new conversations
+
+ payload = {
+ 'model': cls.get_model(model),
+ 'messages': [{'role': m['role'], 'content': m['content']} for m in messages]
+ }
+
+ async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response:
+ await raise_for_status(response)
+ if return_conversation:
+ yield Conversation(vqd_4, messages)
+
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk.startswith(b"[DONE]"):
+ break
+ try:
+ data = json.loads(chunk)
+ if "message" in data and data["message"]:
+ yield data["message"]
+ except json.JSONDecodeError:
+ print(f"Failed to decode JSON: {chunk}")
+
+class Conversation(BaseConversation):
+ def __init__(self, vqd_4: str, messages: Messages) -> None:
+ self.vqd_4 = vqd_4
+ self.messages = messages
diff --git a/g4f/Provider/Feedough.py b/g4f/Provider/Feedough.py
index 2ce4561e..d35e30ee 100644
--- a/g4f/Provider/Feedough.py
+++ b/g4f/Provider/Feedough.py
@@ -1,17 +1,20 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession
+import asyncio
+from aiohttp import ClientSession, TCPConnector
+from urllib.parse import urlencode
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-class Feedough(AsyncGeneratorProvider):
+class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.feedough.com"
+ api_endpoint = "/wp-admin/admin-ajax.php"
working = True
- supports_gpt_35_turbo = True
+ default_model = ''
@classmethod
async def create_async_generator(
@@ -22,31 +25,54 @@ class Feedough(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br",
- "Referer": "https://www.feedough.com/ai-prompt-generator/",
- "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
- "Origin": "https://www.feedough.com",
- "DNT": "1",
- "Sec-GPC": "1",
- "Connection": "keep-alive",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "TE": "trailers",
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/ai-prompt-generator/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
+
+ connector = TCPConnector(ssl=False)
+
+ async with ClientSession(headers=headers, connector=connector) as session:
data = {
"action": "aixg_generate",
- "prompt": prompt,
+ "prompt": format_prompt(messages),
+ "aixg_generate_nonce": "110c021031"
}
- async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
- response_json = json.loads(response_text)
- if response_json["success"]:
- message = response_json["data"]["message"]
- yield message
+
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ data=urlencode(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+ try:
+ response_json = json.loads(response_text)
+ if response_json.get("success") and "data" in response_json:
+ message = response_json["data"].get("message", "")
+ yield message
+ except json.JSONDecodeError:
+ yield response_text
+ except Exception as e:
+ print(f"An error occurred: {e}")
+
+ @classmethod
+ async def run(cls, *args, **kwargs):
+ async for item in cls.create_async_generator(*args, **kwargs):
+ yield item
+
+ tasks = asyncio.all_tasks()
+ for task in tasks:
+ if not task.done():
+ await task
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py
index 6c2aa046..d823a7ab 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/FlowGpt.py
@@ -30,7 +30,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
"pygmalion-13b",
"chronos-hermes-13b",
"Mixtral-8x7B",
- "Dolphin-2.6-8x7B"
+ "Dolphin-2.6-8x7B",
]
model_aliases = {
"gemini": "google-gemini",
@@ -91,7 +91,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
"generateImage": False,
"generateAudio": False
}
- async with session.post("https://backend-k8s.flowgpt.com/v2/chat-anonymous-encrypted", json=data, proxy=proxy) as response:
+ async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in response.content:
if chunk.strip():
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
index 3fb247c7..7d8c1d10 100644
--- a/g4f/Provider/FreeChatgpt.py
+++ b/g4f/Provider/FreeChatgpt.py
@@ -1,17 +1,27 @@
from __future__ import annotations
-
import json
-from aiohttp import ClientSession, ClientTimeout
-
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
+from .helper import format_prompt
+
class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://free.chatgpt.org.uk"
+ url = "https://chat.chatgpt.org.uk"
+ api_endpoint = "/api/openai/v1/chat/completions"
working = True
- supports_message_history = True
- default_model = "google-gemini-pro"
+ supports_gpt_35_turbo = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'SparkDesk-v1.1',
+ 'deepseek-coder',
+ 'deepseek-chat',
+ 'Qwen2-7B-Instruct',
+ 'glm4-9B-chat',
+ 'chatglm3-6B',
+ 'Yi-1.5-9B-Chat',
+ ]
@classmethod
async def create_async_generator(
@@ -19,45 +29,50 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- timeout: int = 120,
**kwargs
) -> AsyncResult:
headers = {
- "Accept": "application/json, text/event-stream",
- "Content-Type":"application/json",
- "Accept-Encoding": "gzip, deflate, br",
- "Accept-Language": "en-US,en;q=0.5",
- "Host":"free.chatgpt.org.uk",
- "Referer":f"{cls.url}/",
- "Origin":f"{cls.url}",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
- async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
- "messages": messages,
+ "messages": [
+ {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
+ {"role": "user", "content": prompt}
+ ],
"stream": True,
- "model": cls.get_model(""),
- "temperature": kwargs.get("temperature", 0.5),
- "presence_penalty": kwargs.get("presence_penalty", 0),
- "frequency_penalty": kwargs.get("frequency_penalty", 0),
- "top_p": kwargs.get("top_p", 1)
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
}
- async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
- await raise_for_status(response)
- started = False
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ accumulated_text = ""
async for line in response.content:
- if line.startswith(b"data: [DONE]"):
- break
- elif line.startswith(b"data: "):
- line = json.loads(line[6:])
- if(line["choices"]==[]):
- continue
- chunk = line["choices"][0]["delta"].get("content")
- if chunk:
- started = True
- yield chunk
- if not started:
- raise RuntimeError("Empty response") \ No newline at end of file
+ if line:
+ line_str = line.decode().strip()
+ if line_str == "data: [DONE]":
+ yield accumulated_text
+ break
+ elif line_str.startswith("data: "):
+ try:
+ chunk = json.loads(line_str[6:])
+ delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
+ accumulated_text += delta_content
+ except json.JSONDecodeError:
+ pass
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index f79f0a66..7fa3b5ab 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -1,55 +1,67 @@
from __future__ import annotations
-import time, hashlib, random
-
-from ..typing import AsyncResult, Messages
+import time
+import hashlib
+import random
+from typing import AsyncGenerator, Optional, Dict, Any
+from ..typing import Messages
from ..requests import StreamSession, raise_for_status
from .base_provider import AsyncGeneratorProvider
from ..errors import RateLimitError
-domains = [
+# Constants
+DOMAINS = [
"https://s.aifree.site",
"https://v.aifree.site/"
]
+RATE_LIMIT_ERROR_MESSAGE = "åŊ“前地åŒēåŊ“æ—ĨéĸåēĻåˇ˛æļˆč€—厌"
+
class FreeGpt(AsyncGeneratorProvider):
- url = "https://freegptsnav.aifree.site"
- working = True
- supports_message_history = True
- supports_system_message = True
- supports_gpt_35_turbo = True
+ url: str = "https://freegptsnav.aifree.site"
+ working: bool = True
+ supports_message_history: bool = True
+ supports_system_message: bool = True
+ supports_gpt_35_turbo: bool = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: str = None,
+ proxy: Optional[str] = None,
timeout: int = 120,
- **kwargs
- ) -> AsyncResult:
+ **kwargs: Any
+ ) -> AsyncGenerator[str, None]:
+ prompt = messages[-1]["content"]
+ timestamp = int(time.time())
+ data = cls._build_request_data(messages, prompt, timestamp)
+
+ domain = random.choice(DOMAINS)
+
async with StreamSession(
impersonate="chrome",
timeout=timeout,
- proxies={"all": proxy}
+ proxies={"all": proxy} if proxy else None
) as session:
- prompt = messages[-1]["content"]
- timestamp = int(time.time())
- data = {
- "messages": messages,
- "time": timestamp,
- "pass": None,
- "sign": generate_signature(timestamp, prompt)
- }
- domain = random.choice(domains)
async with session.post(f"{domain}/api/generate", json=data) as response:
await raise_for_status(response)
async for chunk in response.iter_content():
- chunk = chunk.decode(errors="ignore")
- if chunk == "åŊ“前地åŒēåŊ“æ—ĨéĸåēĻåˇ˛æļˆč€—厌":
+ chunk_decoded = chunk.decode(errors="ignore")
+ if chunk_decoded == RATE_LIMIT_ERROR_MESSAGE:
raise RateLimitError("Rate limit reached")
- yield chunk
-
-def generate_signature(timestamp: int, message: str, secret: str = ""):
+ yield chunk_decoded
+
+ @staticmethod
+ def _build_request_data(messages: Messages, prompt: str, timestamp: int, secret: str = "") -> Dict[str, Any]:
+ return {
+ "messages": messages,
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, prompt, secret)
+ }
+
+
+def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/FreeNetfly.py
new file mode 100644
index 00000000..624f33cf
--- /dev/null
+++ b/g4f/Provider/FreeNetfly.py
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, ClientTimeout, ClientError
+from typing import AsyncGenerator
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://free.netfly.top"
+ api_endpoint = "/api/openai/v1/chat/completions"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ data = {
+ "messages": messages,
+ "stream": True,
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
+ }
+
+ max_retries = 3
+ retry_delay = 1
+
+ for attempt in range(max_retries):
+ try:
+ async with ClientSession(headers=headers) as session:
+ timeout = ClientTimeout(total=60)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
+ response.raise_for_status()
+ async for chunk in cls._process_response(response):
+ yield chunk
+ return # If successful, exit the function
+ except (ClientError, asyncio.TimeoutError) as e:
+ if attempt == max_retries - 1:
+ raise # If all retries failed, raise the last exception
+ await asyncio.sleep(retry_delay)
+ retry_delay *= 2 # Exponential backoff
+
+ @classmethod
+ async def _process_response(cls, response) -> AsyncGenerator[str, None]:
+ buffer = ""
+ async for line in response.content:
+ buffer += line.decode('utf-8')
+ if buffer.endswith('\n\n'):
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: '):
+ if subline == 'data: [DONE]':
+ return
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ print(f"Failed to parse JSON: {subline}")
+ except KeyError:
+ print(f"Unexpected JSON structure: {data}")
+ buffer = ""
+
+ # Process any remaining data in the buffer
+ if buffer:
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: ') and subline != 'data: [DONE]':
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except (json.JSONDecodeError, KeyError):
+ pass
+
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index d88c4ed0..208ca773 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -9,13 +9,14 @@ from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-class GeminiProChat(AsyncGeneratorProvider):
- url = "https://www.chatgot.one/"
+class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://gemini-pro.chat/"
working = True
supports_message_history = True
+ default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
@@ -32,8 +33,8 @@ class GeminiProChat(AsyncGeneratorProvider):
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
- "Referer": "https://gemini-chatbot-sigma.vercel.app/",
- "Origin": "https://gemini-chatbot-sigma.vercel.app",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 00d49b82..c130d183 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -13,15 +13,14 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
- "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
- 'google/gemma-1.1-7b-it',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+ '01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'microsoft/Phi-3-mini-4k-instruct',
- '01-ai/Yi-1.5-34B-Chat'
]
model_aliases = {
@@ -34,80 +33,78 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
model: str,
messages: Messages,
stream: bool,
- **kwargs
- ) -> CreateResult:
+ **kwargs) -> CreateResult:
if (model in cls.models) :
- session = requests.Session()
- headers = {
- 'accept' : '*/*',
- 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control' : 'no-cache',
- 'origin' : 'https://huggingface.co',
- 'pragma' : 'no-cache',
- 'priority' : 'u=1, i',
- 'referer' : 'https://huggingface.co/chat/',
- 'sec-ch-ua' : '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
- 'sec-ch-ua-mobile' : '?0',
+ session = cf_reqs.Session()
+ session.headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en',
+ 'cache-control': 'no-cache',
+ 'origin': 'https://huggingface.co',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://huggingface.co/chat/',
+ 'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+ 'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest' : 'empty',
- 'sec-fetch-mode' : 'cors',
- 'sec-fetch-site' : 'same-origin',
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
}
+ print(model)
json_data = {
- 'searchEnabled' : True,
- 'activeModel' : 'CohereForAI/c4ai-command-r-plus', # doesn't matter
- 'hideEmojiOnSidebar': False,
- 'customPrompts' : {},
- 'assistants' : [],
- 'tools' : {},
- 'disableStream' : False,
- 'recentlySaved' : False,
- 'ethicsModalAccepted' : True,
- 'ethicsModalAcceptedAt' : None,
- 'shareConversationsWithModelAuthors': False,
+ 'model': model,
}
- response = cf_reqs.post('https://huggingface.co/chat/settings', headers=headers, json=json_data)
- session.cookies.update(response.cookies)
-
- response = session.post('https://huggingface.co/chat/conversation',
- headers=headers, json={'model': model})
-
+ response = session.post('https://huggingface.co/chat/conversation', json=json_data)
conversationId = response.json()['conversationId']
- response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11',
- headers=headers,
- )
- messageId = extract_id(response.json())
+ response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
+
+ data: list = (response.json())["nodes"][1]["data"]
+ keys: list[int] = data[data[0]["messages"]]
+ message_keys: dict = data[keys[0]]
+ messageId: str = data[message_keys["id"]]
settings = {
- "inputs" : format_prompt(messages),
- "id" : messageId,
- "is_retry" : False,
- "is_continue" : False,
- "web_search" : False,
-
- # TODO // add feature to enable/disable tools
- "tools": {
- "websearch" : True,
- "document_parser" : False,
- "query_calculator" : False,
- "image_generation" : False,
- "image_editing" : False,
- "fetch_url" : False,
- }
+ "inputs":format_prompt(messages),
+ "id":messageId,
+ "is_retry":False,
+ "is_continue":False,
+ "web_search":False,
+ "tools":[]
}
- payload = {
- "data": json.dumps(settings),
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en',
+ 'cache-control': 'no-cache',
+ 'origin': 'https://huggingface.co',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f'https://huggingface.co/chat/conversation/{conversationId}',
+ 'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+ }
+
+
+ files = {
+ 'data': (None, json.dumps(settings, separators=(',', ':'))),
}
- response = session.post(f"https://huggingface.co/chat/conversation/{conversationId}",
- headers=headers, data=payload, stream=True,
+ response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}',
+ cookies=session.cookies,
+ headers=headers,
+ files=files,
)
first_token = True
@@ -126,18 +123,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
else:
token = token.replace('\u0000', '')
- yield token
+ yield (token)
elif line["type"] == "finalAnswer":
- break
-
-def extract_id(response: dict) -> str:
- data = response["nodes"][1]["data"]
- uuid_pattern = re.compile(
- r"^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$"
- )
- for item in data:
- if type(item) == str and uuid_pattern.match(item):
- return item
-
- return None
+ break \ No newline at end of file
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index 6a05c26e..6634aa75 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -12,12 +12,19 @@ from ..requests.raise_for_status import raise_for_status
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
+ needs_auth = True
supports_message_history = True
+ default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
- "mistralai/Mistral-7B-Instruct-v0.2"
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
+ 'CohereForAI/c4ai-command-r-plus',
+ 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+ '01-ai/Yi-1.5-34B-Chat',
+ 'mistralai/Mistral-7B-Instruct-v0.2',
+ 'microsoft/Phi-3-mini-4k-instruct',
]
- default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@classmethod
async def create_async_generator(
@@ -74,4 +81,4 @@ def format_prompt(messages: Messages) -> str:
for idx, message in enumerate(messages)
if message["role"] == "assistant"
])
- return f"{history}<s>[INST] {question} [/INST]" \ No newline at end of file
+ return f"{history}<s>[INST] {question} [/INST]"
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index 849bcdbe..c708bcb9 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -1,7 +1,8 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession, BaseConnector
+from typing import AsyncGenerator, Optional, List, Dict, Union, Any
+from aiohttp import ClientSession, BaseConnector, ClientResponse
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -19,12 +20,13 @@ class Koala(AsyncGeneratorProvider):
cls,
model: str,
messages: Messages,
- proxy: str = None,
- connector: BaseConnector = None,
- **kwargs
- ) -> AsyncResult:
+ proxy: Optional[str] = None,
+ connector: Optional[BaseConnector] = None,
+ **kwargs: Any
+ ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
if not model:
model = "gpt-3.5-turbo"
+
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
@@ -40,13 +42,17 @@ class Koala(AsyncGeneratorProvider):
"Sec-Fetch-Site": "same-origin",
"TE": "trailers",
}
+
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
- input = messages[-1]["content"]
- system_messages = [message["content"] for message in messages if message["role"] == "system"]
+ input_text = messages[-1]["content"]
+ system_messages = " ".join(
+ message["content"] for message in messages if message["role"] == "system"
+ )
if system_messages:
- input += " ".join(system_messages)
+ input_text += f" {system_messages}"
+
data = {
- "input": input,
+ "input": input_text,
"inputHistory": [
message["content"]
for message in messages[:-1]
@@ -59,8 +65,14 @@ class Koala(AsyncGeneratorProvider):
],
"model": model,
}
+
async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
await raise_for_status(response)
- async for chunk in response.content:
- if chunk.startswith(b"data: "):
- yield json.loads(chunk[6:]) \ No newline at end of file
+ async for chunk in cls._parse_event_stream(response):
+ yield chunk
+
+ @staticmethod
+ async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ yield json.loads(chunk[6:])
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 75ecf300..af90860d 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -10,7 +10,25 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-4o": {
+ "gpt-4o-mini-free": {
+ "id": "gpt-4o-mini-free",
+ "name": "GPT-4o-Mini-Free",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 31200,
+ "tokenLimit": 7800,
+ "context": "8K",
+ },
+ "gpt-4o-mini": {
+ "id": "gpt-4o-mini",
+ "name": "GPT-4o-Mini",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
+ },
+ "gpt-4o-free": {
"context": "8K",
"id": "gpt-4o-free",
"maxLength": 31200,
@@ -19,79 +37,46 @@ models = {
"provider": "OpenAI",
"tokenLimit": 7800,
},
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
- },
- "gpt-4-turbo": {
- "id": "gpt-4-turbo-preview",
+ "gpt-4-turbo-2024-04-09": {
+ "id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
- "gpt-4": {
- "id": "gpt-4-plus",
- "name": "GPT-4-Plus",
- "maxLength": 130000,
- "tokenLimit": 31000,
- "context": "32K",
+ "gpt-4o": {
+ "context": "128K",
+ "id": "gpt-4o",
+ "maxLength": 124000,
+ "model": "ChatGPT",
+ "name": "GPT-4o",
+ "provider": "OpenAI",
+ "tokenLimit": 62000,
},
"gpt-4-0613": {
"id": "gpt-4-0613",
- "name": "GPT-4-0613",
- "maxLength": 60000,
- "tokenLimit": 15000,
- "context": "16K",
- },
- "gemini-pro": {
- "id": "gemini-pro",
- "name": "Gemini-Pro",
- "maxLength": 120000,
- "tokenLimit": 30000,
- "context": "32K",
- },
- "claude-3-opus-20240229": {
- "id": "claude-3-opus-20240229",
- "name": "Claude-3-Opus",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-sonnet-20240229": {
- "id": "claude-3-sonnet-20240229",
- "name": "Claude-3-Sonnet",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-2.1": {
- "id": "claude-2.1",
- "name": "Claude-2.1-200k",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
+ "name": "GPT-4",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
- "claude-2.0": {
- "id": "claude-2.0",
- "name": "Claude-2.0-100k",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
+ "gpt-4-turbo": {
+ "id": "gpt-4-turbo",
+ "name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
- "claude-instant-1": {
- "id": "claude-instant-1",
- "name": "Claude-instant-1",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
- }
}
+
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
@@ -99,10 +84,22 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
- default_model = "gpt-3.5-turbo"
- models = list(models)
+ default_model = "gpt-4o"
+ models = list(models.keys())
model_aliases = {
- "claude-v2": "claude-2"
+ "gpt-4o-mini": "gpt-4o-mini-free",
+ "gpt-4o": "gpt-4o-free",
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gpt-4-": "gpt-4-0613",
+ "claude-3-opus": "claude-3-opus-20240229",
+ "claude-3-opus": "claude-3-opus-20240229-aws",
+ "claude-3-opus": "claude-3-opus-20240229-gcp",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "gemini-pro": "gemini-1.5-pro-latest",
+ "gemini-pro": "gemini-1.0-pro-latest",
+ "gemini-flash": "gemini-1.5-flash-latest",
}
_auth_code = ""
_cookie_jar = None
@@ -131,7 +128,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
data = {
"conversationId": str(uuid.uuid4()),
- "model": models[cls.get_model(model)],
+ "model": models[model],
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -189,3 +186,45 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ """
+ Retrieve the internal model identifier based on the provided model name or alias.
+ """
+ if model in cls.model_aliases:
+ model = cls.model_aliases[model]
+ if model not in models:
+ raise ValueError(f"Model '{model}' is not supported.")
+ return model
+
+ @classmethod
+ def is_supported(cls, model: str) -> bool:
+ """
+ Check if the given model is supported.
+ """
+ return model in models or model in cls.model_aliases
+
+ @classmethod
+ async def initialize_auth_code(cls, session: ClientSession) -> None:
+ """
+ Initialize the auth code by making the necessary login requests.
+ """
+ async with session.post(
+ "https://liaobots.work/api/user",
+ json={"authcode": "pTIQr4FTnVRfr"},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
+ cls._cookie_jar = session.cookie_jar
+
+ @classmethod
+ async def ensure_auth_code(cls, session: ClientSession) -> None:
+ """
+ Ensure the auth code is initialized, and if not, perform the initialization.
+ """
+ if not cls._auth_code:
+ await cls.initialize_auth_code(session)
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
new file mode 100644
index 00000000..6aa407ca
--- /dev/null
+++ b/g4f/Provider/LiteIcoding.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientResponseError
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://lite.icoding.ink"
+ api_endpoint = "/api/v1/gpt/message"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o"
+ models = [
+ 'gpt-4o',
+ 'gpt-4-turbo',
+ 'claude-3',
+ 'claude-3.5',
+ 'gemini-1.5',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Authorization": "Bearer null",
+ "Connection": "keep-alive",
+ "Content-Type": "application/json;charset=utf-8",
+ "DNT": "1",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": (
+ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/126.0.0.0 Safari/537.36"
+ ),
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ }
+
+ data = {
+ "model": model,
+ "chatId": "-1",
+ "messages": [
+ {
+ "role": msg["role"],
+ "content": msg["content"],
+ "time": msg.get("time", ""),
+ "attachments": msg.get("attachments", []),
+ }
+ for msg in messages
+ ],
+ "plugins": [],
+ "systemPrompt": "",
+ "temperature": 0.5,
+ }
+
+ async with ClientSession(headers=headers) as session:
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ buffer = ""
+ full_response = ""
+ async for chunk in response.content.iter_any():
+ if chunk:
+ buffer += chunk.decode()
+ while "\n\n" in buffer:
+ part, buffer = buffer.split("\n\n", 1)
+ if part.startswith("data: "):
+ content = part[6:].strip()
+ if content and content != "[DONE]":
+ content = content.strip('"')
+ full_response += content
+
+ full_response = full_response.replace('" "', ' ')
+ yield full_response.strip()
+
+ except ClientResponseError as e:
+ raise RuntimeError(
+ f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}"
+ ) from e
+
+ except Exception as e:
+ raise RuntimeError(f"Unexpected error: {str(e)}") from e
diff --git a/g4f/Provider/MagickPenAsk.py b/g4f/Provider/MagickPenAsk.py
new file mode 100644
index 00000000..8b7473d8
--- /dev/null
+++ b/g4f/Provider/MagickPenAsk.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.magickpen.com"
+ api_endpoint = "/ask"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://magickpen.com",
+ "priority": "u=1, i",
+ "referer": "https://magickpen.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ 'X-API-Secret': 'WCASR6ZQJYM85DVDX7'
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "query": format_prompt(messages),
+ "plan": "Pay as you go"
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/MagickPenChat.py b/g4f/Provider/MagickPenChat.py
new file mode 100644
index 00000000..6c30028a
--- /dev/null
+++ b/g4f/Provider/MagickPenChat.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.magickpen.com"
+ api_endpoint = "/chat/free"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o-mini"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.9",
+ "access-control-allow-origin": "*",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://magickpen.com",
+ "priority": "u=1, i",
+ "referer": "https://magickpen.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "history": [{"role": "user", "content": format_prompt(messages)}]
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/Marsyoo.py b/g4f/Provider/Marsyoo.py
new file mode 100644
index 00000000..1c5fa9fd
--- /dev/null
+++ b/g4f/Provider/Marsyoo.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession, ClientResponseError
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Marsyoo(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aiagent.marsyoo.com"
+ api_endpoint = "/api/chat-messages"
+ working = True
+ supports_gpt_4 = True
+ default_model = 'gpt-4o'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Connection": "keep-alive",
+ "DNT": "1",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/chat",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ "authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI0MWNkOTE3MS1mNTg1LTRjMTktOTY0Ni01NzgxMTBjYWViNTciLCJzdWIiOiJXZWIgQVBJIFBhc3Nwb3J0IiwiYXBwX2lkIjoiNDFjZDkxNzEtZjU4NS00YzE5LTk2NDYtNTc4MTEwY2FlYjU3IiwiYXBwX2NvZGUiOiJMakhzdWJqNjhMTXZCT0JyIiwiZW5kX3VzZXJfaWQiOiI4YjE5YjY2Mi05M2E1LTRhYTktOGNjNS03MDhmNWE0YmQxNjEifQ.pOzdQ4wTrQjjRlEv1XY9TZitkW5KW1K-wbcUJAoBJ5I",
+ "content-type": "application/json",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": "Linux",
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "response_mode": "streaming",
+ "query": prompt,
+ "inputs": {},
+ }
+ try:
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line:
+ try:
+ json_data = json.loads(line.decode('utf-8').strip().lstrip('data: '))
+ if json_data['event'] == 'message':
+ yield json_data['answer']
+ elif json_data['event'] == 'message_end':
+ return
+ except json.JSONDecodeError:
+ continue
+ except ClientResponseError as e:
+ yield f"Error: HTTP {e.status}: {e.message}"
diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/MetaAI.py
index caed7778..f1ef348a 100644
--- a/g4f/Provider/MetaAI.py
+++ b/g4f/Provider/MetaAI.py
@@ -12,7 +12,7 @@ from ..typing import AsyncResult, Messages, Cookies
from ..requests import raise_for_status, DEFAULT_HEADERS
from ..image import ImageResponse, ImagePreview
from ..errors import ResponseError
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_connector, format_cookies
class Sources():
@@ -25,10 +25,11 @@ class Sources():
class AbraGeoBlockedError(Exception):
pass
-class MetaAI(AsyncGeneratorProvider):
+class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Meta AI"
url = "https://www.meta.ai"
working = True
+ default_model = ''
def __init__(self, proxy: str = None, connector: BaseConnector = None):
self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)
@@ -234,4 +235,4 @@ def generate_offline_threading_id() -> str:
# Combine timestamp and random value
threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
- return str(threading_id) \ No newline at end of file
+ return str(threading_id)
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 4a2cc9e5..7f4587e1 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -11,25 +11,21 @@ API_URL = "https://www.perplexity.ai/socket.io/"
WS_URL = "wss://www.perplexity.ai/socket.io/"
class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://labs.perplexity.ai"
+ url = "https://labs.perplexity.ai"
working = True
default_model = "mixtral-8x7b-instruct"
models = [
- "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat",
- "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct",
- "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it",
- "related"
+ "llama-3.1-sonar-large-128k-online",
+ "llama-3.1-sonar-small-128k-online",
+ "llama-3.1-sonar-large-128k-chat",
+ "llama-3.1-sonar-small-128k-chat",
+ "llama-3.1-8b-instruct",
+ "llama-3.1-70b-instruct",
+ "gemma-2-9b-it",
+ "gemma-2-27b-it",
+ "nemotron-4-340b-instruct",
+ "mixtral-8x7b-instruct"
]
- model_aliases = {
- "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
- "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
- "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
- "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
- "llava-v1.5-7b": "llava-v1.5-7b-wrapper",
- "databricks/dbrx-instruct": "dbrx-instruct",
- "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
- "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
- }
@classmethod
async def create_async_generator(
@@ -67,7 +63,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
data=post_data
) as response:
await raise_for_status(response)
- assert await response.text() == "OK"
+ assert await response.text() == "OK"
async with session.ws_connect(f"{WS_URL}?EIO=4&transport=websocket&sid={sid}", autoping=False) as ws:
await ws.send_str("2probe")
assert(await ws.receive_str() == "3probe")
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 5a1e9f0e..e03830f4 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -11,6 +11,7 @@ class Pi(AbstractProvider):
working = True
supports_stream = True
_session = None
+ default_model = "pi"
@classmethod
def create_completion(
@@ -65,4 +66,4 @@ class Pi(AbstractProvider):
yield json.loads(line.split(b'data: ')[1])
elif line.startswith(b'data: {"title":'):
yield json.loads(line.split(b'data: ')[1])
- \ No newline at end of file
+
diff --git a/g4f/Provider/ReplicateImage.py b/g4f/Provider/ReplicateHome.py
index cc3943d7..e6c8d2d3 100644
--- a/g4f/Provider/ReplicateImage.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -1,32 +1,67 @@
from __future__ import annotations
-
+from typing import Generator, Optional, Dict, Any, Union, List
import random
import asyncio
+import base64
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
-from ..image import ImageResponse
from ..errors import ResponseError
+from ..image import ImageResponse
-class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
+class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
- default_model = 'stability-ai/sdxl'
- default_versions = [
- "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
- "2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2"
+ default_model = 'stability-ai/stable-diffusion-3'
+ models = [
+ # Models for image generation
+ 'stability-ai/stable-diffusion-3',
+ 'bytedance/sdxl-lightning-4step',
+ 'playgroundai/playground-v2.5-1024px-aesthetic',
+
+        # Models for text generation
+ 'meta/meta-llama-3-70b-instruct',
+ 'mistralai/mixtral-8x7b-instruct-v0.1',
+ 'google-deepmind/gemma-2b-it',
]
- image_models = [default_model]
+
+ versions = {
+ # Model versions for generating images
+ 'stability-ai/stable-diffusion-3': [
+ "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
+ ],
+ 'bytedance/sdxl-lightning-4step': [
+ "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
+ ],
+ 'playgroundai/playground-v2.5-1024px-aesthetic': [
+ "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
+ ],
+
+
+ # Model versions for text generation
+ 'meta/meta-llama-3-70b-instruct': [
+ "dp-cf04fe09351e25db628e8b6181276547"
+ ],
+ 'mistralai/mixtral-8x7b-instruct-v0.1': [
+ "dp-89e00f489d498885048e94f9809fbc76"
+ ],
+ 'google-deepmind/gemma-2b-it': [
+ "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
+ ]
+ }
+
+ image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
+ text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- **kwargs
- ) -> AsyncResult:
+ **kwargs: Any
+ ) -> Generator[Union[str, ImageResponse], None, None]:
yield await cls.create_async(messages[-1]["content"], model, **kwargs)
@classmethod
@@ -34,13 +69,13 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
cls,
prompt: str,
model: str,
- api_key: str = None,
- proxy: str = None,
+ api_key: Optional[str] = None,
+ proxy: Optional[str] = None,
timeout: int = 180,
- version: str = None,
- extra_data: dict = {},
- **kwargs
- ) -> ImageResponse:
+ version: Optional[str] = None,
+ extra_data: Dict[str, Any] = {},
+ **kwargs: Any
+ ) -> Union[str, ImageResponse]:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
@@ -55,10 +90,12 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
+
if version is None:
- version = random.choice(cls.default_versions)
+ version = random.choice(cls.versions.get(model, []))
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
+
async with StreamSession(
proxies={"all": proxy},
headers=headers,
@@ -81,6 +118,7 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
result = await response.json()
if "id" not in result:
raise ResponseError(f"Invalid response: {result}")
+
while True:
if api_key is None:
url = f"https://homepage.replicate.com/api/poll?id={result['id']}"
@@ -92,7 +130,13 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
if "status" not in result:
raise ResponseError(f"Invalid response: {result}")
if result["status"] == "succeeded":
- images = result['output']
- images = images[0] if len(images) == 1 else images
- return ImageResponse(images, prompt)
- await asyncio.sleep(0.5) \ No newline at end of file
+ output = result['output']
+ if model in cls.text_models:
+ return ''.join(output) if isinstance(output, list) else output
+ elif model in cls.image_models:
+ images: List[Any] = output
+ images = images[0] if len(images) == 1 else images
+ return ImageResponse(images, prompt)
+ elif result["status"] == "failed":
+ raise ResponseError(f"Prediction failed: {result}")
+ await asyncio.sleep(0.5)
diff --git a/g4f/Provider/Rocks.py b/g4f/Provider/Rocks.py
new file mode 100644
index 00000000..8465a6c0
--- /dev/null
+++ b/g4f/Provider/Rocks.py
@@ -0,0 +1,56 @@
+import json
+from aiohttp import ClientSession
+
+from ..typing import Messages, AsyncResult
+from .base_provider import AsyncGeneratorProvider
+
+class Rocks(AsyncGeneratorProvider):
+ url = "https://api.discord.rocks"
+ api_endpoint = "/chat/completions"
+ supports_message_history = False
+ supports_gpt_35_turbo = True
+ working = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ payload = {"messages":messages,"model":model,"max_tokens":4096,"temperature":1,"top_p":1,"stream":True}
+
+ headers = {
+ "Accept": "application/json",
+ "Accept-Encoding": "gzip, deflate, br, zstd",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/en",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+
+ async with ClientSession() as session:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ json=payload,
+ proxy=proxy,
+ headers=headers
+ ) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ try:
+ line = json.loads(line[6:])
+ except:
+ continue
+ chunk = line["choices"][0]["delta"].get("content")
+ if chunk:
+ yield chunk
+ elif line.startswith(b"\n"):
+ pass
+ else:
+ raise Exception(f"Unexpected line: {line}")
diff --git a/g4f/Provider/TeachAnything.py b/g4f/Provider/TeachAnything.py
new file mode 100644
index 00000000..908dd56e
--- /dev/null
+++ b/g4f/Provider/TeachAnything.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from typing import Any, Dict
+
+from aiohttp import ClientSession, ClientTimeout
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.teach-anything.com"
+ api_endpoint = "/api/generate"
+ working = True
+ default_model = "llama-3-70b-instruct"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str | None = None,
+ **kwargs: Any
+ ) -> AsyncResult:
+ headers = cls._get_headers()
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {"prompt": prompt}
+
+ timeout = ClientTimeout(total=60)
+
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ json=data,
+ proxy=proxy,
+ timeout=timeout
+ ) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
+
+ @staticmethod
+ def _get_headers() -> Dict[str, str]:
+ return {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://www.teach-anything.com",
+ "priority": "u=1, i",
+ "referer": "https://www.teach-anything.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 162d6adb..cdf5f430 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -24,27 +24,27 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
+ "gpt-4o-mini",
"gpt-4o",
- "gpt-4",
"gpt-4-turbo",
- "claude-instant",
- "claude-2",
+ "gpt-4",
+ "claude-3.5-sonnet",
"claude-3-opus",
"claude-3-sonnet",
"claude-3-haiku",
- "gemini-pro",
+ "claude-2",
+ "llama-3.1-70b",
+ "llama-3",
+ "gemini-1-5-flash",
"gemini-1-5-pro",
+ "gemini-1-0-pro",
"databricks-dbrx-instruct",
"command-r",
"command-r-plus",
- "llama3",
- "zephyr",
+ "dolphin-2.5",
default_vision_model,
*image_models
]
- model_aliases = {
- "claude-v2": "claude-2",
- }
_cookies = None
_cookies_used = 0
_telemetry_ids = []
@@ -220,4 +220,4 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
'stytch_session_jwt': session["session_jwt"],
'ydc_stytch_session': session["session_token"],
'ydc_stytch_session_jwt': session["session_jwt"],
- } \ No newline at end of file
+ }
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index dab6f5d5..fa1dcfe5 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -10,47 +10,53 @@ from .not_working import *
from .selenium import *
from .needs_auth import *
-from .Aichatos import Aichatos
+from .AI365VIP import AI365VIP
+from .Allyfy import Allyfy
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
-from .ChatForAi import ChatForAi
+from .ChatGot import ChatGot
+from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
-from .ChatgptAi import ChatgptAi
from .ChatgptFree import ChatgptFree
-from .ChatgptNext import ChatgptNext
-from .ChatgptX import ChatgptX
-from .Cnote import Cnote
from .Cohere import Cohere
+from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
-from .Feedough import Feedough
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
-from .GigaChat import GigaChat
+from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
+from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
+from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
+from .LiteIcoding import LiteIcoding
from .Llama import Llama
from .Local import Local
+from .MagickPenAsk import MagickPenAsk
+from .MagickPenChat import MagickPenChat
+from .Marsyoo import Marsyoo
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
+from .Reka import Reka
from .Replicate import Replicate
-from .ReplicateImage import ReplicateImage
+from .ReplicateHome import ReplicateHome
+from .Rocks import Rocks
+from .TeachAnything import TeachAnything
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
-from .Reka import Reka
import sys
diff --git a/g4f/Provider/deprecated/Yqcloud.py b/g4f/Provider/deprecated/Yqcloud.py
index 2ec6931a..227f8995 100644
--- a/g4f/Provider/deprecated/Yqcloud.py
+++ b/g4f/Provider/deprecated/Yqcloud.py
@@ -9,7 +9,7 @@ from ..base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
- working = True
+ working = False
supports_gpt_35_turbo = True
@staticmethod
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index 9da6bad8..a0740c47 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -16,6 +16,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
needs_auth = True
supports_message_history = True
supports_system_message = True
+ default_model = ""
@classmethod
async def create_async_generator(
@@ -120,4 +121,4 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
if api_key is not None else {}
),
**({} if headers is None else headers)
- } \ No newline at end of file
+ }
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9321c24a..82462040 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -55,16 +55,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
- supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
default_model = None
default_vision_model = "gpt-4o"
- models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
+ models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
model_aliases = {
- "text-davinci-002-render-sha": "gpt-3.5-turbo",
- "": "gpt-3.5-turbo",
"gpt-4-turbo-preview": "gpt-4",
"dall-e": "gpt-4",
}
diff --git a/g4f/Provider/Aichatos.py b/g4f/Provider/not_working/Aichatos.py
index 1d4747d7..d651abf3 100644
--- a/g4f/Provider/Aichatos.py
+++ b/g4f/Provider/not_working/Aichatos.py
@@ -2,16 +2,16 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
import random
class Aichatos(AsyncGeneratorProvider):
url = "https://chat10.aichatos.xyz"
api = "https://api.binjie.fun"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/not_working/ChatForAi.py
index 1c693955..b7f13c3d 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/not_working/ChatForAi.py
@@ -4,14 +4,14 @@ import time
import hashlib
import uuid
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..errors import RateLimitError
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatforai.store"
- working = True
+ working = False
default_model = "gpt-3.5-turbo"
supports_message_history = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/not_working/ChatgptAi.py
index d15140d7..5c694549 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/not_working/ChatgptAi.py
@@ -3,14 +3,14 @@ from __future__ import annotations
import re, html, json, string, random
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from ..errors import RateLimitError
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import Messages, AsyncResult
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
class ChatgptAi(AsyncGeneratorProvider):
url = "https://chatgpt.ai"
- working = True
+ working = False
supports_message_history = True
supports_system_message = True,
supports_gpt_4 = True,
@@ -85,4 +85,4 @@ class ChatgptAi(AsyncGeneratorProvider):
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
- break \ No newline at end of file
+ break
diff --git a/g4f/Provider/ChatgptNext.py b/g4f/Provider/not_working/ChatgptNext.py
index 2d6f7487..1c15dd67 100644
--- a/g4f/Provider/ChatgptNext.py
+++ b/g4f/Provider/not_working/ChatgptNext.py
@@ -3,12 +3,12 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class ChatgptNext(AsyncGeneratorProvider):
url = "https://www.chatgpt-free.cc"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_message_history = True
supports_system_message = True
@@ -63,4 +63,4 @@ class ChatgptNext(AsyncGeneratorProvider):
if chunk.startswith(b"data: "):
content = json.loads(chunk[6:])["choices"][0]["delta"].get("content")
if content:
- yield content \ No newline at end of file
+ yield content
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/not_working/ChatgptX.py
index 9be0d89b..760333d9 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/not_working/ChatgptX.py
@@ -4,15 +4,15 @@ import re
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-from ..errors import RateLimitError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+from ...errors import RateLimitError
class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de"
supports_gpt_35_turbo = True
- working = True
+ working = False
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Cnote.py b/g4f/Provider/not_working/Cnote.py
index 7d4018bb..48626982 100644
--- a/g4f/Provider/Cnote.py
+++ b/g4f/Provider/not_working/Cnote.py
@@ -3,15 +3,15 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class Cnote(AsyncGeneratorProvider):
url = "https://f1.cnote.top"
api_url = "https://p1api.xjai.pro/freeapi/chat-process"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
diff --git a/g4f/Provider/not_working/Feedough.py b/g4f/Provider/not_working/Feedough.py
new file mode 100644
index 00000000..24c33d14
--- /dev/null
+++ b/g4f/Provider/not_working/Feedough.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, TCPConnector
+from urllib.parse import urlencode
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.feedough.com"
+ api_endpoint = "/wp-admin/admin-ajax.php"
+ working = False
+ default_model = ''
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/ai-prompt-generator/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+
+ connector = TCPConnector(ssl=False)
+
+ async with ClientSession(headers=headers, connector=connector) as session:
+ data = {
+ "action": "aixg_generate",
+ "prompt": format_prompt(messages),
+ "aixg_generate_nonce": "110c021031"
+ }
+
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ data=urlencode(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+ try:
+ response_json = json.loads(response_text)
+ if response_json.get("success") and "data" in response_json:
+ message = response_json["data"].get("message", "")
+ yield message
+ except json.JSONDecodeError:
+ yield response_text
+ except Exception as e:
+ print(f"An error occurred: {e}")
+
+ @classmethod
+ async def run(cls, *args, **kwargs):
+ async for item in cls.create_async_generator(*args, **kwargs):
+ yield item
+
+ tasks = asyncio.all_tasks()
+ for task in tasks:
+ if not task.done():
+ await task
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index 4778c968..c4c9a5a1 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -1,14 +1,21 @@
from .AItianhu import AItianhu
+from .Aichatos import Aichatos
from .Bestim import Bestim
from .ChatBase import ChatBase
+from .ChatForAi import ChatForAi
+from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDemoAi import ChatgptDemoAi
from .ChatgptLogin import ChatgptLogin
+from .ChatgptNext import ChatgptNext
+from .ChatgptX import ChatgptX
from .Chatxyz import Chatxyz
+from .Cnote import Cnote
+from .Feedough import Feedough
from .Gpt6 import Gpt6
from .GptChatly import GptChatly
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
-from .OnlineGpt import OnlineGpt \ No newline at end of file
+from .OnlineGpt import OnlineGpt
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index a2f883d9..1a660062 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -229,8 +229,8 @@
<option value="">Model: Default</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
- <option value="llama2-70b">llama2-70b</option>
- <option value="llama3-70b-instruct">llama3-70b-instruct</option>
+ <option value="llama-3-70b-chat">llama-3-70b-chat</option>
+ <option value="llama-3.1-70b">llama-3.1-70b</option>
<option value="gemini-pro">gemini-pro</option>
<option value="">----</option>
</select>
diff --git a/g4f/models.py b/g4f/models.py
index 78e1d74e..b6c5fd14 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -4,32 +4,45 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
- Aichatos,
+ AI365VIP,
+ Allyfy,
Bing,
Blackbox,
- ChatgptAi,
- ChatgptNext,
- Cnote,
+ ChatGot,
+ Chatgpt4o,
+ Chatgpt4Online,
+ ChatgptFree,
+ DDG,
DeepInfra,
- Feedough,
+ DeepInfraImage,
+ FreeChatgpt,
FreeGpt,
+ FreeNetfly,
Gemini,
GeminiPro,
+ GeminiProChat,
GigaChat,
HuggingChat,
HuggingFace,
Koala,
Liaobots,
+ LiteIcoding,
+ MagickPenAsk,
+ MagickPenChat,
+ Marsyoo,
MetaAI,
OpenaiChat,
PerplexityLabs,
- Replicate,
Pi,
- Vercel,
+ Pizzagpt,
+ Reka,
+ Replicate,
+ ReplicateHome,
+ TeachAnything,
You,
- Reka
)
+
@dataclass(unsafe_hash=True)
class Model:
"""
@@ -54,9 +67,15 @@ default = Model(
base_provider = "",
best_provider = IterListProvider([
Bing,
- ChatgptAi,
You,
OpenaiChat,
+ FreeChatgpt,
+ AI365VIP,
+ Chatgpt4o,
+ DDG,
+ ChatgptFree,
+ Koala,
+ Pizzagpt,
])
)
@@ -67,206 +86,394 @@ gpt_35_long = Model(
best_provider = IterListProvider([
FreeGpt,
You,
- ChatgptNext,
- OpenaiChat,
Koala,
+ ChatgptFree,
+ FreeChatgpt,
+ DDG,
+ AI365VIP,
+ Pizzagpt,
+ Allyfy,
])
)
-# GPT-3.5 / GPT-4
+############
+### Text ###
+############
+
+### OpenAI ###
+### GPT-3.5 / GPT-4 ###
+# gpt-3.5
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
FreeGpt,
You,
- ChatgptNext,
Koala,
- OpenaiChat,
- Aichatos,
- Cnote,
- Feedough,
+ ChatgptFree,
+ FreeChatgpt,
+ AI365VIP,
+ Pizzagpt,
+ Allyfy,
])
)
+# gpt-4
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = IterListProvider([
- Bing, Liaobots,
+ Bing, Chatgpt4Online
])
)
+gpt_4_turbo = Model(
+ name = 'gpt-4-turbo',
+ base_provider = 'openai',
+ best_provider = IterListProvider([
+ Bing, Liaobots, LiteIcoding
+ ])
+)
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'openai',
best_provider = IterListProvider([
- You, Liaobots
+ You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo, LiteIcoding, MagickPenAsk,
])
)
-gpt_4_turbo = Model(
- name = 'gpt-4-turbo',
+gpt_4o_mini = Model(
+ name = 'gpt-4o-mini',
base_provider = 'openai',
- best_provider = Bing
+ best_provider = IterListProvider([
+ DDG, Liaobots, OpenaiChat, You, FreeNetfly, MagickPenChat,
+ ])
)
+
+### GigaChat ###
gigachat = Model(
name = 'GigaChat:latest',
base_provider = 'gigachat',
best_provider = GigaChat
)
+
+### Meta ###
meta = Model(
name = "meta",
base_provider = "meta",
best_provider = MetaAI
)
-llama3_8b_instruct = Model(
+llama_3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
-llama3_70b_instruct = Model(
+llama_3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
base_provider = "meta",
best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
)
-codellama_34b_instruct = Model(
- name = "codellama/CodeLlama-34b-Instruct-hf",
+llama_3_70b_instruct_replicate = Model(
+ name = "meta/meta-llama-3-70b-instruct",
base_provider = "meta",
- best_provider = HuggingChat
+ best_provider = IterListProvider([ReplicateHome, TeachAnything])
)
-codellama_70b_instruct = Model(
- name = "codellama/CodeLlama-70b-Instruct-hf",
+llama_3_70b_chat_hf = Model(
+ name = "meta-llama/Llama-3-70b-chat-hf",
base_provider = "meta",
- best_provider = IterListProvider([DeepInfra, PerplexityLabs])
+ best_provider = IterListProvider([DDG])
)
-# Mistral
-mixtral_8x7b = Model(
- name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
- base_provider = "huggingface",
- best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
+llama_3_1_70b_instruct = Model(
+ name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ base_provider = "meta",
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
+llama_3_1_405b_instruct_FP8 = Model(
+ name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+ base_provider = "meta",
+ best_provider = IterListProvider([HuggingChat, HuggingFace])
)
-mistral_7b = Model(
- name = "mistralai/Mistral-7B-Instruct-v0.1",
+
+### Mistral ###
+mixtral_8x7b = Model(
+ name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = IterListProvider([HuggingChat, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG, ReplicateHome])
)
mistral_7b_v02 = Model(
name = "mistralai/Mistral-7B-Instruct-v0.2",
base_provider = "huggingface",
- best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat])
+)
+
+
+### NousResearch ###
+Nous_Hermes_2_Mixtral_8x7B_DPO = Model(
+ name = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+ base_provider = "NousResearch",
+ best_provider = IterListProvider([HuggingFace, HuggingChat])
)
-# Bard
+
+### 01-ai ###
+Yi_1_5_34B_chat = Model(
+ name = "01-ai/Yi-1.5-34B-Chat",
+ base_provider = "01-ai",
+ best_provider = IterListProvider([HuggingFace, HuggingChat])
+)
+
+
+### Microsoft ###
+Phi_3_mini_4k_instruct = Model(
+ name = "microsoft/Phi-3-mini-4k-instruct",
+ base_provider = "Microsoft",
+ best_provider = IterListProvider([HuggingFace, HuggingChat])
+)
+
+
+### Google ###
+# gemini
gemini = Model(
name = 'gemini',
- base_provider = 'google',
+ base_provider = 'Google',
best_provider = Gemini
)
-claude_v2 = Model(
- name = 'claude-v2',
- base_provider = 'anthropic',
- best_provider = IterListProvider([Vercel])
+gemini_pro = Model(
+ name = 'gemini-pro',
+ base_provider = 'Google',
+ best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots, LiteIcoding])
+)
+
+gemini_flash = Model(
+ name = 'gemini-flash',
+ base_provider = 'Google',
+ best_provider = IterListProvider([Liaobots])
+)
+
+gemini_1_5 = Model(
+ name = 'gemini-1.5',
+ base_provider = 'Google',
+ best_provider = IterListProvider([LiteIcoding])
+)
+
+# gemma
+gemma_2b_it = Model(
+ name = 'gemma-2b-it',
+ base_provider = 'Google',
+ best_provider = IterListProvider([ReplicateHome])
+)
+
+gemma_2_9b_it = Model(
+ name = 'gemma-2-9b-it',
+ base_provider = 'Google',
+ best_provider = IterListProvider([PerplexityLabs])
+)
+
+gemma_2_27b_it = Model(
+ name = 'gemma-2-27b-it',
+ base_provider = 'Google',
+ best_provider = IterListProvider([PerplexityLabs])
+)
+
+
+### Anthropic ###
+claude_2 = Model(
+ name = 'claude-2',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([You])
+)
+
+claude_2_0 = Model(
+ name = 'claude-2.0',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([Liaobots])
+)
+
+claude_2_1 = Model(
+ name = 'claude-2.1',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([Liaobots])
)
claude_3_opus = Model(
name = 'claude-3-opus',
- base_provider = 'anthropic',
- best_provider = You
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([You, Liaobots])
)
claude_3_sonnet = Model(
name = 'claude-3-sonnet',
- base_provider = 'anthropic',
- best_provider = You
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([You, Liaobots])
+)
+
+claude_3_5_sonnet = Model(
+ name = 'claude-3-5-sonnet',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([Liaobots])
)
claude_3_haiku = Model(
name = 'claude-3-haiku',
- base_provider = 'anthropic',
- best_provider = None
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([DDG, AI365VIP, Liaobots])
)
-gpt_35_turbo_16k = Model(
- name = 'gpt-3.5-turbo-16k',
- base_provider = 'openai',
- best_provider = gpt_35_long.best_provider
+claude_3 = Model(
+ name = 'claude-3',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([LiteIcoding])
)
-gpt_35_turbo_16k_0613 = Model(
- name = 'gpt-3.5-turbo-16k-0613',
- base_provider = 'openai',
- best_provider = gpt_35_long.best_provider
+claude_3_5 = Model(
+ name = 'claude-3.5',
+ base_provider = 'Anthropic',
+ best_provider = IterListProvider([LiteIcoding])
)
-gpt_35_turbo_0613 = Model(
- name = 'gpt-3.5-turbo-0613',
- base_provider = 'openai',
- best_provider = gpt_35_turbo.best_provider
+
+### Reka AI ###
+reka_core = Model(
+ name = 'reka-core',
+ base_provider = 'Reka AI',
+ best_provider = Reka
)
-gpt_4_0613 = Model(
- name = 'gpt-4-0613',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
+
+### NVIDIA ###
+nemotron_4_340b_instruct = Model(
+ name = 'nemotron-4-340b-instruct',
+ base_provider = 'NVIDIA',
+ best_provider = IterListProvider([PerplexityLabs])
)
-gpt_4_32k = Model(
- name = 'gpt-4-32k',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
+
+### Blackbox ###
+blackbox = Model(
+ name = 'blackbox',
+ base_provider = 'Blackbox',
+ best_provider = Blackbox
)
-gpt_4_32k_0613 = Model(
- name = 'gpt-4-32k-0613',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
+
+### Databricks ###
+dbrx_instruct = Model(
+ name = 'databricks/dbrx-instruct',
+ base_provider = 'Databricks',
+ best_provider = IterListProvider([DeepInfra])
)
-gemini_pro = Model(
- name = 'gemini-pro',
- base_provider = 'google',
- best_provider = IterListProvider([GeminiPro, You])
+
+### CohereForAI ###
+command_r_plus = Model(
+ name = 'CohereForAI/c4ai-command-r-plus',
+ base_provider = 'CohereForAI',
+ best_provider = IterListProvider([HuggingChat])
+)
+
+
+### iFlytek ###
+SparkDesk_v1_1 = Model(
+ name = 'SparkDesk-v1.1',
+ base_provider = 'iFlytek',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### DeepSeek ###
+deepseek_coder = Model(
+ name = 'deepseek-coder',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([FreeChatgpt])
)
+deepseek_chat = Model(
+ name = 'deepseek-chat',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### Qwen ###
+Qwen2_7B_instruct = Model(
+ name = 'Qwen2-7B-Instruct',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### Zhipu AI ###
+glm4_9B_chat = Model(
+ name = 'glm4-9B-chat',
+ base_provider = 'Zhipu AI',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+chatglm3_6B = Model(
+ name = 'chatglm3-6B',
+ base_provider = 'Zhipu AI',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### 01-ai ###
+Yi_1_5_9B_chat = Model(
+ name = 'Yi-1.5-9B-Chat',
+ base_provider = '01-ai',
+ best_provider = IterListProvider([FreeChatgpt])
+)
+
+
+### Other ###
pi = Model(
name = 'pi',
base_provider = 'inflection',
best_provider = Pi
)
-dbrx_instruct = Model(
- name = 'databricks/dbrx-instruct',
- base_provider = 'mistral',
- best_provider = IterListProvider([DeepInfra, PerplexityLabs])
+
+#############
+### Image ###
+#############
+
+### Stability AI ###
+sdxl = Model(
+ name = 'stability-ai/sdxl',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([DeepInfraImage])
+
)
-command_r_plus = Model(
- name = 'CohereForAI/c4ai-command-r-plus',
- base_provider = 'mistral',
- best_provider = IterListProvider([HuggingChat])
+stable_diffusion_3 = Model(
+ name = 'stability-ai/stable-diffusion-3',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([ReplicateHome])
+
)
-blackbox = Model(
- name = 'blackbox',
- base_provider = 'blackbox',
- best_provider = Blackbox
+sdxl_lightning_4step = Model(
+ name = 'bytedance/sdxl-lightning-4step',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([ReplicateHome])
+
)
-reka_core = Model(
- name = 'reka-core',
- base_provider = 'Reka AI',
- best_provider = Reka
+playground_v2_5_1024px_aesthetic = Model(
+ name = 'playgroundai/playground-v2.5-1024px-aesthetic',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([ReplicateHome])
+
)
class ModelUtils:
@@ -277,54 +484,128 @@ class ModelUtils:
convert (dict[str, Model]): Dictionary mapping model string identifiers to Model instances.
"""
convert: dict[str, Model] = {
+
+ ############
+ ### Text ###
+ ############
+
+ ### OpenAI ###
+ ### GPT-3.5 / GPT-4 ###
# gpt-3.5
- 'gpt-3.5-turbo' : gpt_35_turbo,
- 'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
- 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
- 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+ 'gpt-3.5-turbo': gpt_35_turbo,
'gpt-3.5-long': gpt_35_long,
# gpt-4
'gpt-4o' : gpt_4o,
+ 'gpt-4o-mini' : gpt_4o_mini,
'gpt-4' : gpt_4,
- 'gpt-4-0613' : gpt_4_0613,
- 'gpt-4-32k' : gpt_4_32k,
- 'gpt-4-32k-0613' : gpt_4_32k_0613,
'gpt-4-turbo' : gpt_4_turbo,
-
+
+ ### Meta ###
"meta-ai": meta,
- 'llama3-8b': llama3_8b_instruct, # alias
- 'llama3-70b': llama3_70b_instruct, # alias
- 'llama3-8b-instruct' : llama3_8b_instruct,
- 'llama3-70b-instruct': llama3_70b_instruct,
-
- 'codellama-34b-instruct': codellama_34b_instruct,
- 'codellama-70b-instruct': codellama_70b_instruct,
-
- # Mistral Opensource
+
+ 'llama-3-8b-instruct': llama_3_8b_instruct,
+ 'llama-3-70b-instruct': llama_3_70b_instruct,
+ 'llama-3-70b-chat': llama_3_70b_chat_hf,
+
+
+ 'llama-3.1-70b': llama_3_1_70b_instruct,
+ 'llama-3.1-405b': llama_3_1_405b_instruct_FP8,
+ 'llama-3.1-70b-instruct': llama_3_1_70b_instruct,
+ 'llama-3.1-405b-instruct': llama_3_1_405b_instruct_FP8,
+
+ ### Mistral (Opensource) ###
'mixtral-8x7b': mixtral_8x7b,
- 'mistral-7b': mistral_7b,
'mistral-7b-v02': mistral_7b_v02,
-
- # google gemini
+
+ ### NousResearch ###
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
+
+ ### 01-ai ###
+ 'Yi-1.5-34b-chat': Yi_1_5_34B_chat,
+
+ ### Microsoft ###
+ 'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
+
+ ### Google ###
+ # gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
-
- # anthropic
- 'claude-v2': claude_v2,
+    'gemini-1.5': gemini_1_5,
+ 'gemini-flash': gemini_flash,
+
+ # gemma
+ 'gemma-2b': gemma_2b_it,
+ 'gemma-2-9b': gemma_2_9b_it,
+ 'gemma-2-27b': gemma_2_27b_it,
+
+ ### Anthropic ###
+ 'claude-2': claude_2,
+ 'claude-2.0': claude_2_0,
+ 'claude-2.1': claude_2_1,
+
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
+ 'claude-3-5-sonnet': claude_3_5_sonnet,
'claude-3-haiku': claude_3_haiku,
+
+    'claude-3': claude_3,
+    'claude-3.5': claude_3_5,
+
+
- # reka core
+ ### Reka AI ###
'reka': reka_core,
- # other
+ ### NVIDIA ###
+ 'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
+
+ ### Blackbox ###
'blackbox': blackbox,
+
+ ### CohereForAI ###
'command-r+': command_r_plus,
+
+ ### Databricks ###
'dbrx-instruct': dbrx_instruct,
+
+ ### GigaChat ###
'gigachat': gigachat,
- 'pi': pi
+
+ ### iFlytek ###
+ 'SparkDesk-v1.1': SparkDesk_v1_1,
+
+ ### DeepSeek ###
+ 'deepseek-coder': deepseek_coder,
+ 'deepseek-chat': deepseek_chat,
+
+ ### Qwen ###
+ 'Qwen2-7b-instruct': Qwen2_7B_instruct,
+
+ ### Zhipu AI ###
+ 'glm4-9b-chat': glm4_9B_chat,
+ 'chatglm3-6b': chatglm3_6B,
+
+ ### 01-ai ###
+ 'Yi-1.5-9b-chat': Yi_1_5_9B_chat,
+
+ # Other
+ 'pi': pi,
+
+ #############
+ ### Image ###
+ #############
+
+ ### Stability AI ###
+ 'sdxl': sdxl,
+ 'stable-diffusion-3': stable_diffusion_3,
+
+ ### ByteDance ###
+ 'sdxl-lightning': sdxl_lightning_4step,
+
+ ### Playground ###
+ 'playground-v2.5': playground_v2_5_1024px_aesthetic,
+
}
_all_models = list(ModelUtils.convert.keys())