Diffstat:
-rw-r--r--  README.md                                  | 332
-rw-r--r--  g4f/Provider/AI365VIP.py                   |  67
-rw-r--r--  g4f/Provider/Blackbox.py                   |  37
-rw-r--r--  g4f/Provider/Chatgpt4o.py                  |  83
-rw-r--r--  g4f/Provider/ChatgptFree.py                |   2
-rw-r--r--  g4f/Provider/DDG.py                        | 117
-rw-r--r--  g4f/Provider/Feedough.py                   |  82
-rw-r--r--  g4f/Provider/FreeChatgpt.py                |  97
-rw-r--r--  g4f/Provider/FreeGpt.py                    |  66
-rw-r--r--  g4f/Provider/GeminiProChat.py              |   9
-rw-r--r--  g4f/Provider/HuggingChat.py                |   6
-rw-r--r--  g4f/Provider/HuggingFace.py                |  12
-rw-r--r--  g4f/Provider/Koala.py                      |  36
-rw-r--r--  g4f/Provider/Liaobots.py                   | 168
-rw-r--r--  g4f/Provider/MetaAI.py                     |   7
-rw-r--r--  g4f/Provider/ReplicateHome.py (renamed from g4f/Provider/ReplicateImage.py) | 78
-rw-r--r--  g4f/Provider/__init__.py                   |  16
-rw-r--r--  g4f/Provider/deprecated/Yqcloud.py         |   2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py      |   7
-rw-r--r--  g4f/Provider/not_working/Aichatos.py (renamed from g4f/Provider/Aichatos.py) | 8
-rw-r--r--  g4f/Provider/not_working/ChatForAi.py (renamed from g4f/Provider/ChatForAi.py) | 10
-rw-r--r--  g4f/Provider/not_working/ChatgptAi.py (renamed from g4f/Provider/ChatgptAi.py) | 12
-rw-r--r--  g4f/Provider/not_working/ChatgptNext.py (renamed from g4f/Provider/ChatgptNext.py) | 8
-rw-r--r--  g4f/Provider/not_working/ChatgptX.py (renamed from g4f/Provider/ChatgptX.py) | 10
-rw-r--r--  g4f/Provider/not_working/Cnote.py (renamed from g4f/Provider/Cnote.py) | 8
-rw-r--r--  g4f/Provider/not_working/Feedough.py       |  78
-rw-r--r--  g4f/Provider/not_working/__init__.py       |   9
-rw-r--r--  g4f/models.py                              | 381

28 files changed, 1261 insertions(+), 487 deletions(-)
diff --git a/README.md b/README.md
index d1a5f0a9..e276d1a7 100644
--- a/README.md
+++ b/README.md
@@ -4,26 +4,23 @@
The **ETA** for g4f v3, when I, [@xtekky](https://github.com/xtekky), will pick this project back up and improve it, is **`29` days** (written Tue 28 May). Join [t.me/g4f_channel](https://t.me/g4f_channel) in the meantime to stay updated.
-_____
-
+---
Written by [@xtekky](https://github.com/xtekky) & maintained by [@hlohaus](https://github.com/hlohaus)
-
<div id="top"></div>
-> By using this repository or any code related to it, you agree to the [legal notice](https://github.com/xtekky/gpt4free/blob/main/LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
+> By using this repository or any code related to it, you agree to the [legal notice](https://github.com/xtekky/gpt4free/blob/main/LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses.
-> [!Warning]
-*"gpt4free"* serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balance and flow control.
+> [!Warning]
+> _"gpt4free"_ serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balancing and flow control.
-> [!Note]
-<sup><strong>Lastet version:</strong></sup> [![PyPI version](https://img.shields.io/pypi/v/g4f?color=blue)](https://pypi.org/project/g4f) [![Docker version](https://img.shields.io/docker/v/hlohaus789/g4f?label=docker&color=blue)](https://hub.docker.com/r/hlohaus789/g4f)
-> <sup><strong>Stats:</strong></sup> [![Downloads](https://static.pepy.tech/badge/g4f)](https://pepy.tech/project/g4f) [![Downloads](https://static.pepy.tech/badge/g4f/month)](https://pepy.tech/project/g4f)
+> [!Note]
+> <sup><strong>Latest version:</strong></sup> [![PyPI version](https://img.shields.io/pypi/v/g4f?color=blue)](https://pypi.org/project/g4f) [![Docker version](https://img.shields.io/docker/v/hlohaus789/g4f?label=docker&color=blue)](https://hub.docker.com/r/hlohaus789/g4f)
+> <sup><strong>Stats:</strong></sup> [![Downloads](https://static.pepy.tech/badge/g4f)](https://pepy.tech/project/g4f) [![Downloads](https://static.pepy.tech/badge/g4f/month)](https://pepy.tech/project/g4f)
```sh
pip install -U g4f
```
+
```sh
docker pull hlohaus789/g4f
```
@@ -37,12 +34,15 @@ docker pull hlohaus789/g4f
- `g4f` now supports 100% local inference: 🧠 [local-docs](https://g4f.mintlify.app/docs/core/usage/local)
## 🔻 Site Takedown
+
Is your site listed in this repository and do you want it taken down? Send an email to takedown@g4f.ai with proof that it is yours, and it will be removed as quickly as possible. To prevent reproduction, please secure your API. 😉
-## 🚀 Feedback and Todo
+## 🚀 Feedback and Todo
+
You can always leave some feedback here: https://forms.gle/FeWV9RLEedfdkmFN6
As per the survey, here is a list of improvements to come
+
- [x] Update the repository to include the new openai library syntax (ex: `Openai()` class) | completed, use `g4f.client.Client`
- [ ] Golang implementation
- [ ] 🚧 Improve Documentation (in /docs & Guides, Howtos, & Do video tutorials)
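The first item above marks the new OpenAI-style client syntax as completed; a minimal sketch of that usage (model and prompt are illustrative):

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```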
@@ -58,28 +58,28 @@ As per the survey, here is a list of improvements to come
- [🆕 What's New](#-whats-new)
- [📚 Table of Contents](#-table-of-contents)
- [🛠️ Getting Started](#-getting-started)
- + [Docker Container Guide](#docker-container-guide)
- + [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe)
- + [Use python](#use-python)
- - [Prerequisites](#prerequisites)
- - [Install using PyPI package:](#install-using-pypi-package)
- - [Install from source:](#install-from-source)
- - [Install using Docker:](#install-using-docker)
+ - [Docker Container Guide](#docker-container-guide)
+ - [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe)
+ - [Use python](#use-python)
+ - [Prerequisites](#prerequisites)
+ - [Install using PyPI package:](#install-using-pypi-package)
+ - [Install from source:](#install-from-source)
+ - [Install using Docker:](#install-using-docker)
- [💡 Usage](#-usage)
- * [Text Generation](#text-generation)
- * [Image Generation](#image-generation)
- * [Web UI](#web-ui)
- * [Interference API](#interference-api)
- * [Configuration](#configuration)
+ - [Text Generation](#text-generation)
+ - [Image Generation](#image-generation)
+ - [Web UI](#web-ui)
+ - [Interference API](#interference-api)
+ - [Configuration](#configuration)
- [🚀 Providers and Models](#-providers-and-models)
- * [GPT-4](#gpt-4)
- * [GPT-3.5](#gpt-35)
- * [Other](#other)
- * [Models](#models)
+ - [GPT-4](#gpt-4)
+ - [GPT-3.5](#gpt-35)
+ - [Other](#other)
+ - [Models](#models)
- [🔗 Powered by gpt4free](#-powered-by-gpt4free)
- [🤝 Contribute](#-contribute)
- + [How do i create a new Provider?](#guide-how-do-i-create-a-new-provider)
- + [How can AI help me with writing code?](#guide-how-can-ai-help-me-with-writing-code)
+ - [How do I create a new Provider?](#guide-how-do-i-create-a-new-provider)
+ - [How can AI help me with writing code?](#guide-how-can-ai-help-me-with-writing-code)
- [🙌 Contributors](#-contributors)
- [©️ Copyright](#-copyright)
- [⭐ Star History](#-star-history)
@@ -106,7 +106,8 @@ docker run \
hlohaus789/g4f:latest
```
-3. **Access the Client:**
+3. **Access the Client:**
+
- To use the included client, navigate to: [http://localhost:8080/chat/](http://localhost:8080/chat/)
- Or set the API base for your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
@@ -114,8 +115,11 @@ docker run \
If required, you can access the container's desktop here: http://localhost:7900/?autoconnect=1&resize=scale&password=secret for provider login purposes.
#### Installation Guide for Windows (.exe)
+
To ensure the seamless operation of our application, please follow the instructions below. These steps are designed to guide you through the installation process on Windows operating systems.
+
### Installation Steps
+
1. **Download the Application**: Visit our [releases page](https://github.com/xtekky/gpt4free/releases/tag/0.3.1.7) and download the most recent version of the application, named `g4f.exe.zip`.
2. **File Placement**: After downloading, locate the `.zip` file in your Downloads folder. Unpack it to a directory of your choice on your system, then execute the `g4f.exe` file to run the app.
3. **Open GUI**: The app starts a web server with the GUI. Open your favorite browser and navigate to `http://localhost:8080/chat/` to access the application interface.
@@ -124,11 +128,13 @@ To ensure the seamless operation of our application, please follow the instructi
By following these steps, you should be able to successfully install and run the application on your Windows system. If you encounter any issues during the installation process, please refer to our Issue Tracker or reach out on Discord for assistance.
Run the **Webview UI** on other platforms:
+
- [/docs/guides/webview](https://github.com/xtekky/gpt4free/blob/main/docs/webview.md)
##### Use your smartphone:
Run the Web UI on Your Smartphone:
+
- [/docs/guides/phone](https://github.com/xtekky/gpt4free/blob/main/docs/guides/phone.md)
#### Use python
@@ -152,13 +158,11 @@ Use partial requirements: [/docs/requirements](https://github.com/xtekky/gpt4fre
How do I load the project using git and install the project requirements?
Read this tutorial and follow it step by step: [/docs/git](https://github.com/xtekky/gpt4free/blob/main/docs/git.md)
-
##### Install using Docker:
How do I build and run the docker-compose image from source?
Use docker-compose: [/docs/docker](https://github.com/xtekky/gpt4free/blob/main/docs/docker.md)
-
## 💡 Usage
#### Text Generation
@@ -193,7 +197,6 @@ response = client.images.generate(
image_url = response.data[0].url
```
-
[![Image with cat](/docs/cat.jpeg)](https://github.com/xtekky/gpt4free/blob/main/docs/client.md)
**Full Documentation for Python API**
@@ -210,7 +213,9 @@ To start the web interface, type the following codes in python:
from g4f.gui import run_gui
run_gui()
```
+
or execute the following command:
+
```bash
python -m g4f.cli gui -port 8080 -debug
```
@@ -229,7 +234,7 @@ Access with: http://localhost:1337/v1
Cookies are essential for using Meta AI and Microsoft Designer to create images.
Additionally, cookies are required for the Google Gemini and WhiteRabbitNeo Provider.
-From Bing, ensure you have the "_U" cookie, and from Google, all cookies starting with "__Secure-1PSID" are needed.
+From Bing, ensure you have the "\_U" cookie, and from Google, all cookies starting with "\_\_Secure-1PSID" are needed.
You can pass these cookies directly to the create function or set them using the `set_cookies` method before running G4F:
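The cookie-passing example itself is elided from this hunk; a minimal sketch of the `set_cookies` call, assuming the domains above and placeholder values:

```python
from g4f.cookies import set_cookies

# Cookie values are placeholders; copy the real ones from your browser.
set_cookies(".bing.com", {"_U": "cookie value"})
set_cookies(".google.com", {"__Secure-1PSID": "cookie value"})
```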
@@ -304,11 +309,13 @@ Note: Ensure that your .har file is stored securely, as it may contain sensitive
If you want to hide or change your IP address for the providers, you can set a proxy globally via an environment variable:
- On macOS and Linux:
+
```bash
export G4F_PROXY="http://host:port"
```
- On Windows:
+
```bash
set G4F_PROXY=http://host:port
```
@@ -317,135 +324,135 @@ set G4F_PROXY=http://host:port
### GPT-4
-| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
-| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
-| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt.com](https://chatgpt.com) | `g4f.Provider.OpenaiChat` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌+✔ī¸ |
-| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [you.com](https://you.com) | `g4f.Provider.You` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
+| -------------------------------------- | ------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ----- |
+| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt.com](https://chatgpt.com) | `g4f.Provider.OpenaiChat` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌+✔ī¸ |
+| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [you.com](https://you.com) | `g4f.Provider.You` | ✔ī¸ | ✔ī¸ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
## Best OpenSource Models
+
While we wait for gpt-5, here is a list of new models that are at least better than gpt-3.5-turbo. **Some are better than gpt-4**. Expect this list to grow.
-| Website | Provider | parameters | better than |
-| ------ | ------- | ------ | ------ |
-| [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview |
-| [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 |
-| [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 |
-| [claude-3-sonnet](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0314 |
-| [reka-core](https://chat.reka.ai/) | `g4f.Provider.Reka` | 21B | gpt-4-vision |
-| [dbrx-instruct](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm) | `g4f.Provider.DeepInfra` | 132B / 36B active| gpt-3.5-turbo |
-| [mixtral-8x22b](https://huggingface.co/mistral-community/Mixtral-8x22B-v0.1) | `g4f.Provider.DeepInfra` | 176B / 44b active | gpt-3.5-turbo |
+| Website | Provider | parameters | better than |
+| ---------------------------------------------------------------------------------------- | ----------------------------------- | ----------------- | ------------------ |
+| [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview |
+| [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 |
+| [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 |
+| [claude-3-sonnet](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0314 |
+| [reka-core](https://chat.reka.ai/) | `g4f.Provider.Reka` | 21B | gpt-4-vision |
+| [dbrx-instruct](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm) | `g4f.Provider.DeepInfra` | 132B / 36B active | gpt-3.5-turbo |
+| [mixtral-8x22b](https://huggingface.co/mistral-community/Mixtral-8x22B-v0.1) | `g4f.Provider.DeepInfra` | 176B / 44B active | gpt-3.5-turbo |
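To try one of the open models above, the provider can be pinned on the client; a sketch assuming the HuggingChat route (the exact model id is hypothetical and may differ from what `g4f/models.py` registers):

```python
from g4f.client import Client
from g4f.Provider import HuggingChat

client = Client(provider=HuggingChat)
response = client.chat.completions.create(
    model="command-r-plus",  # hypothetical id for command-r+
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```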
### GPT-3.5
-| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
-| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
-| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat10.aichatos.xyz](https://chat10.aichatos.xyz) | `g4f.Provider.Aichatos` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [f1.cnote.top](https://f1.cnote.top) | `g4f.Provider.Cnote` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DuckDuckGo` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
+| ---------------------------------------------------------- | ----------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ---- |
+| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat10.aichatos.xyz](https://chat10.aichatos.xyz) | `g4f.Provider.Aichatos` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [f1.cnote.top](https://f1.cnote.top) | `g4f.Provider.Cnote` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DuckDuckGo` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔ī¸ | ❌ | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔ī¸ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔ī¸ | ❌ | ✔ī¸ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
### Other
-| Website | Provider | Stream | Status | Auth |
-| ------ | ------- | ------ | ------ | ---- |
-| [openchat.team](https://openchat.team) | `g4f.Provider.Aura`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [blackbox.ai](https://www.blackbox.ai) | `g4f.Provider.Blackbox`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [cohereforai-c4ai-command-r-plus.hf.space](https://cohereforai-c4ai-command-r-plus.hf.space) | `g4f.Provider.Cohere`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [developers.sber.ru](https://developers.sber.ru/gigachat) | `g4f.Provider.GigaChat`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [console.groq.com](https://console.groq.com/playground) | `g4f.Provider.Groq`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [meta.ai](https://www.meta.ai) | `g4f.Provider.MetaAI`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [openrouter.ai](https://openrouter.ai) | `g4f.Provider.OpenRouter`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
-| [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs`| ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [replicate.com](https://replicate.com) | `g4f.Provider.Replicate`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [whiterabbitneo.com](https://www.whiterabbitneo.com) | `g4f.Provider.WhiteRabbitNeo`| ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
-| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard`| ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔ī¸ |
+| Website | Provider | Stream | Status | Auth |
+| -------------------------------------------------------------------------------------------- | ----------------------------- | ------ | ---------------------------------------------------------- | ---- |
+| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [blackbox.ai](https://www.blackbox.ai) | `g4f.Provider.Blackbox` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [cohereforai-c4ai-command-r-plus.hf.space](https://cohereforai-c4ai-command-r-plus.hf.space) | `g4f.Provider.Cohere` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [developers.sber.ru](https://developers.sber.ru/gigachat) | `g4f.Provider.GigaChat` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [console.groq.com](https://console.groq.com/playground) | `g4f.Provider.Groq` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [meta.ai](https://www.meta.ai) | `g4f.Provider.MetaAI` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [openrouter.ai](https://openrouter.ai) | `g4f.Provider.OpenRouter` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔ī¸ |
+| [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs` | ✔ī¸ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [replicate.com](https://replicate.com) | `g4f.Provider.Replicate` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [whiterabbitneo.com](https://www.whiterabbitneo.com) | `g4f.Provider.WhiteRabbitNeo` | ✔ī¸ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔ī¸ |
+| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔ī¸ |
### Models
-| Model | Base Provider | Provider | Website |
-| ----- | ------------- | -------- | ------- |
-| gpt-3.5-turbo | OpenAI | 8+ Providers | [openai.com](https://openai.com/) |
-| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) |
-| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
-| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Meta-Llama-3-8b-instruct | Meta | 1+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Meta-Llama-3-70b-instruct | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-34b-Instruct-hf | Meta | g4f.Provider.HuggingChat | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
-| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
-| Mistral-7B-Instruct-v0.2 | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| zephyr-orpo-141b-A35b-v0.1 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
-| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
-| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
-| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
-| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
-| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
-| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
-| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
-| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
+| Model | Base Provider | Provider | Website |
+| -------------------------- | ------------- | ------------------------ | ----------------------------------------------- |
+| gpt-3.5-turbo | OpenAI | 8+ Providers | [openai.com](https://openai.com/) |
+| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) |
+| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
+| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Meta-Llama-3-8b-instruct | Meta | 1+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Meta-Llama-3-70b-instruct | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-34b-Instruct-hf | Meta | g4f.Provider.HuggingChat | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
+| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
+| Mistral-7B-Instruct-v0.2 | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| zephyr-orpo-141b-A35b-v0.1 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
+| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
+| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
+| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
+| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
+| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
+| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
### Image and Vision Models
-| Label | Provider | Image Model | Vision Model | Website |
-| ----- | -------- | ----------- | ------------ | ------- |
-| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e-3 | gpt-4-vision | [bing.com](https://bing.com/chat) |
-| DeepInfra | `g4f.Provider.DeepInfra` | stability-ai/sdxl | llava-1.5-7b-hf | [deepinfra.com](https://deepinfra.com) |
-| Gemini | `g4f.Provider.Gemini` | ✔ī¸ | ✔ī¸ | [gemini.google.com](https://gemini.google.com) |
-| Gemini API | `g4f.Provider.GeminiPro` | ❌ | gemini-1.5-pro | [ai.google.dev](https://ai.google.dev) |
-| Meta AI | `g4f.Provider.MetaAI` | ✔ī¸ | ❌ | [meta.ai](https://www.meta.ai) |
-| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e-3 | gpt-4-vision | [chatgpt.com](https://chatgpt.com) |
-| Reka | `g4f.Provider.Reka` | ❌ | ✔ī¸ | [chat.reka.ai](https://chat.reka.ai/) |
-| Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl| llava-v1.6-34b | [replicate.com](https://replicate.com) |
-| You.com | `g4f.Provider.You` | dall-e-3| ✔ī¸ | [you.com](https://you.com) |
-
+| Label | Provider | Image Model | Vision Model | Website |
+| ------------------------- | ------------------------- | ----------------- | --------------- | ---------------------------------------------- |
+| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e-3 | gpt-4-vision | [bing.com](https://bing.com/chat) |
+| DeepInfra | `g4f.Provider.DeepInfra` | stability-ai/sdxl | llava-1.5-7b-hf | [deepinfra.com](https://deepinfra.com) |
+| Gemini | `g4f.Provider.Gemini` | ✔ī¸ | ✔ī¸ | [gemini.google.com](https://gemini.google.com) |
+| Gemini API | `g4f.Provider.GeminiPro` | ❌ | gemini-1.5-pro | [ai.google.dev](https://ai.google.dev) |
+| Meta AI | `g4f.Provider.MetaAI` | ✔ī¸ | ❌ | [meta.ai](https://www.meta.ai) |
+| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e-3 | gpt-4-vision | [chatgpt.com](https://chatgpt.com) |
+| Reka | `g4f.Provider.Reka` | ❌ | ✔ī¸ | [chat.reka.ai](https://chat.reka.ai/) |
+| Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl | llava-v1.6-34b | [replicate.com](https://replicate.com) |
+| You.com | `g4f.Provider.You` | dall-e-3 | ✔ī¸ | [you.com](https://you.com) |
## 🔗 Powered by gpt4free
@@ -848,6 +855,33 @@ While we wait for gpt-5, here is a list of new models that are at least better t
</a>
</td>
</tr>
+ <tr>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js">
+ <b>GPT4js</b>
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/stargazers">
+ <img alt="Stars" src="https://img.shields.io/github/stars/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/network/members">
+ <img alt="Forks" src="https://img.shields.io/github/forks/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/issues">
+ <img alt="Issues" src="https://img.shields.io/github/issues/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ <td>
+ <a href="https://github.com/zachey01/gpt4free.js/pulls">
+ <img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/zachey01/gpt4free.js?style=flat-square&labelColor=343b41" />
+ </a>
+ </td>
+ </tr>
</tbody>
</table>
@@ -857,11 +891,11 @@ We welcome contributions from the community. Whether you're adding new providers
###### Guide: How do I create a new Provider?
- - Read: [/docs/guides/create_provider](https://github.com/xtekky/gpt4free/blob/main/docs/guides/create_provider.md)
+- Read: [/docs/guides/create_provider](https://github.com/xtekky/gpt4free/blob/main/docs/guides/create_provider.md)
###### Guide: How can AI help me with writing code?
- - Read: [/docs/guides/help_me](https://github.com/xtekky/gpt4free/blob/main/docs/guides/help_me.md)
+- Read: [/docs/guides/help_me](https://github.com/xtekky/gpt4free/blob/main/docs/guides/help_me.md)
## 🙌 Contributors
@@ -911,7 +945,7 @@ A list of all contributors is available [here](https://github.com/xtekky/gpt4fre
- The [`MetaAI.py`](https://github.com/xtekky/gpt4free/blob/main/g4f/Provider/MetaAI.py) file contains code from [meta-ai-api](https://github.com/Strvm/meta-ai-api) by [@Strvm](https://github.com/Strvm)
- The [`proofofwork.py`](https://github.com/xtekky/gpt4free/blob/main/g4f/Provider/openai/proofofwork.py) has input from [missuo/FreeGPT35](https://github.com/missuo/FreeGPT35)
-*Having input implies that the AI's code generation utilized it as one of many sources.*
+_Having input implies that the AI's code generation utilized it as one of many sources._
## ©️ Copyright
diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/AI365VIP.py
new file mode 100644
index 00000000..fc6ad237
--- /dev/null
+++ b/g4f/Provider/AI365VIP.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chat.ai365vip.com"
+ api_endpoint = "/api/chat"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4o',
+ 'claude-3-haiku-20240307',
+ ]
+ model_aliases = {
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://chat.ai365vip.com",
+ "priority": "u=1, i",
+ "referer": "https://chat.ai365vip.com/en",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": {
+ "id": model,
+ "name": {
+ "gpt-3.5-turbo": "GPT-3.5",
+ "claude-3-haiku-20240307": "claude-3-haiku",
+ "gpt-4o": "GPT-4O"
+ }.get(model, model),
+ },
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "prompt": "You are a helpful assistant.",
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
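A quick way to exercise the new provider directly, assuming the accompanying `__init__.py` change exports it (model and prompt are illustrative):

```python
import asyncio
from g4f.Provider import AI365VIP

async def main():
    # Chunks stream straight from the provider's async generator.
    async for chunk in AI365VIP.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(chunk, end="")

asyncio.run(main())
```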
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 6e1e3949..a86471f2 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -2,31 +2,35 @@ from __future__ import annotations
import uuid
import secrets
-from aiohttp import ClientSession
+import re
+from aiohttp import ClientSession, ClientResponse
+from typing import AsyncGenerator, Optional
from ..typing import AsyncResult, Messages, ImageType
from ..image import to_data_uri
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-class Blackbox(AsyncGeneratorProvider):
+class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.blackbox.ai"
working = True
+ default_model = 'blackbox'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: str = None,
- image: ImageType = None,
- image_name: str = None,
+ proxy: Optional[str] = None,
+ image: Optional[ImageType] = None,
+ image_name: Optional[str] = None,
**kwargs
- ) -> AsyncResult:
+ ) -> AsyncGenerator[str, None]:
if image is not None:
messages[-1]["data"] = {
- "fileText": image_name,
+ "fileText": image_name,
"imageBase64": to_data_uri(image)
}
+
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Accept": "*/*",
@@ -40,9 +44,11 @@ class Blackbox(AsyncGeneratorProvider):
"Alt-Used": "www.blackbox.ai",
"Connection": "keep-alive",
}
+
async with ClientSession(headers=headers) as session:
random_id = secrets.token_hex(16)
random_user_id = str(uuid.uuid4())
+
data = {
"messages": messages,
"id": random_id,
@@ -55,10 +61,17 @@ class Blackbox(AsyncGeneratorProvider):
"playgroundMode": False,
"webSearchMode": False,
"userSystemPrompt": "",
- "githubToken": None
+ "githubToken": None,
+ "maxTokens": None
}
- async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+
+ async with session.post(
+ f"{cls.url}/api/chat", json=data, proxy=proxy
+ ) as response: # type: ClientResponse
response.raise_for_status()
- async for chunk in response.content:
+ async for chunk in response.content.iter_any():
if chunk:
- yield chunk.decode()
+ # Decode the chunk and clean up unwanted prefixes using a regex
+ decoded_chunk = chunk.decode()
+ cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk)
+ yield cleaned_chunk
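The effect of the new cleanup regex on a made-up Blackbox chunk:

```python
import re

raw = "$@$v=undefined-rv1$@$Hello from Blackbox"  # illustrative chunk
print(re.sub(r'\$@\$.+?\$@\$|\$@\$', '', raw))    # -> Hello from Blackbox
```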
diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/Chatgpt4o.py
new file mode 100644
index 00000000..f3dc8a15
--- /dev/null
+++ b/g4f/Provider/Chatgpt4o.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+import re
+from ..requests import StreamSession, raise_for_status
+from ..typing import Messages
+from .base_provider import AsyncProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Chatgpt4o(AsyncProvider, ProviderModelMixin):
+ url = "https://chatgpt4o.one"
+ supports_gpt_4 = True
+ working = True
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o'
+
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> str:
+ headers = {
+ 'authority': 'chatgpt4o.one',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgpt4o.one',
+ 'referer': 'https://chatgpt4o.one',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
+ if not cls._post_id or not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+ response_text = await response.text()
+
+ post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
+ nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
+
+ if not post_id_match:
+ raise RuntimeError("No post ID found")
+ cls._post_id = post_id_match.group(1)
+
+ if not nonce_match:
+ raise RuntimeError("No nonce found")
+ cls._nonce = nonce_match.group(1)
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ response_json = await response.json()
+ if "data" not in response_json:
+ raise RuntimeError("Unexpected response structure: 'data' field missing")
+ return response_json["data"]
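The nonce/post-id scraping step above, run against illustrative page markup:

```python
import re

html = '<div data-post-id="123" data-nonce="abc123"></div>'  # made-up markup
post_id = re.search(r'data-post-id="([0-9]+)"', html).group(1)  # "123"
nonce = re.search(r'data-nonce="(.*?)"', html).group(1)         # "abc123"
```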
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index 22064f1b..b1e00a22 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
class ChatgptFree(AsyncProvider):
url = "https://chatgptfree.ai"
supports_gpt_35_turbo = True
- working = False
+ working = True
_post_id = None
_nonce = None
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
new file mode 100644
index 00000000..2aa78773
--- /dev/null
+++ b/g4f/Provider/DDG.py
@@ -0,0 +1,117 @@
+from __future__ import annotations
+
+import json
+import aiohttp
+import asyncio
+from typing import Optional
+import base64
+
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_connector
+from ..typing import AsyncResult, Messages
+from ..requests.raise_for_status import raise_for_status
+from ..providers.conversation import BaseConversation
+
+class DDG(AsyncGeneratorProvider, ProviderModelMixin):
+ url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9haWNoYXQ=").decode("utf-8")
+ working = True
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+
+ default_model = "gpt-3.5-turbo-0125"
+ models = ["gpt-3.5-turbo-0125", "claude-3-haiku-20240307", "meta-llama/Llama-3-70b-chat-hf", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
+ model_aliases = {
+ "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "llama-3-70b": "meta-llama/Llama-3-70b-chat-hf",
+ "mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ }
+
+ # Obfuscated URLs and headers
+ status_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode("utf-8")
+ chat_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9jaGF0").decode("utf-8")
+ referer = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS8=").decode("utf-8")
+ origin = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbQ==").decode("utf-8")
+
+ user_agent = 'Mozilla/5.0 (Windows NT 10.0; rv:127.0) Gecko/20100101 Firefox/127.0'
+ headers = {
+ 'User-Agent': user_agent,
+ 'Accept': 'text/event-stream',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Accept-Encoding': 'gzip, deflate, br, zstd',
+ 'Referer': referer,
+ 'Content-Type': 'application/json',
+ 'Origin': origin,
+ 'Connection': 'keep-alive',
+ 'Cookie': 'dcm=3',
+ 'Sec-Fetch-Dest': 'empty',
+ 'Sec-Fetch-Mode': 'cors',
+ 'Sec-Fetch-Site': 'same-origin',
+ 'Pragma': 'no-cache',
+ 'TE': 'trailers'
+ }
+
+ @classmethod
+ async def get_vqd(cls, session: aiohttp.ClientSession) -> Optional[str]:
+ try:
+ async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response:
+ await raise_for_status(response)
+ return response.headers.get("x-vqd-4")
+ except Exception as e:
+ print(f"Error getting VQD: {e}")
+ return None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: aiohttp.BaseConnector = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ async with aiohttp.ClientSession(headers=cls.headers, connector=get_connector(connector, proxy)) as session:
+ vqd_4 = None
+ if conversation is not None and len(messages) > 1:
+ vqd_4 = conversation.vqd_4
+ messages = [*conversation.messages, messages[-2], messages[-1]]
+ else:
+ for _ in range(3): # Try up to 3 times to get a valid VQD
+ vqd_4 = await cls.get_vqd(session)
+ if vqd_4:
+ break
+ await asyncio.sleep(1) # Wait a bit before retrying
+
+ if not vqd_4:
+ raise Exception("Failed to obtain a valid VQD token")
+
+ messages = [messages[-1]] # Only use the last message for new conversations
+
+ payload = {
+ 'model': cls.get_model(model),
+ 'messages': [{'role': m['role'], 'content': m['content']} for m in messages]
+ }
+
+ async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response:
+ await raise_for_status(response)
+ if return_conversation:
+ yield Conversation(vqd_4, messages)
+
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk.startswith(b"[DONE]"):
+ break
+ try:
+ data = json.loads(chunk)
+ if "message" in data and data["message"]:
+ yield data["message"]
+ except json.JSONDecodeError:
+ print(f"Failed to decode JSON: {chunk}")
+
+class Conversation(BaseConversation):
+ def __init__(self, vqd_4: str, messages: Messages) -> None:
+ self.vqd_4 = vqd_4
+ self.messages = messages
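The obfuscated endpoints decode to the expected DuckDuckGo URLs, and the provider can be exercised through the public API, assuming the accompanying `__init__.py` change exports it:

```python
import base64
import g4f
from g4f.Provider import DDG

# Sanity check of one obfuscated URL.
print(base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode())
# -> https://duckduckgo.com/duckchat/v1/status

# One-shot request; "claude-3-haiku" resolves via the model_aliases mapping above.
response = g4f.ChatCompletion.create(
    model="claude-3-haiku",
    provider=DDG,
    messages=[{"role": "user", "content": "Hi"}],
)
print(response)
```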
diff --git a/g4f/Provider/Feedough.py b/g4f/Provider/Feedough.py
index 2ce4561e..d35e30ee 100644
--- a/g4f/Provider/Feedough.py
+++ b/g4f/Provider/Feedough.py
@@ -1,17 +1,20 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession
+import asyncio
+from aiohttp import ClientSession, TCPConnector
+from urllib.parse import urlencode
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-class Feedough(AsyncGeneratorProvider):
+class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.feedough.com"
+ api_endpoint = "/wp-admin/admin-ajax.php"
working = True
- supports_gpt_35_turbo = True
+ default_model = ''
@classmethod
async def create_async_generator(
@@ -22,31 +25,54 @@ class Feedough(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br",
- "Referer": "https://www.feedough.com/ai-prompt-generator/",
- "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
- "Origin": "https://www.feedough.com",
- "DNT": "1",
- "Sec-GPC": "1",
- "Connection": "keep-alive",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "TE": "trailers",
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/ai-prompt-generator/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
+
+ connector = TCPConnector(ssl=False)
+
+ async with ClientSession(headers=headers, connector=connector) as session:
data = {
"action": "aixg_generate",
- "prompt": prompt,
+ "prompt": format_prompt(messages),
+            "aixg_generate_nonce": "110c021031"  # nonce hardcoded from the live page; may expire
}
- async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
- response_json = json.loads(response_text)
- if response_json["success"]:
- message = response_json["data"]["message"]
- yield message
+
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ data=urlencode(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+ try:
+ response_json = json.loads(response_text)
+ if response_json.get("success") and "data" in response_json:
+ message = response_json["data"].get("message", "")
+ yield message
+ except json.JSONDecodeError:
+ yield response_text
+ except Exception as e:
+ print(f"An error occurred: {e}")
+
+ @classmethod
+ async def run(cls, *args, **kwargs):
+ async for item in cls.create_async_generator(*args, **kwargs):
+ yield item
+
+ # exclude the current task; awaiting it from inside itself would raise RuntimeError
+ tasks = asyncio.all_tasks() - {asyncio.current_task()}
+ for task in tasks:
+ if not task.done():
+ await task
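A minimal usage sketch for the patched provider, assuming Feedough is still exported from g4f.Provider; the request depends on a hardcoded nonce, so it may stop working at any time:

import asyncio
from g4f.Provider import Feedough

async def main():
    messages = [{"role": "user", "content": "Suggest a startup idea."}]
    async for chunk in Feedough.create_async_generator(model="", messages=messages):
        print(chunk)

asyncio.run(main())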
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
index 3fb247c7..7d8c1d10 100644
--- a/g4f/Provider/FreeChatgpt.py
+++ b/g4f/Provider/FreeChatgpt.py
@@ -1,17 +1,27 @@
from __future__ import annotations
-
import json
-from aiohttp import ClientSession, ClientTimeout
-
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
+from .helper import format_prompt
+
class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://free.chatgpt.org.uk"
+ url = "https://chat.chatgpt.org.uk"
+ api_endpoint = "/api/openai/v1/chat/completions"
working = True
- supports_message_history = True
- default_model = "google-gemini-pro"
+ supports_gpt_35_turbo = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'SparkDesk-v1.1',
+ 'deepseek-coder',
+ 'deepseek-chat',
+ 'Qwen2-7B-Instruct',
+ 'glm4-9B-chat',
+ 'chatglm3-6B',
+ 'Yi-1.5-9B-Chat',
+ ]
@classmethod
async def create_async_generator(
@@ -19,45 +29,50 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- timeout: int = 120,
**kwargs
) -> AsyncResult:
headers = {
- "Accept": "application/json, text/event-stream",
- "Content-Type":"application/json",
- "Accept-Encoding": "gzip, deflate, br",
- "Accept-Language": "en-US,en;q=0.5",
- "Host":"free.chatgpt.org.uk",
- "Referer":f"{cls.url}/",
- "Origin":f"{cls.url}",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
- async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
- "messages": messages,
+ "messages": [
+ {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"},
+ {"role": "user", "content": prompt}
+ ],
"stream": True,
- "model": cls.get_model(""),
- "temperature": kwargs.get("temperature", 0.5),
- "presence_penalty": kwargs.get("presence_penalty", 0),
- "frequency_penalty": kwargs.get("frequency_penalty", 0),
- "top_p": kwargs.get("top_p", 1)
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
}
- async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
- await raise_for_status(response)
- started = False
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ accumulated_text = ""
async for line in response.content:
- if line.startswith(b"data: [DONE]"):
- break
- elif line.startswith(b"data: "):
- line = json.loads(line[6:])
- if(line["choices"]==[]):
- continue
- chunk = line["choices"][0]["delta"].get("content")
- if chunk:
- started = True
- yield chunk
- if not started:
- raise RuntimeError("Empty response") \ No newline at end of file
+ if line:
+ line_str = line.decode().strip()
+ if line_str == "data: [DONE]":
+ yield accumulated_text
+ break
+ elif line_str.startswith("data: "):
+ try:
+ chunk = json.loads(line_str[6:])
+ delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
+ accumulated_text += delta_content
+ except json.JSONDecodeError:
+ pass
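The rewritten loop buffers the delta fragments and only yields the accumulated text once data: [DONE] arrives. An offline sketch of the same parsing, fed with hypothetical event-stream lines:

import json

lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo"}}]}',
    "data: [DONE]",
]
accumulated_text = ""
for line_str in lines:
    if line_str == "data: [DONE]":
        print(accumulated_text)  # Hello
        break
    elif line_str.startswith("data: "):
        chunk = json.loads(line_str[6:])
        accumulated_text += chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")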
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index f79f0a66..7fa3b5ab 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -1,55 +1,67 @@
from __future__ import annotations
-import time, hashlib, random
-
-from ..typing import AsyncResult, Messages
+import time
+import hashlib
+import random
+from typing import AsyncGenerator, Optional, Dict, Any
+from ..typing import Messages
from ..requests import StreamSession, raise_for_status
from .base_provider import AsyncGeneratorProvider
from ..errors import RateLimitError
-domains = [
+# Constants
+DOMAINS = [
"https://s.aifree.site",
"https://v.aifree.site/"
]
+RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"  # "the current region's daily quota has been exhausted"
+
class FreeGpt(AsyncGeneratorProvider):
- url = "https://freegptsnav.aifree.site"
- working = True
- supports_message_history = True
- supports_system_message = True
- supports_gpt_35_turbo = True
+ url: str = "https://freegptsnav.aifree.site"
+ working: bool = True
+ supports_message_history: bool = True
+ supports_system_message: bool = True
+ supports_gpt_35_turbo: bool = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- proxy: str = None,
+ proxy: Optional[str] = None,
timeout: int = 120,
- **kwargs
- ) -> AsyncResult:
+ **kwargs: Any
+ ) -> AsyncGenerator[str, None]:
+ prompt = messages[-1]["content"]
+ timestamp = int(time.time())
+ data = cls._build_request_data(messages, prompt, timestamp)
+
+ domain = random.choice(DOMAINS)
+
async with StreamSession(
impersonate="chrome",
timeout=timeout,
- proxies={"all": proxy}
+ proxies={"all": proxy} if proxy else None
) as session:
- prompt = messages[-1]["content"]
- timestamp = int(time.time())
- data = {
- "messages": messages,
- "time": timestamp,
- "pass": None,
- "sign": generate_signature(timestamp, prompt)
- }
- domain = random.choice(domains)
async with session.post(f"{domain}/api/generate", json=data) as response:
await raise_for_status(response)
async for chunk in response.iter_content():
- chunk = chunk.decode(errors="ignore")
- if chunk == "当前地区当日额度已消耗完":
+ chunk_decoded = chunk.decode(errors="ignore")
+ if chunk_decoded == RATE_LIMIT_ERROR_MESSAGE:
raise RateLimitError("Rate limit reached")
- yield chunk
-
-def generate_signature(timestamp: int, message: str, secret: str = ""):
+ yield chunk_decoded
+
+ @staticmethod
+ def _build_request_data(messages: Messages, prompt: str, timestamp: int, secret: str = "") -> Dict[str, Any]:
+ return {
+ "messages": messages,
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, prompt, secret)
+ }
+
+
+def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()
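The request signature is a plain SHA-256 digest over timestamp, prompt, and an (empty by default) secret, exactly as generate_signature above computes it. A self-contained check:

import hashlib
import time

def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()

ts = int(time.time())
print(generate_signature(ts, "Hello"))  # 64-character hex digest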
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index d88c4ed0..c61e2ff3 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -9,13 +9,14 @@ from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-class GeminiProChat(AsyncGeneratorProvider):
+class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
working = True
supports_message_history = True
+ default_model = ''
@classmethod
async def create_async_generator(
@@ -32,8 +33,8 @@ class GeminiProChat(AsyncGeneratorProvider):
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
- "Referer": "https://gemini-chatbot-sigma.vercel.app/",
- "Origin": "https://gemini-chatbot-sigma.vercel.app",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 00d49b82..d480d13c 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -13,15 +13,13 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
- "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
'CohereForAI/c4ai-command-r-plus',
+ 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
- 'google/gemma-1.1-7b-it',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+ '01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'microsoft/Phi-3-mini-4k-instruct',
- '01-ai/Yi-1.5-34B-Chat'
]
model_aliases = {
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index 6a05c26e..a5e27ccf 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -12,10 +12,16 @@ from ..requests.raise_for_status import raise_for_status
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
+ needs_auth = True
supports_message_history = True
models = [
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
- "mistralai/Mistral-7B-Instruct-v0.2"
+ 'CohereForAI/c4ai-command-r-plus',
+ 'meta-llama/Meta-Llama-3-70B-Instruct',
+ 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+ '01-ai/Yi-1.5-34B-Chat',
+ 'mistralai/Mistral-7B-Instruct-v0.2',
+ 'microsoft/Phi-3-mini-4k-instruct',
]
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@@ -74,4 +80,4 @@ def format_prompt(messages: Messages) -> str:
for idx, message in enumerate(messages)
if message["role"] == "assistant"
])
- return f"{history}<s>[INST] {question} [/INST]" \ No newline at end of file
+ return f"{history}<s>[INST] {question} [/INST]"
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index 849bcdbe..c708bcb9 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -1,7 +1,8 @@
from __future__ import annotations
import json
-from aiohttp import ClientSession, BaseConnector
+from typing import AsyncGenerator, Optional, List, Dict, Union, Any
+from aiohttp import ClientSession, BaseConnector, ClientResponse
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -19,12 +20,13 @@ class Koala(AsyncGeneratorProvider):
cls,
model: str,
messages: Messages,
- proxy: str = None,
- connector: BaseConnector = None,
- **kwargs
- ) -> AsyncResult:
+ proxy: Optional[str] = None,
+ connector: Optional[BaseConnector] = None,
+ **kwargs: Any
+ ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
if not model:
model = "gpt-3.5-turbo"
+
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
@@ -40,13 +42,17 @@ class Koala(AsyncGeneratorProvider):
"Sec-Fetch-Site": "same-origin",
"TE": "trailers",
}
+
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
- input = messages[-1]["content"]
- system_messages = [message["content"] for message in messages if message["role"] == "system"]
+ input_text = messages[-1]["content"]
+ system_messages = " ".join(
+ message["content"] for message in messages if message["role"] == "system"
+ )
if system_messages:
- input += " ".join(system_messages)
+ input_text += f" {system_messages}"
+
data = {
- "input": input,
+ "input": input_text,
"inputHistory": [
message["content"]
for message in messages[:-1]
@@ -59,8 +65,14 @@ class Koala(AsyncGeneratorProvider):
],
"model": model,
}
+
async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
await raise_for_status(response)
- async for chunk in response.content:
- if chunk.startswith(b"data: "):
- yield json.loads(chunk[6:]) \ No newline at end of file
+ async for chunk in cls._parse_event_stream(response):
+ yield chunk
+
+ @staticmethod
+ async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ yield json.loads(chunk[6:])
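_parse_event_stream forwards only the lines carrying a data: prefix and decodes their JSON payload. An offline sketch over hypothetical raw chunks:

import json

chunks = [b'data: {"text": "Hel"}', b": keep-alive", b'data: {"text": "lo"}']
for chunk in chunks:
    if chunk.startswith(b"data: "):
        print(json.loads(chunk[6:]))  # {'text': 'Hel'}, then {'text': 'lo'}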
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 75ecf300..277d8ea2 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -10,7 +10,16 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-4o": {
+ "gpt-3.5-turbo": {
+ "id": "gpt-3.5-turbo",
+ "name": "GPT-3.5-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 48000,
+ "tokenLimit": 14000,
+ "context": "16K",
+ },
+ "gpt-4o-free": {
"context": "8K",
"id": "gpt-4o-free",
"maxLength": 31200,
@@ -19,51 +28,74 @@ models = {
"provider": "OpenAI",
"tokenLimit": 7800,
},
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
- },
- "gpt-4-turbo": {
- "id": "gpt-4-turbo-preview",
+ "gpt-4-turbo-2024-04-09": {
+ "id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
- "gpt-4": {
- "id": "gpt-4-plus",
- "name": "GPT-4-Plus",
- "maxLength": 130000,
- "tokenLimit": 31000,
- "context": "32K",
+ "gpt-4o": {
+ "context": "128K",
+ "id": "gpt-4o",
+ "maxLength": 124000,
+ "model": "ChatGPT",
+ "name": "GPT-4o",
+ "provider": "OpenAI",
+ "tokenLimit": 62000,
},
"gpt-4-0613": {
"id": "gpt-4-0613",
"name": "GPT-4-0613",
- "maxLength": 60000,
- "tokenLimit": 15000,
- "context": "16K",
- },
- "gemini-pro": {
- "id": "gemini-pro",
- "name": "Gemini-Pro",
- "maxLength": 120000,
- "tokenLimit": 30000,
- "context": "32K",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 32000,
+ "tokenLimit": 7600,
+ "context": "8K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
"name": "Claude-3-Opus",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-opus-20240229-aws": {
+ "id": "claude-3-opus-20240229-aws",
+ "name": "Claude-3-Opus-Aws",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
+ "claude-3-opus-100k-poe": {
+ "id": "claude-3-opus-100k-poe",
+ "name": "Claude-3-Opus-100k-Poe",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 400000,
+ "tokenLimit": 99000,
+ "context": "100K",
+ },
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-haiku-20240307": {
+ "id": "claude-3-haiku-20240307",
+ "name": "Claude-3-Haiku",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -71,6 +103,8 @@ models = {
"claude-2.1": {
"id": "claude-2.1",
"name": "Claude-2.1-200k",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -78,16 +112,38 @@ models = {
"claude-2.0": {
"id": "claude-2.0",
"name": "Claude-2.0-100k",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "100K",
},
- "claude-instant-1": {
- "id": "claude-instant-1",
- "name": "Claude-instant-1",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
+ "gemini-1.0-pro-latest": {
+ "id": "gemini-1.0-pro-latest",
+ "name": "Gemini-Pro",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 120000,
+ "tokenLimit": 30000,
+ "context": "32K",
+ },
+ "gemini-1.5-flash-latest": {
+ "id": "gemini-1.5-flash-latest",
+ "name": "Gemini-1.5-Flash-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
+ },
+ "gemini-1.5-pro-latest": {
+ "id": "gemini-1.5-pro-latest",
+ "name": "Gemini-1.5-Pro-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
}
}
@@ -100,9 +156,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-3.5-turbo"
- models = list(models)
+ models = list(models.keys())
model_aliases = {
- "claude-v2": "claude-2"
+ "claude-v2": "claude-2.0"
}
_auth_code = ""
_cookie_jar = None
@@ -131,7 +187,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
data = {
"conversationId": str(uuid.uuid4()),
- "model": models[cls.get_model(model)],
+ "model": models[model],
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -189,3 +245,45 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ """
+ Retrieve the internal model identifier based on the provided model name or alias.
+ """
+ if model in cls.model_aliases:
+ model = cls.model_aliases[model]
+ if model not in models:
+ raise ValueError(f"Model '{model}' is not supported.")
+ return model
+
+ @classmethod
+ def is_supported(cls, model: str) -> bool:
+ """
+ Check if the given model is supported.
+ """
+ return model in models or model in cls.model_aliases
+
+ @classmethod
+ async def initialize_auth_code(cls, session: ClientSession) -> None:
+ """
+ Initialize the auth code by making the necessary login requests.
+ """
+ async with session.post(
+ "https://liaobots.work/api/user",
+ json={"authcode": "pTIQr4FTnVRfr"},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
+ cls._cookie_jar = session.cookie_jar
+
+ @classmethod
+ async def ensure_auth_code(cls, session: ClientSession) -> None:
+ """
+ Ensure the auth code is initialized, and if not, perform the initialization.
+ """
+ if not cls._auth_code:
+ await cls.initialize_auth_code(session)
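Assuming the class as patched above, alias resolution and support checks behave like this:

print(Liaobots.get_model("claude-v2"))            # claude-2.0, via model_aliases
print(Liaobots.is_supported("gpt-4o"))            # True
print(Liaobots.is_supported("claude-instant-1"))  # False, removed from the model table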
diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/MetaAI.py
index caed7778..f1ef348a 100644
--- a/g4f/Provider/MetaAI.py
+++ b/g4f/Provider/MetaAI.py
@@ -12,7 +12,7 @@ from ..typing import AsyncResult, Messages, Cookies
from ..requests import raise_for_status, DEFAULT_HEADERS
from ..image import ImageResponse, ImagePreview
from ..errors import ResponseError
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_connector, format_cookies
class Sources():
@@ -25,10 +25,11 @@ class Sources():
class AbraGeoBlockedError(Exception):
pass
-class MetaAI(AsyncGeneratorProvider):
+class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Meta AI"
url = "https://www.meta.ai"
working = True
+ default_model = ''
def __init__(self, proxy: str = None, connector: BaseConnector = None):
self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)
@@ -234,4 +235,4 @@ def generate_offline_threading_id() -> str:
# Combine timestamp and random value
threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
- return str(threading_id) \ No newline at end of file
+ return str(threading_id)
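generate_offline_threading_id packs a timestamp into the high bits and 22 bits of randomness into the low bits. A sketch of the scheme; only the combining line is taken from the hunk above, while the timestamp unit and random width are assumptions:

import random
import time

def generate_offline_threading_id() -> str:
    timestamp = int(time.time() * 1000)    # assumed millisecond resolution
    random_value = random.getrandbits(64)  # assumed width
    # Combine timestamp and random value
    threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
    return str(threading_id)

print(generate_offline_threading_id())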
diff --git a/g4f/Provider/ReplicateImage.py b/g4f/Provider/ReplicateHome.py
index cc3943d7..48336831 100644
--- a/g4f/Provider/ReplicateImage.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -1,32 +1,61 @@
from __future__ import annotations
-
+from typing import AsyncGenerator, Optional, Dict, Any, Union, List
import random
import asyncio
+import base64
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
-from ..image import ImageResponse
from ..errors import ResponseError
+from ..image import ImageResponse
-class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
+class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
default_model = 'stability-ai/sdxl'
- default_versions = [
- "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
- "2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2"
+ models = [
+ # image
+ 'stability-ai/sdxl',
+ 'ai-forever/kandinsky-2.2',
+
+ # text
+ 'meta/llama-2-70b-chat',
+ 'mistralai/mistral-7b-instruct-v0.2'
]
- image_models = [default_model]
+
+ versions = {
+ # image
+ 'stability-ai/sdxl': [
+ "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
+ "2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
+ "7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
+ ],
+ 'ai-forever/kandinsky-2.2': [
+ "ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
+ ],
+
+ # text
+ 'meta/llama-2-70b-chat': [
+ "dp-542693885b1777c98ef8c5a98f2005e7"
+ ],
+ 'mistralai/mistral-7b-instruct-v0.2': [
+ "dp-89e00f489d498885048e94f9809fbc76"
+ ]
+ }
+
+ image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
+ text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
- **kwargs
- ) -> AsyncResult:
+ **kwargs: Any
+ ) -> AsyncGenerator[Union[str, ImageResponse], None]:
yield await cls.create_async(messages[-1]["content"], model, **kwargs)
@classmethod
@@ -34,13 +63,13 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
cls,
prompt: str,
model: str,
- api_key: str = None,
- proxy: str = None,
+ api_key: Optional[str] = None,
+ proxy: Optional[str] = None,
timeout: int = 180,
- version: str = None,
- extra_data: dict = {},
- **kwargs
- ) -> ImageResponse:
+ version: Optional[str] = None,
+ extra_data: Dict[str, Any] = {},
+ **kwargs: Any
+ ) -> Union[str, ImageResponse]:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
@@ -55,10 +84,12 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
+
if version is None:
- version = random.choice(cls.default_versions)
+ version = random.choice(cls.versions.get(model, []))
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
+
async with StreamSession(
proxies={"all": proxy},
headers=headers,
@@ -81,6 +112,7 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
result = await response.json()
if "id" not in result:
raise ResponseError(f"Invalid response: {result}")
+
while True:
if api_key is None:
url = f"https://homepage.replicate.com/api/poll?id={result['id']}"
@@ -92,7 +124,13 @@ class ReplicateImage(AsyncGeneratorProvider, ProviderModelMixin):
if "status" not in result:
raise ResponseError(f"Invalid response: {result}")
if result["status"] == "succeeded":
- images = result['output']
- images = images[0] if len(images) == 1 else images
- return ImageResponse(images, prompt)
- await asyncio.sleep(0.5) \ No newline at end of file
+ output = result['output']
+ if model in cls.text_models:
+ return ''.join(output) if isinstance(output, list) else output
+ elif model in cls.image_models:
+ images: List[Any] = output
+ images = images[0] if len(images) == 1 else images
+ return ImageResponse(images, prompt)
+ elif result["status"] == "failed":
+ raise ResponseError(f"Prediction failed: {result}")
+ await asyncio.sleep(0.5)
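The polling loop now distinguishes text output (joined into one string) from image output, and raises on a failed prediction. The control flow, reduced to a synchronous sketch with a stubbed poller:

import time

def poll_until_done(fetch_status):
    while True:
        result = fetch_status()
        if result["status"] == "succeeded":
            return result["output"]
        elif result["status"] == "failed":
            raise RuntimeError(f"Prediction failed: {result}")
        time.sleep(0.5)

print(poll_until_done(lambda: {"status": "succeeded", "output": ["hi"]}))  # ['hi']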
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index dab6f5d5..56c01150 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -10,28 +10,24 @@ from .not_working import *
from .selenium import *
from .needs_auth import *
-from .Aichatos import Aichatos
+from .AI365VIP import AI365VIP
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
-from .ChatForAi import ChatForAi
+from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
-from .ChatgptAi import ChatgptAi
from .ChatgptFree import ChatgptFree
-from .ChatgptNext import ChatgptNext
-from .ChatgptX import ChatgptX
-from .Cnote import Cnote
from .Cohere import Cohere
+from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
-from .Feedough import Feedough
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
-from .GigaChat import GigaChat
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
+from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
@@ -45,12 +41,12 @@ from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
+from .Reka import Reka
from .Replicate import Replicate
-from .ReplicateImage import ReplicateImage
+from .ReplicateHome import ReplicateHome
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
-from .Reka import Reka
import sys
diff --git a/g4f/Provider/deprecated/Yqcloud.py b/g4f/Provider/deprecated/Yqcloud.py
index 2ec6931a..227f8995 100644
--- a/g4f/Provider/deprecated/Yqcloud.py
+++ b/g4f/Provider/deprecated/Yqcloud.py
@@ -9,7 +9,7 @@ from ..base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
- working = True
+ working = False
supports_gpt_35_turbo = True
@staticmethod
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index f40ae961..9321c24a 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -406,7 +406,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._update_request_args(session)
await raise_for_status(response)
requirements = await response.json()
- need_arkose = requirements.get("arkose", {}).get("required")
+ text_data = json.loads(requirements.get("text", "{}"))
+ need_arkose = text_data.get("turnstile", {}).get("required", False)
+ if need_arkose:
+ arkose_token = text_data.get("turnstile", {}).get("dx")
+ else:
+ need_arkose = requirements.get("arkose", {}).get("required", False)
chat_token = requirements["token"]
if need_arkose and arkose_token is None:
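The patched requirement check first looks for a Turnstile challenge inside the JSON-encoded text field and only falls back to the Arkose flag when none is required. An offline sketch with a hypothetical payload:

import json

requirements = {
    "token": "chat-token",
    "arkose": {"required": False},
    "text": json.dumps({"turnstile": {"required": True, "dx": "dx-token"}}),
}
text_data = json.loads(requirements.get("text", "{}"))
need_arkose = text_data.get("turnstile", {}).get("required", False)
arkose_token = text_data.get("turnstile", {}).get("dx") if need_arkose else None
print(need_arkose, arkose_token)  # True dx-token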
diff --git a/g4f/Provider/Aichatos.py b/g4f/Provider/not_working/Aichatos.py
index 1d4747d7..d651abf3 100644
--- a/g4f/Provider/Aichatos.py
+++ b/g4f/Provider/not_working/Aichatos.py
@@ -2,16 +2,16 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
import random
class Aichatos(AsyncGeneratorProvider):
url = "https://chat10.aichatos.xyz"
api = "https://api.binjie.fun"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/not_working/ChatForAi.py
index 1c693955..b7f13c3d 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/not_working/ChatForAi.py
@@ -4,14 +4,14 @@ import time
import hashlib
import uuid
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..errors import RateLimitError
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatforai.store"
- working = True
+ working = False
default_model = "gpt-3.5-turbo"
supports_message_history = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/not_working/ChatgptAi.py
index d15140d7..5c694549 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/not_working/ChatgptAi.py
@@ -3,14 +3,14 @@ from __future__ import annotations
import re, html, json, string, random
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from ..errors import RateLimitError
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import Messages, AsyncResult
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
class ChatgptAi(AsyncGeneratorProvider):
url = "https://chatgpt.ai"
- working = True
+ working = False
supports_message_history = True
supports_system_message = True
supports_gpt_4 = True
@@ -85,4 +85,4 @@ class ChatgptAi(AsyncGeneratorProvider):
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
- break \ No newline at end of file
+ break
diff --git a/g4f/Provider/ChatgptNext.py b/g4f/Provider/not_working/ChatgptNext.py
index 2d6f7487..1c15dd67 100644
--- a/g4f/Provider/ChatgptNext.py
+++ b/g4f/Provider/not_working/ChatgptNext.py
@@ -3,12 +3,12 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class ChatgptNext(AsyncGeneratorProvider):
url = "https://www.chatgpt-free.cc"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_message_history = True
supports_system_message = True
@@ -63,4 +63,4 @@ class ChatgptNext(AsyncGeneratorProvider):
if chunk.startswith(b"data: "):
content = json.loads(chunk[6:])["choices"][0]["delta"].get("content")
if content:
- yield content \ No newline at end of file
+ yield content
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/not_working/ChatgptX.py
index 9be0d89b..760333d9 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/not_working/ChatgptX.py
@@ -4,15 +4,15 @@ import re
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-from ..errors import RateLimitError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+from ...errors import RateLimitError
class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de"
supports_gpt_35_turbo = True
- working = True
+ working = False
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Cnote.py b/g4f/Provider/not_working/Cnote.py
index 7d4018bb..48626982 100644
--- a/g4f/Provider/Cnote.py
+++ b/g4f/Provider/not_working/Cnote.py
@@ -3,15 +3,15 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class Cnote(AsyncGeneratorProvider):
url = "https://f1.cnote.top"
api_url = "https://p1api.xjai.pro/freeapi/chat-process"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
diff --git a/g4f/Provider/not_working/Feedough.py b/g4f/Provider/not_working/Feedough.py
new file mode 100644
index 00000000..24c33d14
--- /dev/null
+++ b/g4f/Provider/not_working/Feedough.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, TCPConnector
+from urllib.parse import urlencode
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.feedough.com"
+ api_endpoint = "/wp-admin/admin-ajax.php"
+ working = False
+ default_model = ''
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/ai-prompt-generator/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+
+ connector = TCPConnector(ssl=False)
+
+ async with ClientSession(headers=headers, connector=connector) as session:
+ data = {
+ "action": "aixg_generate",
+ "prompt": format_prompt(messages),
+ "aixg_generate_nonce": "110c021031"
+ }
+
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ data=urlencode(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+ try:
+ response_json = json.loads(response_text)
+ if response_json.get("success") and "data" in response_json:
+ message = response_json["data"].get("message", "")
+ yield message
+ except json.JSONDecodeError:
+ yield response_text
+ except Exception as e:
+ print(f"An error occurred: {e}")
+
+ @classmethod
+ async def run(cls, *args, **kwargs):
+ async for item in cls.create_async_generator(*args, **kwargs):
+ yield item
+
+ # exclude the current task; awaiting it from inside itself would raise RuntimeError
+ tasks = asyncio.all_tasks() - {asyncio.current_task()}
+ for task in tasks:
+ if not task.done():
+ await task
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index 4778c968..c4c9a5a1 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -1,14 +1,21 @@
from .AItianhu import AItianhu
+from .Aichatos import Aichatos
from .Bestim import Bestim
from .ChatBase import ChatBase
+from .ChatForAi import ChatForAi
+from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDemoAi import ChatgptDemoAi
from .ChatgptLogin import ChatgptLogin
+from .ChatgptNext import ChatgptNext
+from .ChatgptX import ChatgptX
from .Chatxyz import Chatxyz
+from .Cnote import Cnote
+from .Feedough import Feedough
from .Gpt6 import Gpt6
from .GptChatly import GptChatly
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
-from .OnlineGpt import OnlineGpt \ No newline at end of file
+from .OnlineGpt import OnlineGpt
diff --git a/g4f/models.py b/g4f/models.py
index 78e1d74e..e9016561 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -4,30 +4,34 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
- Aichatos,
- Bing,
- Blackbox,
- ChatgptAi,
- ChatgptNext,
- Cnote,
- DeepInfra,
- Feedough,
- FreeGpt,
- Gemini,
- GeminiPro,
- GigaChat,
- HuggingChat,
- HuggingFace,
- Koala,
- Liaobots,
- MetaAI,
- OpenaiChat,
- PerplexityLabs,
- Replicate,
- Pi,
- Vercel,
- You,
- Reka
+ AI365VIP,
+ Bing,
+ Blackbox,
+ Chatgpt4o,
+ ChatgptFree,
+ DDG,
+ DeepInfra,
+ DeepInfraImage,
+ FreeChatgpt,
+ FreeGpt,
+ Gemini,
+ GeminiPro,
+ GeminiProChat,
+ GigaChat,
+ HuggingChat,
+ HuggingFace,
+ Koala,
+ Liaobots,
+ MetaAI,
+ OpenaiChat,
+ PerplexityLabs,
+ Pi,
+ Pizzagpt,
+ Reka,
+ Replicate,
+ ReplicateHome,
+ Vercel,
+ You,
)
@dataclass(unsafe_hash=True)
@@ -54,9 +58,15 @@ default = Model(
base_provider = "",
best_provider = IterListProvider([
Bing,
- ChatgptAi,
You,
OpenaiChat,
+ FreeChatgpt,
+ AI365VIP,
+ Chatgpt4o,
+ DDG,
+ ChatgptFree,
+ Koala,
+ Pizzagpt,
])
)
@@ -67,28 +77,58 @@ gpt_35_long = Model(
best_provider = IterListProvider([
FreeGpt,
You,
- ChatgptNext,
OpenaiChat,
Koala,
+ ChatgptFree,
+ FreeChatgpt,
+ DDG,
+ AI365VIP,
+ Pizzagpt,
])
)
-# GPT-3.5 / GPT-4
+############
+### Text ###
+############
+
+### OpenAI ###
+### GPT-3.5 / GPT-4 ###
+# gpt-3.5
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = IterListProvider([
FreeGpt,
You,
- ChatgptNext,
Koala,
OpenaiChat,
- Aichatos,
- Cnote,
- Feedough,
+ ChatgptFree,
+ FreeChatgpt,
+ DDG,
+ AI365VIP,
+ Pizzagpt,
])
)
+gpt_35_turbo_16k = Model(
+ name = 'gpt-3.5-turbo-16k',
+ base_provider = 'openai',
+ best_provider = gpt_35_long.best_provider
+)
+
+gpt_35_turbo_16k_0613 = Model(
+ name = 'gpt-3.5-turbo-16k-0613',
+ base_provider = 'openai',
+ best_provider = gpt_35_long.best_provider
+)
+
+gpt_35_turbo_0613 = Model(
+ name = 'gpt-3.5-turbo-0613',
+ base_provider = 'openai',
+ best_provider = gpt_35_turbo.best_provider
+)
+
+# gpt-4
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
@@ -97,12 +137,22 @@ gpt_4 = Model(
])
)
-gpt_4o = Model(
- name = 'gpt-4o',
+gpt_4_0613 = Model(
+ name = 'gpt-4-0613',
base_provider = 'openai',
- best_provider = IterListProvider([
- You, Liaobots
- ])
+ best_provider = gpt_4.best_provider
+)
+
+gpt_4_32k = Model(
+ name = 'gpt-4-32k',
+ base_provider = 'openai',
+ best_provider = gpt_4.best_provider
+)
+
+gpt_4_32k_0613 = Model(
+ name = 'gpt-4-32k-0613',
+ base_provider = 'openai',
+ best_provider = gpt_4.best_provider
)
gpt_4_turbo = Model(
@@ -111,18 +161,36 @@ gpt_4_turbo = Model(
best_provider = Bing
)
+gpt_4o = Model(
+ name = 'gpt-4o',
+ base_provider = 'openai',
+ best_provider = IterListProvider([
+ You, Liaobots, Chatgpt4o, AI365VIP
+ ])
+)
+
+
+### GigaChat ###
gigachat = Model(
name = 'GigaChat:latest',
base_provider = 'gigachat',
best_provider = GigaChat
)
+
+### Meta ###
meta = Model(
name = "meta",
base_provider = "meta",
best_provider = MetaAI
)
+llama_2_70b_chat = Model(
+ name = "meta/llama-2-70b-chat",
+ base_provider = "meta",
+ best_provider = IterListProvider([ReplicateHome])
+)
+
llama3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
@@ -132,7 +200,7 @@ llama3_8b_instruct = Model(
llama3_70b_instruct = Model(
name = "meta-llama/Meta-Llama-3-70B-Instruct",
base_provider = "meta",
- best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
+ best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, HuggingChat, DDG])
)
codellama_34b_instruct = Model(
@@ -144,35 +212,77 @@ codellama_34b_instruct = Model(
codellama_70b_instruct = Model(
name = "codellama/CodeLlama-70b-Instruct-hf",
base_provider = "meta",
- best_provider = IterListProvider([DeepInfra, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra])
)
-# Mistral
+
+### Mistral ###
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
-)
-
-mistral_7b = Model(
- name = "mistralai/Mistral-7B-Instruct-v0.1",
- base_provider = "huggingface",
- best_provider = IterListProvider([HuggingChat, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG])
)
mistral_7b_v02 = Model(
name = "mistralai/Mistral-7B-Instruct-v0.2",
base_provider = "huggingface",
- best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
+ best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat, ReplicateHome])
+)
+
+
+### NousResearch ###
+Nous_Hermes_2_Mixtral_8x7B_DPO = Model(
+ name = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+ base_provider = "NousResearch",
+ best_provider = IterListProvider([HuggingFace, HuggingChat])
+)
+
+
+### 01-ai ###
+Yi_1_5_34B_Chat = Model(
+ name = "01-ai/Yi-1.5-34B-Chat",
+ base_provider = "01-ai",
+ best_provider = IterListProvider([HuggingFace, HuggingChat])
+)
+
+
+### Microsoft ###
+Phi_3_mini_4k_instruct = Model(
+ name = "microsoft/Phi-3-mini-4k-instruct",
+ base_provider = "Microsoft",
+ best_provider = IterListProvider([HuggingFace, HuggingChat])
)
-# Bard
+
+### Google ###
+# gemini
gemini = Model(
name = 'gemini',
- base_provider = 'google',
+ base_provider = 'Google',
best_provider = Gemini
)
+gemini_pro = Model(
+ name = 'gemini-pro',
+ base_provider = 'Google',
+ best_provider = IterListProvider([GeminiPro, You, GeminiProChat])
+)
+
+# gemma
+gemma_2_9b_it = Model(
+ name = 'gemma-2-9b-it',
+ base_provider = 'Google',
+ best_provider = IterListProvider([PerplexityLabs])
+)
+
+gemma_2_27b_it = Model(
+ name = 'gemma-2-27b-it',
+ base_provider = 'Google',
+ best_provider = IterListProvider([PerplexityLabs])
+)
+
+
+### Anthropic ###
claude_v2 = Model(
name = 'claude-v2',
base_provider = 'anthropic',
@@ -194,79 +304,76 @@ claude_3_sonnet = Model(
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'anthropic',
- best_provider = None
+ best_provider = IterListProvider([DDG, AI365VIP])
)
-gpt_35_turbo_16k = Model(
- name = 'gpt-3.5-turbo-16k',
- base_provider = 'openai',
- best_provider = gpt_35_long.best_provider
-)
-gpt_35_turbo_16k_0613 = Model(
- name = 'gpt-3.5-turbo-16k-0613',
- base_provider = 'openai',
- best_provider = gpt_35_long.best_provider
+### Reka AI ###
+reka_core = Model(
+ name = 'reka-core',
+ base_provider = 'Reka AI',
+ best_provider = Reka
)
-gpt_35_turbo_0613 = Model(
- name = 'gpt-3.5-turbo-0613',
- base_provider = 'openai',
- best_provider = gpt_35_turbo.best_provider
-)
-gpt_4_0613 = Model(
- name = 'gpt-4-0613',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
+### NVIDIA ###
+nemotron_4_340b_instruct = Model(
+ name = 'nemotron-4-340b-instruct',
+ base_provider = 'NVIDIA',
+ best_provider = IterListProvider([PerplexityLabs])
)
-gpt_4_32k = Model(
- name = 'gpt-4-32k',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
+
+### Blackbox ###
+blackbox = Model(
+ name = 'blackbox',
+ base_provider = 'Blackbox',
+ best_provider = Blackbox
)
-gpt_4_32k_0613 = Model(
- name = 'gpt-4-32k-0613',
- base_provider = 'openai',
- best_provider = gpt_4.best_provider
+
+### Databricks ###
+dbrx_instruct = Model(
+ name = 'databricks/dbrx-instruct',
+ base_provider = 'Databricks',
+ best_provider = IterListProvider([DeepInfra])
)
-gemini_pro = Model(
- name = 'gemini-pro',
- base_provider = 'google',
- best_provider = IterListProvider([GeminiPro, You])
+
+### CohereForAI ###
+command_r_plus = Model(
+ name = 'CohereForAI/c4ai-command-r-plus',
+ base_provider = 'CohereForAI',
+ best_provider = IterListProvider([HuggingChat])
)
+
+### Other ###
pi = Model(
name = 'pi',
base_provider = 'inflection',
best_provider = Pi
)
-dbrx_instruct = Model(
- name = 'databricks/dbrx-instruct',
- base_provider = 'mistral',
- best_provider = IterListProvider([DeepInfra, PerplexityLabs])
-)
-command_r_plus = Model(
- name = 'CohereForAI/c4ai-command-r-plus',
- base_provider = 'mistral',
- best_provider = IterListProvider([HuggingChat])
-)
+#############
+### Image ###
+#############
-blackbox = Model(
- name = 'blackbox',
- base_provider = 'blackbox',
- best_provider = Blackbox
+### Stability AI ###
+sdxl = Model(
+ name = 'stability-ai/sdxl',
+ base_provider = 'Stability AI',
+ best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
)
-reka_core = Model(
- name = 'reka-core',
- base_provider = 'Reka AI',
- best_provider = Reka
+### AI Forever ###
+kandinsky_2_2 = Model(
+ name = 'ai-forever/kandinsky-2.2',
+ base_provider = 'AI Forever',
+ best_provider = IterListProvider([ReplicateHome])
)
class ModelUtils:
@@ -277,6 +384,13 @@ class ModelUtils:
convert (dict[str, Model]): Dictionary mapping model string identifiers to Model instances.
"""
convert: dict[str, Model] = {
+
+ ############
+ ### Text ###
+ ############
+
+ ### OpenAI ###
+ ### GPT-3.5 / GPT-4 ###
# gpt-3.5
'gpt-3.5-turbo' : gpt_35_turbo,
'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
@@ -291,8 +405,12 @@ class ModelUtils:
'gpt-4-32k' : gpt_4_32k,
'gpt-4-32k-0613' : gpt_4_32k_0613,
'gpt-4-turbo' : gpt_4_turbo,
-
+
+
+ ### Meta ###
"meta-ai": meta,
+
+ 'llama-2-70b-chat': llama_2_70b_chat,
'llama3-8b': llama3_8b_instruct, # alias
'llama3-70b': llama3_70b_instruct, # alias
'llama3-8b-instruct' : llama3_8b_instruct,
@@ -301,30 +419,79 @@ class ModelUtils:
'codellama-34b-instruct': codellama_34b_instruct,
'codellama-70b-instruct': codellama_70b_instruct,
- # Mistral Opensource
+
+ ### Mistral (Opensource) ###
'mixtral-8x7b': mixtral_8x7b,
- 'mistral-7b': mistral_7b,
'mistral-7b-v02': mistral_7b_v02,
+
+
+ ### NousResearch ###
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
- # google gemini
+
+ ### 01-ai ###
+ 'Yi-1.5-34B-Chat': Yi_1_5_34B_Chat,
+
+
+ ### Microsoft ###
+ 'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
+
+
+ ### Google ###
+ # gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
+
+ # gemma
+ 'gemma-2-9b-it': gemma_2_9b_it,
+ 'gemma-2-27b-it': gemma_2_27b_it,
+
- # anthropic
+ ### Anthropic ###
'claude-v2': claude_v2,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-haiku': claude_3_haiku,
- # reka core
+
+ ### Reka AI ###
'reka': reka_core,
- # other
- 'blackbox': blackbox,
+
+ ### NVIDIA ###
+ 'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
+
+
+ ### Blackbox ###
+ 'blackbox': blackbox,
+
+
+ ### CohereForAI ###
'command-r+': command_r_plus,
+
+
+ ### Databricks ###
'dbrx-instruct': dbrx_instruct,
+
+
+ ### GigaChat ###
'gigachat': gigachat,
- 'pi': pi
+
+
+ # Other
+ 'pi': pi,
+
+
+ #############
+ ### Image ###
+ #############
+
+ ### Stability AI ###
+ 'sdxl': sdxl,
+
+ ### AI Forever ###
+ 'kandinsky-2.2': kandinsky_2_2,
}
_all_models = list(ModelUtils.convert.keys())
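A quick way to inspect the rebuilt registry, using only names defined in this file:

from g4f.models import ModelUtils, _all_models

model = ModelUtils.convert["gpt-3.5-turbo"]
print(model.name, model.base_provider)  # gpt-3.5-turbo openai
print(len(_all_models), "model ids registered")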