From bb9132bcb42a5f720398b65c721cc77957555863 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Mon, 9 Dec 2024 15:52:25 +0000
Subject: Updating provider documentation and small fixes in providers (#2469)
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
- Add hidden_models set to exclude specific models
- Add evil alias for uncensored model handling
- Extend filtering for model-specific response tokens
- Add response buffering for streamed content (filtering and buffering sketched below)
- Update model fetching with error handling
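A minimal sketch of the filtering and buffering pattern these bullets describe. The `hidden_models` name matches the diff below; `TOKEN_MARKERS`, `filter_models`, and `buffer_stream` are hypothetical illustrations, not the provider's actual helpers:

```python
import re

# Models excluded from the public model list (set name matches the diff below).
hidden_models = {"Flux-1.1-Pro"}

# Hypothetical marker pattern; the real provider strips model-specific tokens.
TOKEN_MARKERS = re.compile(r"\[(ERROR|DONE)\]")

def filter_models(models):
    """Drop models that should not be user-selectable."""
    return [m for m in models if m not in hidden_models]

def buffer_stream(chunks):
    """Buffer streamed chunks so markers split across chunk
    boundaries can be removed before text is yielded."""
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        cleaned = TOKEN_MARKERS.sub("", buffer)
        # Hold back a short tail that could still be a partial marker.
        safe, buffer = cleaned[:-8], cleaned[-8:]
        if safe:
            yield safe
    if buffer:
        yield TOKEN_MARKERS.sub("", buffer)
```

Holding back a short tail keeps a marker that straddles two chunks from leaking into the output.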
* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
- Add caching system for validated values with file-based storage (sketched below)
- Rename 'flux' model to 'ImageGeneration' and update references
- Add temperature, top_p and max_tokens parameters to generator
- Simplify HTTP headers and remove redundant options
- Add model alias mapping for ImageGeneration
- Add file system utilities for cache management
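A sketch of the file-based caching pattern described above, assuming a JSON cache file; the path and helper names are illustrative, not the exact ones in the diff:

```python
import json
from pathlib import Path

# Illustrative cache location; the real provider derives its own path.
CACHE_FILE = Path(".cache") / "blackbox.json"

def _read() -> dict:
    if CACHE_FILE.exists():
        try:
            return json.loads(CACHE_FILE.read_text())
        except json.JSONDecodeError:
            return {}  # Corrupt cache: start over rather than crash.
    return {}

def load_cached_value(key: str):
    """Return a previously validated value, or None if not cached."""
    return _read().get(key)

def save_cached_value(key: str, value) -> None:
    """Persist a validated value so later runs can skip re-validation."""
    CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
    data = _read()
    data[key] = value
    CACHE_FILE.write_text(json.dumps(data))
```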
* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
- Add file-based caching system for access tokens and sessions
- Add robust error handling with specific error messages
- Add automatic dialog continuation on resource limits
- Add HTML parsing with BeautifulSoup for token extraction
- Add debug logging for error tracking
- Add timeout configuration for API requests
* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
- Change default model from llama-3-405b to llama-3-70b
- Remove llama-3-405b from supported models list
- Remove llama-3.1-405b from model aliases
* feat(g4f/Provider/Blackbox2.py): add image generation support
- Add image model 'flux' with dedicated API endpoint
- Refactor generator to support both text and image outputs
- Extract headers into reusable static method
- Add type hints for AsyncGenerator return type
- Split generation logic into _generate_text and _generate_image methods (see the sketch below)
- Add ImageResponse handling for image generation results
BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult
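Under those changes, the dispatch might look like this sketch (class name and method bodies are simplified stand-ins; only the overall shape follows the commit):

```python
from __future__ import annotations

from typing import AsyncGenerator

class Blackbox2Sketch:
    image_models = ["flux"]

    @classmethod
    async def create_async_generator(cls, model: str, messages, **kwargs) -> AsyncGenerator:
        # Dispatch on modality: image models go to the dedicated image endpoint.
        gen = cls._generate_image if model in cls.image_models else cls._generate_text
        async for item in gen(model, messages, **kwargs):
            yield item

    @classmethod
    async def _generate_text(cls, model, messages, **kwargs):
        yield "streamed text chunk"  # stand-in for the real API call

    @classmethod
    async def _generate_image(cls, model, messages, **kwargs):
        yield {"image_url": "https://example.com/image.png"}  # stand-in for ImageResponse
```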
* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
- Update models list to include gpt-3.5-turbo
- Remove chatgpt-4o-latest from supported models
- Remove model_aliases mapping for gpt-4o
* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
- Add Accept-Language header for internationalization (example below)
- Maintain existing header configuration
- Improve request compatibility with language preferences
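For illustration, the change amounts to one extra entry in the request headers (the value shown is a typical default, not necessarily the one the provider sends):

```python
headers = {
    "Content-Type": "application/json",
    # Newly added: advertise the client's preferred response languages.
    "Accept-Language": "en-US,en;q=0.9",
}
```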
* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
- Add ProviderModelMixin to class inheritance
- Import ProviderModelMixin from base_provider
- Move BaseConversation import to base_provider imports
* refactor(g4f/Provider/Liaobots.py): update model details and aliases
- Add version suffix to o1 model IDs
- Update model aliases for o1-preview and o1-mini
- Standardize version format across model definitions
* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
- Split generation logic into dedicated image/text methods
- Add additional text models including sur and claude
- Add width/height parameters for image generation
- Add model existence validation
- Add hasattr checks for model lists initialization (sketched below)
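A sketch of the guarded, lazy model-list initialization these bullets describe; the fetch URL matches the diff below, while the class name and error handling are simplified:

```python
import requests

class PollinationsAISketch:
    @classmethod
    def get_models(cls):
        # hasattr guard: the class attribute may not exist yet on first call.
        if not hasattr(cls, "image_models"):
            cls.image_models = []
        if not cls.image_models:
            response = requests.get("https://image.pollinations.ai/models")
            response.raise_for_status()
            cls.image_models = response.json()
        return cls.image_models
```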
* chore(gitignore): add provider cache directory
- Add g4f/Provider/.cache to gitignore patterns
* refactor(g4f/Provider/ReplicateHome.py): update model configuration
- Update default model to gemma-2b-it
- Add default_image_model configuration
- Remove llava-13b from supported models
- Simplify request headers
* feat(g4f/models.py): expand provider and model support
- Add new providers DarkAI and PollinationsAI
- Add new models for Mistral, Flux and image generation
- Update provider lists for existing models
- Add P1 and Evil models with experimental providers
BREAKING CHANGE: Remove llava-13b model support
* refactor(g4f/Provider/Airforce.py): Update type hint for split_message return
- Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import.
- Maintain overall functionality and structure of the 'Airforce' class.
- Ensure compatibility with type hinting standards in Python.
* feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency
- Introduce a check for the BeautifulSoup library and handle its absence gracefully (pattern shown below).
- Raise a MissingRequirementsError if BeautifulSoup is not installed, prompting the user to install it.
- Remove direct import of BeautifulSoup to avoid import errors when the library is missing.
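The optional-dependency pattern referenced above, in minimal form (the real provider raises g4f's MissingRequirementsError; RuntimeError keeps this sketch self-contained):

```python
try:
    from bs4 import BeautifulSoup
    HAS_BEAUTIFULSOUP = True
except ImportError:
    HAS_BEAUTIFULSOUP = False
    BeautifulSoup = None

def require_beautifulsoup():
    """Fail with an actionable message instead of an ImportError at import time."""
    if not HAS_BEAUTIFULSOUP:
        raise RuntimeError(
            'Install "beautifulsoup4" package | pip install -U beautifulsoup4'
        )
```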
* fix: Updating provider documentation and small fixes in providers
* Disabled the provider (RobocodersAPI)
* Fix: Conflicting file g4f/models.py
* Update g4f/models.py g4f/Provider/Airforce.py
* Update docs/providers-and-models.md g4f/models.py g4f/Provider/Airforce.py g4f/Provider/PollinationsAI.py
* Update docs/providers-and-models.md
* Update .gitignore
* Update g4f/models.py
* Update g4f/Provider/PollinationsAI.py
---------
Co-authored-by: kqlio67 <>
---
.gitignore | 1 -
docs/providers-and-models.md | 272 +++++++++++++-----------------
g4f/Provider/Airforce.py | 6 +-
g4f/Provider/AmigoChat.py | 2 -
g4f/Provider/Blackbox.py | 6 +-
g4f/Provider/ChatGptEs.py | 2 +-
g4f/Provider/DDG.py | 1 +
g4f/Provider/DeepInfraChat.py | 1 +
g4f/Provider/Flux.py | 5 +-
g4f/Provider/FreeGpt.py | 2 +
g4f/Provider/GizAI.py | 2 +-
g4f/Provider/Liaobots.py | 2 +-
g4f/Provider/MagickPen.py | 87 ----------
g4f/Provider/PerplexityLabs.py | 5 +-
g4f/Provider/PollinationsAI.py | 16 +-
g4f/Provider/Reka.py | 148 ----------------
g4f/Provider/RobocodersAPI.py | 238 --------------------------
g4f/Provider/RubiksAI.py | 3 +-
g4f/Provider/Upstage.py | 91 ----------
g4f/Provider/__init__.py | 10 +-
g4f/Provider/needs_auth/Gemini.py | 8 +
g4f/Provider/needs_auth/GeminiPro.py | 10 +-
g4f/Provider/needs_auth/GithubCopilot.py | 6 +-
g4f/Provider/needs_auth/HuggingChat.py | 12 +-
g4f/Provider/needs_auth/HuggingFace.py | 2 +-
g4f/Provider/needs_auth/HuggingFace2.py | 28 ---
g4f/Provider/needs_auth/HuggingFaceAPI.py | 28 +++
g4f/Provider/needs_auth/Poe.py | 4 +-
g4f/Provider/needs_auth/Raycast.py | 2 -
g4f/Provider/needs_auth/Reka.py | 148 ++++++++++++++++
g4f/Provider/needs_auth/Theb.py | 5 +-
g4f/Provider/needs_auth/__init__.py | 3 +-
g4f/Provider/not_working/MagickPen.py | 87 ++++++++++
g4f/Provider/not_working/RobocodersAPI.py | 238 ++++++++++++++++++++++++++
g4f/Provider/not_working/Upstage.py | 91 ++++++++++
g4f/Provider/not_working/__init__.py | 5 +-
g4f/gui/client/index.html | 4 +-
g4f/models.py | 266 +++++++++++------------------
38 files changed, 883 insertions(+), 964 deletions(-)
delete mode 100644 g4f/Provider/MagickPen.py
delete mode 100644 g4f/Provider/Reka.py
delete mode 100755 g4f/Provider/RobocodersAPI.py
delete mode 100644 g4f/Provider/Upstage.py
delete mode 100644 g4f/Provider/needs_auth/HuggingFace2.py
create mode 100644 g4f/Provider/needs_auth/HuggingFaceAPI.py
create mode 100644 g4f/Provider/needs_auth/Reka.py
create mode 100644 g4f/Provider/not_working/MagickPen.py
create mode 100755 g4f/Provider/not_working/RobocodersAPI.py
create mode 100644 g4f/Provider/not_working/Upstage.py
diff --git a/.gitignore b/.gitignore
index 143b5858..f4043d98 100644
--- a/.gitignore
+++ b/.gitignore
@@ -66,4 +66,3 @@ bench.py
to-reverse.txt
g4f/Provider/OpenaiChat2.py
generated_images/
-g4f/Provider/.cache
diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md
index 7c6bc613..134bc0aa 100644
--- a/docs/providers-and-models.md
+++ b/docs/providers-and-models.md
@@ -1,203 +1,171 @@
+
# G4F - Providers and Models
This document provides an overview of various AI providers and models, including text generation, image generation, and vision capabilities. It aims to help users navigate the diverse landscape of AI services and choose the most suitable option for their needs.
## Table of Contents
- [Providers](#providers)
+ - [Free](#providers-free)
+ - [Needs Auth](#providers-needs-auth)
- [Models](#models)
- [Text Models](#text-models)
- [Image Models](#image-models)
- - [Vision Models](#vision-models)
- - [Providers and vision models](#providers-and-vision-models)
- [Conclusion and Usage Tips](#conclusion-and-usage-tips)
---
## Providers
+
+### Providers Free
+| Website | Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
+|----------|-------------|--------------|---------------|--------|--------|------|------|
+|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`phi-2, gpt-4, gpt-4o-mini, gpt-4o, gpt-4-turbo, o1-mini, openchat-3.5, deepseek-coder, hermes-2-dpo, hermes-2-pro, openhermes-2.5, lfm-40b, german-7b, llama-2-7b, llama-3.1-70b, neural-7b, zephyr-7b, evil`|`sdxl, flux-pro, flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3`|❌|✔||❌+✔|
+|[amigochat.io](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|✔|✔|❌|✔||❌|
+|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|`flux`|`blackboxai, gpt-4o, gemini-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔||❌|
+|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox2`|`llama-3.1-70b`|`flux`|❌|✔||❌|
+|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|✔|❌|❌|✔||❌|
+|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔||❌|
+|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, qwen-1.5-7b`|❌|❌|✔||❌|
+|[copilot.microsoft.com](https://copilot.microsoft.com)|`g4f.Provider.Copilot`|`gpt-4`|❌|❌|✔||❌|
+|[darkai.foundation](https://darkai.foundation)|`g4f.Provider.DarkAI`|`gpt-3.5-turbo, gpt-4o, llama-3.1-70b`|❌|❌|✔||❌|
+|[duckduckgo.com/aichat](https://duckduckgo.com/aichat)|`g4f.Provider.DDG`|`gpt-4, gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔||❌|
+|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, qwq-32b, wizardlm-2-8x22b, qwen-2-72b, qwen-2.5-coder-32b, nemotron-70b`|❌|❌|✔||❌|
+|[black-forest-labs-flux-1-dev.hf.space](https://black-forest-labs-flux-1-dev.hf.space)|`g4f.Provider.Flux`|❌|`flux-dev`|❌|✔||❌|
+|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`mistral-7b`|❌|❌|✔||❌|
+|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`gemini-pro`|❌|❌|✔||❌|
+|[app.giz.ai/assistant](https://app.giz.ai/assistant)|`g4f.Provider.GizAI`|`gemini-flash`|❌|❌|✔||❌|
+|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`grok-beta, gpt-4o-mini, gpt-4o, gpt-4, o1-preview, o1-mini, claude-3-opus, claude-3.5-sonnet, claude-3-sonnet, gemini-flash, gemini-pro`|❌|❌|✔||❌|
+|[mhystical.cc](https://mhystical.cc)|`g4f.Provider.Mhystical`|`gpt-4`|❌|❌|✔||❌|
+|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.3-70b, llama-3.1-8b, llama-3.1-70b, lfm-40b`|❌|❌|✔||❌|
+|[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|✔||❌|
+|[pizzagpt.it](https://www.pizzagpt.it)|`g4f.Provider.Pizzagpt`|`gpt-4o-mini`|❌|❌|✔||❌|
+|[pollinations.ai](https://pollinations.ai)|`g4f.Provider.PollinationsAI`|`gpt-4o, mistral-large, mistral-nemo, llama-3.1-70b, gpt-4, qwen-2.5-coder-32b, claude-3.5-sonnet, command-r, evil, p1, turbo, unity, midijourney, rtist`|`flux, flux-realism, flux-cablyai, flux-anime, flux-3d, any-dark, flux-pro, midjourney, dall-e-3`|❌|✔||❌|
+|[app.prodia.com](https://app.prodia.com)|`g4f.Provider.Prodia`|❌|✔|❌|❌||❌|
+|[replicate.com](https://replicate.com)|`g4f.Provider.ReplicateHome`|`gemma-2b`|`sd-3, sdxl, playground-v2.5`|❌|❌||❌|
+|[rubiks.ai](https://rubiks.ai)|`g4f.Provider.RubiksAI`|`gpt-4o-mini, llama-3.1-70b`|❌|❌|✔||❌|
+|[teach-anything.com](https://www.teach-anything.com)|`g4f.Provider.TeachAnything`|`llama-3.1-70b`|❌|❌|✔||❌|
+|[you.com](https://you.com)|`g4f.Provider.You`|✔|✔|✔|✔||❌|
+
+
+---
+### Providers Needs Auth
| Website | Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
|----------|-------------|--------------|---------------|--------|--------|------|------|
-|[ai4chat.co](https://www.ai4chat.co)|`g4f.Provider.Ai4Chat`|`gpt-4`|❌|❌|✔||❌|
-|[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`|❌|❌|✔||❌|
-|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4o, gpt-4o-mini, gpt-4-turbo, llama-2-7b, llama-3.1-8b, llama-3.1-70b, hermes-2-pro, hermes-2-dpo, phi-2, deepseek-coder, openchat-3.5, openhermes-2.5, lfm-40b, german-7b, zephyr-7b, neural-7b`|`flux, flux-realism', flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, sdxl`|❌|✔||❌|
-|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.AIUncensored`|✔|✔|❌|✔||❌|
-|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|✔||❌|
-|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`|✔|`gpt-4-vision`|✔||❌+✔|
-|[bing.com/images](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|`❌|✔|❌|❌||✔|
-|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, blackboxai-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|`blackboxai, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro`|✔||❌|
-|[chatgot.one](https://www.chatgot.one/)|`g4f.Provider.ChatGot`|`gemini-pro`|❌|❌|✔||❌|
-|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|`?`|`?`|`?`|?| |❌|
-|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4o, gpt-4o-mini`|❌|❌|✔||❌|
-|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, phi-2, qwen-1.5-0-5b, qwen-1.5-8b, qwen-1.5-14b, qwen-1.5-7b`|❌|❌|✔||❌|
-|[darkai.foundation/chat](https://darkai.foundation/chat)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|✔||❌|
-|[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔||❌|
-|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔||✔|
-|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, wizardlm-2-8x22b, qwen-2-72b`|❌|❌|❌||❌|
-|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|✔|❌|❌||✔|
-|[chat10.free2gpt.xyz](chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`mixtral-7b`|❌|❌|✔||❌|
-|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`gemini-pro`|❌|❌|✔||❌|
-|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|✔|❌|❌|✔||✔|
-|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|✔|❌|✔|?||✔|
-|[app.giz.ai](https://app.giz.ai/assistant/)|`g4f.Provider.GizAI`|`gemini-flash`|❌|❌|✔||❌|
-|[developers.sber.ru](https://developers.sber.ru/gigachat)|`g4f.Provider.GigaChat`|✔|❌|❌|✔||✔|
-|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|❌|?||✔|
-|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, qwen-2-72b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|❌|❌|✔||❌|
-|[huggingface.co](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|❌|❌|✔||❌|
-|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|✔||❌|
-|[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|✔||❌|
-|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|✔|✔|?|?||✔|
-|[platform.openai.com](https://platform.openai.com/)|`g4f.Provider.Openai`|✔|❌|✔|||✔|
-|[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|✔|||✔|
-|[www.perplexity.ai)](https://www.perplexity.ai)|`g4f.Provider.PerplexityAi`|✔|❌|❌|?||❌|
-|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityApi`|✔|❌|❌|?||✔|
-|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.1-8b, llama-3.1-70b`|❌|❌|?||❌|
-|[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|?||❌|
-|[]()|`g4f.Provider.Pizzagpt`|`gpt-4o-mini`|❌|❌|✔||❌|
-|[poe.com](https://poe.com)|`g4f.Provider.Poe`|✔|❌|❌|?||✔|
-|[app.prodia.com](https://app.prodia.com)|`g4f.Provider.Prodia`|❌|✔|❌|❌||❌|
-|[raycast.com](https://raycast.com)|`g4f.Provider.Raycast`|✔|❌|❌|✔||✔|
-|[chat.reka.ai](https://chat.reka.ai/)|`g4f.Provider.Reka`|✔|❌|✔|✔||✔|
-|[replicate.com](https://replicate.com)|`g4f.Provider.Replicate`|✔|❌|❌|?||✔|
-|[replicate.com](https://replicate.com)|`g4f.Provider.ReplicateHome`|`gemma-2b, llava-13b`|`sd-3, sdxl, playground-v2.5`|❌|✔||❌|
-|[replicate.com](https://replicate.com)|`g4f.Provider.RubiksAI`|`llama-3.1-70b, gpt-4o-mini`|❌|❌|✔||❌|
-|[talkai.info](https://talkai.info)|`g4f.Provider.TalkAi`|✔|❌|❌|✔||❌|
-|[teach-anything.com](https://www.teach-anything.com)|`g4f.Provider.TeachAnything`|`llama-3.1-70b`|❌|❌|✔||❌|
-|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.Theb`|✔|❌|❌|✔||✔|
-|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.ThebApi`|✔|❌|❌|✔||✔|
-|[console.upstage.ai/playground/chat](https://console.upstage.ai/playground/chat)|`g4f.Provider.Upstage`|`solar-pro, solar-mini`|❌|❌|✔||❌|
-|[whiterabbitneo.com](https://www.whiterabbitneo.com)|`g4f.Provider.WhiteRabbitNeo`|✔|❌|❌|?||✔|
-|[you.com](https://you.com)|`g4f.Provider.You`|✔|✔|✔|✔||❌+✔|
+|[bing.com/images/create](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|❌|`dall-e-3`|❌|❌||✔|
+|[inference.cerebras.ai](https://inference.cerebras.ai/)|`g4f.Provider.Cerebras`|✔|❌|❌|✔||✔|
+|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔||✔|
+|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|✔|❌|❌||✔|
+|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|`gemini`|`gemini`|`gemini`|❌||✔|
+|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|`gemini-pro`|❌|`gemini-pro`|❌||✔|
+|[github.com/copilot](https://github.com/copilot)|`g4f.Provider.GithubCopilot`|✔|❌|❌|❌||✔|
+|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|✔|❌||✔|
+|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`qwen-2.5-72b, llama-3.3-70b, command-r-plus, qwq-32b, nemotron-70b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|`flux-dev`|❌|✔||✔|
+|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|✔|❌|✔||✔|
+|[api-inference.huggingface.co](https://api-inference.huggingface.co)|`g4f.Provider.HuggingFaceAPI`|✔|❌|✔|❌||✔|
+|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|`meta-ai`|❌|❌|✔||✔|
+|[designer.microsoft.com](https://designer.microsoft.com)|`g4f.Provider.MicrosoftDesigner`|❌|`dall-e-3`|❌|❌||✔|
+|[platform.openai.com](https://platform.openai.com)|`g4f.Provider.OpenaiAPI`|✔|❌|❌|✔||✔|
+|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4, ...`|❌|✔|✔||✔|
+|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityApi`|✔|❌|❌|✔||✔|
+|[poe.com](https://poe.com)|`g4f.Provider.Poe`|✔|❌|❌|✔||✔|
+|[raycast.com](https://raycast.com)|`g4f.Provider.Raycast`|✔|❌|❌|✔||✔|
+|[chat.reka.ai](https://chat.reka.ai)|`g4f.Provider.Reka`|`reka-core`|❌|✔|✔||✔|
+|[replicate.com](https://replicate.com)|`g4f.Provider.Replicate`|✔|❌|❌|✔||✔|
+|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.Theb`|✔|❌|❌|✔||✔|
+|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.ThebApi`|✔|❌|❌|✔||✔|
+|[whiterabbitneo.com](https://www.whiterabbitneo.com)|`g4f.Provider.WhiteRabbitNeo`|✔|❌|❌|✔||✔|
+
+---
## Models
### Text Models
| Model | Base Provider | Providers | Website |
|-------|---------------|-----------|---------|
-|gpt-3.5-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
-|gpt-4|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
-|gpt-4-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
-|gpt-4o|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
-|gpt-4o-mini|OpenAI|10+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
-|o1|OpenAI|0+ Providers|[platform.openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
-|o1-mini|OpenAI|0+ Providers|[platform.openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
-|llama-2-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)|
-|llama-2-13b|Meta Llama|1+ Providers|[llama.com](https://www.llama.com/llama2/)|
-|llama-3-8b|Meta Llama|4+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
-|llama-3-70b|Meta Llama|4+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
-|llama-3.1-8b|Meta Llama|7+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
-|llama-3.1-70b|Meta Llama|14+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
-|llama-3.1-405b|Meta Llama|5+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
-|llama-3.2-1b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)|
-|llama-3.2-3b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/blog/llama32)|
-|llama-3.2-11b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
-|llama-3.2-90b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
-|llamaguard-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/LlamaGuard-7b)|
-|llamaguard-2-8b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Meta-Llama-Guard-2-8B)|
-|mistral-7b|Mistral AI|4+ Providers|[mistral.ai](https://mistral.ai/news/announcing-mistral-7b/)|
-|mixtral-8x7b|Mistral AI|6+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
-|mixtral-8x22b|Mistral AI|3+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-8x22b/)|
-|mistral-nemo|Mistral AI|2+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
-|mistral-large|Mistral AI|2+ Providers|[mistral.ai](https://mistral.ai/news/mistral-large-2407/)|
-|mixtral-8x7b-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|gpt-3.5-turbo|OpenAI|2+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
+|gpt-4|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
+|gpt-4-turbo|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
+|gpt-4o|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
+|gpt-4o-mini|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
+|o1-preview|OpenAI|1+ Providers|[platform.openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
+|o1-mini|OpenAI|2+ Providers|[platform.openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
+|gigachat||1+ Providers|[developers.sber.ru](https://developers.sber.ru/gigachat)|
+|llama-2-7b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)|
+|llama-3-8b|Meta Llama|1+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
+|llama-3.1-8b|Meta Llama|5+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
+|llama-3.1-70b|Meta Llama|12+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
+|llama-3.1-405b|Meta Llama|1+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
+|llama-3.2-11b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
+|llama-3.3-70b|Meta Llama|3+ Providers|[llama.com](https://www.llama.com/)|
+|mixtral-8x7b|Mistral AI|1+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
+|mistral-nemo|Mistral AI|3+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
+|mistral-large|Mistral AI|1+ Providers|[mistral.ai](https://mistral.ai/news/mistral-large-2407/)|
|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
-|hermes-2|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B)|
-|yi-34b|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B)|
-|hermes-3|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B)|
+|hermes-2-pro|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B)|
+|hermes-3|NousResearch|1+ Providers|[nousresearch.com](https://nousresearch.com/hermes3/)|
|gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
|gemini-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
-|gemini-pro|Google DeepMind|10+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
-|gemma-2b|Google|5+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)|
-|gemma-2b-9b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-9b)|
-|gemma-2b-27b|Google|2+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-27b)|
-|gemma-7b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-7b)|
-|gemma_2_27b|Google|1+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)|
-|claude-2.1|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)|
-|claude-3-haiku|Anthropic|4+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
-|claude-3-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
-|claude-3-opus|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
-|claude-3.5-sonnet|Anthropic|6+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
+|gemini-pro|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
+|gemma-2b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)|
+|claude-3-haiku|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
+|claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
+|claude-3-opus|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
+|claude-3.5-sonnet|Anthropic|3+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
+|reka-core|Reka AI|1+ Providers|[reka.ai](https://www.reka.ai/ourmodels)|
|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|blackboxai-pro|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
-|yi-1.5-9b|01-ai|1+ Providers|[huggingface.co](https://huggingface.co/01-ai/Yi-1.5-9B)|
-|phi-2|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/phi-2)|
-|phi-3-medium-4k|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct)|
-|phi-3.5-mini|Microsoft|2+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct)|
-|dbrx-instruct|Databricks|1+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)|
|command-r-plus|CohereForAI|1+ Providers|[docs.cohere.com](https://docs.cohere.com/docs/command-r-plus)|
-|sparkdesk-v1.1|iFlytek|1+ Providers|[xfyun.cn](https://www.xfyun.cn/doc/spark/Guide.html)|
+|command-r|CohereForAI|1+ Providers|[docs.cohere.com](https://docs.cohere.com/docs/command-r)|
|qwen|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen)|
-|qwen-1.5-0.5b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-0.5B)|
|qwen-1.5-7b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-7B)|
-|qwen-1.5-14b|Qwen|3+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-14B)|
-|qwen-1.5-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-72B)|
-|qwen-1.5-110b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-110B)|
-|qwen-1.5-1.8b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-1.8B)|
-|qwen-2-72b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)|
-|glm-3-6b|Zhipu AI|1+ Providers|[github.com/THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3)|
-|glm-4-9B|Zhipu AI|1+ Providers|[github.com/THUDM/GLM-4](https://github.com/THUDM/GLM-4)|
-|solar-1-mini|Upstage|1+ Providers|[upstage.ai/](https://www.upstage.ai/feed/product/solarmini-performance-report)|
-|solar-10-7b|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0)|
-|solar-pro|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/solar-pro-preview-instruct)|
+|qwen-2-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)|
+|qwen-2.5-72b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct)|
+|qwen-2.5-coder-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-Coder-32B)|
+|qwq-32b|Qwen|3+ Providers|[qwen2.org](https://qwen2.org/qwq-32b-preview/)|
|pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)|
|deepseek-coder|DeepSeek|1+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct)|
-|wizardlm-2-7b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/dreamgen/WizardLM-2-7B)|
-|wizardlm-2-8x22b|WizardLM|2+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
-|sh-n-7b|Together|1+ Providers|[huggingface.co](https://huggingface.co/togethercomputer/StripedHyena-Nous-7B)|
-|llava-13b|Yorickvp|1+ Providers|[huggingface.co](https://huggingface.co/liuhaotian/llava-v1.5-13b)|
-|lzlv-70b|Lzlv|1+ Providers|[huggingface.co](https://huggingface.co/lizpreciatior/lzlv_70b_fp16_hf)|
+|wizardlm-2-8x22b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
|openchat-3.5|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat_3.5)|
-|openchat-3.6-8b|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat-3.6-8b-20240522)|
-|phind-codellama-34b-v2|Phind|1+ Providers|[huggingface.co](https://huggingface.co/Phind/Phind-CodeLlama-34B-v2)|
-|dolphin-2.9.1-llama-3-70b|Cognitive Computations|1+ Providers|[huggingface.co](https://huggingface.co/cognitivecomputations/dolphin-2.9.1-llama-3-70b)|
-|grok-2-mini|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
-|grok-2|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
+|grok-beta|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
|sonar-online|Perplexity AI|2+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|sonar-chat|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
-|mythomax-l2-13b|Gryphe|1+ Providers|[huggingface.co](https://huggingface.co/Gryphe/MythoMax-L2-13b)|
-|cosmosrp|Gryphe|1+ Providers|[huggingface.co](https://huggingface.co/PawanKrd/CosmosRP-8k)|
-|german-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF)|
-|tinyllama-1.1b|TinyLlama|1+ Providers|[huggingface.co](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0)|
-|cybertron-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/fblgit/una-cybertron-7b-v2-bf16)|
+|nemotron-70b|Nvidia|3+ Providers|[build.nvidia.com](https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct)|
|openhermes-2.5|Teknium|1+ Providers|[huggingface.co](https://huggingface.co/datasets/teknium/OpenHermes-2.5)|
-|lfm-40b|Liquid|1+ Providers|[liquid.ai](https://www.liquid.ai/liquid-foundation-models)|
+|lfm-40b|Liquid|2+ Providers|[liquid.ai](https://www.liquid.ai/liquid-foundation-models)|
+|german-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF)|
|zephyr-7b|HuggingFaceH4|1+ Providers|[huggingface.co](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)|
+|neural-7b|Intel|1+ Providers|[huggingface.co](https://huggingface.co/Intel/neural-chat-7b-v3-1)|
+|p1|PollinationsAI|1+ Providers|[]( )|
+|evil|Evil Mode - Experimental|2+ Providers|[]( )|
+|midijourney||1+ Providers|[]( )|
+|turbo||1+ Providers|[]( )|
+|unity||1+ Providers|[]( )|
+|rtist||1+ Providers|[]( )|
+
### Image Models
| Model | Base Provider | Providers | Website |
|-------|---------------|-----------|---------|
-|sdxl|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/en/using-diffusers/sdxl)|
+|sdxl|Stability AI|2+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/en/using-diffusers/sdxl)|
|sdxl-lora|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/blog/lcm_lora)|
-|sdxl-turbo|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/stabilityai/sdxl-turbo)|
-|sd-1.5|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/runwayml/stable-diffusion-v1-5)|
|sd-3|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_3)|
|playground-v2.5|Playground AI|1+ Providers|[huggingface.co](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic)|
-|flux|Black Forest Labs|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
+|flux|Black Forest Labs|4+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
|flux-pro|Black Forest Labs|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
-|flux-realism|Flux AI|2+ Providers|[]()|
-|flux-anime|Flux AI|1+ Providers|[]()|
-|flux-3d|Flux AI|1+ Providers|[]()|
-|flux-disney|Flux AI|1+ Providers|[]()|
-|flux-pixel|Flux AI|1+ Providers|[]()|
-|flux-4o|Flux AI|1+ Providers|[]()|
+|flux-dev|Black Forest Labs|3+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-dev)|
+|flux-realism|Flux AI|2+ Providers|[]( )|
+|flux-cablyai|Flux AI|1+ Providers|[]( )|
+|flux-anime|Flux AI|2+ Providers|[]( )|
+|flux-3d|Flux AI|2+ Providers|[]( )|
+|flux-disney|Flux AI|1+ Providers|[]( )|
+|flux-pixel|Flux AI|1+ Providers|[]( )|
+|flux-4o|Flux AI|1+ Providers|[]( )|
|flux-schnell|Black Forest Labs|2+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-schnell)|
-|dalle|OpenAI|1+ Providers|[openai.com](https://openai.com/index/dall-e/)|
-|dalle-2|OpenAI|1+ Providers|[openai.com](https://openai.com/index/dall-e-2/)|
-|emi||1+ Providers|[]()|
-|any-dark||1+ Providers|[]()|
-|midjourney|Midjourney|1+ Providers|[docs.midjourney.com](https://docs.midjourney.com/docs/model-versions)|
-
-### Vision Models
-| Model | Base Provider | Providers | Website |
-|-------|---------------|-----------|---------|
-|gpt-4-vision|OpenAI|1+ Providers|[openai.com](https://openai.com/research/gpt-4v-system-card)|
-|gemini-pro-vision|Google DeepMind|1+ Providers | [deepmind.google](https://deepmind.google/technologies/gemini/)|
-|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
-
-### Providers and vision models
-| Provider | Base Provider | | Vision Models | Status | Auth |
-|-------|---------------|-----------|---------|---------|---------|
-| `g4f.Provider.Blackbox` | Blackbox AI | | `blackboxai, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro` |  | ❌ |
+|dall-e-3|OpenAI|5+ Providers|[openai.com](https://openai.com/index/dall-e-3/)|
+|midjourney|Midjourney|2+ Providers|[docs.midjourney.com](https://docs.midjourney.com/docs/model-versions)|
+|any-dark||2+ Providers|[]( )|
## Conclusion and Usage Tips
This document provides a comprehensive overview of various AI providers and models available for text generation, image generation, and vision tasks. **When choosing a provider or model, consider the following factors:**
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 474c9f88..442ee9d4 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -42,22 +42,24 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
hidden_models = {"Flux-1.1-Pro"}
- additional_models_imagine = ["flux-1.1-pro", "dall-e-3"]
+ additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
model_aliases = {
# Alias mappings for models
+ "gpt-4": "gpt-4o",
"openchat-3.5": "openchat-3.5-0106",
"deepseek-coder": "deepseek-coder-6.7b-instruct",
"hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
"hermes-2-pro": "hermes-2-pro-mistral-7b",
"openhermes-2.5": "openhermes-2.5-mistral-7b",
"lfm-40b": "lfm-40b-moe",
- "discolm-german-7b": "discolm-german-7b-v1",
+ "german-7b": "discolm-german-7b-v1",
"llama-2-7b": "llama-2-7b-chat-int8",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"neural-7b": "neural-chat-7b-v3-1",
"zephyr-7b": "zephyr-7b-beta",
"evil": "any-uncensored",
+ "sdxl": "stable-diffusion-xl-lightning",
"sdxl": "stable-diffusion-xl-base",
"flux-pro": "flux-1.1-pro",
"llama-3.1-8b": "llama-3.1-8b-chat"
diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py
index 48dcfd74..bb732f24 100644
--- a/g4f/Provider/AmigoChat.py
+++ b/g4f/Provider/AmigoChat.py
@@ -108,7 +108,6 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
"mythomax-13b": "Gryphe/MythoMax-L2-13b",
"mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
- "mistral-tiny": "mistralai/mistral-tiny",
"mistral-nemo": "mistralai/mistral-nemo",
"deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
@@ -127,7 +126,6 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
### image ###
- "flux-realism": "flux-realism",
"flux-dev": "flux/dev",
}
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 537a3ea8..fec0a8a9 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -98,12 +98,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
model_aliases = {
- "gpt-4": "blackboxai",
+ ### chat ###
"gpt-4": "gpt-4o",
- "gpt-4o-mini": "gpt-4o",
- "gpt-3.5-turbo": "blackboxai",
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
+
+ ### image ###
"flux": "ImageGeneration",
}
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
index ef8441a8..88c4a855 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/ChatGptEs.py
@@ -19,7 +19,7 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
default_model = 'gpt-4o'
- models = ['gpt-3.5-turbo', 'gpt-4o', 'gpt-4o-mini']
+ models = ['gpt-4', 'gpt-4o', 'gpt-4o-mini']
@classmethod
def get_model(cls, model: str) -> str:
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 42c18dfe..070a7db2 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -30,6 +30,7 @@ class Conversation(BaseConversation):
self.model = model
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "DuckDuckGo AI Chat"
url = "https://duckduckgo.com/aichat"
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
working = True
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index 87aeb790..6874b023 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -9,6 +9,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://deepinfra.com/chat"
api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
+
working = True
supports_stream = True
supports_system_message = True
diff --git a/g4f/Provider/Flux.py b/g4f/Provider/Flux.py
index 7a00e75a..b211ecef 100644
--- a/g4f/Provider/Flux.py
+++ b/g4f/Provider/Flux.py
@@ -8,13 +8,14 @@ from ..image import ImageResponse, ImagePreview
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Flux(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Flux Provider"
+ label = "HuggingSpace (black-forest-labs-flux-1-dev)"
url = "https://black-forest-labs-flux-1-dev.hf.space"
api_endpoint = "/gradio_api/call/infer"
working = True
default_model = 'flux-dev'
models = [default_model]
image_models = [default_model]
+ model_aliases = {"flux-dev": "flux-1-dev"}
@classmethod
async def create_async_generator(
@@ -55,4 +56,4 @@ class Flux(AsyncGeneratorProvider, ProviderModelMixin):
yield ImagePreview(url, prompt)
else:
yield ImageResponse(url, prompt)
- break
\ No newline at end of file
+ break
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index b38ff428..88189a16 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -21,9 +21,11 @@ RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"
class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://freegptsnav.aifree.site"
+
working = True
supports_message_history = True
supports_system_message = True
+
default_model = 'gemini-pro'
@classmethod
diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py
index f00b344e..be2fd295 100644
--- a/g4f/Provider/GizAI.py
+++ b/g4f/Provider/GizAI.py
@@ -10,6 +10,7 @@ from .helper import format_prompt
class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://app.giz.ai/assistant"
api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
+
working = True
supports_stream = False
supports_system_message = True
@@ -17,7 +18,6 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'chat-gemini-flash'
models = [default_model]
-
model_aliases = {"gemini-flash": "chat-gemini-flash",}
@classmethod
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index bf9e79d4..1e8131f8 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -143,9 +143,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
+
default_model = "gpt-4o-2024-08-06"
models = list(models.keys())
-
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-2024-08-06",
diff --git a/g4f/Provider/MagickPen.py b/g4f/Provider/MagickPen.py
deleted file mode 100644
index 1d084a2f..00000000
--- a/g4f/Provider/MagickPen.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import hashlib
-import time
-import random
-import re
-import json
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://magickpen.com"
- api_endpoint = "https://api.magickpen.com/ask"
- working = False
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'gpt-4o-mini'
- models = ['gpt-4o-mini']
-
- @classmethod
- async def fetch_api_credentials(cls) -> tuple:
- url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
- async with ClientSession() as session:
- async with session.get(url) as response:
- text = await response.text()
-
- pattern = r'"X-API-Secret":"(\w+)"'
- match = re.search(pattern, text)
- X_API_SECRET = match.group(1) if match else None
-
- timestamp = str(int(time.time() * 1000))
- nonce = str(random.random())
-
- s = ["TGDBU9zCgM", timestamp, nonce]
- s.sort()
- signature_string = ''.join(s)
- signature = hashlib.md5(signature_string.encode()).hexdigest()
-
- pattern = r'secret:"(\w+)"'
- match = re.search(pattern, text)
- secret = match.group(1) if match else None
-
- if X_API_SECRET and timestamp and nonce and secret:
- return X_API_SECRET, signature, timestamp, nonce, secret
- else:
- raise Exception("Unable to extract all the necessary data from the JavaScript file.")
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
- X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials()
-
- headers = {
- 'accept': 'application/json, text/plain, */*',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'nonce': nonce,
- 'origin': cls.url,
- 'referer': f"{cls.url}/",
- 'secret': secret,
- 'signature': signature,
- 'timestamp': timestamp,
- 'x-api-secret': X_API_SECRET,
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- payload = {
- 'query': prompt,
- 'turnstileResponse': '',
- 'action': 'verify'
- }
- async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
- response.raise_for_status()
- async for chunk in response.content:
- if chunk:
- yield chunk.decode()
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index b3119cb6..3a6f0d39 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -29,6 +29,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"sonar-online": "sonar-small-128k-online",
"sonar-chat": "llama-3.1-sonar-large-128k-chat",
"sonar-chat": "llama-3.1-sonar-small-128k-chat",
+ "llama-3.3-70b": "llama-3.3-70b-instruct",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"llama-3.1-70b": "llama-3.1-70b-instruct",
"lfm-40b": "/models/LiquidCloud",
@@ -78,9 +79,9 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
assert(await ws.receive_str())
assert(await ws.receive_str() == "6")
message_data = {
- "version": "2.5",
+ "version": "2.13",
"source": "default",
- "model": cls.get_model(model),
+ "model": model,
"messages": messages
}
await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 18349490..9520674a 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -13,7 +13,7 @@ from .needs_auth.OpenaiAPI import OpenaiAPI
from .helper import format_prompt
class PollinationsAI(OpenaiAPI):
- label = "Pollinations.AI"
+ label = "Pollinations AI"
url = "https://pollinations.ai"
working = True
@@ -22,28 +22,30 @@ class PollinationsAI(OpenaiAPI):
default_model = "openai"
- additional_models_image = ["unity", "midijourney", "rtist"]
+ additional_models_image = ["midjourney", "dall-e-3"]
additional_models_text = ["sur", "sur-mistral", "claude"]
model_aliases = {
"gpt-4o": "openai",
"mistral-nemo": "mistral",
"llama-3.1-70b": "llama", #
- "gpt-3.5-turbo": "searchgpt",
"gpt-4": "searchgpt",
- "gpt-3.5-turbo": "claude",
"gpt-4": "claude",
"qwen-2.5-coder-32b": "qwen-coder",
"claude-3.5-sonnet": "sur",
}
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
+ }
+
@classmethod
def get_models(cls):
if not hasattr(cls, 'image_models'):
cls.image_models = []
if not cls.image_models:
url = "https://image.pollinations.ai/models"
- response = requests.get(url)
+ response = requests.get(url, headers=cls.headers)
raise_for_status(response)
cls.image_models = response.json()
cls.image_models.extend(cls.additional_models_image)
@@ -51,7 +53,7 @@ class PollinationsAI(OpenaiAPI):
cls.models = []
if not cls.models:
url = "https://text.pollinations.ai/models"
- response = requests.get(url)
+ response = requests.get(url, headers=cls.headers)
raise_for_status(response)
cls.models = [model.get("name") for model in response.json()]
cls.models.extend(cls.image_models)
@@ -94,7 +96,7 @@ class PollinationsAI(OpenaiAPI):
@classmethod
async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
if api_key is None:
- async with ClientSession(connector=get_connector(proxy=proxy)) as session:
+ async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
prompt = format_prompt(messages)
async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
await raise_for_status(response)
diff --git a/g4f/Provider/Reka.py b/g4f/Provider/Reka.py
deleted file mode 100644
index 2306149e..00000000
--- a/g4f/Provider/Reka.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from __future__ import annotations
-
-import os, requests, time, json
-from ..typing import CreateResult, Messages, ImageType
-from .base_provider import AbstractProvider
-from ..cookies import get_cookies
-from ..image import to_bytes
-
-class Reka(AbstractProvider):
- url = "https://chat.reka.ai/"
- working = True
- needs_auth = True
- supports_stream = True
- default_vision_model = "reka"
- cookies = {}
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- api_key: str = None,
- image: ImageType = None,
- **kwargs
- ) -> CreateResult:
- cls.proxy = proxy
-
- if not api_key:
- cls.cookies = get_cookies("chat.reka.ai")
- if not cls.cookies:
- raise ValueError("No cookies found for chat.reka.ai")
- elif "appSession" not in cls.cookies:
- raise ValueError("No appSession found in cookies for chat.reka.ai, log in or provide bearer_auth")
- api_key = cls.get_access_token(cls)
-
- conversation = []
- for message in messages:
- conversation.append({
- "type": "human",
- "text": message["content"],
- })
-
- if image:
- image_url = cls.upload_image(cls, api_key, image)
- conversation[-1]["image_url"] = image_url
- conversation[-1]["media_type"] = "image"
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'authorization': f'Bearer {api_key}',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': 'https://chat.reka.ai',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'conversation_history': conversation,
- 'stream': True,
- 'use_search_engine': False,
- 'use_code_interpreter': False,
- 'model_name': 'reka-core',
- 'random_seed': int(time.time() * 1000),
- }
-
- tokens = ''
-
- response = requests.post('https://chat.reka.ai/api/chat',
- cookies=cls.cookies, headers=headers, json=json_data, proxies=cls.proxy, stream=True)
-
- for completion in response.iter_lines():
- if b'data' in completion:
- token_data = json.loads(completion.decode('utf-8')[5:])['text']
-
- yield (token_data.replace(tokens, ''))
-
- tokens = token_data
-
- def upload_image(cls, access_token, image: ImageType) -> str:
- boundary_token = os.urandom(8).hex()
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'authorization': f'Bearer {access_token}',
- 'content-type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary_token}',
- 'origin': 'https://chat.reka.ai',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'referer': 'https://chat.reka.ai/chat/hPReZExtDOPvUfF8vCPC',
- 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- }
-
- image_data = to_bytes(image)
-
- boundary = f'----WebKitFormBoundary{boundary_token}'
- data = f'--{boundary}\r\nContent-Disposition: form-data; name="image"; filename="image.png"\r\nContent-Type: image/png\r\n\r\n'
- data += image_data.decode('latin-1')
- data += f'\r\n--{boundary}--\r\n'
-
- response = requests.post('https://chat.reka.ai/api/upload-image',
- cookies=cls.cookies, headers=headers, proxies=cls.proxy, data=data.encode('latin-1'))
-
- return response.json()['media_url']
-
- def get_access_token(cls):
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'referer': 'https://chat.reka.ai/chat',
- 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- }
-
- try:
- response = requests.get('https://chat.reka.ai/bff/auth/access_token',
- cookies=cls.cookies, headers=headers, proxies=cls.proxy)
-
- return response.json()['accessToken']
-
- except Exception as e:
- raise ValueError(f"Failed to get access token: {e}, refresh your cookies / log in into chat.reka.ai")
\ No newline at end of file
diff --git a/g4f/Provider/RobocodersAPI.py b/g4f/Provider/RobocodersAPI.py
deleted file mode 100755
index 3a94e271..00000000
--- a/g4f/Provider/RobocodersAPI.py
+++ /dev/null
@@ -1,238 +0,0 @@
-from __future__ import annotations
-
-import json
-import aiohttp
-from pathlib import Path
-
-try:
- from bs4 import BeautifulSoup
- HAS_BEAUTIFULSOUP = True
-except ImportError:
- HAS_BEAUTIFULSOUP = False
- BeautifulSoup = None
-
-from aiohttp import ClientTimeout
-from ..errors import MissingRequirementsError
-from ..typing import AsyncResult, Messages
-from ..cookies import get_cookies_dir
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-from .. import debug
-
-
-class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
- label = "API Robocoders AI"
- url = "https://api.robocoders.ai/docs"
- api_endpoint = "https://api.robocoders.ai/chat"
- working = True
- supports_message_history = True
- default_model = 'GeneralCodingAgent'
- agent = [default_model, "RepoAgent", "FrontEndAgent"]
- models = [*agent]
-
- CACHE_DIR = Path(get_cookies_dir())
- CACHE_FILE = CACHE_DIR / "robocoders.json"
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
-
- timeout = ClientTimeout(total=600)
-
- async with aiohttp.ClientSession(timeout=timeout) as session:
- # Load or create access token and session ID
- access_token, session_id = await cls._get_or_create_access_and_session(session)
- if not access_token or not session_id:
- raise Exception("Failed to initialize API interaction")
-
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {access_token}"
- }
-
- prompt = format_prompt(messages)
-
- data = {
- "sid": session_id,
- "prompt": prompt,
- "agent": model
- }
-
- async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
- if response.status == 401: # Unauthorized, refresh token
- cls._clear_cached_data()
- raise Exception("Unauthorized: Invalid token, please retry.")
- elif response.status == 422:
- raise Exception("Validation Error: Invalid input.")
- elif response.status >= 500:
- raise Exception(f"Server Error: {response.status}")
- elif response.status != 200:
- raise Exception(f"Unexpected Error: {response.status}")
-
- async for line in response.content:
- if line:
- try:
- # Decode bytes into a string
- line_str = line.decode('utf-8')
- response_data = json.loads(line_str)
-
- # Get the message from the 'args.content' or 'message' field
- message = (response_data.get('args', {}).get('content') or
- response_data.get('message', ''))
-
- if message:
- yield message
-
- # Check for reaching the resource limit
- if (response_data.get('action') == 'message' and
- response_data.get('args', {}).get('wait_for_response')):
- # Automatically continue the dialog
- continue_data = {
- "sid": session_id,
- "prompt": "continue",
- "agent": model
- }
- async with session.post(
- cls.api_endpoint,
- headers=headers,
- json=continue_data,
- proxy=proxy
- ) as continue_response:
- if continue_response.status == 200:
- async for continue_line in continue_response.content:
- if continue_line:
- try:
- continue_line_str = continue_line.decode('utf-8')
- continue_data = json.loads(continue_line_str)
- continue_message = (
- continue_data.get('args', {}).get('content') or
- continue_data.get('message', '')
- )
- if continue_message:
- yield continue_message
- except json.JSONDecodeError:
- debug.log(f"Failed to decode continue JSON: {continue_line}")
- except Exception as e:
- debug.log(f"Error processing continue response: {e}")
-
- except json.JSONDecodeError:
- debug.log(f"Failed to decode JSON: {line}")
- except Exception as e:
- debug.log(f"Error processing response: {e}")
-
- @staticmethod
- async def _get_or_create_access_and_session(session: aiohttp.ClientSession):
- RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True) # Ensure cache directory exists
-
- # Load data from cache
- if RobocodersAPI.CACHE_FILE.exists():
- with open(RobocodersAPI.CACHE_FILE, "r") as f:
- data = json.load(f)
- access_token = data.get("access_token")
- session_id = data.get("sid")
-
- # Validate loaded data
- if access_token and session_id:
- return access_token, session_id
-
- # If data not valid, create new access token and session ID
- access_token = await RobocodersAPI._fetch_and_cache_access_token(session)
- session_id = await RobocodersAPI._create_and_cache_session(session, access_token)
- return access_token, session_id
-
- @staticmethod
- async def _fetch_and_cache_access_token(session: aiohttp.ClientSession) -> str:
- if not HAS_BEAUTIFULSOUP:
- raise MissingRequirementsError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
- return token
-
- url_auth = 'https://api.robocoders.ai/auth'
- headers_auth = {
- 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
- }
-
- async with session.get(url_auth, headers=headers_auth) as response:
- if response.status == 200:
- html = await response.text()
- soup = BeautifulSoup(html, 'html.parser')
- token_element = soup.find('pre', id='token')
- if token_element:
- token = token_element.text.strip()
-
- # Cache the token
- RobocodersAPI._save_cached_data({"access_token": token})
- return token
- return None
-
- @staticmethod
- async def _create_and_cache_session(session: aiohttp.ClientSession, access_token: str) -> str:
- url_create_session = 'https://api.robocoders.ai/create-session'
- headers_create_session = {
- 'Authorization': f'Bearer {access_token}'
- }
-
- async with session.get(url_create_session, headers=headers_create_session) as response:
- if response.status == 200:
- data = await response.json()
- session_id = data.get('sid')
-
- # Cache session ID
- RobocodersAPI._update_cached_data({"sid": session_id})
- return session_id
- elif response.status == 401:
- RobocodersAPI._clear_cached_data()
- raise Exception("Unauthorized: Invalid token during session creation.")
- elif response.status == 422:
- raise Exception("Validation Error: Check input parameters.")
- return None
-
- @staticmethod
- def _save_cached_data(new_data: dict):
- """Save new data to cache file"""
- RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True)
- RobocodersAPI.CACHE_FILE.touch(exist_ok=True)
- with open(RobocodersAPI.CACHE_FILE, "w") as f:
- json.dump(new_data, f)
-
- @staticmethod
- def _update_cached_data(updated_data: dict):
- """Update existing cache data with new values"""
- data = {}
- if RobocodersAPI.CACHE_FILE.exists():
- with open(RobocodersAPI.CACHE_FILE, "r") as f:
- try:
- data = json.load(f)
- except json.JSONDecodeError:
- # If cache file is corrupted, start with empty dict
- data = {}
-
- data.update(updated_data)
- with open(RobocodersAPI.CACHE_FILE, "w") as f:
- json.dump(data, f)
-
- @staticmethod
- def _clear_cached_data():
- """Remove cache file"""
- try:
- if RobocodersAPI.CACHE_FILE.exists():
- RobocodersAPI.CACHE_FILE.unlink()
- except Exception as e:
- debug.log(f"Error clearing cache: {e}")
-
- @staticmethod
- def _get_cached_data() -> dict:
- """Get all cached data"""
- if RobocodersAPI.CACHE_FILE.exists():
- try:
- with open(RobocodersAPI.CACHE_FILE, "r") as f:
- return json.load(f)
- except json.JSONDecodeError:
- return {}
- return {}
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
index 816ea60c..86d61564 100644
--- a/g4f/Provider/RubiksAI.py
+++ b/g4f/Provider/RubiksAI.py
@@ -16,6 +16,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Rubiks AI"
url = "https://rubiks.ai"
api_endpoint = "https://rubiks.ai/search/api/"
+
working = True
supports_stream = True
supports_system_message = True
@@ -127,4 +128,4 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
yield content
if web_search and sources:
- yield Sources(sources)
\ No newline at end of file
+ yield Sources(sources)
diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py
deleted file mode 100644
index f6683c45..00000000
--- a/g4f/Provider/Upstage.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://console.upstage.ai/playground/chat"
- api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
- working = False
- default_model = 'solar-pro'
- models = [
- 'upstage/solar-1-mini-chat',
- 'upstage/solar-1-mini-chat-ja',
- 'solar-pro',
- ]
- model_aliases = {
- "solar-mini": "upstage/solar-1-mini-chat",
- "solar-mini": "upstage/solar-1-mini-chat-ja",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "*/*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "dnt": "1",
- "origin": "https://console.upstage.ai",
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": "https://console.upstage.ai/",
- "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "cross-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
- }
-
- async with ClientSession(headers=headers) as session:
- data = {
- "stream": True,
- "messages": [{"role": "user", "content": format_prompt(messages)}],
- "model": model
- }
-
- async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
- response.raise_for_status()
-
- response_text = ""
-
- async for line in response.content:
- if line:
- line = line.decode('utf-8').strip()
-
- if line.startswith("data: ") and line != "data: [DONE]":
- try:
- data = json.loads(line[6:])
- content = data['choices'][0]['delta'].get('content', '')
- if content:
- response_text += content
- yield content
- except json.JSONDecodeError:
- continue
-
- if line == "data: [DONE]":
- break
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 56a5262f..04ff8396 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -22,25 +22,21 @@ from .Copilot import Copilot
from .DarkAI import DarkAI
from .DDG import DDG
from .DeepInfraChat import DeepInfraChat
+from .Flux import Flux
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
from .GizAI import GizAI
from .Liaobots import Liaobots
-from .MagickPen import MagickPen
+from .Mhystical import Mhystical
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .PollinationsAI import PollinationsAI
from .Prodia import Prodia
-from .Reka import Reka
from .ReplicateHome import ReplicateHome
-from .RobocodersAPI import RobocodersAPI
from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything
-from .Upstage import Upstage
from .You import You
-from .Mhystical import Mhystical
-from .Flux import Flux
import sys
@@ -61,4 +57,4 @@ __map__: dict[str, ProviderType] = dict([
])
class ProviderUtils:
- convert: dict[str, ProviderType] = __map__
\ No newline at end of file
+ convert: dict[str, ProviderType] = __map__
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 9127708c..b55a604b 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -51,14 +51,22 @@ UPLOAD_IMAGE_HEADERS = {
}
class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Google Gemini"
url = "https://gemini.google.com"
+
needs_auth = True
working = True
+
default_model = 'gemini'
image_models = ["gemini"]
default_vision_model = "gemini"
models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
+ model_aliases = {
+ "gemini-flash": "gemini-1.5-flash",
+ "gemini-pro": "gemini-1.5-pro",
+ }
synthesize_content_type = "audio/vnd.wav"
+
_cookies: Cookies = None
_snlm0e: str = None
_sid: str = None
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index a7f1e0aa..d9204b25 100644
--- a/g4f/Provider/needs_auth/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -11,14 +11,20 @@ from ...errors import MissingAuthError
from ..helper import get_connector
class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Gemini API"
+ label = "Google Gemini API"
url = "https://ai.google.dev"
+
working = True
supports_message_history = True
needs_auth = True
+
default_model = "gemini-1.5-pro"
default_vision_model = default_model
models = [default_model, "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
+    model_aliases = {
+        "gemini-flash": "gemini-1.5-flash",
+        "gemini-flash-8b": "gemini-1.5-flash-8b",  # distinct key; a repeated "gemini-flash" key would silently drop the first alias
+    }
@classmethod
async def create_async_generator(
@@ -108,4 +114,4 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
if candidate["finishReason"] == "STOP":
yield candidate["content"]["parts"][0]["text"]
else:
- yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
\ No newline at end of file
+                        yield f'{candidate["finishReason"]} {candidate["safetyRatings"]}'  # safetyRatings is a list; an f-string avoids a str + list TypeError
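
Note: Python dict literals keep only the last value for a repeated key, which is why alias tables like `model_aliases` need distinct keys; a duplicated key silently drops the earlier mapping. A minimal sketch of the pitfall:

    aliases = {
        "gemini-flash": "gemini-1.5-flash",
        "gemini-flash": "gemini-1.5-flash-8b",  # overwrites the line above
    }
    assert aliases == {"gemini-flash": "gemini-1.5-flash-8b"}  # first alias is gone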
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
index 3eb66b5e..754c8d4e 100644
--- a/g4f/Provider/needs_auth/GithubCopilot.py
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -16,10 +16,12 @@ class Conversation(BaseConversation):
self.conversation_id = conversation_id
class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://copilot.microsoft.com"
+ url = "https://github.com/copilot"
+
working = True
needs_auth = True
supports_stream = True
+
default_model = "gpt-4o"
models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
@@ -90,4 +92,4 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
if line.startswith(b"data: "):
data = json.loads(line[6:])
if data.get("type") == "content":
- yield data.get("body")
\ No newline at end of file
+ yield data.get("body")
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 2f3dbb57..431273f6 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -24,16 +24,19 @@ class Conversation(BaseConversation):
class HuggingChat(AbstractProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
+
working = True
supports_stream = True
needs_auth = True
+
default_model = "Qwen/Qwen2.5-72B-Instruct"
+ default_image_model = "black-forest-labs/FLUX.1-dev"
image_models = [
"black-forest-labs/FLUX.1-dev"
]
models = [
default_model,
- 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Llama-3.3-70B-Instruct',
'CohereForAI/c4ai-command-r-plus-08-2024',
'Qwen/QwQ-32B-Preview',
'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
@@ -45,8 +48,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
*image_models
]
model_aliases = {
+ ### Chat ###
"qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct",
- "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwq-32b": "Qwen/QwQ-32B-Preview",
"nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
@@ -55,6 +59,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
"phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
+
+ ### Image ###
"flux-dev": "black-forest-labs/FLUX.1-dev",
}
@@ -214,4 +220,4 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
return data[message_keys["id"]]
except (KeyError, IndexError, TypeError) as e:
- raise RuntimeError(f"Failed to extract message ID: {str(e)}")
\ No newline at end of file
+ raise RuntimeError(f"Failed to extract message ID: {str(e)}")
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 94530252..fd1da2a7 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -17,7 +17,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
default_model = HuggingChat.default_model
- default_image_model = "black-forest-labs/FLUX.1-dev"
+ default_image_model = HuggingChat.default_image_model
models = [*HuggingChat.models, default_image_model]
image_models = [default_image_model]
model_aliases = HuggingChat.model_aliases
diff --git a/g4f/Provider/needs_auth/HuggingFace2.py b/g4f/Provider/needs_auth/HuggingFace2.py
deleted file mode 100644
index 0bde770b..00000000
--- a/g4f/Provider/needs_auth/HuggingFace2.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from __future__ import annotations
-
-from .OpenaiAPI import OpenaiAPI
-from .HuggingChat import HuggingChat
-from ...typing import AsyncResult, Messages
-
-class HuggingFace2(OpenaiAPI):
- label = "HuggingFace (Inference API)"
- url = "https://huggingface.co"
- working = True
- default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
- default_vision_model = default_model
- models = [
- *HuggingChat.models
- ]
-
- @classmethod
- def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- api_base: str = "https://api-inference.huggingface.co/v1",
- max_tokens: int = 500,
- **kwargs
- ) -> AsyncResult:
- return super().create_async_generator(
- model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
- )
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
new file mode 100644
index 00000000..a93ab3a6
--- /dev/null
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+from .HuggingChat import HuggingChat
+from ...typing import AsyncResult, Messages
+
+class HuggingFaceAPI(OpenaiAPI):
+ label = "HuggingFace (Inference API)"
+ url = "https://api-inference.huggingface.co"
+ working = True
+ default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+ default_vision_model = default_model
+ models = [
+ *HuggingChat.models
+ ]
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api-inference.huggingface.co/v1",
+ max_tokens: int = 500,
+ **kwargs
+ ) -> AsyncResult:
+ return super().create_async_generator(
+ model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
+ )
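
Note: HuggingFaceAPI follows the thin-subclass pattern used by the other OpenaiAPI providers: it only pins the api_base and max_tokens defaults and delegates everything else to OpenaiAPI. A hypothetical usage sketch (the prompt and token are illustrative, and the api_key keyword is assumed to be accepted and forwarded by OpenaiAPI):

    import asyncio
    from g4f.Provider.needs_auth import HuggingFaceAPI

    async def main():
        async for chunk in HuggingFaceAPI.create_async_generator(
            model=HuggingFaceAPI.default_model,
            messages=[{"role": "user", "content": "Hello"}],
            api_key="hf_...",  # assumption: used for the Bearer header upstream
        ):
            print(chunk, end="")

    asyncio.run(main())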
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 65fdbef9..46b998e8 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -24,8 +24,8 @@ class Poe(AbstractProvider):
url = "https://poe.com"
working = True
needs_auth = True
- supports_gpt_35_turbo = True
supports_stream = True
+
models = models.keys()
@classmethod
@@ -113,4 +113,4 @@ if(window._message && window._message != window._last_message) {
elif chunk != "":
break
else:
- time.sleep(0.1)
\ No newline at end of file
+ time.sleep(0.1)
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index b8ec5a97..008fcad8 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -10,8 +10,6 @@ from ..base_provider import AbstractProvider
class Raycast(AbstractProvider):
url = "https://raycast.com"
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
supports_stream = True
needs_auth = True
working = True
diff --git a/g4f/Provider/needs_auth/Reka.py b/g4f/Provider/needs_auth/Reka.py
new file mode 100644
index 00000000..780ff31e
--- /dev/null
+++ b/g4f/Provider/needs_auth/Reka.py
@@ -0,0 +1,150 @@
+from __future__ import annotations
+
+import os, requests, time, json
+from ...typing import CreateResult, Messages, ImageType
+from ..base_provider import AbstractProvider
+from ...cookies import get_cookies
+from ...image import to_bytes
+
+class Reka(AbstractProvider):
+ url = "https://chat.reka.ai/"
+ working = True
+ needs_auth = True
+ supports_stream = True
+ default_vision_model = "reka"
+ cookies = {}
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ api_key: str = None,
+ image: ImageType = None,
+ **kwargs
+ ) -> CreateResult:
+ cls.proxy = proxy
+
+ if not api_key:
+ cls.cookies = get_cookies("chat.reka.ai")
+ if not cls.cookies:
+ raise ValueError("No cookies found for chat.reka.ai")
+ elif "appSession" not in cls.cookies:
+                raise ValueError("No appSession found in cookies for chat.reka.ai, log in or provide an api_key")
+            api_key = cls.get_access_token()
+
+ conversation = []
+ for message in messages:
+ conversation.append({
+ "type": "human",
+ "text": message["content"],
+ })
+
+ if image:
+            image_url = cls.upload_image(api_key, image)
+ conversation[-1]["image_url"] = image_url
+ conversation[-1]["media_type"] = "image"
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'authorization': f'Bearer {api_key}',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.reka.ai',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+ }
+
+ json_data = {
+ 'conversation_history': conversation,
+ 'stream': True,
+ 'use_search_engine': False,
+ 'use_code_interpreter': False,
+ 'model_name': 'reka-core',
+ 'random_seed': int(time.time() * 1000),
+ }
+
+ tokens = ''
+
+ response = requests.post('https://chat.reka.ai/api/chat',
+ cookies=cls.cookies, headers=headers, json=json_data, proxies=cls.proxy, stream=True)
+
+ for completion in response.iter_lines():
+ if b'data' in completion:
+ token_data = json.loads(completion.decode('utf-8')[5:])['text']
+
+                yield token_data[len(tokens):]  # yield only the newly streamed suffix; replace() could drop repeated substrings
+
+ tokens = token_data
+
+    @classmethod
+    def upload_image(cls, access_token, image: ImageType) -> str:
+ boundary_token = os.urandom(8).hex()
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control': 'no-cache',
+ 'authorization': f'Bearer {access_token}',
+ 'content-type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary_token}',
+ 'origin': 'https://chat.reka.ai',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://chat.reka.ai/chat/hPReZExtDOPvUfF8vCPC',
+ 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+ }
+
+ image_data = to_bytes(image)
+
+ boundary = f'----WebKitFormBoundary{boundary_token}'
+ data = f'--{boundary}\r\nContent-Disposition: form-data; name="image"; filename="image.png"\r\nContent-Type: image/png\r\n\r\n'
+ data += image_data.decode('latin-1')
+ data += f'\r\n--{boundary}--\r\n'
+
+ response = requests.post('https://chat.reka.ai/api/upload-image',
+ cookies=cls.cookies, headers=headers, proxies=cls.proxy, data=data.encode('latin-1'))
+
+ return response.json()['media_url']
+
+    @classmethod
+    def get_access_token(cls):
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control': 'no-cache',
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': 'https://chat.reka.ai/chat',
+ 'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
+ }
+
+ try:
+ response = requests.get('https://chat.reka.ai/bff/auth/access_token',
+ cookies=cls.cookies, headers=headers, proxies=cls.proxy)
+
+ return response.json()['accessToken']
+
+ except Exception as e:
+            raise ValueError(f"Failed to get access token: {e}, refresh your cookies or log in to chat.reka.ai")
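
Note: the Reka endpoint streams cumulative text, so each `data:` event repeats everything generated so far; the completion loop therefore remembers the previous payload and yields only the new suffix. A minimal sketch of that delta logic, assuming cumulative payloads:

    previous = ""
    for payload in ("Hel", "Hello", "Hello world"):
        print(payload[len(previous):], end="")  # prints "Hel", then "lo", then " world"
        previous = payload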
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index c7d7d58e..7d3de027 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -35,9 +35,8 @@ class Theb(AbstractProvider):
label = "TheB.AI"
url = "https://beta.theb.ai"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
supports_stream = True
+
models = models.keys()
@classmethod
@@ -155,4 +154,4 @@ return '';
elif chunk != "":
break
else:
- time.sleep(0.1)
\ No newline at end of file
+ time.sleep(0.1)
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index c67dfb56..d79e7e3d 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -11,7 +11,7 @@ from .GithubCopilot import GithubCopilot
from .Groq import Groq
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
-from .HuggingFace2 import HuggingFace2
+from .HuggingFaceAPI import HuggingFaceAPI
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .MicrosoftDesigner import MicrosoftDesigner
@@ -21,6 +21,7 @@ from .OpenaiChat import OpenaiChat
from .PerplexityApi import PerplexityApi
from .Poe import Poe
from .Raycast import Raycast
+from .Reka import Reka
from .Replicate import Replicate
from .Theb import Theb
from .ThebApi import ThebApi
diff --git a/g4f/Provider/not_working/MagickPen.py b/g4f/Provider/not_working/MagickPen.py
new file mode 100644
index 00000000..56d8e4c4
--- /dev/null
+++ b/g4f/Provider/not_working/MagickPen.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import hashlib
+import time
+import random
+import re
+import json
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://magickpen.com"
+ api_endpoint = "https://api.magickpen.com/ask"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o-mini'
+ models = ['gpt-4o-mini']
+
+ @classmethod
+ async def fetch_api_credentials(cls) -> tuple:
+ url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
+ async with ClientSession() as session:
+ async with session.get(url) as response:
+ text = await response.text()
+
+ pattern = r'"X-API-Secret":"(\w+)"'
+ match = re.search(pattern, text)
+ X_API_SECRET = match.group(1) if match else None
+
+ timestamp = str(int(time.time() * 1000))
+ nonce = str(random.random())
+
+ s = ["TGDBU9zCgM", timestamp, nonce]
+ s.sort()
+ signature_string = ''.join(s)
+ signature = hashlib.md5(signature_string.encode()).hexdigest()
+
+ pattern = r'secret:"(\w+)"'
+ match = re.search(pattern, text)
+ secret = match.group(1) if match else None
+
+ if X_API_SECRET and timestamp and nonce and secret:
+ return X_API_SECRET, signature, timestamp, nonce, secret
+ else:
+ raise Exception("Unable to extract all the necessary data from the JavaScript file.")
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials()
+
+ headers = {
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'nonce': nonce,
+ 'origin': cls.url,
+ 'referer': f"{cls.url}/",
+ 'secret': secret,
+ 'signature': signature,
+ 'timestamp': timestamp,
+ 'x-api-secret': X_API_SECRET,
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ payload = {
+ 'query': prompt,
+ 'turnstileResponse': '',
+ 'action': 'verify'
+ }
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
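
Note: MagickPen's request signature is an MD5 digest over the lexicographically sorted triple of a hard-coded salt, a millisecond timestamp, and a random nonce. A standalone sketch of the same scheme (salt copied from the provider code above):

    import hashlib
    import random
    import time

    timestamp = str(int(time.time() * 1000))
    nonce = str(random.random())
    parts = sorted(["TGDBU9zCgM", timestamp, nonce])  # sort, then concatenate
    signature = hashlib.md5("".join(parts).encode()).hexdigest()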
diff --git a/g4f/Provider/not_working/RobocodersAPI.py b/g4f/Provider/not_working/RobocodersAPI.py
new file mode 100755
index 00000000..2716b704
--- /dev/null
+++ b/g4f/Provider/not_working/RobocodersAPI.py
@@ -0,0 +1,237 @@
+from __future__ import annotations
+
+import json
+import aiohttp
+from pathlib import Path
+
+try:
+ from bs4 import BeautifulSoup
+ HAS_BEAUTIFULSOUP = True
+except ImportError:
+ HAS_BEAUTIFULSOUP = False
+ BeautifulSoup = None
+
+from aiohttp import ClientTimeout
+from ...errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ...cookies import get_cookies_dir
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+from ... import debug
+
+
+class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "API Robocoders AI"
+ url = "https://api.robocoders.ai/docs"
+ api_endpoint = "https://api.robocoders.ai/chat"
+ working = False
+ supports_message_history = True
+ default_model = 'GeneralCodingAgent'
+ agent = [default_model, "RepoAgent", "FrontEndAgent"]
+ models = [*agent]
+
+ CACHE_DIR = Path(get_cookies_dir())
+ CACHE_FILE = CACHE_DIR / "robocoders.json"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+
+ timeout = ClientTimeout(total=600)
+
+ async with aiohttp.ClientSession(timeout=timeout) as session:
+ # Load or create access token and session ID
+ access_token, session_id = await cls._get_or_create_access_and_session(session)
+ if not access_token or not session_id:
+ raise Exception("Failed to initialize API interaction")
+
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {access_token}"
+ }
+
+ prompt = format_prompt(messages)
+
+ data = {
+ "sid": session_id,
+ "prompt": prompt,
+ "agent": model
+ }
+
+ async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
+                if response.status == 401:  # Unauthorized: clear the cached token so the next request re-authenticates
+ cls._clear_cached_data()
+ raise Exception("Unauthorized: Invalid token, please retry.")
+ elif response.status == 422:
+ raise Exception("Validation Error: Invalid input.")
+ elif response.status >= 500:
+ raise Exception(f"Server Error: {response.status}")
+ elif response.status != 200:
+ raise Exception(f"Unexpected Error: {response.status}")
+
+ async for line in response.content:
+ if line:
+ try:
+ # Decode bytes into a string
+ line_str = line.decode('utf-8')
+ response_data = json.loads(line_str)
+
+ # Get the message from the 'args.content' or 'message' field
+ message = (response_data.get('args', {}).get('content') or
+ response_data.get('message', ''))
+
+ if message:
+ yield message
+
+ # Check for reaching the resource limit
+ if (response_data.get('action') == 'message' and
+ response_data.get('args', {}).get('wait_for_response')):
+ # Automatically continue the dialog
+ continue_data = {
+ "sid": session_id,
+ "prompt": "continue",
+ "agent": model
+ }
+ async with session.post(
+ cls.api_endpoint,
+ headers=headers,
+ json=continue_data,
+ proxy=proxy
+ ) as continue_response:
+ if continue_response.status == 200:
+ async for continue_line in continue_response.content:
+ if continue_line:
+ try:
+ continue_line_str = continue_line.decode('utf-8')
+ continue_data = json.loads(continue_line_str)
+ continue_message = (
+ continue_data.get('args', {}).get('content') or
+ continue_data.get('message', '')
+ )
+ if continue_message:
+ yield continue_message
+ except json.JSONDecodeError:
+ debug.log(f"Failed to decode continue JSON: {continue_line}")
+ except Exception as e:
+ debug.log(f"Error processing continue response: {e}")
+
+ except json.JSONDecodeError:
+ debug.log(f"Failed to decode JSON: {line}")
+ except Exception as e:
+ debug.log(f"Error processing response: {e}")
+
+ @staticmethod
+ async def _get_or_create_access_and_session(session: aiohttp.ClientSession):
+ RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True) # Ensure cache directory exists
+
+ # Load data from cache
+ if RobocodersAPI.CACHE_FILE.exists():
+ with open(RobocodersAPI.CACHE_FILE, "r") as f:
+ data = json.load(f)
+ access_token = data.get("access_token")
+ session_id = data.get("sid")
+
+ # Validate loaded data
+ if access_token and session_id:
+ return access_token, session_id
+
+ # If data not valid, create new access token and session ID
+ access_token = await RobocodersAPI._fetch_and_cache_access_token(session)
+ session_id = await RobocodersAPI._create_and_cache_session(session, access_token)
+ return access_token, session_id
+
+ @staticmethod
+ async def _fetch_and_cache_access_token(session: aiohttp.ClientSession) -> str:
+        if not HAS_BEAUTIFULSOUP:
+            raise MissingRequirementsError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
+
+ url_auth = 'https://api.robocoders.ai/auth'
+ headers_auth = {
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
+ }
+
+ async with session.get(url_auth, headers=headers_auth) as response:
+ if response.status == 200:
+ html = await response.text()
+ soup = BeautifulSoup(html, 'html.parser')
+ token_element = soup.find('pre', id='token')
+ if token_element:
+ token = token_element.text.strip()
+
+ # Cache the token
+ RobocodersAPI._save_cached_data({"access_token": token})
+ return token
+ return None
+
+ @staticmethod
+ async def _create_and_cache_session(session: aiohttp.ClientSession, access_token: str) -> str:
+ url_create_session = 'https://api.robocoders.ai/create-session'
+ headers_create_session = {
+ 'Authorization': f'Bearer {access_token}'
+ }
+
+ async with session.get(url_create_session, headers=headers_create_session) as response:
+ if response.status == 200:
+ data = await response.json()
+ session_id = data.get('sid')
+
+ # Cache session ID
+ RobocodersAPI._update_cached_data({"sid": session_id})
+ return session_id
+ elif response.status == 401:
+ RobocodersAPI._clear_cached_data()
+ raise Exception("Unauthorized: Invalid token during session creation.")
+ elif response.status == 422:
+ raise Exception("Validation Error: Check input parameters.")
+ return None
+
+ @staticmethod
+ def _save_cached_data(new_data: dict):
+ """Save new data to cache file"""
+ RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True)
+ RobocodersAPI.CACHE_FILE.touch(exist_ok=True)
+ with open(RobocodersAPI.CACHE_FILE, "w") as f:
+ json.dump(new_data, f)
+
+ @staticmethod
+ def _update_cached_data(updated_data: dict):
+ """Update existing cache data with new values"""
+ data = {}
+ if RobocodersAPI.CACHE_FILE.exists():
+ with open(RobocodersAPI.CACHE_FILE, "r") as f:
+ try:
+ data = json.load(f)
+ except json.JSONDecodeError:
+ # If cache file is corrupted, start with empty dict
+ data = {}
+
+ data.update(updated_data)
+ with open(RobocodersAPI.CACHE_FILE, "w") as f:
+ json.dump(data, f)
+
+ @staticmethod
+ def _clear_cached_data():
+ """Remove cache file"""
+ try:
+ if RobocodersAPI.CACHE_FILE.exists():
+ RobocodersAPI.CACHE_FILE.unlink()
+ except Exception as e:
+ debug.log(f"Error clearing cache: {e}")
+
+ @staticmethod
+ def _get_cached_data() -> dict:
+ """Get all cached data"""
+ if RobocodersAPI.CACHE_FILE.exists():
+ try:
+ with open(RobocodersAPI.CACHE_FILE, "r") as f:
+ return json.load(f)
+ except json.JSONDecodeError:
+ return {}
+ return {}
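
Note: the caching helpers persist the access token and session id as a single JSON object (robocoders.json in the cookies directory); _update_cached_data merges new keys into whatever is already stored, so the token and sid can be written at different times. A minimal sketch of that merge round-trip (the path is illustrative):

    import json
    from pathlib import Path

    cache = Path("robocoders.json")
    cache.write_text(json.dumps({"access_token": "tok"}))
    data = json.loads(cache.read_text())
    data.update({"sid": "abc123"})      # the same merge _update_cached_data performs
    cache.write_text(json.dumps(data))  # {"access_token": "tok", "sid": "abc123"}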
diff --git a/g4f/Provider/not_working/Upstage.py b/g4f/Provider/not_working/Upstage.py
new file mode 100644
index 00000000..74355631
--- /dev/null
+++ b/g4f/Provider/not_working/Upstage.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://console.upstage.ai/playground/chat"
+ api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
+ working = False
+ default_model = 'solar-pro'
+ models = [
+ 'upstage/solar-1-mini-chat',
+ 'upstage/solar-1-mini-chat-ja',
+ 'solar-pro',
+ ]
+    model_aliases = {
+        "solar-mini": "upstage/solar-1-mini-chat",
+        "solar-mini-ja": "upstage/solar-1-mini-chat-ja",  # distinct key; a repeated "solar-mini" key would silently drop the first alias
+    }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://console.upstage.ai",
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": "https://console.upstage.ai/",
+ "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "cross-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "stream": True,
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "model": model
+ }
+
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ response_text = ""
+
+ async for line in response.content:
+ if line:
+ line = line.decode('utf-8').strip()
+
+ if line.startswith("data: ") and line != "data: [DONE]":
+ try:
+ data = json.loads(line[6:])
+ content = data['choices'][0]['delta'].get('content', '')
+ if content:
+ response_text += content
+ yield content
+ except json.JSONDecodeError:
+ continue
+
+ if line == "data: [DONE]":
+ break
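
Note: Upstage streams OpenAI-style server-sent events: every payload line starts with "data: " and the stream ends with the "data: [DONE]" sentinel, which is why the loop strips the six-character prefix before json.loads. A minimal parser sketch over canned lines:

    import json

    lines = ['data: {"choices":[{"delta":{"content":"Hi"}}]}', "data: [DONE]"]
    for line in lines:
        if line == "data: [DONE]":
            break
        if line.startswith("data: "):
            delta = json.loads(line[6:])["choices"][0]["delta"]
            print(delta.get("content", ""), end="")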
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index 69e38879..a58870c2 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -5,10 +5,13 @@ from .AiChats import AiChats
from .AIUncensored import AIUncensored
from .Aura import Aura
from .Chatgpt4o import Chatgpt4o
+from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
from .FlowGpt import FlowGpt
from .FreeNetfly import FreeNetfly
from .GPROChat import GPROChat
from .Koala import Koala
+from .MagickPen import MagickPen
from .MyShell import MyShell
-from .Chatgpt4Online import Chatgpt4Online
+from .RobocodersAPI import RobocodersAPI
+from .Upstage import Upstage
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 5410b46e..b5fcd280 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -284,7 +284,7 @@
-
+
@@ -302,4 +302,4 @@