Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Allyfy.py                   71
-rw-r--r--  g4f/Provider/ChatGot.py                  75
-rw-r--r--  g4f/Provider/Chatgpt4Online.py          101
-rw-r--r--  g4f/Provider/FreeNetfly.py              107
-rw-r--r--  g4f/Provider/GeminiProChat.py             4
-rw-r--r--  g4f/Provider/HuggingChat.py               3
-rw-r--r--  g4f/Provider/HuggingFace.py               5
-rw-r--r--  g4f/Provider/Liaobots.py                143
-rw-r--r--  g4f/Provider/LiteIcoding.py              97
-rw-r--r--  g4f/Provider/MagickPenAsk.py             50
-rw-r--r--  g4f/Provider/MagickPenChat.py            50
-rw-r--r--  g4f/Provider/Marsyoo.py                  64
-rw-r--r--  g4f/Provider/PerplexityLabs.py           15
-rw-r--r--  g4f/Provider/Pi.py                        3
-rw-r--r--  g4f/Provider/ReplicateHome.py            48
-rw-r--r--  g4f/Provider/TeachAnything.py            62
-rw-r--r--  g4f/Provider/You.py                      20
-rw-r--r--  g4f/Provider/__init__.py                  9
-rw-r--r--  g4f/Provider/needs_auth/Openai.py         3
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py     5
20 files changed, 727 insertions(+), 208 deletions(-)
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
new file mode 100644
index 00000000..8733b1ec
--- /dev/null
+++ b/g4f/Provider/Allyfy.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class Allyfy(AsyncGeneratorProvider):
+ url = "https://chatbot.allyfy.chat"
+ api_endpoint = "/api/v1/message/stream/super/chat"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json;charset=utf-8",
+ "dnt": "1",
+ "origin": "https://www.allyfy.chat",
+ "priority": "u=1, i",
+ "referer": "https://www.allyfy.chat/",
+ "referrer": "https://www.allyfy.chat",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [{"content": prompt, "role": "user"}],
+ "content": prompt,
+ "baseInfo": {
+ "clientId": "q08kdrde1115003lyedfoir6af0yy531",
+ "pid": "38281",
+ "channelId": "100000",
+ "locale": "en-US",
+ "localZone": 180,
+ "packageName": "com.cch.allyfy.webh",
+ }
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_response = []
+ async for line in response.content:
+ line = line.decode().strip()
+ if line.startswith("data:"):
+ data_content = line[5:]
+ if data_content == "[DONE]":
+ break
+ try:
+ json_data = json.loads(data_content)
+ if "content" in json_data:
+ full_response.append(json_data["content"])
+ except json.JSONDecodeError:
+ continue
+ yield "".join(full_response)
diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/ChatGot.py
new file mode 100644
index 00000000..55e8d0b6
--- /dev/null
+++ b/g4f/Provider/ChatGot.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.chatgot.one/"
+ working = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(time: int, text: str, secret: str = ""):
+ message = f"{time}:{text}:{secret}"
+ return sha256(message.encode()).hexdigest()
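
The request is authenticated only by this digest: the "sign" field is SHA-256 over the millisecond timestamp, the last message text, and an empty default secret. A worked example of the value the server checks, standard library only:

    import time
    from hashlib import sha256

    timestamp = int(time.time() * 1e3)  # milliseconds, matching the "time" field
    text = "Hello, Gemini"
    sign = sha256(f"{timestamp}:{text}:".encode()).hexdigest()  # secret defaults to ""
    print(timestamp, sign)
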
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index ff9a2c8f..d55be65b 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -1,22 +1,18 @@
from __future__ import annotations
-import re
import json
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import format_prompt
+
class Chatgpt4Online(AsyncGeneratorProvider):
url = "https://chatgpt4online.org"
- supports_message_history = True
- supports_gpt_35_turbo = True
- working = True
- _wpnonce = None
- _context_id = None
+ api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+ working = True
+ supports_gpt_4 = True
@classmethod
async def create_async_generator(
@@ -24,49 +20,52 @@ class Chatgpt4Online(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
- webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
- args = get_args_from_browser(f"{cls.url}/chat/", webdriver, proxy=proxy)
- async with ClientSession(**args) as session:
- if not cls._wpnonce:
- async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
- response.raise_for_status()
- response = await response.text()
- result = re.search(r'restNonce":"(.*?)"', response)
- if result:
- cls._wpnonce = result.group(1)
- else:
- raise RuntimeError("No nonce found")
- result = re.search(r'contextId":(.*?),', response)
- if result:
- cls._context_id = result.group(1)
- else:
- raise RuntimeError("No contextId found")
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ "x-wp-nonce": "d9505e9877",
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
- "botId":"default",
- "customId":None,
- "session":"N/A",
- "chatId":get_random_string(11),
- "contextId":cls._context_id,
- "messages":messages[:-1],
- "newMessage":messages[-1]["content"],
- "newImageId":None,
- "stream":True
+ "botId": "default",
+ "newMessage": prompt,
+ "stream": True,
}
- async with session.post(
- f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
- json=data,
- proxy=proxy,
- headers={"x-wp-nonce": cls._wpnonce}
- ) as response:
+
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
- async for line in response.content:
- if line.startswith(b"data: "):
- line = json.loads(line[6:])
- if "type" not in line:
- raise RuntimeError(f"Response: {line}")
- elif line["type"] == "live":
- yield line["data"]
- elif line["type"] == "end":
- break
+ full_response = ""
+
+ async for chunk in response.content.iter_any():
+ if chunk:
+ try:
+ # Extract the JSON object from the chunk
+ for line in chunk.decode().splitlines():
+ if line.startswith("data: "):
+ json_data = json.loads(line[6:])
+ if json_data["type"] == "live":
+ full_response += json_data["data"]
+ elif json_data["type"] == "end":
+ final_data = json.loads(json_data["data"])
+ full_response = final_data["reply"]
+ break
+ except json.JSONDecodeError:
+ continue
+
+ yield full_response
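
The new parser treats "live" events as incremental deltas but lets the final "end" event win: its "data" field is a second JSON document whose "reply" carries the full answer. A small offline sketch of that two-level decoding, with invented payloads:

    import json

    def apply_event(line, text):
        # One "data: " SSE line -> updated response text, as in the loop above.
        event = json.loads(line[6:])
        if event["type"] == "live":
            return text + event["data"]                # incremental delta
        if event["type"] == "end":
            return json.loads(event["data"])["reply"]  # authoritative final reply
        return text

    text = ""
    for line in ['data: {"type": "live", "data": "Hi"}',
                 'data: {"type": "end", "data": "{\\"reply\\": \\"Hi there\\"}"}']:
        text = apply_event(line, text)
    print(text)  # -> Hi there
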
diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/FreeNetfly.py
new file mode 100644
index 00000000..624f33cf
--- /dev/null
+++ b/g4f/Provider/FreeNetfly.py
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, ClientTimeout, ClientError
+from typing import AsyncGenerator
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://free.netfly.top"
+ api_endpoint = "/api/openai/v1/chat/completions"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ data = {
+ "messages": messages,
+ "stream": True,
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
+ }
+
+ max_retries = 3
+ retry_delay = 1
+
+ for attempt in range(max_retries):
+ try:
+ async with ClientSession(headers=headers) as session:
+ timeout = ClientTimeout(total=60)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
+ response.raise_for_status()
+ async for chunk in cls._process_response(response):
+ yield chunk
+ return # If successful, exit the function
+ except (ClientError, asyncio.TimeoutError) as e:
+ if attempt == max_retries - 1:
+ raise # If all retries failed, raise the last exception
+ await asyncio.sleep(retry_delay)
+ retry_delay *= 2 # Exponential backoff
+
+ @classmethod
+ async def _process_response(cls, response) -> AsyncGenerator[str, None]:
+ buffer = ""
+ async for line in response.content:
+ buffer += line.decode('utf-8')
+ if buffer.endswith('\n\n'):
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: '):
+ if subline == 'data: [DONE]':
+ return
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ print(f"Failed to parse JSON: {subline}")
+ except KeyError:
+ print(f"Unexpected JSON structure: {data}")
+ buffer = ""
+
+ # Process any remaining data in the buffer
+ if buffer:
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: ') and subline != 'data: [DONE]':
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except (json.JSONDecodeError, KeyError):
+ pass
+
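The retry loop in create_async_generator is plain exponential backoff: try, sleep, double the delay, and re-raise only on the last attempt. The same pattern as a reusable helper, a sketch assuming only transient network errors are worth retrying:

    import asyncio

    async def with_backoff(make_call, retries=3, delay=1.0,
                           retry_on=(OSError, asyncio.TimeoutError)):
        # Mirrors FreeNetfly: wait 1s, 2s, 4s..., then surface the last error.
        for attempt in range(retries):
            try:
                return await make_call()
            except retry_on:
                if attempt == retries - 1:
                    raise
                await asyncio.sleep(delay)
                delay *= 2

    async def flaky():
        raise asyncio.TimeoutError("simulated transient failure")

    # asyncio.run(with_backoff(flaky))  # raises after roughly 1s + 2s of backoff
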
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index c61e2ff3..208ca773 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -13,10 +13,10 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.chatgot.one/"
+ url = "https://gemini-pro.chat/"
working = True
supports_message_history = True
- default_model = ''
+ default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index d480d13c..f7c6b581 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -13,8 +13,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
supports_stream = True
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index a5e27ccf..6634aa75 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -14,16 +14,17 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
working = True
needs_auth = True
supports_message_history = True
+ default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
models = [
+ 'meta-llama/Meta-Llama-3.1-70B-Instruct',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
'CohereForAI/c4ai-command-r-plus',
- 'meta-llama/Meta-Llama-3-70B-Instruct',
'mistralai/Mixtral-8x7B-Instruct-v0.1',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
'01-ai/Yi-1.5-34B-Chat',
'mistralai/Mistral-7B-Instruct-v0.2',
'microsoft/Phi-3-mini-4k-instruct',
]
- default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 277d8ea2..af90860d 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -10,14 +10,23 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
+ "gpt-4o-mini-free": {
+ "id": "gpt-4o-mini-free",
+ "name": "GPT-4o-Mini-Free",
"model": "ChatGPT",
"provider": "OpenAI",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
+ "maxLength": 31200,
+ "tokenLimit": 7800,
+ "context": "8K",
+ },
+ "gpt-4o-mini": {
+ "id": "gpt-4o-mini",
+ "name": "GPT-4o-Mini",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
"gpt-4o-free": {
"context": "8K",
@@ -48,106 +57,26 @@ models = {
},
"gpt-4-0613": {
"id": "gpt-4-0613",
- "name": "GPT-4-0613",
+ "name": "GPT-4",
"model": "ChatGPT",
"provider": "OpenAI",
- "maxLength": 32000,
- "tokenLimit": 7600,
- "context": "8K",
- },
- "claude-3-opus-20240229": {
- "id": "claude-3-opus-20240229",
- "name": "Claude-3-Opus",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-opus-20240229-aws": {
- "id": "claude-3-opus-20240229-aws",
- "name": "Claude-3-Opus-Aws",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-opus-100k-poe": {
- "id": "claude-3-opus-100k-poe",
- "name": "Claude-3-Opus-100k-Poe",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 400000,
- "tokenLimit": 99000,
- "context": "100K",
- },
- "claude-3-sonnet-20240229": {
- "id": "claude-3-sonnet-20240229",
- "name": "Claude-3-Sonnet",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-haiku-20240307": {
- "id": "claude-3-haiku-20240307",
- "name": "Claude-3-Haiku",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-2.1": {
- "id": "claude-2.1",
- "name": "Claude-2.1-200k",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-2.0": {
- "id": "claude-2.0",
- "name": "Claude-2.0-100k",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
- },
- "gemini-1.0-pro-latest": {
- "id": "gemini-1.0-pro-latest",
- "name": "Gemini-Pro",
- "model": "Gemini",
- "provider": "Google",
- "maxLength": 120000,
- "tokenLimit": 30000,
- "context": "32K",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
- "gemini-1.5-flash-latest": {
- "id": "gemini-1.5-flash-latest",
- "name": "Gemini-1.5-Flash-1M",
- "model": "Gemini",
- "provider": "Google",
- "maxLength": 4000000,
- "tokenLimit": 1000000,
- "context": "1024K",
+ "gpt-4-turbo": {
+ "id": "gpt-4-turbo",
+ "name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
- "gemini-1.5-pro-latest": {
- "id": "gemini-1.5-pro-latest",
- "name": "Gemini-1.5-Pro-1M",
- "model": "Gemini",
- "provider": "Google",
- "maxLength": 4000000,
- "tokenLimit": 1000000,
- "context": "1024K",
- }
}
+
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
@@ -155,10 +84,22 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
- default_model = "gpt-3.5-turbo"
+ default_model = "gpt-4o"
models = list(models.keys())
model_aliases = {
- "claude-v2": "claude-2.0"
+ "gpt-4o-mini": "gpt-4o-mini-free",
+ "gpt-4o": "gpt-4o-free",
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gpt-4-": "gpt-4-0613",
+ "claude-3-opus": "claude-3-opus-20240229",
+ "claude-3-opus": "claude-3-opus-20240229-aws",
+ "claude-3-opus": "claude-3-opus-20240229-gcp",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "gemini-pro": "gemini-1.5-pro-latest",
+ "gemini-pro": "gemini-1.0-pro-latest",
+ "gemini-flash": "gemini-1.5-flash-latest",
}
_auth_code = ""
_cookie_jar = None
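
Note that model_aliases is a plain dict literal, so a repeated key is silently collapsed to its last value; each alias above therefore maps to exactly one upstream id. The pitfall in two lines:

    aliases = {"claude-3-opus": "claude-3-opus-20240229", "claude-3-opus": "claude-3-opus-20240229-aws"}
    print(aliases)  # -> {'claude-3-opus': 'claude-3-opus-20240229-aws'}; the first mapping is lost
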
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
new file mode 100644
index 00000000..6aa407ca
--- /dev/null
+++ b/g4f/Provider/LiteIcoding.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientResponseError
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://lite.icoding.ink"
+ api_endpoint = "/api/v1/gpt/message"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o"
+ models = [
+ 'gpt-4o',
+ 'gpt-4-turbo',
+ 'claude-3',
+ 'claude-3.5',
+ 'gemini-1.5',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Authorization": "Bearer null",
+ "Connection": "keep-alive",
+ "Content-Type": "application/json;charset=utf-8",
+ "DNT": "1",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": (
+ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/126.0.0.0 Safari/537.36"
+ ),
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ }
+
+ data = {
+ "model": model,
+ "chatId": "-1",
+ "messages": [
+ {
+ "role": msg["role"],
+ "content": msg["content"],
+ "time": msg.get("time", ""),
+ "attachments": msg.get("attachments", []),
+ }
+ for msg in messages
+ ],
+ "plugins": [],
+ "systemPrompt": "",
+ "temperature": 0.5,
+ }
+
+ async with ClientSession(headers=headers) as session:
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ buffer = ""
+ full_response = ""
+ async for chunk in response.content.iter_any():
+ if chunk:
+ buffer += chunk.decode()
+ while "\n\n" in buffer:
+ part, buffer = buffer.split("\n\n", 1)
+ if part.startswith("data: "):
+ content = part[6:].strip()
+ if content and content != "[DONE]":
+ content = content.strip('"')
+ full_response += content
+
+ full_response = full_response.replace('" "', ' ')
+ yield full_response.strip()
+
+ except ClientResponseError as e:
+ raise RuntimeError(
+ f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}"
+ ) from e
+
+ except Exception as e:
+ raise RuntimeError(f"Unexpected error: {str(e)}") from e
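
Because iter_any may split a server-sent frame across chunks, the code buffers input until a blank line ("\n\n") marks a complete frame before parsing it. The reassembly step in isolation, fed deliberately fragmented input:

    buffer = ""
    parts = []
    for chunk in ['data: "Hel', 'lo"\n\ndata: " world"\n\n']:  # fragments, as iter_any may deliver them
        buffer += chunk
        while "\n\n" in buffer:
            frame, buffer = buffer.split("\n\n", 1)
            if frame.startswith("data: "):
                parts.append(frame[6:].strip().strip('"'))
    print("".join(parts))  # -> Hello world
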
diff --git a/g4f/Provider/MagickPenAsk.py b/g4f/Provider/MagickPenAsk.py
new file mode 100644
index 00000000..54058228
--- /dev/null
+++ b/g4f/Provider/MagickPenAsk.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.magickpen.com"
+ api_endpoint = "/ask"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://magickpen.com",
+ "priority": "u=1, i",
+ "referer": "https://magickpen.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "query": format_prompt(messages),
+ "plan": "Pay as you go"
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/MagickPenChat.py b/g4f/Provider/MagickPenChat.py
new file mode 100644
index 00000000..6c30028a
--- /dev/null
+++ b/g4f/Provider/MagickPenChat.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://api.magickpen.com"
+ api_endpoint = "/chat/free"
+ working = True
+ supports_gpt_4 = True
+ default_model = "gpt-4o-mini"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.9",
+ "access-control-allow-origin": "*",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://magickpen.com",
+ "priority": "u=1, i",
+ "referer": "https://magickpen.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "history": [{"role": "user", "content": format_prompt(messages)}]
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/Marsyoo.py b/g4f/Provider/Marsyoo.py
new file mode 100644
index 00000000..1c5fa9fd
--- /dev/null
+++ b/g4f/Provider/Marsyoo.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession, ClientResponseError
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Marsyoo(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aiagent.marsyoo.com"
+ api_endpoint = "/api/chat-messages"
+ working = True
+ supports_gpt_4 = True
+ default_model = 'gpt-4o'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Connection": "keep-alive",
+ "DNT": "1",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/chat",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ "authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI0MWNkOTE3MS1mNTg1LTRjMTktOTY0Ni01NzgxMTBjYWViNTciLCJzdWIiOiJXZWIgQVBJIFBhc3Nwb3J0IiwiYXBwX2lkIjoiNDFjZDkxNzEtZjU4NS00YzE5LTk2NDYtNTc4MTEwY2FlYjU3IiwiYXBwX2NvZGUiOiJMakhzdWJqNjhMTXZCT0JyIiwiZW5kX3VzZXJfaWQiOiI4YjE5YjY2Mi05M2E1LTRhYTktOGNjNS03MDhmNWE0YmQxNjEifQ.pOzdQ4wTrQjjRlEv1XY9TZitkW5KW1K-wbcUJAoBJ5I",
+ "content-type": "application/json",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": "Linux",
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "response_mode": "streaming",
+ "query": prompt,
+ "inputs": {},
+ }
+ try:
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line:
+ try:
+ json_data = json.loads(line.decode('utf-8').strip().removeprefix('data: '))
+ if json_data['event'] == 'message':
+ yield json_data['answer']
+ elif json_data['event'] == 'message_end':
+ return
+ except json.JSONDecodeError:
+ continue
+ except ClientResponseError as e:
+ yield f"Error: HTTP {e.status}: {e.message}"
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 4a2cc9e5..0a298e55 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -15,21 +15,8 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = "mixtral-8x7b-instruct"
models = [
- "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat",
- "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct",
- "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it",
- "related"
+ "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat", "llama-3-8b-instruct", "llama-3-70b-instruct", "gemma-2-9b-it", "gemma-2-27b-it", "nemotron-4-340b-instruct", "mixtral-8x7b-instruct",
]
- model_aliases = {
- "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
- "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
- "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
- "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
- "llava-v1.5-7b": "llava-v1.5-7b-wrapper",
- "databricks/dbrx-instruct": "dbrx-instruct",
- "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
- "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
- }
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 5a1e9f0e..e03830f4 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -11,6 +11,7 @@ class Pi(AbstractProvider):
working = True
supports_stream = True
_session = None
+ default_model = "pi"
@classmethod
def create_completion(
@@ -65,4 +66,4 @@ class Pi(AbstractProvider):
yield json.loads(line.split(b'data: ')[1])
elif line.startswith(b'data: {"title":'):
yield json.loads(line.split(b'data: ')[1])
- \ No newline at end of file
+
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index 48336831..e6c8d2d3 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -14,40 +14,46 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
- default_model = 'stability-ai/sdxl'
+ default_model = 'stability-ai/stable-diffusion-3'
models = [
- # image
- 'stability-ai/sdxl',
- 'ai-forever/kandinsky-2.2',
+ # Models for image generation
+ 'stability-ai/stable-diffusion-3',
+ 'bytedance/sdxl-lightning-4step',
+ 'playgroundai/playground-v2.5-1024px-aesthetic',
- # text
- 'meta/llama-2-70b-chat',
- 'mistralai/mistral-7b-instruct-v0.2'
+ # Models for text generation
+ 'meta/meta-llama-3-70b-instruct',
+ 'mistralai/mixtral-8x7b-instruct-v0.1',
+ 'google-deepmind/gemma-2b-it',
]
versions = {
- # image
- 'stability-ai/sdxl': [
- "39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
- "2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
- "7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
+ # Model versions for generating images
+ 'stability-ai/stable-diffusion-3': [
+ "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
],
- 'ai-forever/kandinsky-2.2': [
- "ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
+ 'bytedance/sdxl-lightning-4step': [
+ "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
+ ],
+ 'playgroundai/playground-v2.5-1024px-aesthetic': [
+ "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
],
-
- # Text
- 'meta/llama-2-70b-chat': [
- "dp-542693885b1777c98ef8c5a98f2005e7"
+
+ # Model versions for text generation
+ 'meta/meta-llama-3-70b-instruct': [
+ "dp-cf04fe09351e25db628e8b6181276547"
],
- 'mistralai/mistral-7b-instruct-v0.2': [
+ 'mistralai/mixtral-8x7b-instruct-v0.1': [
"dp-89e00f489d498885048e94f9809fbc76"
+ ],
+ 'google-deepmind/gemma-2b-it': [
+ "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
]
}
- image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
- text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
+ image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
+ text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/TeachAnything.py b/g4f/Provider/TeachAnything.py
new file mode 100644
index 00000000..908dd56e
--- /dev/null
+++ b/g4f/Provider/TeachAnything.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+from typing import Any, Dict
+
+from aiohttp import ClientSession, ClientTimeout
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.teach-anything.com"
+ api_endpoint = "/api/generate"
+ working = True
+ default_model = "llama-3-70b-instruct"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str | None = None,
+ **kwargs: Any
+ ) -> AsyncResult:
+ headers = cls._get_headers()
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {"prompt": prompt}
+
+ timeout = ClientTimeout(total=60)
+
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ json=data,
+ proxy=proxy,
+ timeout=timeout
+ ) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
+
+ @staticmethod
+ def _get_headers() -> Dict[str, str]:
+ return {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": "https://www.teach-anything.com",
+ "priority": "u=1, i",
+ "referer": "https://www.teach-anything.com/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
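
Since the class needs nothing beyond a model name and a message list, a provider like this can be smoke-tested by driving its generator directly; a minimal sketch, assuming the g4f package is importable and the endpoint is reachable:

    import asyncio
    from g4f.Provider import TeachAnything

    async def main():
        messages = [{"role": "user", "content": "Explain recursion in one sentence."}]
        async for chunk in TeachAnything.create_async_generator("llama-3-70b-instruct", messages):
            print(chunk, end="", flush=True)

    asyncio.run(main())
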
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 162d6adb..cdf5f430 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -24,27 +24,27 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
+ "gpt-4o-mini",
"gpt-4o",
- "gpt-4",
"gpt-4-turbo",
- "claude-instant",
- "claude-2",
+ "gpt-4",
+ "claude-3.5-sonnet",
"claude-3-opus",
"claude-3-sonnet",
"claude-3-haiku",
- "gemini-pro",
+ "claude-2",
+ "llama-3.1-70b",
+ "llama-3",
+ "gemini-1-5-flash",
"gemini-1-5-pro",
+ "gemini-1-0-pro",
"databricks-dbrx-instruct",
"command-r",
"command-r-plus",
- "llama3",
- "zephyr",
+ "dolphin-2.5",
default_vision_model,
*image_models
]
- model_aliases = {
- "claude-v2": "claude-2",
- }
_cookies = None
_cookies_used = 0
_telemetry_ids = []
@@ -220,4 +220,4 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
'stytch_session_jwt': session["session_jwt"],
'ydc_stytch_session': session["session_token"],
'ydc_stytch_session_jwt': session["session_jwt"],
- } \ No newline at end of file
+ }
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 56c01150..8bbf71b3 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,10 +11,12 @@ from .selenium import *
from .needs_auth import *
from .AI365VIP import AI365VIP
+from .Allyfy import Allyfy
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
+from .ChatGot import ChatGot
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
@@ -25,16 +27,22 @@ from .DeepInfraImage import DeepInfraImage
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
+from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat
from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
+from .LiteIcoding import LiteIcoding
from .Llama import Llama
from .Local import Local
+from .MagickPenAsk import MagickPenAsk
+from .MagickPenChat import MagickPenChat
+from .Marsyoo import Marsyoo
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
@@ -44,6 +52,7 @@ from .Pizzagpt import Pizzagpt
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
+from .TeachAnything import TeachAnything
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index 9da6bad8..a0740c47 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -16,6 +16,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
needs_auth = True
supports_message_history = True
supports_system_message = True
+ default_model = ""
@classmethod
async def create_async_generator(
@@ -120,4 +121,4 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
if api_key is not None else {}
),
**({} if headers is None else headers)
- } \ No newline at end of file
+ }
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9321c24a..82462040 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -55,16 +55,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "OpenAI ChatGPT"
url = "https://chatgpt.com"
working = True
- supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_message_history = True
supports_system_message = True
default_model = None
default_vision_model = "gpt-4o"
- models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
+ models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"]
model_aliases = {
- "text-davinci-002-render-sha": "gpt-3.5-turbo",
- "": "gpt-3.5-turbo",
"gpt-4-turbo-preview": "gpt-4",
"dall-e": "gpt-4",
}