summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkqlio67 <166700875+kqlio67@users.noreply.github.com>2024-04-12 21:00:57 +0200
committerGitHub <noreply@github.com>2024-04-12 21:00:57 +0200
commitfa739d2e7cb5328c93c0b4ef0d2192858a805d34 (patch)
treeaf219e57aeeecd24fa05faf0fdb06d4bbb8de8d2
parentAdd labels to provider list (diff)
downloadgpt4free-fa739d2e7cb5328c93c0b4ef0d2192858a805d34.tar
gpt4free-fa739d2e7cb5328c93c0b4ef0d2192858a805d34.tar.gz
gpt4free-fa739d2e7cb5328c93c0b4ef0d2192858a805d34.tar.bz2
gpt4free-fa739d2e7cb5328c93c0b4ef0d2192858a805d34.tar.lz
gpt4free-fa739d2e7cb5328c93c0b4ef0d2192858a805d34.tar.xz
gpt4free-fa739d2e7cb5328c93c0b4ef0d2192858a805d34.tar.zst
gpt4free-fa739d2e7cb5328c93c0b4ef0d2192858a805d34.zip
Diffstat (limited to '')
-rw-r--r--g4f/Provider/Aichatos.py57
-rw-r--r--g4f/Provider/Blackbox.py56
-rw-r--r--g4f/Provider/Cnote.py58
-rw-r--r--g4f/Provider/Feedough.py52
-rw-r--r--g4f/Provider/Liaobots.py2
-rw-r--r--g4f/Provider/__init__.py6
6 files changed, 229 insertions, 2 deletions
diff --git a/g4f/Provider/Aichatos.py b/g4f/Provider/Aichatos.py
new file mode 100644
index 00000000..427aa0e7
--- /dev/null
+++ b/g4f/Provider/Aichatos.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+import random
+
+
class Aichatos(AsyncGeneratorProvider):
    """Streaming provider backed by chat10.aichatos.xyz (binjie.fun API)."""
    url = "https://chat10.aichatos.xyz"
    api = "https://api.binjie.fun"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield decoded text chunks streamed from the generateStream endpoint.

        Args:
            model: Model name (ignored by this backend).
            messages: Conversation history, flattened into one prompt.
            proxy: Optional HTTP proxy URL passed to aiohttp.
            **kwargs: May carry ``system_message`` (str) to set the system prompt.

        Raises:
            aiohttp.ClientResponseError: If the API responds with an error status.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Origin": "https://chat10.aichatos.xyz",
            "DNT": "1",
            "Sec-GPC": "1",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            # Random 13-digit id, mimicking the ids the web client generates.
            user_id = random.randint(1000000000000, 9999999999999)
            # BUG FIX: the original read `system_message: str = "",` — the
            # trailing comma made it an annotated assignment of the 1-tuple
            # ("",), so "system" was serialized as a JSON array, not a string.
            system_message: str = kwargs.get("system_message", "")
            data = {
                "prompt": prompt,
                # BUG FIX: this was a plain (non-f) string, so the literal
                # text "{userId}" was sent instead of the random id.
                "userId": f"#/chat/{user_id}",
                "network": True,
                "system": system_message,
                "withoutContext": False,
                "stream": True,
            }
            async with session.post(f"{cls.api}/api/generateStream", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
new file mode 100644
index 00000000..6e43a7a4
--- /dev/null
+++ b/g4f/Provider/Blackbox.py
@@ -0,0 +1,56 @@
+from __future__ import annotations
+
+import uuid
+import secrets
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
class Blackbox(AsyncGeneratorProvider):
    """Streaming provider for the www.blackbox.ai chat API."""
    url = "https://www.blackbox.ai"
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield decoded response chunks streamed from /api/chat.

        Args:
            model: Model name (ignored by this backend).
            messages: Conversation history, forwarded verbatim.
            proxy: Optional HTTP proxy URL passed to aiohttp.
        """
        request_headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": cls.url,
            "Content-Type": "application/json",
            "Origin": cls.url,
            "DNT": "1",
            "Sec-GPC": "1",
            "Alt-Used": "www.blackbox.ai",
            "Connection": "keep-alive",
        }
        async with ClientSession(headers=request_headers) as session:
            # Fresh identifiers per request: a hex conversation id and a UUID user id.
            conversation_id = secrets.token_hex(16)
            user_id = str(uuid.uuid4())
            payload = {
                "messages": messages,
                "id": conversation_id,
                "userId": user_id,
                "codeModelMode": True,
                "agentMode": {},
                "trendingAgentMode": {},
                "isMicMode": False,
                "isChromeExt": False,
                "playgroundMode": False,
                "webSearchMode": False,
                "userSystemPrompt": "",
                "githubToken": None,
            }
            endpoint = f"{cls.url}/api/chat"
            async with session.post(endpoint, json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for piece in response.content:
                    if not piece:
                        continue
                    yield piece.decode()
diff --git a/g4f/Provider/Cnote.py b/g4f/Provider/Cnote.py
new file mode 100644
index 00000000..7d4018bb
--- /dev/null
+++ b/g4f/Provider/Cnote.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
class Cnote(AsyncGeneratorProvider):
    """Streaming provider for f1.cnote.top via the p1api.xjai.pro chat-process API."""
    url = "https://f1.cnote.top"
    api_url = "https://p1api.xjai.pro/freeapi/chat-process"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield text fragments parsed from the chat-process stream.

        The backend interleaves JSON payloads separated by the marker
        ``&KFw6loC9Qvy&``; chunks that do not parse cleanly are skipped.

        Args:
            model: Model name (ignored by this backend).
            messages: Conversation history, flattened into one prompt.
            proxy: Optional HTTP proxy URL passed to aiohttp.
            **kwargs: May carry ``system_message`` (str) for the system prompt.

        Raises:
            aiohttp.ClientResponseError: If the API responds with an error status.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "DNT": "1",
            "Sec-GPC": "1",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            # BUG FIX: the original read `system_message: str = "",` — the
            # trailing comma made it an annotated assignment of the 1-tuple
            # ("",), so "systemMessage" was serialized as a JSON array.
            system_message: str = kwargs.get("system_message", "")
            data = {
                "prompt": prompt,
                "systemMessage": system_message,
                "temperature": 0.8,
                "top_p": 1,
            }
            async with session.post(cls.api_url, json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        try:
                            # Keep only the payload after the last marker;
                            # earlier segments are cumulative duplicates.
                            data = json.loads(chunk.decode().split("&KFw6loC9Qvy&")[-1])
                            text = data.get("text", "")
                            yield text
                        except (json.JSONDecodeError, IndexError):
                            # Partial/garbled chunk — skip and wait for the next one.
                            pass
diff --git a/g4f/Provider/Feedough.py b/g4f/Provider/Feedough.py
new file mode 100644
index 00000000..2ce4561e
--- /dev/null
+++ b/g4f/Provider/Feedough.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
class Feedough(AsyncGeneratorProvider):
    """Provider using the www.feedough.com AI prompt generator admin-ajax endpoint."""
    url = "https://www.feedough.com"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the generated message returned by the aixg_generate action.

        Args:
            model: Model name (ignored by this backend).
            messages: Conversation history, flattened into one prompt.
            proxy: Optional HTTP proxy URL passed to aiohttp.

        Raises:
            aiohttp.ClientResponseError: If the API responds with an error status.
            RuntimeError: If the response body is not valid JSON.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": "https://www.feedough.com/ai-prompt-generator/",
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
            "Origin": "https://www.feedough.com",
            "DNT": "1",
            "Sec-GPC": "1",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            # WordPress admin-ajax expects form-encoded fields, hence data=, not json=.
            data = {
                "action": "aixg_generate",
                "prompt": prompt,
            }
            async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
                response.raise_for_status()
                response_text = await response.text()
                try:
                    response_json = json.loads(response_text)
                except json.JSONDecodeError as err:
                    # Error pages (e.g. HTML from WordPress) are not JSON; surface
                    # a clear error instead of a raw decode traceback.
                    raise RuntimeError(f"Feedough returned non-JSON response: {response_text[:200]}") from err
                # ROBUSTNESS FIX: the original indexed ["success"]/["data"]["message"]
                # directly and raised bare KeyError on any error-shaped payload.
                if response_json.get("success"):
                    message = response_json.get("data", {}).get("message")
                    if message is not None:
                        yield message
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index b3bd6540..deb7899c 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -142,7 +142,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"model": models[cls.get_model(model)],
"messages": messages,
"key": "",
- "prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully."),
+ "prompt": kwargs.get("system_message", "You are a helpful assistant."),
}
async with session.post(
"https://liaobots.work/api/chat",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index e7d1e4b3..b818a752 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,18 +11,22 @@ from .selenium import *
from .needs_auth import *
from .unfinished import *
+from .Aichatos import Aichatos
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
+from .Blackbox import Blackbox
from .ChatForAi import ChatForAi
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptAi import ChatgptAi
from .ChatgptFree import ChatgptFree
from .ChatgptNext import ChatgptNext
from .ChatgptX import ChatgptX
+from .Cnote import Cnote
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .DuckDuckGo import DuckDuckGo
+from .Feedough import Feedough
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
@@ -62,4 +66,4 @@ __map__: dict[str, ProviderType] = dict([
])
class ProviderUtils:
- convert: dict[str, ProviderType] = __map__ \ No newline at end of file
+ convert: dict[str, ProviderType] = __map__