author    kqlio67 <166700875+kqlio67@users.noreply.github.com>  2024-07-08 22:52:43 +0200
committer GitHub <noreply@github.com>                           2024-07-08 22:52:43 +0200
commit    051dfc158407bfb618255a249bdf18e844d437d8 (patch)
tree      77df815bcbb4092392e314c656149972c5c2e955 /g4f/Provider/not_working
parent    Add files via upload (diff)
Diffstat (limited to 'g4f/Provider/not_working')
-rw-r--r--  g4f/Provider/not_working/Aichatos.py      56
-rw-r--r--  g4f/Provider/not_working/ChatForAi.py     66
-rw-r--r--  g4f/Provider/not_working/ChatgptAi.py     88
-rw-r--r--  g4f/Provider/not_working/ChatgptNext.py   66
-rw-r--r--  g4f/Provider/not_working/ChatgptX.py     106
-rw-r--r--  g4f/Provider/not_working/Cnote.py         58
-rw-r--r--  g4f/Provider/not_working/Feedough.py      78
-rw-r--r--  g4f/Provider/not_working/__init__.py       9
8 files changed, 526 insertions(+), 1 deletion(-)
diff --git a/g4f/Provider/not_working/Aichatos.py b/g4f/Provider/not_working/Aichatos.py
new file mode 100644
index 00000000..d651abf3
--- /dev/null
+++ b/g4f/Provider/not_working/Aichatos.py
@@ -0,0 +1,56 @@
+from __future__ import annotations
+
+import random
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+class Aichatos(AsyncGeneratorProvider):
+ url = "https://chat10.aichatos.xyz"
+ api = "https://api.binjie.fun"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+ "Accept": "application/json, text/plain, */*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "application/json",
+ "Origin": "https://chat10.aichatos.xyz",
+ "DNT": "1",
+ "Sec-GPC": "1",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "cross-site",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+            userId = random.randint(1000000000000, 9999999999999)
+            system_message: str = ""
+            data = {
+                "prompt": prompt,
+                "userId": f"#/chat/{userId}",
+ "network": True,
+ "system": system_message,
+ "withoutContext": False,
+ "stream": True,
+ }
+ async with session.post(f"{cls.api}/api/generateStream", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
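
For reference, a minimal sketch of how a provider in this shape is typically driven; the demo function is invented for illustration, the import path follows the __init__.py change below, and whether the upstream endpoint still answers is a separate question (the provider ships under not_working):

import asyncio

from g4f.Provider.not_working import Aichatos

async def demo():
    # Messages is a list of {"role": ..., "content": ...} dicts,
    # the same shape the OpenAI chat API uses.
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in Aichatos.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="")  # plain-text chunks, streamed as they arrive

asyncio.run(demo())
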
diff --git a/g4f/Provider/not_working/ChatForAi.py b/g4f/Provider/not_working/ChatForAi.py
new file mode 100644
index 00000000..b7f13c3d
--- /dev/null
+++ b/g4f/Provider/not_working/ChatForAi.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import time
+import hashlib
+import uuid
+
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chatforai.store"
+ working = False
+ default_model = "gpt-3.5-turbo"
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ temperature: float = 0.7,
+ top_p: float = 1,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ headers = {
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/?r=b",
+ }
+ async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
+ timestamp = int(time.time() * 1e3)
+ conversation_id = str(uuid.uuid4())
+ data = {
+ "conversationId": conversation_id,
+ "conversationType": "chat_continuous",
+ "botId": "chat_continuous",
+                "globalSettings": {
+ "baseUrl": "https://api.openai.com",
+ "model": model,
+ "messageHistorySize": 5,
+ "temperature": temperature,
+ "top_p": top_p,
+ **kwargs
+ },
+ "prompt": "",
+ "messages": messages,
+ "timestamp": timestamp,
+ "sign": generate_signature(timestamp, "", conversation_id)
+ }
+ async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
+ await raise_for_status(response)
+ async for chunk in response.iter_content():
+ if b"https://chatforai.store" in chunk:
+ raise RuntimeError(f"Response: {chunk.decode(errors='ignore')}")
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(timestamp: int, message: str, id: str):
+ buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
+ return hashlib.sha256(buffer.encode()).hexdigest()
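
The sign field is a plain SHA-256 digest over the conversation id, timestamp, message, and a salt ("h496Jd6b") hard-coded in the site's client. A standalone sketch of the same computation, with illustrative inputs:

import hashlib
import time
import uuid

def generate_signature(timestamp: int, message: str, id: str) -> str:
    # Mirrors the provider: "{id}:{timestamp}:{message}:" plus the fixed salt.
    buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
    return hashlib.sha256(buffer.encode()).hexdigest()

conversation_id = str(uuid.uuid4())
timestamp = int(time.time() * 1e3)  # milliseconds, as the provider sends it
print(generate_signature(timestamp, "", conversation_id))  # 64 hex characters
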
diff --git a/g4f/Provider/not_working/ChatgptAi.py b/g4f/Provider/not_working/ChatgptAi.py
new file mode 100644
index 00000000..5c694549
--- /dev/null
+++ b/g4f/Provider/not_working/ChatgptAi.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import re, html, json
+from aiohttp import ClientSession
+
+from ...typing import Messages, AsyncResult
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
+
+class ChatgptAi(AsyncGeneratorProvider):
+ url = "https://chatgpt.ai"
+ working = False
+ supports_message_history = True
+    supports_system_message = True
+    supports_gpt_4 = True
+ _system = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "authority" : "chatgpt.ai",
+ "accept" : "*/*",
+ "accept-language" : "en-US",
+ "cache-control" : "no-cache",
+ "origin" : cls.url,
+ "pragma" : "no-cache",
+ "referer" : f"{cls.url}/",
+ "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile" : "?0",
+ "sec-ch-ua-platform" : '"Windows"',
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ }
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ if not cls._system:
+ async with session.get(cls.url, proxy=proxy) as response:
+ response.raise_for_status()
+ text = await response.text()
+ result = re.search(r"data-system='(.*?)'", text)
+                    if result:
+ cls._system = json.loads(html.unescape(result.group(1)))
+ if not cls._system:
+ raise RuntimeError("System args not found")
+
+ data = {
+ "botId": cls._system["botId"],
+ "customId": cls._system["customId"],
+ "session": cls._system["sessionId"],
+ "chatId": get_random_string(),
+ "contextId": cls._system["contextId"],
+ "messages": messages[:-1],
+ "newMessage": messages[-1]["content"],
+ "newFileId": None,
+                "stream": True
+ }
+ async with session.post(
+ "https://chatgate.ai/wp-json/mwai-ui/v1/chats/submit",
+ proxy=proxy,
+ json=data,
+ headers={"X-Wp-Nonce": cls._system["restNonce"]}
+ ) as response:
+ response.raise_for_status()
+ async for line in response.content:
+                    if line.startswith(b"data: "):
+                        try:
+                            data = json.loads(line[6:])
+                            assert "type" in data
+                        except (json.JSONDecodeError, AssertionError):
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+                        if data["type"] == "error":
+                            if "https://chatgate.ai/login" in data["data"]:
+                                raise RateLimitError("Rate limit reached")
+                            raise RuntimeError(data["data"])
+                        if data["type"] == "live":
+                            yield data["data"]
+                        elif data["type"] == "end":
+                            break
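
ChatgptAi bootstraps by scraping a data-system attribute off the landing page, then replays those values against a chatgate.ai endpoint. A sketch of just the extraction step, against an invented HTML fragment (the real attribute carries botId, customId, sessionId, contextId and restNonce):

import html
import json
import re

# Invented stand-in for the landing page; values are illustrative only.
page = "<div data-system='{&quot;botId&quot;: &quot;default&quot;}'></div>"

match = re.search(r"data-system='(.*?)'", page)
if match:
    system = json.loads(html.unescape(match.group(1)))
    print(system["botId"])  # -> default
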
diff --git a/g4f/Provider/not_working/ChatgptNext.py b/g4f/Provider/not_working/ChatgptNext.py
new file mode 100644
index 00000000..1c15dd67
--- /dev/null
+++ b/g4f/Provider/not_working/ChatgptNext.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+class ChatgptNext(AsyncGeneratorProvider):
+ url = "https://www.chatgpt-free.cc"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+ supports_system_message = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ max_tokens: int = None,
+ temperature: float = 0.7,
+ top_p: float = 1,
+ presence_penalty: float = 0,
+ frequency_penalty: float = 0,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "text/event-stream",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "application/json",
+ "Referer": "https://chat.fstha.com/",
+ "x-requested-with": "XMLHttpRequest",
+ "Origin": "https://chat.fstha.com",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Authorization": "Bearer ak-chatgpt-nice",
+ "Connection": "keep-alive",
+ "Alt-Used": "chat.fstha.com",
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": messages,
+ "stream": True,
+ "model": model,
+ "temperature": temperature,
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "top_p": top_p,
+ "max_tokens": max_tokens,
+ }
+            async with session.post("https://chat.fstha.com/api/openai/v1/chat/completions", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk.startswith(b"data: [DONE]"):
+ break
+ if chunk.startswith(b"data: "):
+ content = json.loads(chunk[6:])["choices"][0]["delta"].get("content")
+ if content:
+ yield content
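
The endpoint speaks standard OpenAI-style server-sent events: each chunk is a data: {json} line and the stream terminates with data: [DONE]. The same parsing logic, exercised against canned input:

import json

# Canned SSE lines in the shape the provider expects (illustrative only).
chunks = [
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}\n',
    b'data: {"choices": [{"delta": {"content": "lo"}}]}\n',
    b"data: [DONE]\n",
]

for chunk in chunks:
    if chunk.startswith(b"data: [DONE]"):
        break
    if chunk.startswith(b"data: "):
        content = json.loads(chunk[6:])["choices"][0]["delta"].get("content")
        if content:
            print(content, end="")  # prints "Hello"
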
diff --git a/g4f/Provider/not_working/ChatgptX.py b/g4f/Provider/not_working/ChatgptX.py
new file mode 100644
index 00000000..760333d9
--- /dev/null
+++ b/g4f/Provider/not_working/ChatgptX.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import re
+import json
+
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+from ...errors import RateLimitError
+
+class ChatgptX(AsyncGeneratorProvider):
+ url = "https://chatgptx.de"
+ supports_gpt_35_turbo = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ 'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
+ 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': 'Linux',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
+ }
+ async with ClientSession(headers=headers) as session:
+            csrf_token = chat_id = user_id = None
+            async with session.get(f"{cls.url}/", proxy=proxy) as response:
+                text = await response.text()
+            result = re.search(
+                r'<meta name="csrf-token" content="(.*?)"', text
+            )
+            if result:
+                csrf_token = result.group(1)
+
+            result = re.search(r"openconversions\('(.*?)'\)", text)
+            if result:
+                chat_id = result.group(1)
+
+            result = re.search(
+                r'<input type="hidden" id="user_id" value="(.*?)"', text
+            )
+            if result:
+                user_id = result.group(1)
+
+            if not csrf_token or not chat_id or not user_id:
+                raise RuntimeError("Missing csrf_token, chat_id or user_id")
+
+ data = {
+ '_token': csrf_token,
+ 'user_id': user_id,
+ 'chats_id': chat_id,
+ 'prompt': format_prompt(messages),
+ 'current_model': "gpt3"
+ }
+ headers = {
+ 'authority': 'chatgptx.de',
+ 'accept': 'application/json, text/javascript, */*; q=0.01',
+ 'origin': cls.url,
+ 'referer': f'{cls.url}/',
+ 'x-csrf-token': csrf_token,
+ 'x-requested-with': 'XMLHttpRequest'
+ }
+ async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
+ response.raise_for_status()
+ chat = await response.json()
+ if "messages" in chat and "Anfragelimit" in chat["messages"]:
+ raise RateLimitError("Rate limit reached")
+ if "response" not in chat or not chat["response"]:
+ raise RuntimeError(f'Response: {chat}')
+ headers = {
+ 'authority': 'chatgptx.de',
+ 'accept': 'text/event-stream',
+ 'referer': f'{cls.url}/',
+ 'x-csrf-token': csrf_token,
+ 'x-requested-with': 'XMLHttpRequest'
+ }
+ data = {
+ "user_id": user_id,
+ "chats_id": chat_id,
+ "current_model": "gpt3",
+ "conversions_id": chat["conversions_id"],
+ "ass_conversions_id": chat["ass_conversions_id"],
+ }
+ async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ row = line[6:-1]
+ if row == b"[DONE]":
+ break
+                        try:
+                            content = json.loads(row)["choices"][0]["delta"].get("content")
+                        except (json.JSONDecodeError, KeyError, IndexError):
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+ if content:
+ yield content
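
ChatgptX needs three values scraped from its landing page before it can post: a CSRF token, a chat id passed to openconversions(...), and a hidden user id. A sketch of that scrape; the first_group helper and the sample markup are invented for illustration:

import re

# Invented stand-in for the chatgptx.de landing page.
page = """
<meta name="csrf-token" content="tok123">
<a onclick="openconversions('chat456')">Chat</a>
<input type="hidden" id="user_id" value="user789">
"""

def first_group(pattern, text):
    match = re.search(pattern, text)
    return match.group(1) if match else None

csrf_token = first_group(r'<meta name="csrf-token" content="(.*?)"', page)
chat_id = first_group(r"openconversions\('(.*?)'\)", page)
user_id = first_group(r'<input type="hidden" id="user_id" value="(.*?)"', page)
print(csrf_token, chat_id, user_id)  # -> tok123 chat456 user789
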
diff --git a/g4f/Provider/not_working/Cnote.py b/g4f/Provider/not_working/Cnote.py
new file mode 100644
index 00000000..48626982
--- /dev/null
+++ b/g4f/Provider/not_working/Cnote.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Cnote(AsyncGeneratorProvider):
+ url = "https://f1.cnote.top"
+ api_url = "https://p1api.xjai.pro/freeapi/chat-process"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
+ "Accept": "application/json, text/plain, */*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "DNT": "1",
+ "Sec-GPC": "1",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "cross-site",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+            system_message: str = ""
+ data = {
+ "prompt": prompt,
+ "systemMessage": system_message,
+ "temperature": 0.8,
+ "top_p": 1,
+ }
+ async with session.post(cls.api_url, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ try:
+ data = json.loads(chunk.decode().split("&KFw6loC9Qvy&")[-1])
+ text = data.get("text", "")
+ yield text
+ except (json.JSONDecodeError, IndexError):
+ pass
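
Cnote's stream interleaves progress noise with JSON, separated by a magic &KFw6loC9Qvy& delimiter; only what follows the last delimiter is parsed. The same handling against a canned chunk (contents invented for illustration):

import json

# Canned chunk in the delimiter format the API streams.
chunk = b'...progress noise...&KFw6loC9Qvy&{"text": "Hello"}'

try:
    data = json.loads(chunk.decode().split("&KFw6loC9Qvy&")[-1])
    print(data.get("text", ""))  # -> Hello
except (json.JSONDecodeError, IndexError):
    pass  # partial chunks are skipped, as in the provider
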
diff --git a/g4f/Provider/not_working/Feedough.py b/g4f/Provider/not_working/Feedough.py
new file mode 100644
index 00000000..7df4a5bd
--- /dev/null
+++ b/g4f/Provider/not_working/Feedough.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, TCPConnector
+from urllib.parse import urlencode
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.feedough.com"
+ api_endpoint = "/wp-admin/admin-ajax.php"
+ working = False
+ default_model = ''
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/ai-prompt-generator/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ }
+
+ connector = TCPConnector(ssl=False)
+
+ async with ClientSession(headers=headers, connector=connector) as session:
+ data = {
+ "action": "aixg_generate",
+ "prompt": format_prompt(messages),
+ "aixg_generate_nonce": "110c021031"
+ }
+
+ try:
+ async with session.post(
+ f"{cls.url}{cls.api_endpoint}",
+ data=urlencode(data),
+ proxy=proxy
+ ) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+ try:
+ response_json = json.loads(response_text)
+ if response_json.get("success") and "data" in response_json:
+ message = response_json["data"].get("message", "")
+ yield message
+ except json.JSONDecodeError:
+ yield response_text
+            except Exception as e:
+                raise RuntimeError(f"Feedough request failed: {e}") from e
+
+    @classmethod
+    async def run(cls, *args, **kwargs):
+        async for item in cls.create_async_generator(*args, **kwargs):
+            yield item
+
+        current = asyncio.current_task()
+        for task in asyncio.all_tasks():
+            if task is not current and not task.done():
+                await task
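
Feedough is a WordPress site, so the request is an ordinary admin-ajax.php form post: an action hook name, the prompt, and a nonce, form-encoded in the body. A sketch of the payload construction; the nonce is the hard-coded value from this diff and presumably rotates on the live site, which may be why the provider is filed under not_working:

from urllib.parse import urlencode

data = {
    "action": "aixg_generate",            # WordPress AJAX action hook
    "prompt": "Hello",
    "aixg_generate_nonce": "110c021031",  # hard-coded in this diff
}
print(urlencode(data))
# action=aixg_generate&prompt=Hello&aixg_generate_nonce=110c021031
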
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index 4778c968..c4c9a5a1 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -1,14 +1,21 @@
from .AItianhu import AItianhu
+from .Aichatos import Aichatos
from .Bestim import Bestim
from .ChatBase import ChatBase
+from .ChatForAi import ChatForAi
+from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDemoAi import ChatgptDemoAi
from .ChatgptLogin import ChatgptLogin
+from .ChatgptNext import ChatgptNext
+from .ChatgptX import ChatgptX
from .Chatxyz import Chatxyz
+from .Cnote import Cnote
+from .Feedough import Feedough
from .Gpt6 import Gpt6
from .GptChatly import GptChatly
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
-from .OnlineGpt import OnlineGpt
\ No newline at end of file
+from .OnlineGpt import OnlineGpt