summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/deprecated
diff options
context:
space:
mode:
authorHeiner Lohaus <hlohaus@users.noreply.github.com>2024-01-22 03:38:11 +0100
committerHeiner Lohaus <hlohaus@users.noreply.github.com>2024-01-22 03:38:11 +0100
commit38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c (patch)
tree6cdb82f14fcb04e9f6b339f5cab13e73a6f75d41 /g4f/Provider/deprecated
parentFix error in copilot (diff)
downloadgpt4free-38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c.tar
gpt4free-38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c.tar.gz
gpt4free-38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c.tar.bz2
gpt4free-38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c.tar.lz
gpt4free-38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c.tar.xz
gpt4free-38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c.tar.zst
gpt4free-38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c.zip
Diffstat (limited to 'g4f/Provider/deprecated')
-rw-r--r--g4f/Provider/deprecated/Aichat.py64
-rw-r--r--g4f/Provider/deprecated/Berlin.py78
-rw-r--r--g4f/Provider/deprecated/Opchatgpts.py59
-rw-r--r--g4f/Provider/deprecated/Yqcloud.py61
-rw-r--r--g4f/Provider/deprecated/__init__.py6
5 files changed, 267 insertions, 1 deletions
diff --git a/g4f/Provider/deprecated/Aichat.py b/g4f/Provider/deprecated/Aichat.py
new file mode 100644
index 00000000..68c9c546
--- /dev/null
+++ b/g4f/Provider/deprecated/Aichat.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from ...typing import Messages
+from ..base_provider import AsyncProvider, format_prompt
+from ..helper import get_cookies
+from ...requests import StreamSession
+
+class Aichat(AsyncProvider):
+    """Deprecated provider for chat-gpt.org (flagged as not working)."""
+    url = "https://chat-gpt.org/chat"
+    working = False
+    supports_gpt_35_turbo = True
+
+    @staticmethod
+    async def create_async(
+        model: str,
+        messages: Messages,
+        proxy: str = None, **kwargs) -> str:
+        """Send the formatted conversation to chat-gpt.org and return the reply text.
+
+        Raises RuntimeError when no chat-gpt.org cookies are available and a
+        plain Exception when the API reports an empty 'response' field.
+        """
+        # Caller-supplied cookies take precedence; otherwise read the local browser jar.
+        cookies = get_cookies('chat-gpt.org') if not kwargs.get('cookies') else kwargs.get('cookies')
+        if not cookies:
+            raise RuntimeError(
+                "g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]"
+            )
+
+        # Browser-like headers so the request passes the site's origin checks.
+        headers = {
+            'authority': 'chat-gpt.org',
+            'accept': '*/*',
+            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'content-type': 'application/json',
+            'origin': 'https://chat-gpt.org',
+            'referer': 'https://chat-gpt.org/chat',
+            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+        }
+
+        # NOTE(review): verify=False disables TLS certificate checking for this
+        # session; impersonate mimics a Chrome 110 TLS fingerprint.
+        async with StreamSession(headers=headers,
+                                 cookies=cookies,
+                                 timeout=6,
+                                 proxies={"https": proxy} if proxy else None,
+                                 impersonate="chrome110", verify=False) as session:
+
+            json_data = {
+                "message": format_prompt(messages),
+                "temperature": kwargs.get('temperature', 0.5),
+                "presence_penalty": 0,
+                "top_p": kwargs.get('top_p', 1),
+                "frequency_penalty": 0,
+            }
+
+            async with session.post("https://chat-gpt.org/api/text",
+                                    json=json_data) as response:
+
+                response.raise_for_status()
+                result = await response.json()
+
+                # An empty 'response' field signals an API-side failure; the
+                # actual completion text is carried in 'message'.
+                if not result['response']:
+                    raise Exception(f"Error Response: {result}")
+
+                return result["message"]
diff --git a/g4f/Provider/deprecated/Berlin.py b/g4f/Provider/deprecated/Berlin.py
new file mode 100644
index 00000000..5e81705a
--- /dev/null
+++ b/g4f/Provider/deprecated/Berlin.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import secrets
+import uuid
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Berlin(AsyncGeneratorProvider):
+    """Deprecated provider for ai.berlin4h.top (flagged as not working)."""
+    url = "https://ai.berlin4h.top"
+    working = False
+    supports_gpt_35_turbo = True
+    # API token cached on the class after the first login, shared by all calls.
+    _token = None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        """Stream completion text chunks from the berlin4h chat API.
+
+        Logs in once with the site's shared public account, caches the token
+        on the class, then posts the prompt and yields each chunk's 'content'.
+        Any chunk that is not valid JSON is re-raised as RuntimeError.
+        """
+        if not model:
+            model = "gpt-3.5-turbo"
+        # Firefox-like headers matching the site's own web client.
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Alt-Used": "ai.berlin4h.top",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers",
+        }
+        async with ClientSession(headers=headers) as session:
+            if not cls._token:
+                # Shared public credentials published by the site itself.
+                data = {
+                    "account": '免费使用GPT3.5模型@163.com',
+                    "password": '659e945c2d004686bad1a75b708c962f'
+                }
+                async with session.post(f"{cls.url}/api/login", json=data, proxy=proxy) as response:
+                    response.raise_for_status()
+                    cls._token = (await response.json())["data"]["token"]
+            headers = {
+                "token": cls._token
+            }
+            prompt = format_prompt(messages)
+            data = {
+                "prompt": prompt,
+                "parentMessageId": str(uuid.uuid4()),
+                "options": {
+                    "model": model,
+                    "temperature": 0,
+                    "presence_penalty": 0,
+                    "frequency_penalty": 0,
+                    "max_tokens": 1888,
+                    **kwargs
+                },
+            }
+            async with session.post(f"{cls.url}/api/chat/completions", json=data, proxy=proxy, headers=headers) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk.strip():
+                        try:
+                            yield json.loads(chunk)["content"]
+                        except:
+                            # NOTE(review): bare except — any non-JSON chunk
+                            # (e.g. an upstream error page) surfaces here with
+                            # its raw payload attached.
+                            raise RuntimeError(f"Response: {chunk.decode()}")
diff --git a/g4f/Provider/deprecated/Opchatgpts.py b/g4f/Provider/deprecated/Opchatgpts.py
new file mode 100644
index 00000000..94b1d099
--- /dev/null
+++ b/g4f/Provider/deprecated/Opchatgpts.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import random, string, json
+from aiohttp import ClientSession
+
+from ...typing import Messages, AsyncResult
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
+
+class Opchatgpts(AsyncGeneratorProvider):
+    """Deprecated provider for opchatgpts.net (flagged as not working)."""
+    url = "https://opchatgpts.net"
+    working = False
+    supports_message_history = True
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None, **kwargs) -> AsyncResult:
+        """Stream live answer fragments from the site's mwai-ui chat endpoint.
+
+        Yields the 'data' of every 'live' server-sent event and stops at the
+        'end' event; a line that cannot be parsed raises RuntimeError.
+        """
+        headers = {
+            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept" : "*/*",
+            "Accept-Language" : "de,en-US;q=0.7,en;q=0.3",
+            "Origin" : cls.url,
+            "Alt-Used" : "opchatgpts.net",
+            "Referer" : f"{cls.url}/chatgpt-free-use/",
+            "Sec-Fetch-Dest" : "empty",
+            "Sec-Fetch-Mode" : "cors",
+            "Sec-Fetch-Site" : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            data = {
+                "botId": "default",
+                "chatId": get_random_string(),
+                "contextId": 28,
+                "customId": None,
+                "messages": messages,
+                "newMessage": messages[-1]["content"],
+                "session": "N/A",
+                "stream": True
+            }
+            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    # Server-sent-event stream: only 'data: ' lines carry JSON.
+                    if line.startswith(b"data: "):
+                        try:
+                            # Strip the 6-byte 'data: ' prefix before parsing.
+                            line = json.loads(line[6:])
+                            assert "type" in line
+                        except:
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+                        if line["type"] == "live":
+                            yield line["data"]
+                        elif line["type"] == "end":
+                            break
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Yqcloud.py b/g4f/Provider/deprecated/Yqcloud.py
new file mode 100644
index 00000000..2ec6931a
--- /dev/null
+++ b/g4f/Provider/deprecated/Yqcloud.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import random
+from ...requests import StreamSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class Yqcloud(AsyncGeneratorProvider):
+    """Provider for chat9.yqcloud.top, streaming through api.aichatos.cloud."""
+    url = "https://chat9.yqcloud.top/"
+    working = True
+    supports_gpt_35_turbo = True
+
+    @staticmethod
+    async def create_async_generator(
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
+        **kwargs,
+    ) -> AsyncResult:
+        """Yield decoded text chunks as the upstream API streams them.
+
+        Raises RuntimeError when the API answers with its in-band IP-ban notice.
+        """
+        async with StreamSession(
+            headers=_create_header(), proxies={"https": proxy}, timeout=timeout
+        ) as session:
+            payload = _create_payload(messages, **kwargs)
+            async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    if chunk:
+                        chunk = chunk.decode()
+                        # The ban notice arrives as normal stream content, so
+                        # it must be detected by substring match.
+                        if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
+                            raise RuntimeError("IP address is blocked by abuse detection.")
+                        yield chunk
+
+
+def _create_header():
+    """Return the minimal browser-like headers the generateStream API expects."""
+    return {
+        "accept" : "application/json, text/plain, */*",
+        "content-type" : "application/json",
+        "origin" : "https://chat9.yqcloud.top",
+        "referer" : "https://chat9.yqcloud.top/"
+    }
+
+
+def _create_payload(
+    messages: Messages,
+    system_message: str = "",
+    user_id: int = None,
+    **kwargs
+):
+    """Build the JSON body for the generateStream request.
+
+    A falsy user_id is replaced with a random integer in the range used by
+    the site's own web client; extra kwargs are accepted but ignored.
+    """
+    if not user_id:
+        user_id = random.randint(1690000544336, 2093025544336)
+    return {
+        "prompt": format_prompt(messages),
+        "network": True,
+        "system": system_message,
+        "withoutContext": False,
+        "stream": True,
+        # The site encodes the user id as a client-side route fragment.
+        "userId": f"#/chat/{user_id}"
+    }
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index ca5ac83e..4d7eb5da 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -18,4 +18,8 @@ from .Acytoo import Acytoo
from .Aibn import Aibn
from .Ails import Ails
from .ChatgptDuo import ChatgptDuo
-from .Cromicle import Cromicle \ No newline at end of file
+from .Cromicle import Cromicle
+from .Opchatgpts import Opchatgpts
+from .Yqcloud import Yqcloud
+from .Aichat import Aichat
+from .Berlin import Berlin \ No newline at end of file