Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/__init__.py                    1
-rw-r--r--  g4f/Provider/deprecated/Acytoo.py          51
-rw-r--r--  g4f/Provider/deprecated/AiAsk.py           46
-rw-r--r--  g4f/Provider/deprecated/AiService.py       39
-rw-r--r--  g4f/Provider/deprecated/Aibn.py            46
-rw-r--r--  g4f/Provider/deprecated/Aichat.py          64
-rw-r--r--  g4f/Provider/deprecated/Ails.py            90
-rw-r--r--  g4f/Provider/deprecated/Aivvm.py           73
-rw-r--r--  g4f/Provider/deprecated/Berlin.py          78
-rw-r--r--  g4f/Provider/deprecated/ChatAnywhere.py    54
-rw-r--r--  g4f/Provider/deprecated/ChatgptDuo.py      47
-rw-r--r--  g4f/Provider/deprecated/CodeLinkAva.py     52
-rw-r--r--  g4f/Provider/deprecated/Cromicle.py        50
-rw-r--r--  g4f/Provider/deprecated/DfeHub.py          62
-rw-r--r--  g4f/Provider/deprecated/EasyChat.py        89
-rw-r--r--  g4f/Provider/deprecated/Equing.py          71
-rw-r--r--  g4f/Provider/deprecated/FakeGpt.py         91
-rw-r--r--  g4f/Provider/deprecated/FastGpt.py         76
-rw-r--r--  g4f/Provider/deprecated/Forefront.py       40
-rw-r--r--  g4f/Provider/deprecated/GPTalk.py          87
-rw-r--r--  g4f/Provider/deprecated/GeekGpt.py         73
-rw-r--r--  g4f/Provider/deprecated/GetGpt.py          77
-rw-r--r--  g4f/Provider/deprecated/H2o.py             89
-rw-r--r--  g4f/Provider/deprecated/Hashnode.py        80
-rw-r--r--  g4f/Provider/deprecated/Lockchat.py        54
-rw-r--r--  g4f/Provider/deprecated/Myshell.py        165
-rw-r--r--  g4f/Provider/deprecated/NoowAi.py          66
-rw-r--r--  g4f/Provider/deprecated/Opchatgpts.py      59
-rw-r--r--  g4f/Provider/deprecated/OpenAssistant.py   88
-rw-r--r--  g4f/Provider/deprecated/Phind.py          140
-rw-r--r--  g4f/Provider/deprecated/V50.py             61
-rw-r--r--  g4f/Provider/deprecated/Vercel.py         392
-rw-r--r--  g4f/Provider/deprecated/Vitalentum.py      55
-rw-r--r--  g4f/Provider/deprecated/VoiGpt.py          91
-rw-r--r--  g4f/Provider/deprecated/Wewordle.py        65
-rw-r--r--  g4f/Provider/deprecated/Wuguokai.py        57
-rw-r--r--  g4f/Provider/deprecated/Ylokh.py           58
-rw-r--r--  g4f/Provider/deprecated/Yqcloud.py         61
-rw-r--r--  g4f/Provider/deprecated/__init__.py        35
39 files changed, 2973 insertions, 0 deletions
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 087c9c8a..515f5912 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -5,6 +5,7 @@ from ..providers.retry_provider import RetryProvider, IterListProvider
 from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images import CreateImagesProvider
+from .deprecated import *
 from .selenium import *
 from .needs_auth import *
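The star import re-exports the deprecated providers under g4f.Provider, so existing imports keep resolving. A minimal usage sketch (hypothetical: it assumes deprecated/__init__.py re-exports each class and uses the public g4f.ChatCompletion API; nearly all of these providers are flagged working = False, so a live call is expected to fail):

import g4f
from g4f.Provider import Aivvm  # resolves via the star import above

# Pinning a deprecated provider explicitly; expect errors, since working = False.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=Aivvm,
)
print(response)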
diff --git a/g4f/Provider/deprecated/Acytoo.py b/g4f/Provider/deprecated/Acytoo.py
new file mode 100644
index 00000000..f75821ce
--- /dev/null
+++ b/g4f/Provider/deprecated/Acytoo.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class Acytoo(AsyncGeneratorProvider):
+ url = 'https://chat.acytoo.com'
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ async with ClientSession(
+ headers=_create_header()
+ ) as session:
+ async with session.post(
+ f'{cls.url}/api/completions',
+ proxy=proxy,
+ json=_create_payload(messages, **kwargs)
+ ) as response:
+ response.raise_for_status()
+ async for stream in response.content.iter_any():
+ if stream:
+ yield stream.decode()
+
+
+def _create_header():
+ return {
+ 'accept': '*/*',
+ 'content-type': 'application/json',
+ }
+
+
+def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
+ return {
+ 'key' : '',
+ 'model' : 'gpt-3.5-turbo',
+ 'messages' : messages,
+ 'temperature' : temperature,
+ 'password' : ''
+ }
\ No newline at end of file
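For reference, create_async_generator yields decoded text chunks as they stream; a minimal consumption sketch (hypothetical call, since the endpoint is marked working = False):

import asyncio

async def demo() -> None:
    # Streams chunks from the (now defunct) Acytoo completions endpoint.
    async for chunk in Acytoo.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
    ):
        print(chunk, end="")

asyncio.run(demo())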
diff --git a/g4f/Provider/deprecated/AiAsk.py b/g4f/Provider/deprecated/AiAsk.py
new file mode 100644
index 00000000..6ea5f3e0
--- /dev/null
+++ b/g4f/Provider/deprecated/AiAsk.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+class AiAsk(AsyncGeneratorProvider):
+ url = "https://e.aiask.me"
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "origin": cls.url,
+ "referer": f"{cls.url}/chat",
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "continuous": True,
+ "id": "fRMSQtuHl91A4De9cCvKD",
+ "list": messages,
+ "models": "0",
+ "prompt": "",
+ "temperature": kwargs.get("temperature", 0.5),
+ "title": "",
+ }
+ buffer = ""
+ rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
+ async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ buffer += chunk.decode()
+ if not rate_limit.startswith(buffer):
+ yield buffer
+ buffer = ""
+ elif buffer == rate_limit:
+ raise RuntimeError("Rate limit reached") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/AiService.py b/g4f/Provider/deprecated/AiService.py
new file mode 100644
index 00000000..acd7f5ea
--- /dev/null
+++ b/g4f/Provider/deprecated/AiService.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+import requests
+
+from ...typing import Any, CreateResult, Messages
+from ..base_provider import AbstractProvider
+
+
+class AiService(AbstractProvider):
+ url = "https://aiservice.vercel.app/"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ base = (
+ "\n".join(
+ f"{message['role']}: {message['content']}" for message in messages
+ )
+ + "\nassistant: "
+ )
+ headers = {
+ "accept": "*/*",
+ "content-type": "text/plain;charset=UTF-8",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "Referer": "https://aiservice.vercel.app/chat",
+ }
+ data = {"input": base}
+ url = "https://aiservice.vercel.app/api/chat/answer"
+ response = requests.post(url, headers=headers, json=data)
+ response.raise_for_status()
+ yield response.json()["data"]
diff --git a/g4f/Provider/deprecated/Aibn.py b/g4f/Provider/deprecated/Aibn.py
new file mode 100644
index 00000000..0bbfb436
--- /dev/null
+++ b/g4f/Provider/deprecated/Aibn.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+import time
+import hashlib
+
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+
+
+class Aibn(AsyncGeneratorProvider):
+ url = "https://aibn.cc"
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ **kwargs
+ ) -> AsyncResult:
+ async with StreamSession(
+ impersonate="chrome107",
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
+ timestamp = int(time.time())
+ data = {
+ "messages": messages,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ "time": timestamp
+ }
+ async with session.post(f"{cls.url}/api/generate", json=data) as response:
+ response.raise_for_status()
+ async for chunk in response.iter_content():
+ yield chunk.decode()
+
+
+def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
+ data = f"{timestamp}:{message}:{secret}"
+ return hashlib.sha256(data.encode()).hexdigest()
\ No newline at end of file
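The request is authenticated only by a SHA-256 digest over "timestamp:message:secret", with the secret hard-coded to the literal string "undefined". A worked example of the helper above:

sig = generate_signature(1700000000, "Hello")
# equals hashlib.sha256(b"1700000000:Hello:undefined").hexdigest()
assert len(sig) == 64  # hex-encoded 32-byte digest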
diff --git a/g4f/Provider/deprecated/Aichat.py b/g4f/Provider/deprecated/Aichat.py
new file mode 100644
index 00000000..68c9c546
--- /dev/null
+++ b/g4f/Provider/deprecated/Aichat.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from ...typing import Messages
+from ..base_provider import AsyncProvider, format_prompt
+from ..helper import get_cookies
+from ...requests import StreamSession
+
+class Aichat(AsyncProvider):
+ url = "https://chat-gpt.org/chat"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ async def create_async(
+ model: str,
+ messages: Messages,
+ proxy: str = None, **kwargs) -> str:
+
+ cookies = kwargs.get('cookies') or get_cookies('chat-gpt.org')
+ if not cookies:
+ raise RuntimeError(
+ "g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]"
+ )
+
+ headers = {
+ 'authority': 'chat-gpt.org',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat-gpt.org',
+ 'referer': 'https://chat-gpt.org/chat',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(headers=headers,
+ cookies=cookies,
+ timeout=6,
+ proxies={"https": proxy} if proxy else None,
+ impersonate="chrome110", verify=False) as session:
+
+ json_data = {
+ "message": format_prompt(messages),
+ "temperature": kwargs.get('temperature', 0.5),
+ "presence_penalty": 0,
+ "top_p": kwargs.get('top_p', 1),
+ "frequency_penalty": 0,
+ }
+
+ async with session.post("https://chat-gpt.org/api/text",
+ json=json_data) as response:
+
+ response.raise_for_status()
+ result = await response.json()
+
+ if not result['response']:
+ raise Exception(f"Error Response: {result}")
+
+ return result["message"]
diff --git a/g4f/Provider/deprecated/Ails.py b/g4f/Provider/deprecated/Ails.py
new file mode 100644
index 00000000..e87ceb32
--- /dev/null
+++ b/g4f/Provider/deprecated/Ails.py
@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+import hashlib
+import time
+import uuid
+import json
+from datetime import datetime
+from aiohttp import ClientSession
+
+from ...typing import SHA256, AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class Ails(AsyncGeneratorProvider):
+ url = "https://ai.ls"
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ async def create_async_generator(
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "authority": "api.caipacity.com",
+ "accept": "*/*",
+ "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "authorization": "Bearer free",
+ "client-id": str(uuid.uuid4()),
+ "client-v": "0.1.278",
+ "content-type": "application/json",
+ "origin": "https://ai.ls",
+ "referer": "https://ai.ls/",
+ "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Windows"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "cross-site",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "from-url": "https://ai.ls/?chat=1"
+ }
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ timestamp = _format_timestamp(int(time.time() * 1000))
+ json_data = {
+ "model": "gpt-3.5-turbo",
+ "temperature": kwargs.get("temperature", 0.6),
+ "stream": True,
+ "messages": messages,
+ "d": datetime.now().strftime("%Y-%m-%d"),
+ "t": timestamp,
+ "s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
+ }
+ async with session.post(
+ "https://api.caipacity.com/v1/chat/completions",
+ proxy=proxy,
+ json=json_data
+ ) as response:
+ response.raise_for_status()
+ start = "data: "
+ async for line in response.content:
+ line = line.decode('utf-8')
+ if line.startswith(start) and line != "data: [DONE]":
+ line = line[len(start):-1]
+ line = json.loads(line)
+ token = line["choices"][0]["delta"].get("content")
+
+ if token:
+ if "ai.ls" in token or "ai.ci" in token:
+ raise Exception(f"Response Error: {token}")
+ yield token
+
+
+def _hash(json_data: dict[str, str]) -> SHA256:
+ base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
+
+ return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
+
+
+def _format_timestamp(timestamp: int) -> str:
+ e = timestamp
+ n = e % 10
+ r = n + 1 if n % 2 == 0 else n
+ return str(e - n + r)
\ No newline at end of file
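_format_timestamp forces the last digit of the millisecond timestamp to be odd (an even final digit is bumped by one), and _hash salts the last message with a fixed key and the message length. Two spot checks of the timestamp rule:

assert _format_timestamp(1694947206420) == "1694947206421"  # even last digit -> bumped
assert _format_timestamp(1694947206421) == "1694947206421"  # odd last digit -> unchanged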
diff --git a/g4f/Provider/deprecated/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
new file mode 100644
index 00000000..c973adf8
--- /dev/null
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+import requests
+import json
+
+from ..base_provider import AbstractProvider
+from ...typing import CreateResult, Messages
+
+# To recreate this list, send a POST request to https://chat.aivvm.com/api/models
+models = {
+ 'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
+ 'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
+ 'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
+ 'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
+ 'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
+ 'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
+ 'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
+ 'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
+}
+
+class Aivvm(AbstractProvider):
+ url = 'https://chat.aivvm.com'
+ supports_stream = True
+ working = False
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
+ @classmethod
+ def create_completion(cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ elif model not in models:
+ raise ValueError(f"Model is not supported: {model}")
+
+ json_data = {
+ "model" : models[model],
+ "messages" : messages,
+ "key" : "",
+ "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
+ "temperature" : kwargs.get("temperature", 0.7)
+ }
+
+ data = json.dumps(json_data)
+
+ headers = {
+ "accept" : "text/event-stream",
+ "accept-language" : "en-US,en;q=0.9",
+ "content-type" : "application/json",
+ "content-length" : str(len(data)),
+ "sec-ch-ua" : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
+ "sec-ch-ua-mobile" : "?0",
+ "sec-ch-ua-platform": "\"Windows\"",
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "sec-gpc" : "1",
+ "referrer" : "https://chat.aivvm.com/",
+ "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
+ }
+
+ response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
+ response.raise_for_status()
+
+ for chunk in response.iter_content(chunk_size=4096):
+ try:
+ yield chunk.decode("utf-8")
+ except UnicodeDecodeError:
+ yield chunk.decode("unicode-escape") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Berlin.py b/g4f/Provider/deprecated/Berlin.py
new file mode 100644
index 00000000..5e81705a
--- /dev/null
+++ b/g4f/Provider/deprecated/Berlin.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import uuid
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Berlin(AsyncGeneratorProvider):
+ url = "https://ai.berlin4h.top"
+ working = False
+ supports_gpt_35_turbo = True
+ _token = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "ai.berlin4h.top",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ if not cls._token:
+ data = {
+ "account": '免费使用GPT3.5模型@163.com',
+ "password": '659e945c2d004686bad1a75b708c962f'
+ }
+ async with session.post(f"{cls.url}/api/login", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ cls._token = (await response.json())["data"]["token"]
+ headers = {
+ "token": cls._token
+ }
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "parentMessageId": str(uuid.uuid4()),
+ "options": {
+ "model": model,
+ "temperature": 0,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "max_tokens": 1888,
+ **kwargs
+ },
+ }
+ async with session.post(f"{cls.url}/api/chat/completions", json=data, proxy=proxy, headers=headers) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk.strip():
+ try:
+ yield json.loads(chunk)["content"]
+ except:
+ raise RuntimeError(f"Response: {chunk.decode()}")
diff --git a/g4f/Provider/deprecated/ChatAnywhere.py b/g4f/Provider/deprecated/ChatAnywhere.py
new file mode 100644
index 00000000..d035eaf0
--- /dev/null
+++ b/g4f/Provider/deprecated/ChatAnywhere.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientTimeout
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class ChatAnywhere(AsyncGeneratorProvider):
+ url = "https://chatanywhere.cn"
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ temperature: float = 0.5,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept": "application/json, text/plain, */*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "application/json",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Authorization": "",
+ "Connection": "keep-alive",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
+ data = {
+ "list": messages,
+ "id": "s1_qYuOLXjI3rEpc7WHfQ",
+ "title": messages[-1]["content"],
+ "prompt": "",
+ "temperature": temperature,
+ "models": "61490748",
+ "continuous": True
+ }
+ async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/ChatgptDuo.py b/g4f/Provider/deprecated/ChatgptDuo.py
new file mode 100644
index 00000000..bd9e195d
--- /dev/null
+++ b/g4f/Provider/deprecated/ChatgptDuo.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+from ...typing import Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncProvider, format_prompt
+
+
+class ChatgptDuo(AsyncProvider):
+ url = "https://chatgptduo.com"
+ supports_gpt_35_turbo = True
+ working = False
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ **kwargs
+ ) -> str:
+ async with StreamSession(
+ impersonate="chrome107",
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "search": prompt,
+ "purpose": "ask",
+ }
+ response = await session.post(f"{cls.url}/", data=data)
+ response.raise_for_status()
+ data = response.json()
+
+ cls._sources = [{
+ "title": source["title"],
+ "url": source["link"],
+ "snippet": source["snippet"]
+ } for source in data["results"]]
+
+ return data["answer"]
+
+ @classmethod
+ def get_sources(cls):
+ return cls._sources
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/CodeLinkAva.py b/g4f/Provider/deprecated/CodeLinkAva.py
new file mode 100644
index 00000000..22f4468a
--- /dev/null
+++ b/g4f/Provider/deprecated/CodeLinkAva.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider
+
+
+class CodeLinkAva(AsyncGeneratorProvider):
+ url = "https://ava-ai-ef611.web.app"
+ supports_gpt_35_turbo = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> AsyncGenerator:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+ "Accept": "*/*",
+ "Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ }
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ data = {
+ "messages": messages,
+ "temperature": 0.6,
+ "stream": True,
+ **kwargs
+ }
+ async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ line = line.decode()
+ if line.startswith("data: "):
+ if line.startswith("data: [DONE]"):
+ break
+ line = json.loads(line[6:-1])
+
+ content = line["choices"][0]["delta"].get("content")
+ if content:
+ yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Cromicle.py b/g4f/Provider/deprecated/Cromicle.py
new file mode 100644
index 00000000..9f986cb5
--- /dev/null
+++ b/g4f/Provider/deprecated/Cromicle.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from hashlib import sha256
+from ...typing import AsyncResult, Messages, Dict
+
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Cromicle(AsyncGeneratorProvider):
+ url: str = 'https://cromicle.top'
+ working: bool = False
+ supports_gpt_35_turbo: bool = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ async with ClientSession(
+ headers=_create_header()
+ ) as session:
+ async with session.post(
+ f'{cls.url}/chat',
+ proxy=proxy,
+ json=_create_payload(format_prompt(messages))
+ ) as response:
+ response.raise_for_status()
+ async for stream in response.content.iter_any():
+ if stream:
+ yield stream.decode()
+
+
+def _create_header() -> Dict[str, str]:
+ return {
+ 'accept': '*/*',
+ 'content-type': 'application/json',
+ }
+
+
+def _create_payload(message: str) -> Dict[str, str]:
+ return {
+ 'message': message,
+ 'token': 'abc',
+ 'hash': sha256('abc'.encode() + message.encode()).hexdigest()
+ }
\ No newline at end of file
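The payload's hash field merely binds the message to the static token "abc"; an equivalent check using the helpers above:

payload = _create_payload("Hello")
assert payload["hash"] == sha256(b"abc" + b"Hello").hexdigest()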
diff --git a/g4f/Provider/deprecated/DfeHub.py b/g4f/Provider/deprecated/DfeHub.py
new file mode 100644
index 00000000..e6d13444
--- /dev/null
+++ b/g4f/Provider/deprecated/DfeHub.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import json
+import re
+import time
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class DfeHub(AbstractProvider):
+ url = "https://chat.dfehub.com/"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ "authority" : "chat.dfehub.com",
+ "accept" : "*/*",
+ "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "content-type" : "application/json",
+ "origin" : "https://chat.dfehub.com",
+ "referer" : "https://chat.dfehub.com/",
+ "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile" : "?0",
+ "sec-ch-ua-platform": '"macOS"',
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "x-requested-with" : "XMLHttpRequest",
+ }
+
+ json_data = {
+ "messages" : messages,
+ "model" : "gpt-3.5-turbo",
+ "temperature" : kwargs.get("temperature", 0.5),
+ "presence_penalty" : kwargs.get("presence_penalty", 0),
+ "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+ "top_p" : kwargs.get("top_p", 1),
+ "stream" : True
+ }
+
+ response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
+ headers=headers, json=json_data, timeout=3)
+
+ for chunk in response.iter_lines():
+ if b"detail" in chunk:
+ delay = re.findall(r"\d+\.\d+", chunk.decode())
+ delay = float(delay[-1])
+ time.sleep(delay)
+ yield from DfeHub.create_completion(model, messages, stream, **kwargs)
+ if b"content" in chunk:
+ data = json.loads(chunk.decode().split("data: ")[1])
+ yield (data["choices"][0]["delta"]["content"])
diff --git a/g4f/Provider/deprecated/EasyChat.py b/g4f/Provider/deprecated/EasyChat.py
new file mode 100644
index 00000000..7a00f523
--- /dev/null
+++ b/g4f/Provider/deprecated/EasyChat.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import json
+import random
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class EasyChat(AbstractProvider):
+ url: str = "https://free.easychat.work"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ working = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ active_servers = [
+ "https://chat10.fastgpt.me",
+ "https://chat9.fastgpt.me",
+ "https://chat1.fastgpt.me",
+ "https://chat2.fastgpt.me",
+ "https://chat3.fastgpt.me",
+ "https://chat4.fastgpt.me",
+ "https://gxos1h1ddt.fastgpt.me"
+ ]
+
+ server = active_servers[kwargs.get("active_server", random.randint(0, len(active_servers) - 1))]
+ headers = {
+ "authority" : f"{server}".replace("https://", ""),
+ "accept" : "text/event-stream",
+ "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+ "content-type" : "application/json",
+ "origin" : f"{server}",
+ "referer" : f"{server}/",
+ "x-requested-with" : "XMLHttpRequest",
+ 'plugins' : '0',
+ 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+ 'sec-ch-ua-mobile' : '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest'
+ }
+
+ json_data = {
+ "messages" : messages,
+ "stream" : stream,
+ "model" : model,
+ "temperature" : kwargs.get("temperature", 0.5),
+ "presence_penalty" : kwargs.get("presence_penalty", 0),
+ "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+ "top_p" : kwargs.get("top_p", 1)
+ }
+
+ session = requests.Session()
+ # init cookies from server
+ session.get(f"{server}/")
+
+ response = session.post(f"{server}/api/openai/v1/chat/completions",
+ headers=headers, json=json_data, stream=stream)
+
+ if response.status_code != 200:
+ raise Exception(f"Error {response.status_code} from server : {response.reason}")
+ if not stream:
+ json_data = response.json()
+
+ if "choices" in json_data:
+ yield json_data["choices"][0]["message"]["content"]
+ else:
+ raise Exception("No response from server")
+
+ else:
+
+ for chunk in response.iter_lines():
+
+ if b"content" in chunk:
+ splitData = chunk.decode().split("data:")
+
+ if len(splitData) > 1:
+ yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
new file mode 100644
index 00000000..5fd9797b
--- /dev/null
+++ b/g4f/Provider/deprecated/Equing.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class Equing(AbstractProvider):
+ url: str = 'https://next.eqing.tech/'
+ working = False
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ 'authority' : 'next.eqing.tech',
+ 'accept' : 'text/event-stream',
+ 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control' : 'no-cache',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://next.eqing.tech',
+ 'plugins' : '0',
+ 'pragma' : 'no-cache',
+ 'referer' : 'https://next.eqing.tech/',
+ 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+ 'sec-ch-ua-mobile' : '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest'
+ }
+
+ json_data = {
+ 'messages' : messages,
+ 'stream' : stream,
+ 'model' : model,
+ 'temperature' : kwargs.get('temperature', 0.5),
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'top_p' : kwargs.get('top_p', 1),
+ }
+
+ response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
+ headers=headers, json=json_data, stream=stream)
+
+ if not stream:
+ yield response.json()["choices"][0]["message"]["content"]
+ return
+
+ for line in response.iter_content(chunk_size=1024):
+ if line:
+ if b'content' in line:
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+
+ token = line_json['choices'][0]['delta'].get('content')
+ if token:
+ yield token
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/FakeGpt.py b/g4f/Provider/deprecated/FakeGpt.py
new file mode 100644
index 00000000..99b6bb1a
--- /dev/null
+++ b/g4f/Provider/deprecated/FakeGpt.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import uuid, time, random, json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_random_string
+
+
+class FakeGpt(AsyncGeneratorProvider):
+ url = "https://chat-shared2.zhile.io"
+ supports_gpt_35_turbo = True
+ working = False
+ _access_token = None
+ _cookie_jar = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept-Language": "en-US",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
+ "Referer": "https://chat-shared2.zhile.io/?v=2",
+ "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-ua-mobile": "?0",
+ }
+ async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
+ if not cls._access_token:
+ async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
+ response.raise_for_status()
+ loads = (await response.json())["loads"]
+ token_ids = [t["token_id"] for t in loads]
+ data = {
+ "token_key": random.choice(token_ids),
+ "session_password": get_random_string()
+ }
+ async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
+ response.raise_for_status()
+ cls._access_token = (await response.json())["accessToken"]
+ cls._cookie_jar = session.cookie_jar
+ headers = {
+ "Content-Type": "application/json",
+ "Accept": "text/event-stream",
+ "X-Authorization": f"Bearer {cls._access_token}",
+ }
+ prompt = format_prompt(messages)
+ data = {
+ "action": "next",
+ "messages": [
+ {
+ "id": str(uuid.uuid4()),
+ "author": {"role": "user"},
+ "content": {"content_type": "text", "parts": [prompt]},
+ "metadata": {},
+ }
+ ],
+ "parent_message_id": str(uuid.uuid4()),
+ "model": "text-davinci-002-render-sha",
+ "plugin_ids": [],
+ "timezone_offset_min": -120,
+ "suggestions": [],
+ "history_and_training_disabled": True,
+ "arkose_token": "",
+ "force_paragen": False,
+ }
+ last_message = ""
+ async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ line = line[6:]
+ if line == b"[DONE]":
+ break
+ try:
+ line = json.loads(line)
+ if line["message"]["metadata"]["message_type"] == "next":
+ new_message = line["message"]["content"]["parts"][0]
+ yield new_message[len(last_message):]
+ last_message = new_message
+ except:
+ continue
+ if not last_message:
+ raise RuntimeError("No valid response") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
new file mode 100644
index 00000000..6a79d9aa
--- /dev/null
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import json
+import random
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class FastGpt(AbstractProvider):
+ url: str = 'https://chat9.fastgpt.me/'
+ working = False
+ needs_auth = False
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ 'authority' : 'chat9.fastgpt.me',
+ 'accept' : 'text/event-stream',
+ 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control' : 'no-cache',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://chat9.fastgpt.me',
+ 'plugins' : '0',
+ 'pragma' : 'no-cache',
+ 'referer' : 'https://chat9.fastgpt.me/',
+ 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+ 'sec-ch-ua-mobile' : '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+ 'usesearch' : 'false',
+ 'x-requested-with' : 'XMLHttpRequest',
+ }
+
+ json_data = {
+ 'messages' : messages,
+ 'stream' : stream,
+ 'model' : model,
+ 'temperature' : kwargs.get('temperature', 0.5),
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'top_p' : kwargs.get('top_p', 1),
+ }
+
+ subdomain = random.choice([
+ 'jdaen979ew',
+ 'chat9'
+ ])
+
+ response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
+ headers=headers, json=json_data, stream=stream)
+
+ for line in response.iter_lines():
+ if line:
+ try:
+ if b'content' in line:
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ token = line_json['choices'][0]['delta'].get(
+ 'content'
+ )
+
+ if token:
+ yield token
+ except:
+ continue
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Forefront.py b/g4f/Provider/deprecated/Forefront.py
new file mode 100644
index 00000000..39654b2c
--- /dev/null
+++ b/g4f/Provider/deprecated/Forefront.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class Forefront(AbstractProvider):
+ url = "https://forefront.com"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ json_data = {
+ "text" : messages[-1]["content"],
+ "action" : "noauth",
+ "id" : "",
+ "parentId" : "",
+ "workspaceId" : "",
+ "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
+ "model" : "gpt-4",
+ "messages" : messages[:-1] if len(messages) > 1 else [],
+ "internetMode" : "auto",
+ }
+
+ response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
+ json=json_data, stream=True)
+
+ response.raise_for_status()
+ for token in response.iter_lines():
+ if b"delta" in token:
+ yield json.loads(token.decode().split("data: ")[1])["delta"]
diff --git a/g4f/Provider/deprecated/GPTalk.py b/g4f/Provider/deprecated/GPTalk.py
new file mode 100644
index 00000000..5b36d37b
--- /dev/null
+++ b/g4f/Provider/deprecated/GPTalk.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import secrets, time, json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class GPTalk(AsyncGeneratorProvider):
+ url = "https://gptalk.net"
+ working = False
+ supports_gpt_35_turbo = True
+ _auth = None
+ used_times = 0
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ timestamp = int(time.time())
+ headers = {
+ 'authority': 'gptalk.net',
+ 'accept': '*/*',
+ 'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2',
+ 'content-type': 'application/json',
+ 'origin': 'https://gptalk.net',
+ 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
+ 'x-auth-appid': '2229',
+ 'x-auth-openid': '',
+ 'x-auth-platform': '',
+ 'x-auth-timestamp': f"{timestamp}",
+ }
+ async with ClientSession(headers=headers) as session:
+ if not cls._auth or cls._auth["expires_at"] < timestamp or cls.used_times == 5:
+ data = {
+ "fingerprint": secrets.token_hex(16).zfill(32),
+ "platform": "fingerprint"
+ }
+ async with session.post(f"{cls.url}/api/chatgpt/user/login", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ cls._auth = (await response.json())["data"]
+ cls.used_times = 0
+ data = {
+ "content": format_prompt(messages),
+ "accept": "stream",
+ "from": 1,
+ "model": model,
+ "is_mobile": 0,
+ "user_agent": headers["user-agent"],
+ "is_open_ctx": 0,
+ "prompt": "",
+ "roid": 111,
+ "temperature": 0,
+ "ctx_msg_count": 3,
+ "created_at": timestamp
+ }
+ headers = {
+ 'authorization': f'Bearer {cls._auth["token"]}',
+ }
+ async with session.post(f"{cls.url}/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
+ response.raise_for_status()
+ token = (await response.json())["data"]["token"]
+ cls.used_times += 1
+ last_message = ""
+ async with session.get(f"{cls.url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ if line.startswith(b"data: [DONE]"):
+ break
+ message = json.loads(line[6:-1])["content"]
+ yield message[len(last_message):]
+ last_message = message
diff --git a/g4f/Provider/deprecated/GeekGpt.py b/g4f/Provider/deprecated/GeekGpt.py
new file mode 100644
index 00000000..7a460083
--- /dev/null
+++ b/g4f/Provider/deprecated/GeekGpt.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+import requests, json
+
+from ..base_provider import AbstractProvider
+from ...typing import CreateResult, Messages
+from json import dumps
+
+
+class GeekGpt(AbstractProvider):
+ url = 'https://chat.geekgpt.org'
+ working = False
+ supports_message_history = True
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ json_data = {
+ 'messages': messages,
+ 'model': model,
+ 'temperature': kwargs.get('temperature', 0.9),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'stream': True
+ }
+
+ data = dumps(json_data, separators=(',', ':'))
+
+ headers = {
+ 'authority': 'ai.fakeopen.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'authorization': 'Bearer pk-this-is-a-real-free-pool-token-for-everyone',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.geekgpt.org',
+ 'referer': 'https://chat.geekgpt.org/',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ response = requests.post("https://ai.fakeopen.com/v1/chat/completions",
+ headers=headers, data=data, stream=True)
+ response.raise_for_status()
+
+ for chunk in response.iter_lines():
+ if b'content' in chunk:
+ json_data = chunk.decode().replace("data: ", "")
+
+ if json_data == "[DONE]":
+ break
+
+ try:
+ content = json.loads(json_data)["choices"][0]["delta"].get("content")
+ except Exception as e:
+ raise RuntimeError(f'error | {e}: {json_data}')
+
+ if content:
+ yield content
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/GetGpt.py b/g4f/Provider/deprecated/GetGpt.py
new file mode 100644
index 00000000..dd586569
--- /dev/null
+++ b/g4f/Provider/deprecated/GetGpt.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import json
+import os
+import uuid
+
+import requests
+# try:
+# from Crypto.Cipher import AES
+# except ImportError:
+# from Cryptodome.Cipher import AES
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class GetGpt(AbstractProvider):
+ url = 'https://chat.getgpt.world/'
+ supports_stream = True
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ headers = {
+ 'Content-Type' : 'application/json',
+ 'Referer' : 'https://chat.getgpt.world/',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ }
+
+ data = json.dumps(
+ {
+ 'messages' : messages,
+ 'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+ 'max_tokens' : kwargs.get('max_tokens', 4000),
+ 'model' : 'gpt-3.5-turbo',
+ 'presence_penalty' : kwargs.get('presence_penalty', 0),
+ 'temperature' : kwargs.get('temperature', 1),
+ 'top_p' : kwargs.get('top_p', 1),
+ 'stream' : True,
+ 'uuid' : str(uuid.uuid4())
+ }
+ )
+
+ res = requests.post('https://chat.getgpt.world/api/chat/stream',
+ headers=headers, json={'signature': _encrypt(data)}, stream=True)
+
+ res.raise_for_status()
+ for line in res.iter_lines():
+ if b'content' in line:
+ line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+ yield (line_json['choices'][0]['delta']['content'])
+
+
+def _encrypt(e: str):
+ # t = os.urandom(8).hex().encode('utf-8')
+ # n = os.urandom(8).hex().encode('utf-8')
+ # r = e.encode('utf-8')
+
+ # cipher = AES.new(t, AES.MODE_CBC, n)
+ # ciphertext = cipher.encrypt(_pad_data(r))
+
+ # return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
+ return
+
+
+def _pad_data(data: bytes) -> bytes:
+ # block_size = AES.block_size
+ # padding_size = block_size - len(data) % block_size
+ # padding = bytes([padding_size] * padding_size)
+
+ # return data + padding
+ return
\ No newline at end of file
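For reference, a reconstruction of the disabled helpers from their commented-out bodies, assuming pycryptodome provides the Crypto.Cipher import: AES-128-CBC with a random 16-byte hex key and IV, PKCS#7-style padding, and the key plus IV appended to the ciphertext in the clear (which is why the scheme offers no real secrecy):

import os
from Crypto.Cipher import AES

def _pad_data(data: bytes) -> bytes:
    # Pad to the 16-byte AES block size, PKCS#7 style.
    padding_size = AES.block_size - len(data) % AES.block_size
    return data + bytes([padding_size] * padding_size)

def _encrypt(e: str) -> str:
    t = os.urandom(8).hex().encode('utf-8')  # 16-byte key (hex of 8 random bytes)
    n = os.urandom(8).hex().encode('utf-8')  # 16-byte IV
    cipher = AES.new(t, AES.MODE_CBC, n)
    ciphertext = cipher.encrypt(_pad_data(e.encode('utf-8')))
    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')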
diff --git a/g4f/Provider/deprecated/H2o.py b/g4f/Provider/deprecated/H2o.py
new file mode 100644
index 00000000..ba4ca507
--- /dev/null
+++ b/g4f/Provider/deprecated/H2o.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import json
+import uuid
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class H2o(AsyncGeneratorProvider):
+ url = "https://gpt-gm.h2o.ai"
+ model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = model if model else cls.model
+ headers = {"Referer": f"{cls.url}/"}
+
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ data = {
+ "ethicsModalAccepted": "true",
+ "shareConversationsWithModelAuthors": "true",
+ "ethicsModalAcceptedAt": "",
+ "activeModel": model,
+ "searchEnabled": "true",
+ }
+ async with session.post(
+ f"{cls.url}/settings",
+ proxy=proxy,
+ data=data
+ ) as response:
+ response.raise_for_status()
+
+ async with session.post(
+ f"{cls.url}/conversation",
+ proxy=proxy,
+ json={"model": model},
+ ) as response:
+ response.raise_for_status()
+ conversationId = (await response.json())["conversationId"]
+
+ data = {
+ "inputs": format_prompt(messages),
+ "parameters": {
+ "temperature": 0.4,
+ "truncate": 2048,
+ "max_new_tokens": 1024,
+ "do_sample": True,
+ "repetition_penalty": 1.2,
+ "return_full_text": False,
+ **kwargs
+ },
+ "stream": True,
+ "options": {
+ "id": str(uuid.uuid4()),
+ "response_id": str(uuid.uuid4()),
+ "is_retry": False,
+ "use_cache": False,
+ "web_search_id": "",
+ },
+ }
+ async with session.post(
+ f"{cls.url}/conversation/{conversationId}",
+ proxy=proxy,
+ json=data
+ ) as response:
+ start = "data:"
+ async for line in response.content:
+ line = line.decode("utf-8")
+ if line and line.startswith(start):
+ line = json.loads(line[len(start):-1])
+ if not line["token"]["special"]:
+ yield line["token"]["text"]
+
+ async with session.delete(
+ f"{cls.url}/conversation/{conversationId}",
+ proxy=proxy,
+ ) as response:
+ response.raise_for_status()
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Hashnode.py b/g4f/Provider/deprecated/Hashnode.py
new file mode 100644
index 00000000..c2c0ffb7
--- /dev/null
+++ b/g4f/Provider/deprecated/Hashnode.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_hex
+
+class SearchTypes:
+ quick = "quick"
+ code = "code"
+ websearch = "websearch"
+
+class Hashnode(AsyncGeneratorProvider):
+ url = "https://hashnode.com"
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+ _sources = []
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ search_type: str = SearchTypes.websearch,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/rix",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[-1]["content"]
+ cls._sources = []
+ if search_type == "websearch":
+ async with session.post(
+ f"{cls.url}/api/ai/rix/search",
+ json={"prompt": prompt},
+ proxy=proxy,
+ ) as response:
+ response.raise_for_status()
+ cls._sources = (await response.json())["result"]
+ data = {
+ "chatId": get_random_hex(),
+ "history": messages,
+ "prompt": prompt,
+ "searchType": search_type,
+ "urlToScan": None,
+ "searchResults": cls._sources,
+ }
+ async with session.post(
+ f"{cls.url}/api/ai/rix/completion",
+ json=data,
+ proxy=proxy,
+ ) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
+
+ @classmethod
+ def get_sources(cls) -> list:
+ return [{
+ "title": source["name"],
+ "url": source["url"]
+ } for source in cls._sources]
\ No newline at end of file
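In websearch mode the Hashnode search hits are kept so callers can cite them after streaming, e.g.:

for source in Hashnode.get_sources():
    print(source["title"], "->", source["url"])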
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
new file mode 100644
index 00000000..edab0bd4
--- /dev/null
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class Lockchat(AbstractProvider):
+ url: str = "http://supertest.lockchat.app"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ temperature = float(kwargs.get("temperature", 0.7))
+ payload = {
+ "temperature": temperature,
+ "messages" : messages,
+ "model" : model,
+ "stream" : True,
+ }
+
+ headers = {
+ "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
+ }
+ response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
+ json=payload, headers=headers, stream=True)
+
+ response.raise_for_status()
+ for token in response.iter_lines():
+ if b"The model: `gpt-4` does not exist" in token:
+ print("error, retrying...")
+
+ yield from Lockchat.create_completion(
+ model = model,
+ messages = messages,
+ stream = stream,
+ temperature = temperature,
+ **kwargs)
+
+ if b"content" in token:
+ token = json.loads(token.decode("utf-8").split("data: ")[1])
+ token = token["choices"][0]["delta"].get("content")
+
+ if token:
+ yield token
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Myshell.py b/g4f/Provider/deprecated/Myshell.py
new file mode 100644
index 00000000..2487440d
--- /dev/null
+++ b/g4f/Provider/deprecated/Myshell.py
@@ -0,0 +1,165 @@
+# not using WS anymore
+
+from __future__ import annotations
+
+import json, uuid, hashlib, time, random
+
+from aiohttp import ClientSession
+from aiohttp.http import WSMsgType
+import asyncio
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
+
+
+models = {
+ "samantha": "1e3be7fe89e94a809408b1154a2ee3e1",
+ "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd",
+ "gpt-4": "01c8de4fbfc548df903712b0922a4e01",
+}
+
+
+class Myshell(AsyncGeneratorProvider):
+ url = "https://app.myshell.ai/chat"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 90,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ bot_id = models["samantha"]
+ elif model in models:
+ bot_id = models[model]
+ else:
+ raise ValueError(f"Model are not supported: {model}")
+
+ user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
+ visitor_id = generate_visitor_id(user_agent)
+
+ async with ClientSession(
+ headers={'User-Agent': user_agent}
+ ) as session:
+ async with session.ws_connect(
+ "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
+ autoping=False,
+ timeout=timeout,
+ proxy=proxy
+ ) as wss:
+ # Send and receive hello message
+ await wss.receive_str()
+ message = json.dumps({"token": None, "visitorId": visitor_id})
+ await wss.send_str(f"40/chat,{message}")
+ await wss.receive_str()
+
+ # Fix "need_verify_captcha" issue
+ await asyncio.sleep(5)
+
+ # Create chat message
+ text = format_prompt(messages)
+ chat_data = json.dumps(["text_chat",{
+ "reqId": str(uuid.uuid4()),
+ "botUid": bot_id,
+ "sourceFrom": "myshellWebsite",
+ "text": text,
+ **generate_signature(text)
+ }])
+
+ # Send chat message
+ chat_start = "42/chat,"
+ chat_message = f"{chat_start}{chat_data}"
+ await wss.send_str(chat_message)
+
+ # Receive messages
+ async for message in wss:
+ if message.type != WSMsgType.TEXT:
+ continue
+ # Ping back
+ if message.data == "2":
+ await wss.send_str("3")
+ continue
+ # Is not chat message
+ if not message.data.startswith(chat_start):
+ continue
+ data_type, data = json.loads(message.data[len(chat_start):])
+ if data_type == "text_stream":
+ if data["data"]["text"]:
+ yield data["data"]["text"]
+ elif data["data"]["isFinal"]:
+ break
+ elif data_type in ("message_replied", "need_verify_captcha"):
+ raise RuntimeError(f"Received unexpected message: {data_type}")
+
+
+def generate_timestamp() -> str:
+ return str(
+ int(
+ str(int(time.time() * 1000))[:-1]
+ + str(
+ sum(
+ 2 * int(digit)
+ if idx % 2 == 0
+ else 3 * int(digit)
+ for idx, digit in enumerate(str(int(time.time() * 1000))[:-1])
+ )
+ % 10
+ )
+ )
+ )
+
+def generate_signature(text: str):
+ timestamp = generate_timestamp()
+ version = 'v1.0.0'
+ secret = '8@VXGK3kKHr!u2gA'
+ data = f"{version}#{text}#{timestamp}#{secret}"
+ signature = hashlib.md5(data.encode()).hexdigest()
+ signature = signature[::-1]
+ return {
+ "signature": signature,
+ "timestamp": timestamp,
+ "version": version
+ }
+
+def xor_hash(B: str):
+ r = []
+ i = 0
+
+ def o(e, t):
+ o_val = 0
+ for i in range(len(t)):
+ o_val |= r[i] << (8 * i)
+ return e ^ o_val
+
+ for e in range(len(B)):
+ t = ord(B[e])
+ r.insert(0, 255 & t)
+
+ if len(r) >= 4:
+ i = o(i, r)
+ r = []
+
+ if len(r) > 0:
+ i = o(i, r)
+
+ return hex(i)[2:]
+
+def performance() -> str:
+ t = int(time.time() * 1000)
+ e = 0
+ while t == int(time.time() * 1000):
+ e += 1
+ return hex(t)[2:] + hex(e)[2:]
+
+def generate_visitor_id(user_agent: str) -> str:
+ f = performance()
+ r = hex(int(random.random() * (16**16)))[2:-2]
+ d = xor_hash(user_agent)
+ e = hex(1080 * 1920)[2:]
+ return f"{f}-{r}-{d}-{e}-{f}" \ No newline at end of file
diff --git a/g4f/Provider/deprecated/NoowAi.py b/g4f/Provider/deprecated/NoowAi.py
new file mode 100644
index 00000000..dba87273
--- /dev/null
+++ b/g4f/Provider/deprecated/NoowAi.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
+
+class NoowAi(AsyncGeneratorProvider):
+ url = "https://noowai.com"
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "noowai.com",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "botId": "default",
+ "customId": "d49bc3670c3d858458576d75c8ea0f5d",
+ "session": "N/A",
+ "chatId": get_random_string(),
+ "contextId": 25,
+ "messages": messages,
+ "newMessage": messages[-1]["content"],
+ "stream": True
+ }
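+            # /wp-json/mwai-ui/... is the chat endpoint of the WordPress
+            # "AI Engine" (mwai) plugin, which this site appears to run.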
+ async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+                        try:
+                            data = json.loads(line[6:])
+                            assert "type" in data
+                        except (json.JSONDecodeError, AssertionError):
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+                        if data["type"] == "live":
+                            yield data["data"]
+                        elif data["type"] == "end":
+                            break
+                        elif data["type"] == "error":
+                            raise RuntimeError(data["data"]) \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Opchatgpts.py b/g4f/Provider/deprecated/Opchatgpts.py
new file mode 100644
index 00000000..94b1d099
--- /dev/null
+++ b/g4f/Provider/deprecated/Opchatgpts.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import Messages, AsyncResult
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
+
+class Opchatgpts(AsyncGeneratorProvider):
+ url = "https://opchatgpts.net"
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None, **kwargs) -> AsyncResult:
+
+ headers = {
+ "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+ "Accept" : "*/*",
+ "Accept-Language" : "de,en-US;q=0.7,en;q=0.3",
+ "Origin" : cls.url,
+ "Alt-Used" : "opchatgpts.net",
+ "Referer" : f"{cls.url}/chatgpt-free-use/",
+ "Sec-Fetch-Dest" : "empty",
+ "Sec-Fetch-Mode" : "cors",
+ "Sec-Fetch-Site" : "same-origin",
+ }
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ data = {
+ "botId": "default",
+ "chatId": get_random_string(),
+ "contextId": 28,
+ "customId": None,
+ "messages": messages,
+ "newMessage": messages[-1]["content"],
+ "session": "N/A",
+ "stream": True
+ }
+ async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+                        try:
+                            data = json.loads(line[6:])
+                            assert "type" in data
+                        except (json.JSONDecodeError, AssertionError):
+                            raise RuntimeError(f"Broken line: {line.decode()}")
+                        if data["type"] == "live":
+                            yield data["data"]
+                        elif data["type"] == "end":
+                            break \ No newline at end of file
diff --git a/g4f/Provider/deprecated/OpenAssistant.py b/g4f/Provider/deprecated/OpenAssistant.py
new file mode 100644
index 00000000..80cae3c2
--- /dev/null
+++ b/g4f/Provider/deprecated/OpenAssistant.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import json
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies
+
+class OpenAssistant(AsyncGeneratorProvider):
+ url = "https://open-assistant.io/chat"
+ needs_auth = True
+ working = False
+ model = "OA_SFT_Llama_30B_6"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not cookies:
+ cookies = get_cookies("open-assistant.io")
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+ }
+ async with ClientSession(
+ cookies=cookies,
+ headers=headers
+ ) as session:
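+            # Flow: create a chat, post the prompt as a prompter message,
+            # request an assistant message, stream its token events via SSE,
+            # then delete the chat to clean up.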
+ async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
+ chat_id = (await response.json())["id"]
+
+ data = {
+ "chat_id": chat_id,
+ "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
+ "parent_id": None
+ }
+ async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
+ parent_id = (await response.json())["id"]
+
+ data = {
+ "chat_id": chat_id,
+ "parent_id": parent_id,
+ "model_config_name": model if model else cls.model,
+ "sampling_parameters":{
+ "top_k": 50,
+ "top_p": None,
+ "typical_p": None,
+ "temperature": 0.35,
+ "repetition_penalty": 1.1111111111111112,
+ "max_new_tokens": 1024,
+ **kwargs
+ },
+ "plugins":[]
+ }
+ async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
+ data = await response.json()
+ if "id" in data:
+ message_id = data["id"]
+ elif "message" in data:
+ raise RuntimeError(data["message"])
+ else:
+ response.raise_for_status()
+
+ params = {
+ 'chat_id': chat_id,
+ 'message_id': message_id,
+ }
+ async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
+ start = "data: "
+ async for line in response.content:
+ line = line.decode("utf-8")
+ if line and line.startswith(start):
+ line = json.loads(line[len(start):])
+ if line["event_type"] == "token":
+ yield line["text"]
+
+ params = {
+ 'chat_id': chat_id,
+ }
+ async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
+ response.raise_for_status()
diff --git a/g4f/Provider/deprecated/Phind.py b/g4f/Provider/deprecated/Phind.py
new file mode 100644
index 00000000..c72bf09e
--- /dev/null
+++ b/g4f/Provider/deprecated/Phind.py
@@ -0,0 +1,140 @@
+from __future__ import annotations
+
+import re
+import json
+from urllib import parse
+from datetime import datetime
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ...requests import StreamSession
+
+class Phind(AsyncGeneratorProvider):
+ url = "https://www.phind.com"
+ working = False
+ lockdown = True
+ supports_stream = True
+ supports_message_history = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ creative_mode: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept": "*/*",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/search",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ }
+ async with StreamSession(
+ headers=headers,
+ impersonate="chrome",
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
+ url = "https://www.phind.com/search?home=true"
+ async with session.get(url) as response:
+ text = await response.text()
+ match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(?P<json>[\S\s]+?)</script>', text)
+ data = json.loads(match.group("json"))
+ challenge_seeds = data["props"]["pageProps"]["challengeSeeds"]
+
+ prompt = messages[-1]["content"]
+ data = {
+ "question": prompt,
+ "question_history": [
+ message["content"] for message in messages[:-1] if message["role"] == "user"
+ ],
+ "answer_history": [
+ message["content"] for message in messages if message["role"] == "assistant"
+ ],
+ "webResults": [],
+ "options": {
+ "date": datetime.now().strftime("%d.%m.%Y"),
+ "language": "en-US",
+ "detailed": True,
+ "anonUserId": "",
+ "answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind-34B",
+ "creativeMode": creative_mode,
+ "customLinks": []
+ },
+ "context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
+ }
+ data["challenge"] = generate_challenge(data, **challenge_seeds)
+            async with session.post("https://https.api.phind.com/infer/", headers=headers, json=data) as response:
+ new_line = False
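+                # Empty "data:" chunks encode blank lines: the first one arms
+                # new_line, a consecutive second one is emitted as "\n".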
+ async for line in response.iter_lines():
+                if line.startswith(b"data: "):
+                    chunk = line[6:]
+                    if chunk.startswith(b'<PHIND_DONE/>'):
+                        break
+                    if chunk.startswith(b'<PHIND_BACKEND_ERROR>'):
+                        raise RuntimeError(f"Response: {chunk.decode()}")
+                    if chunk.startswith((
+                        b'<PHIND_WEBRESULTS>', b'<PHIND_FOLLOWUP>',
+                        b'<PHIND_METADATA>', b'<PHIND_INDICATOR>',
+                        b'<PHIND_SPAN_BEGIN>', b'<PHIND_SPAN_END>',
+                    )):
+                        pass  # metadata tags, not answer text
+                    elif chunk:
+                        yield chunk.decode()
+                    elif new_line:
+                        yield "\n"
+                        new_line = False
+                    else:
+                        new_line = True
+
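+# Stable JSON-ish serialization used for challenge hashing: keys sorted,
+# list elements serialized and then sorted, numbers trimmed to at most
+# 8 decimals, apparently mirroring the Phind frontend's serializer.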
+def deterministic_stringify(obj):
+    def handle_value(value):
+        if isinstance(value, list):
+            return '[' + ','.join(sorted(map(handle_value, value))) + ']'
+        elif isinstance(value, dict):
+            return '{' + deterministic_stringify(value) + '}'
+        elif isinstance(value, bool):
+            return 'true' if value else 'false'
+        elif isinstance(value, (int, float)):
+            return format(value, '.8f').rstrip('0').rstrip('.')
+        elif isinstance(value, str):
+            return f'"{value}"'
+        else:
+            return 'null'
+
+    # handle_value never returns None, so no filtering is needed.
+    items = sorted(obj.items(), key=lambda x: x[0])
+    return ','.join(f'{k}:{handle_value(v)}' for k, v in items)
+
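+# One linear-congruential-generator step scaled to a float; the negative
+# branch reproduces JavaScript's % operator, which keeps the dividend's sign.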
+def prng_general(seed, multiplier, addend, modulus):
+ a = seed * multiplier + addend
+ if a < 0:
+ return ((a%modulus)-modulus)/modulus
+ else:
+ return a%modulus/modulus
+
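+# The seed is a hash of the URL-encoded deterministic serialization of the
+# request body.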
+def generate_challenge_seed(l):
+ I = deterministic_stringify(l)
+ d = parse.quote(I, safe='')
+ return simple_hash(d)
+
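+# JS-style rolling string hash: (d << 5) - d equals d * 31; masking to
+# 32 bits and sign-folding emulates JavaScript's signed integer overflow.
+# Characters outside Latin-1 are skipped.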
+def simple_hash(s):
+ d = 0
+ for char in s:
+ if len(char) > 1 or ord(char) >= 256:
+ continue
+ d = ((d << 5) - d + ord(char[0])) & 0xFFFFFFFF
+ if d > 0x7FFFFFFF: # 2147483647
+ d -= 0x100000000 # Subtract 2**32
+ return d
+
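+# The challenge is a single PRNG step seeded by hashing the request body;
+# multiplier, addend and modulus come from the page's challengeSeeds.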
+def generate_challenge(obj, **kwargs):
+ return prng_general(
+ seed=generate_challenge_seed(obj),
+ **kwargs
+ ) \ No newline at end of file
diff --git a/g4f/Provider/deprecated/V50.py b/g4f/Provider/deprecated/V50.py
new file mode 100644
index 00000000..456445f7
--- /dev/null
+++ b/g4f/Provider/deprecated/V50.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import uuid
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
+class V50(AbstractProvider):
+ url = 'https://p5.v50.ltd'
+ supports_gpt_35_turbo = True
+ supports_stream = False
+ needs_auth = False
+ working = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool, **kwargs: Any) -> CreateResult:
+
+ conversation = (
+ "\n".join(
+ f"{message['role']}: {message['content']}" for message in messages
+ )
+ + "\nassistant: "
+ )
+ payload = {
+ "prompt" : conversation,
+ "options" : {},
+ "systemMessage" : ".",
+ "temperature" : kwargs.get("temperature", 0.4),
+ "top_p" : kwargs.get("top_p", 0.4),
+ "model" : model,
+ "user" : str(uuid.uuid4())
+ }
+
+ headers = {
+ 'authority' : 'p5.v50.ltd',
+ 'accept' : 'application/json, text/plain, */*',
+ 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'content-type' : 'application/json',
+ 'origin' : 'https://p5.v50.ltd',
+ 'referer' : 'https://p5.v50.ltd/',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest' : 'empty',
+ 'sec-fetch-mode' : 'cors',
+ 'sec-fetch-site' : 'same-origin',
+ 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
+ }
+ response = requests.post(
+ "https://p5.v50.ltd/api/chat-process",
+ json=payload,
+ headers=headers,
+ proxies=kwargs.get('proxy', {}),
+ )
+
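+        # Responses containing this link appear to be block/redirect pages,
+        # so they are not yielded.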
+ if "https://fk1.v50.ltd" not in response.text:
+ yield response.text \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Vercel.py b/g4f/Provider/deprecated/Vercel.py
new file mode 100644
index 00000000..adf7b208
--- /dev/null
+++ b/g4f/Provider/deprecated/Vercel.py
@@ -0,0 +1,392 @@
+from __future__ import annotations
+
+import json, base64, requests, random, uuid
+
+try:
+ import execjs
+ has_requirements = True
+except ImportError:
+ has_requirements = False
+
+from ...typing import Messages, TypedDict, CreateResult, Any
+from ..base_provider import AbstractProvider
+from ...errors import MissingRequirementsError
+
+class Vercel(AbstractProvider):
+ url = 'https://sdk.vercel.ai'
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+ supports_stream = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ **kwargs
+ ) -> CreateResult:
+ if not has_requirements:
+ raise MissingRequirementsError('Install "PyExecJS" package')
+
+ if not model:
+ model = "gpt-3.5-turbo"
+ elif model not in model_info:
+ raise ValueError(f"Vercel does not support {model}")
+
+ headers = {
+ 'authority': 'sdk.vercel.ai',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'custom-encoding': get_anti_bot_token(),
+ 'origin': 'https://sdk.vercel.ai',
+ 'pragma': 'no-cache',
+ 'referer': 'https://sdk.vercel.ai/',
+ 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
+ }
+
+        # Pop max_retries here so it is not splatted into the payload below.
+        max_retries = kwargs.pop('max_retries', 20)
+        json_data = {
+ 'model' : model_info[model]['id'],
+ 'messages' : messages,
+ 'playgroundId': str(uuid.uuid4()),
+ 'chatIndex' : 0,
+ **model_info[model]['default_params'],
+ **kwargs
+ }
+
+ for _ in range(max_retries):
+ response = requests.post('https://chat.vercel.ai/api/chat',
+ headers=headers, json=json_data, stream=True, proxies={"https": proxy})
+            try:
+                response.raise_for_status()
+            except requests.HTTPError:
+                continue
+ for token in response.iter_content(chunk_size=None):
+ yield token.decode()
+ break
+
+
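+# /openai.jpeg returns base64-encoded JSON holding a JS function (c) and its
+# argument (a); the function is executed with PyExecJS and the result is
+# re-encoded (UTF-16LE, base64) into the custom-encoding request header.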
+def get_anti_bot_token() -> str:
+ headers = {
+ 'authority': 'sdk.vercel.ai',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'cache-control': 'no-cache',
+ 'pragma': 'no-cache',
+ 'referer': 'https://sdk.vercel.ai/',
+ 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
+ }
+
+ response = requests.get('https://sdk.vercel.ai/openai.jpeg',
+ headers=headers).text
+
+ raw_data = json.loads(base64.b64decode(response,
+ validate=True))
+
+ js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
+ return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
+
+ raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
+ separators = (",", ":"))
+
+ return base64.b64encode(raw_token.encode('utf-16le')).decode()
+
+class ModelInfo(TypedDict):
+ id: str
+ default_params: dict[str, Any]
+
+model_info: dict[str, ModelInfo] = {
+ # 'claude-instant-v1': {
+ # 'id': 'anthropic:claude-instant-v1',
+ # 'default_params': {
+ # 'temperature': 1,
+ # 'maximumLength': 1024,
+ # 'topP': 1,
+ # 'topK': 1,
+ # 'presencePenalty': 1,
+ # 'frequencyPenalty': 1,
+ # 'stopSequences': ['\n\nHuman:'],
+ # },
+ # },
+ # 'claude-v1': {
+ # 'id': 'anthropic:claude-v1',
+ # 'default_params': {
+ # 'temperature': 1,
+ # 'maximumLength': 1024,
+ # 'topP': 1,
+ # 'topK': 1,
+ # 'presencePenalty': 1,
+ # 'frequencyPenalty': 1,
+ # 'stopSequences': ['\n\nHuman:'],
+ # },
+ # },
+ # 'claude-v2': {
+ # 'id': 'anthropic:claude-v2',
+ # 'default_params': {
+ # 'temperature': 1,
+ # 'maximumLength': 1024,
+ # 'topP': 1,
+ # 'topK': 1,
+ # 'presencePenalty': 1,
+ # 'frequencyPenalty': 1,
+ # 'stopSequences': ['\n\nHuman:'],
+ # },
+ # },
+ 'replicate/llama70b-v2-chat': {
+ 'id': 'replicate:replicate/llama-2-70b-chat',
+ 'default_params': {
+ 'temperature': 0.75,
+ 'maximumLength': 3000,
+ 'topP': 1,
+ 'repetitionPenalty': 1,
+ },
+ },
+ 'a16z-infra/llama7b-v2-chat': {
+ 'id': 'replicate:a16z-infra/llama7b-v2-chat',
+ 'default_params': {
+ 'temperature': 0.75,
+ 'maximumLength': 3000,
+ 'topP': 1,
+ 'repetitionPenalty': 1,
+ },
+ },
+ 'a16z-infra/llama13b-v2-chat': {
+ 'id': 'replicate:a16z-infra/llama13b-v2-chat',
+ 'default_params': {
+ 'temperature': 0.75,
+ 'maximumLength': 3000,
+ 'topP': 1,
+ 'repetitionPenalty': 1,
+ },
+ },
+ 'replicate/llama-2-70b-chat': {
+ 'id': 'replicate:replicate/llama-2-70b-chat',
+ 'default_params': {
+ 'temperature': 0.75,
+ 'maximumLength': 3000,
+ 'topP': 1,
+ 'repetitionPenalty': 1,
+ },
+ },
+ 'bigscience/bloom': {
+ 'id': 'huggingface:bigscience/bloom',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 0.95,
+ 'topK': 4,
+ 'repetitionPenalty': 1.03,
+ },
+ },
+ 'google/flan-t5-xxl': {
+ 'id': 'huggingface:google/flan-t5-xxl',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 0.95,
+ 'topK': 4,
+ 'repetitionPenalty': 1.03,
+ },
+ },
+ 'EleutherAI/gpt-neox-20b': {
+ 'id': 'huggingface:EleutherAI/gpt-neox-20b',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 0.95,
+ 'topK': 4,
+ 'repetitionPenalty': 1.03,
+ 'stopSequences': [],
+ },
+ },
+ 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
+ 'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+ 'default_params': {
+ 'maximumLength': 1024,
+ 'typicalP': 0.2,
+ 'repetitionPenalty': 1,
+ },
+ },
+ 'OpenAssistant/oasst-sft-1-pythia-12b': {
+ 'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
+ 'default_params': {
+ 'maximumLength': 1024,
+ 'typicalP': 0.2,
+ 'repetitionPenalty': 1,
+ },
+ },
+ 'bigcode/santacoder': {
+ 'id': 'huggingface:bigcode/santacoder',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 0.95,
+ 'topK': 4,
+ 'repetitionPenalty': 1.03,
+ },
+ },
+ 'command-light-nightly': {
+ 'id': 'cohere:command-light-nightly',
+ 'default_params': {
+ 'temperature': 0.9,
+ 'maximumLength': 1024,
+ 'topP': 1,
+ 'topK': 0,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+ 'command-nightly': {
+ 'id': 'cohere:command-nightly',
+ 'default_params': {
+ 'temperature': 0.9,
+ 'maximumLength': 1024,
+ 'topP': 1,
+ 'topK': 0,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+ # 'gpt-4': {
+ # 'id': 'openai:gpt-4',
+ # 'default_params': {
+ # 'temperature': 0.7,
+ # 'maximumLength': 8192,
+ # 'topP': 1,
+ # 'presencePenalty': 0,
+ # 'frequencyPenalty': 0,
+ # 'stopSequences': [],
+ # },
+ # },
+ # 'gpt-4-0613': {
+ # 'id': 'openai:gpt-4-0613',
+ # 'default_params': {
+ # 'temperature': 0.7,
+ # 'maximumLength': 8192,
+ # 'topP': 1,
+ # 'presencePenalty': 0,
+ # 'frequencyPenalty': 0,
+ # 'stopSequences': [],
+ # },
+ # },
+ 'code-davinci-002': {
+ 'id': 'openai:code-davinci-002',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 1,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+ 'gpt-3.5-turbo': {
+ 'id': 'openai:gpt-3.5-turbo',
+ 'default_params': {
+ 'temperature': 0.7,
+ 'maximumLength': 4096,
+ 'topP': 1,
+ 'topK': 1,
+ 'presencePenalty': 1,
+ 'frequencyPenalty': 1,
+ 'stopSequences': [],
+ },
+ },
+ 'gpt-3.5-turbo-16k': {
+ 'id': 'openai:gpt-3.5-turbo-16k',
+ 'default_params': {
+ 'temperature': 0.7,
+ 'maximumLength': 16280,
+ 'topP': 1,
+ 'topK': 1,
+ 'presencePenalty': 1,
+ 'frequencyPenalty': 1,
+ 'stopSequences': [],
+ },
+ },
+ 'gpt-3.5-turbo-16k-0613': {
+ 'id': 'openai:gpt-3.5-turbo-16k-0613',
+ 'default_params': {
+ 'temperature': 0.7,
+ 'maximumLength': 16280,
+ 'topP': 1,
+ 'topK': 1,
+ 'presencePenalty': 1,
+ 'frequencyPenalty': 1,
+ 'stopSequences': [],
+ },
+ },
+ 'text-ada-001': {
+ 'id': 'openai:text-ada-001',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 1,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+ 'text-babbage-001': {
+ 'id': 'openai:text-babbage-001',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 1,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+ 'text-curie-001': {
+ 'id': 'openai:text-curie-001',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 1,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+ 'text-davinci-002': {
+ 'id': 'openai:text-davinci-002',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 1024,
+ 'topP': 1,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+ 'text-davinci-003': {
+ 'id': 'openai:text-davinci-003',
+ 'default_params': {
+ 'temperature': 0.5,
+ 'maximumLength': 4097,
+ 'topP': 1,
+ 'presencePenalty': 0,
+ 'frequencyPenalty': 0,
+ 'stopSequences': [],
+ },
+ },
+} \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Vitalentum.py b/g4f/Provider/deprecated/Vitalentum.py
new file mode 100644
index 00000000..8f466a52
--- /dev/null
+++ b/g4f/Provider/deprecated/Vitalentum.py
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+
+class Vitalentum(AsyncGeneratorProvider):
+ url = "https://app.vitalentum.io"
+ supports_gpt_35_turbo = True
+
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+ "Accept": "text/event-stream",
+ "Accept-language": "de,en-US;q=0.7,en;q=0.3",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ }
+ conversation = json.dumps({"history": [{
+ "speaker": "human" if message["role"] == "user" else "bot",
+ "text": message["content"],
+ } for message in messages]})
+ data = {
+ "conversation": conversation,
+ "temperature": 0.7,
+ **kwargs
+ }
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ line = line.decode()
+ if line.startswith("data: "):
+ if line.startswith("data: [DONE]"):
+ break
+ line = json.loads(line[6:-1])
+ content = line["choices"][0]["delta"].get("content")
+
+ if content:
+ yield content \ No newline at end of file
diff --git a/g4f/Provider/deprecated/VoiGpt.py b/g4f/Provider/deprecated/VoiGpt.py
new file mode 100644
index 00000000..9b061e63
--- /dev/null
+++ b/g4f/Provider/deprecated/VoiGpt.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import json
+import requests
+from ..base_provider import AbstractProvider
+from ...typing import Messages, CreateResult
+
+
+class VoiGpt(AbstractProvider):
+ """
+ VoiGpt - A provider for VoiGpt.com
+
+ **Note** : to use this provider you have to get your csrf token/cookie from the voigpt.com website
+
+ Args:
+ model: The model to use
+ messages: The messages to send
+ stream: Whether to stream the response
+ proxy: The proxy to use
+ access_token: The access token to use
+ **kwargs: Additional keyword arguments
+
+ Returns:
+ A CreateResult object
+ """
+ url = "https://voigpt.com"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+ supports_stream = False
+ _access_token: str = None
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ access_token: str = None,
+ **kwargs
+ ) -> CreateResult:
+
+ if not model:
+ model = "gpt-3.5-turbo"
+ if not access_token:
+ access_token = cls._access_token
+ if not access_token:
+ headers = {
+ "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+ "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
+ "sec-ch-ua": "\"Google Chrome\";v=\"119\", \"Chromium\";v=\"119\", \"Not?A_Brand\";v=\"24\"",
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": "\"Linux\"",
+ "sec-fetch-dest": "document",
+ "sec-fetch-mode": "navigate",
+ "sec-fetch-site": "none",
+ "sec-fetch-user": "?1",
+ "upgrade-insecure-requests": "1",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
+ }
+ req_response = requests.get(cls.url, headers=headers)
+ access_token = cls._access_token = req_response.cookies.get("csrftoken")
+
+ headers = {
+ "Accept-Encoding": "gzip, deflate, br",
+ "Accept-Language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
+ "Cookie": f"csrftoken={access_token};",
+ "Origin": "https://voigpt.com",
+ "Referer": "https://voigpt.com/",
+ "Sec-Ch-Ua": "'Google Chrome';v='119', 'Chromium';v='119', 'Not?A_Brand';v='24'",
+ "Sec-Ch-Ua-Mobile": "?0",
+ "Sec-Ch-Ua-Platform": "'Windows'",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
+ "X-Csrftoken": access_token,
+ }
+
+ payload = {
+ "messages": messages,
+ }
+ request_url = f"{cls.url}/generate_response/"
+ req_response = requests.post(request_url, headers=headers, json=payload)
+        try:
+            response = json.loads(req_response.text)["response"]
+        except (json.JSONDecodeError, KeyError):
+            raise RuntimeError(f"Response: {req_response.text}")
+        yield response
+
diff --git a/g4f/Provider/deprecated/Wewordle.py b/g4f/Provider/deprecated/Wewordle.py
new file mode 100644
index 00000000..c30887fb
--- /dev/null
+++ b/g4f/Provider/deprecated/Wewordle.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import random, string, time
+from aiohttp import ClientSession
+
+from ..base_provider import AsyncProvider
+
+
+class Wewordle(AsyncProvider):
+ url = "https://wewordle.org"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ **kwargs
+ ) -> str:
+
+ headers = {
+ "accept" : "*/*",
+ "pragma" : "no-cache",
+ "Content-Type" : "application/json",
+ "Connection" : "keep-alive"
+ }
+
+ _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
+ _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
+ _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
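+        # The "subscriber" block imitates a RevenueCat anonymous-user payload
+        # ("$RCAnonymousID:..."), which the Android endpoint apparently expects.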
+ data = {
+ "user" : _user_id,
+ "messages" : messages,
+ "subscriber": {
+ "originalPurchaseDate" : None,
+ "originalApplicationVersion" : None,
+ "allPurchaseDatesMillis" : {},
+ "entitlements" : {"active": {}, "all": {}},
+ "allPurchaseDates" : {},
+ "allExpirationDatesMillis" : {},
+ "allExpirationDates" : {},
+ "originalAppUserId" : f"$RCAnonymousID:{_app_id}",
+ "latestExpirationDate" : None,
+ "requestDate" : _request_date,
+ "latestExpirationDateMillis" : None,
+ "nonSubscriptionTransactions" : [],
+ "originalPurchaseDateMillis" : None,
+ "managementURL" : None,
+ "allPurchasedProductIdentifiers": [],
+ "firstSeen" : _request_date,
+ "activeSubscriptions" : [],
+ }
+ }
+
+
+ async with ClientSession(
+ headers=headers
+ ) as session:
+ async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
+ response.raise_for_status()
+ content = (await response.json())["message"]["content"]
+ if content:
+ return content \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Wuguokai.py b/g4f/Provider/deprecated/Wuguokai.py
new file mode 100644
index 00000000..f12d1bfe
--- /dev/null
+++ b/g4f/Provider/deprecated/Wuguokai.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import random
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider, format_prompt
+
+
+class Wuguokai(AbstractProvider):
+ url = 'https://chat.wuguokai.xyz'
+ supports_gpt_35_turbo = True
+ working = False
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = {
+ 'authority': 'ai-api.wuguokai.xyz',
+ 'accept': 'application/json, text/plain, */*',
+ 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.wuguokai.xyz',
+ 'referer': 'https://chat.wuguokai.xyz/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+ }
+        data = {
+ "prompt": format_prompt(messages),
+ "options": {},
+ "userId": f"#/chat/{random.randint(1,99999999)}",
+ "usingContext": True
+ }
+ response = requests.post(
+ "https://ai-api20.wuguokai.xyz/api/chat-process",
+ headers=headers,
+ timeout=3,
+ json=data,
+ proxies=kwargs.get('proxy', {}),
+ )
+ _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
+ if response.status_code != 200:
+ raise Exception(f"Error: {response.status_code} {response.reason}")
+ if len(_split) > 1:
+ yield _split[1].strip()
+ else:
+ yield _split[0].strip() \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Ylokh.py b/g4f/Provider/deprecated/Ylokh.py
new file mode 100644
index 00000000..dbff4602
--- /dev/null
+++ b/g4f/Provider/deprecated/Ylokh.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import json
+
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+
+class Ylokh(AsyncGeneratorProvider):
+ url = "https://chat.ylokh.xyz"
+ working = False
+ supports_message_history = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ proxy: str = None,
+ timeout: int = 120,
+ **kwargs
+ ) -> AsyncResult:
+ model = model if model else "gpt-3.5-turbo"
+ headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
+ data = {
+ "messages": messages,
+ "model": model,
+ "temperature": 1,
+ "presence_penalty": 0,
+ "top_p": 1,
+ "frequency_penalty": 0,
+ "allow_fallback": True,
+ "stream": stream,
+ **kwargs
+ }
+ async with StreamSession(
+ headers=headers,
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
+ async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
+ response.raise_for_status()
+ if stream:
+ async for line in response.iter_lines():
+ line = line.decode()
+ if line.startswith("data: "):
+ if line.startswith("data: [DONE]"):
+ break
+ line = json.loads(line[6:])
+ content = line["choices"][0]["delta"].get("content")
+ if content:
+ yield content
+ else:
+ chat = await response.json()
+ yield chat["choices"][0]["message"].get("content") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Yqcloud.py b/g4f/Provider/deprecated/Yqcloud.py
new file mode 100644
index 00000000..227f8995
--- /dev/null
+++ b/g4f/Provider/deprecated/Yqcloud.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import random
+from ...requests import StreamSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class Yqcloud(AsyncGeneratorProvider):
+ url = "https://chat9.yqcloud.top/"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ async def create_async_generator(
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ **kwargs,
+ ) -> AsyncResult:
+ async with StreamSession(
+ headers=_create_header(), proxies={"https": proxy}, timeout=timeout
+ ) as session:
+ payload = _create_payload(messages, **kwargs)
+ async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
+ response.raise_for_status()
+ async for chunk in response.iter_content():
+ if chunk:
+ chunk = chunk.decode()
+ if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
+ raise RuntimeError("IP address is blocked by abuse detection.")
+ yield chunk
+
+
+def _create_header():
+ return {
+ "accept" : "application/json, text/plain, */*",
+ "content-type" : "application/json",
+ "origin" : "https://chat9.yqcloud.top",
+ "referer" : "https://chat9.yqcloud.top/"
+ }
+
+
+def _create_payload(
+ messages: Messages,
+ system_message: str = "",
+ user_id: int = None,
+ **kwargs
+):
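+    # The fallback is a random 13-digit number in a range that resembles
+    # millisecond timestamps, apparently matching ids the site generates.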
+ if not user_id:
+ user_id = random.randint(1690000544336, 2093025544336)
+ return {
+ "prompt": format_prompt(messages),
+ "network": True,
+ "system": system_message,
+ "withoutContext": False,
+ "stream": True,
+ "userId": f"#/chat/{user_id}"
+ }
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
new file mode 100644
index 00000000..bf923f2a
--- /dev/null
+++ b/g4f/Provider/deprecated/__init__.py
@@ -0,0 +1,35 @@
+from .AiService import AiService
+from .CodeLinkAva import CodeLinkAva
+from .DfeHub import DfeHub
+from .EasyChat import EasyChat
+from .Forefront import Forefront
+from .GetGpt import GetGpt
+from .Lockchat import Lockchat
+from .Wewordle import Wewordle
+from .Equing import Equing
+from .Wuguokai import Wuguokai
+from .V50 import V50
+from .FastGpt import FastGpt
+from .Aivvm import Aivvm
+from .Vitalentum import Vitalentum
+from .H2o import H2o
+from .Myshell import Myshell
+from .Acytoo import Acytoo
+from .Aibn import Aibn
+from .Ails import Ails
+from .ChatgptDuo import ChatgptDuo
+from .Cromicle import Cromicle
+from .Opchatgpts import Opchatgpts
+from .Yqcloud import Yqcloud
+from .Aichat import Aichat
+from .Berlin import Berlin
+from .Phind import Phind
+from .AiAsk import AiAsk
+from ..AiChatOnline import AiChatOnline
+from .ChatAnywhere import ChatAnywhere
+from .FakeGpt import FakeGpt
+from .GeekGpt import GeekGpt
+from .GPTalk import GPTalk
+from .Hashnode import Hashnode
+from .Ylokh import Ylokh
+from .OpenAssistant import OpenAssistant
+from .NoowAi import NoowAi
+from .Vercel import Vercel
+from .VoiGpt import VoiGpt \ No newline at end of file