path: root/g4f/Provider
author     MIDORIBIN <aquarion123@gmail.com>  2023-07-28 12:07:17 +0200
committer  MIDORIBIN <aquarion123@gmail.com>  2023-08-14 04:46:32 +0200
commit     f6ef3cb2237d8c336e915ef77ddbe6f37934c4fd (patch)
tree       c8bc44917ea03909cf586140f984ff0814bc30ea /g4f/Provider
parent     ~ | small fixes & new pypi version | v-0.0.1.9 (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/AItianhu.py  54
-rw-r--r--  g4f/Provider/Acytoo.py  47
-rw-r--r--  g4f/Provider/AiService.py  36
-rw-r--r--  g4f/Provider/Aichat.py  55
-rw-r--r--  g4f/Provider/Ails.py  116
-rw-r--r--  g4f/Provider/Bard.py  110
-rw-r--r--  g4f/Provider/Bing.py  362
-rw-r--r--  g4f/Provider/ChatgptAi.py  61
-rw-r--r--  g4f/Provider/ChatgptLogin.py  133
-rw-r--r--  g4f/Provider/DeepAi.py  60
-rw-r--r--  g4f/Provider/DfeHub.py  78
-rw-r--r--  g4f/Provider/EasyChat.py  83
-rw-r--r--  g4f/Provider/Forefront.py  41
-rw-r--r--  g4f/Provider/GetGpt.py  87
-rw-r--r--  g4f/Provider/H2o.py  98
-rw-r--r--  g4f/Provider/Liaobots.py  73
-rw-r--r--  g4f/Provider/Lockchat.py  66
-rw-r--r--  g4f/Provider/Opchatgpts.py  62
-rw-r--r--  g4f/Provider/Provider.py  16
-rw-r--r--  g4f/Provider/Providers/AItianhu.py  38
-rw-r--r--  g4f/Provider/Providers/Acytoo.py  42
-rw-r--r--  g4f/Provider/Providers/AiService.py  41
-rw-r--r--  g4f/Provider/Providers/Aichat.py  45
-rw-r--r--  g4f/Provider/Providers/Ails.py  95
-rw-r--r--  g4f/Provider/Providers/Bard.py  76
-rw-r--r--  g4f/Provider/Providers/Bing.py  352
-rw-r--r--  g4f/Provider/Providers/BingHuan.py  28
-rw-r--r--  g4f/Provider/Providers/ChatgptAi.py  52
-rw-r--r--  g4f/Provider/Providers/ChatgptLogin.py  96
-rw-r--r--  g4f/Provider/Providers/DeepAi.py  74
-rw-r--r--  g4f/Provider/Providers/DfeHub.py  56
-rw-r--r--  g4f/Provider/Providers/EasyChat.py  59
-rw-r--r--  g4f/Provider/Providers/Forefront.py  32
-rw-r--r--  g4f/Provider/Providers/GetGpt.py  59
-rw-r--r--  g4f/Provider/Providers/H2o.py  94
-rw-r--r--  g4f/Provider/Providers/Liaobots.py  53
-rw-r--r--  g4f/Provider/Providers/Lockchat.py  33
-rw-r--r--  g4f/Provider/Providers/Theb.py  29
-rw-r--r--  g4f/Provider/Providers/Vercel.py  170
-rw-r--r--  g4f/Provider/Providers/Wewordle.py  73
-rw-r--r--  g4f/Provider/Providers/You.py  25
-rw-r--r--  g4f/Provider/Providers/Yqcloud.py  38
-rw-r--r--  g4f/Provider/Providers/__init__.py  0
-rw-r--r--  g4f/Provider/Providers/helpers/binghuan.py  221
-rw-r--r--  g4f/Provider/Providers/helpers/theb.py  48
-rw-r--r--  g4f/Provider/Providers/helpers/you.py  79
-rw-r--r--  g4f/Provider/Providers/opchatgpts.py  42
-rw-r--r--  g4f/Provider/Raycast.py  56
-rw-r--r--  g4f/Provider/Theb.py  39
-rw-r--r--  g4f/Provider/Vercel.py  337
-rw-r--r--  g4f/Provider/Wewordle.py  75
-rw-r--r--  g4f/Provider/You.py  59
-rw-r--r--  g4f/Provider/Yqcloud.py  44
-rw-r--r--  g4f/Provider/__init__.py  81
-rw-r--r--  g4f/Provider/base_provider.py  33
55 files changed, 2317 insertions, 2095 deletions
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
new file mode 100644
index 00000000..e8e5714a
--- /dev/null
+++ b/g4f/Provider/AItianhu.py
@@ -0,0 +1,54 @@
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class AItianhu(BaseProvider):
+ url = "https://www.aitianhu.com/api/chat-process"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ base = ""
+ for message in messages:
+ base += "%s: %s\n" % (message["role"], message["content"])
+ base += "assistant:"
+
+ headers = {
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+ }
+ data: dict[str, Any] = {
+ "prompt": base,
+ "options": {},
+ "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+ "temperature": kwargs.get("temperature", 0.8),
+ "top_p": kwargs.get("top_p", 1),
+ }
+ url = "https://www.aitianhu.com/api/chat-process"
+ response = requests.post(url, headers=headers, json=data)
+ response.raise_for_status()
+ lines = response.text.strip().split("\n")
+ res = json.loads(lines[-1])
+ yield res["text"]
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("top_p", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
new file mode 100644
index 00000000..32c67c0c
--- /dev/null
+++ b/g4f/Provider/Acytoo.py
@@ -0,0 +1,47 @@
+import time
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Acytoo(BaseProvider):
+ url = "https://chat.acytoo.com/api/completions"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = _create_header()
+ payload = _create_payload(messages)
+
+ url = "https://chat.acytoo.com/api/completions"
+ response = requests.post(url=url, headers=headers, json=payload)
+ response.raise_for_status()
+ yield response.text
+
+
+def _create_header():
+ return {
+ "accept": "*/*",
+ "content-type": "application/json",
+ }
+
+
+def _create_payload(messages: list[dict[str, str]]):
+ payload_messages = [
+ message | {"createdAt": int(time.time()) * 1000} for message in messages
+ ]
+ return {
+ "key": "",
+ "model": "gpt-3.5-turbo",
+ "messages": payload_messages,
+ "temperature": 1,
+ "password": "",
+ }
diff --git a/g4f/Provider/AiService.py b/g4f/Provider/AiService.py
new file mode 100644
index 00000000..2c0d5de2
--- /dev/null
+++ b/g4f/Provider/AiService.py
@@ -0,0 +1,36 @@
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class AiService(BaseProvider):
+ url = "https://aiservice.vercel.app/api/chat/answer"
+ working = False
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ base = ""
+ for message in messages:
+ base += "%s: %s\n" % (message["role"], message["content"])
+ base += "assistant:"
+
+ headers = {
+ "accept": "*/*",
+ "content-type": "text/plain;charset=UTF-8",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "Referer": "https://aiservice.vercel.app/chat",
+ }
+ data = {"input": base}
+ url = "https://aiservice.vercel.app/api/chat/answer"
+ response = requests.post(url, headers=headers, json=data)
+ response.raise_for_status()
+ yield response.json()["data"]
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
new file mode 100644
index 00000000..6992d071
--- /dev/null
+++ b/g4f/Provider/Aichat.py
@@ -0,0 +1,55 @@
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Aichat(BaseProvider):
+ url = "https://chat-gpt.org/chat"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ base = ""
+
+ for message in messages:
+ base += "%s: %s\n" % (message["role"], message["content"])
+ base += "assistant:"
+
+ headers = {
+ "authority": "chat-gpt.org",
+ "accept": "*/*",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": "https://chat-gpt.org",
+ "pragma": "no-cache",
+ "referer": "https://chat-gpt.org/chat",
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"macOS"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
+ }
+
+ json_data = {
+ "message": base,
+ "temperature": 1,
+ "presence_penalty": 0,
+ "top_p": 1,
+ "frequency_penalty": 0,
+ }
+
+ response = requests.post(
+ "https://chat-gpt.org/api/text",
+ headers=headers,
+ json=json_data,
+ )
+ response.raise_for_status()
+ yield response.json()["message"]
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py
new file mode 100644
index 00000000..52b3745d
--- /dev/null
+++ b/g4f/Provider/Ails.py
@@ -0,0 +1,116 @@
+import hashlib
+import json
+import time
+import uuid
+from datetime import datetime
+
+import requests
+
+from ..typing import SHA256, Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Ails(BaseProvider):
+ url: str = "https://ai.ls"
+ working = True
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = {
+ "authority": "api.caipacity.com",
+ "accept": "*/*",
+ "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "authorization": "Bearer free",
+ "client-id": str(uuid.uuid4()),
+ "client-v": _get_client_v(),
+ "content-type": "application/json",
+ "origin": "https://ai.ls",
+ "referer": "https://ai.ls/",
+ "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Windows"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "cross-site",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ }
+
+ timestamp = _format_timestamp(int(time.time() * 1000))
+ sig = {
+ "d": datetime.now().strftime("%Y-%m-%d"),
+ "t": timestamp,
+ "s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
+ }
+
+ json_data = json.dumps(
+ separators=(",", ":"),
+ obj={
+ "model": "gpt-3.5-turbo",
+ "temperature": kwargs.get("temperature", 0.6),
+ "stream": True,
+ "messages": messages,
+ }
+ | sig,
+ )
+
+ response = requests.post(
+ "https://api.caipacity.com/v1/chat/completions",
+ headers=headers,
+ data=json_data,
+ stream=True,
+ )
+ response.raise_for_status()
+
+ for token in response.iter_lines():
+ if b"content" in token:
+ completion_chunk = json.loads(token.decode().replace("data: ", ""))
+ token = completion_chunk["choices"][0]["delta"].get("content")
+                if token is not None:
+ yield token
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def _hash(json_data: dict[str, str]) -> SHA256:
+ base_string: str = "%s:%s:%s:%s" % (
+ json_data["t"],
+ json_data["m"],
+ "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf",
+ len(json_data["m"]),
+ )
+
+ return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
+
+
+def _format_timestamp(timestamp: int) -> str:
+ e = timestamp
+ n = e % 10
+ r = n + 1 if n % 2 == 0 else n
+ return str(e - n + r)
+
+
+def _get_client_v():
+ response = requests.get("https://ai.ls/?chat=1")
+ response.raise_for_status()
+ js_path = response.text.split('crossorigin href="')[1].split('"')[0]
+
+ response = requests.get("https://ai.ls" + js_path)
+ response.raise_for_status()
+ return response.text.split('G4="')[1].split('"')[0]
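The three helpers above implement ai.ls's request signing: the millisecond timestamp is nudged to an odd final digit, then hashed together with the last message and a hard-coded salt. A standalone sketch of how the "s" field is derived, assuming the helpers above are in scope:

    import time

    timestamp = _format_timestamp(int(time.time() * 1000))
    # SHA-256 hex digest of
    # "<timestamp>:Hello:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:5"  (5 = len("Hello"))
    signature = _hash({"t": timestamp, "m": "Hello"})
    sig = {"t": timestamp, "s": signature}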
diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py
new file mode 100644
index 00000000..cc8ea055
--- /dev/null
+++ b/g4f/Provider/Bard.py
@@ -0,0 +1,110 @@
+import json
+import random
+import re
+
+import browser_cookie3
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Bard(BaseProvider):
+ url = "https://bard.google.com"
+ needs_auth = True
+ working = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ psid = {
+ cookie.name: cookie.value
+ for cookie in browser_cookie3.chrome(domain_name=".google.com")
+ }["__Secure-1PSID"]
+
+ formatted = "\n".join(
+ ["%s: %s" % (message["role"], message["content"]) for message in messages]
+ )
+ prompt = f"{formatted}\nAssistant:"
+
+        proxy = kwargs.get("proxy", False)
+        if not proxy:
+            print(
+                "Warning: no proxy was provided. Google Bard is blocked in many countries, so the request may fail."
+            )
+
+ snlm0e = None
+ conversation_id = None
+ response_id = None
+ choice_id = None
+
+ client = requests.Session()
+ client.proxies = (
+ {"http": f"http://{proxy}", "https": f"http://{proxy}"} if proxy else {}
+ )
+
+ client.headers = {
+ "authority": "bard.google.com",
+ "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
+ "origin": "https://bard.google.com",
+ "referer": "https://bard.google.com/",
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
+ "x-same-domain": "1",
+ "cookie": f"__Secure-1PSID={psid}",
+ }
+
+        # Fetch the SNlM0e token from the homepage on the first request.
+        if snlm0e is None:
+ result = re.search(
+ r"SNlM0e\":\"(.*?)\"", client.get("https://bard.google.com/").text
+ )
+ if result is not None:
+ snlm0e = result.group(1)
+
+ params = {
+ "bl": "boq_assistant-bard-web-server_20230326.21_p0",
+ "_reqid": random.randint(1111, 9999),
+ "rt": "c",
+ }
+
+ data = {
+ "at": snlm0e,
+ "f.req": json.dumps(
+ [
+ None,
+ json.dumps(
+ [[prompt], None, [conversation_id, response_id, choice_id]]
+ ),
+ ]
+ ),
+ }
+
+ intents = ".".join(["assistant", "lamda", "BardFrontendService"])
+
+ response = client.post(
+ f"https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate",
+ data=data,
+ params=params,
+ )
+ response.raise_for_status()
+
+ chat_data = json.loads(response.content.splitlines()[3])[0][2]
+ if chat_data:
+ json_chat_data = json.loads(chat_data)
+
+ yield json_chat_data[0][0]
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
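Bard wraps the prompt in a doubly JSON-encoded "f.req" field, and the reply comes back the same way: the fourth line of the response body is a JSON array whose [0][2] element is itself a JSON string. A sketch of that unwrapping against a fabricated miniature response (the real payload carries many more fields):

    import json

    inner = json.dumps([["Hi there!"]])
    raw = b")]}'\n\n12\n" + json.dumps([[None, None, inner]]).encode()

    chat_data = json.loads(raw.splitlines()[3])[0][2]
    print(json.loads(chat_data)[0][0])  # -> Hi there!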
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
new file mode 100644
index 00000000..40e112a9
--- /dev/null
+++ b/g4f/Provider/Bing.py
@@ -0,0 +1,362 @@
+import asyncio
+import json
+import os
+import random
+import ssl
+import uuid
+
+import aiohttp
+import certifi
+import requests
+
+from ..typing import Any, AsyncGenerator, CreateResult, Tuple
+from .base_provider import BaseProvider
+
+
+class Bing(BaseProvider):
+ url = "https://bing.com/chat"
+ supports_gpt_4 = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ if len(messages) < 2:
+ prompt = messages[0]["content"]
+ context = False
+
+ else:
+ prompt = messages[-1]["content"]
+ context = convert(messages[:-1])
+
+ response = run(stream_generate(prompt, jailbreak, context))
+ for token in response:
+ yield token
+
+
+def convert(messages: list[dict[str, str]]):
+ context = ""
+
+ for message in messages:
+ context += "[%s](#message)\n%s\n\n" % (message["role"], message["content"])
+
+ return context
+
+
+jailbreak = {
+ "optionsSets": [
+ "saharasugg",
+ "enablenewsfc",
+ "clgalileo",
+ "gencontentv3",
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+ "h3precise"
+ # "harmonyv3",
+ "dtappid",
+ "cricinfo",
+ "cricinfov2",
+ "dv3sugg",
+ "nojbfedge",
+ ]
+}
+
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+def _format(msg: dict[str, Any]) -> str:
+ return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
+
+
+async def stream_generate(
+ prompt: str,
+ mode: dict[str, list[str]] = jailbreak,
+ context: bool | str = False,
+):
+ timeout = aiohttp.ClientTimeout(total=900)
+ session = aiohttp.ClientSession(timeout=timeout)
+
+ conversationId, clientId, conversationSignature = await create_conversation()
+
+ wss = await session.ws_connect(
+ "wss://sydney.bing.com/sydney/ChatHub",
+ ssl=ssl_context,
+ autoping=False,
+ headers={
+ "accept": "application/json",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
+ "sec-ch-ua-arch": '"x86"',
+ "sec-ch-ua-bitness": '"64"',
+ "sec-ch-ua-full-version": '"109.0.1518.78"',
+ "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-model": "",
+ "sec-ch-ua-platform": '"Windows"',
+ "sec-ch-ua-platform-version": '"15.0.0"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "x-ms-client-request-id": str(uuid.uuid4()),
+ "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
+ "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
+ "Referrer-Policy": "origin-when-cross-origin",
+ "x-forwarded-for": Defaults.ip_address,
+ },
+ )
+
+ await wss.send_str(_format({"protocol": "json", "version": 1}))
+ await wss.receive(timeout=900)
+
+ argument: dict[str, Any] = {
+ **mode,
+ "source": "cib",
+ "allowedMessageTypes": Defaults.allowedMessageTypes,
+ "sliceIds": Defaults.sliceIds,
+ "traceId": os.urandom(16).hex(),
+ "isStartOfSession": True,
+ "message": Defaults.location
+ | {
+ "author": "user",
+ "inputMethod": "Keyboard",
+ "text": prompt,
+ "messageType": "Chat",
+ },
+ "conversationSignature": conversationSignature,
+ "participant": {"id": clientId},
+ "conversationId": conversationId,
+ }
+
+ if context:
+ argument["previousMessages"] = [
+ {
+ "author": "user",
+ "description": context,
+ "contextType": "WebPage",
+ "messageType": "Context",
+ "messageId": "discover-web--page-ping-mriduna-----",
+ }
+ ]
+
+ struct: dict[str, list[dict[str, Any]] | str | int] = {
+ "arguments": [argument],
+ "invocationId": "0",
+ "target": "chat",
+ "type": 4,
+ }
+
+ await wss.send_str(_format(struct))
+
+ final = False
+ draw = False
+ resp_txt = ""
+ result_text = ""
+ resp_txt_no_link = ""
+ cache_text = ""
+
+ while not final:
+ msg = await wss.receive(timeout=900)
+ objects = msg.data.split(Defaults.delimiter) # type: ignore
+
+ for obj in objects: # type: ignore
+ if obj is None or not obj:
+ continue
+
+ response = json.loads(obj) # type: ignore
+ if response.get("type") == 1 and response["arguments"][0].get(
+ "messages",
+ ):
+ if not draw:
+ if (
+ response["arguments"][0]["messages"][0]["contentOrigin"]
+ != "Apology"
+ ) and not draw:
+ resp_txt = result_text + response["arguments"][0]["messages"][
+ 0
+ ]["adaptiveCards"][0]["body"][0].get("text", "")
+ resp_txt_no_link = result_text + response["arguments"][0][
+ "messages"
+ ][0].get("text", "")
+
+ if response["arguments"][0]["messages"][0].get(
+ "messageType",
+ ):
+ resp_txt = (
+ resp_txt
+ + response["arguments"][0]["messages"][0][
+ "adaptiveCards"
+ ][0]["body"][0]["inlines"][0].get("text")
+ + "\n"
+ )
+ result_text = (
+ result_text
+ + response["arguments"][0]["messages"][0][
+ "adaptiveCards"
+ ][0]["body"][0]["inlines"][0].get("text")
+ + "\n"
+ )
+
+ if cache_text.endswith(" "):
+ final = True
+ if wss and not wss.closed:
+ await wss.close()
+ if session and not session.closed:
+ await session.close()
+
+ yield (resp_txt.replace(cache_text, ""))
+ cache_text = resp_txt
+
+ elif response.get("type") == 2:
+ if response["item"]["result"].get("error"):
+ if wss and not wss.closed:
+ await wss.close()
+ if session and not session.closed:
+ await session.close()
+
+ raise Exception(
+ f"{response['item']['result']['value']}: {response['item']['result']['message']}"
+ )
+
+ if draw:
+ cache = response["item"]["messages"][1]["adaptiveCards"][0]["body"][
+ 0
+ ]["text"]
+ response["item"]["messages"][1]["adaptiveCards"][0]["body"][0][
+ "text"
+ ] = (cache + resp_txt)
+
+ if (
+ response["item"]["messages"][-1]["contentOrigin"] == "Apology"
+ and resp_txt
+ ):
+ response["item"]["messages"][-1]["text"] = resp_txt_no_link
+ response["item"]["messages"][-1]["adaptiveCards"][0]["body"][0][
+ "text"
+ ] = resp_txt
+
+ # print('Preserved the message from being deleted', file=sys.stderr)
+
+ final = True
+ if wss and not wss.closed:
+ await wss.close()
+ if session and not session.closed:
+ await session.close()
+
+
+async def create_conversation() -> Tuple[str, str, str]:
+ create = requests.get(
+ "https://www.bing.com/turing/conversation/create",
+ headers={
+ "authority": "edgeservices.bing.com",
+ "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "max-age=0",
+ "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+ "sec-ch-ua-arch": '"x86"',
+ "sec-ch-ua-bitness": '"64"',
+ "sec-ch-ua-full-version": '"110.0.1587.69"',
+ "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-model": '""',
+ "sec-ch-ua-platform": '"Windows"',
+ "sec-ch-ua-platform-version": '"15.0.0"',
+ "sec-fetch-dest": "document",
+ "sec-fetch-mode": "navigate",
+ "sec-fetch-site": "none",
+ "sec-fetch-user": "?1",
+ "upgrade-insecure-requests": "1",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
+ "x-edge-shopping-flag": "1",
+ "x-forwarded-for": Defaults.ip_address,
+ },
+ )
+
+    # Parse the JSON body once instead of re-parsing it for every field.
+    body = create.json()
+    conversationId = body.get("conversationId")
+    clientId = body.get("clientId")
+    conversationSignature = body.get("conversationSignature")
+
+ if not conversationId or not clientId or not conversationSignature:
+ raise Exception("Failed to create conversation.")
+
+ return conversationId, clientId, conversationSignature
+
+
+class Defaults:
+ delimiter = "\x1e"
+ ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
+
+ allowedMessageTypes = [
+ "Chat",
+ "Disengaged",
+ "AdsQuery",
+ "SemanticSerp",
+ "GenerateContentQuery",
+ "SearchQuery",
+ "ActionRequest",
+ "Context",
+ "Progress",
+ "AdsQuery",
+ "SemanticSerp",
+ ]
+
+ sliceIds = [
+ # "222dtappid",
+ # "225cricinfo",
+ # "224locals0"
+ "winmuid3tf",
+ "osbsdusgreccf",
+ "ttstmout",
+ "crchatrev",
+ "winlongmsgtf",
+ "ctrlworkpay",
+ "norespwtf",
+ "tempcacheread",
+ "temptacache",
+ "505scss0",
+ "508jbcars0",
+ "515enbotdets0",
+ "5082tsports",
+ "515vaoprvs",
+ "424dagslnv1s0",
+ "kcimgattcf",
+ "427startpms0",
+ ]
+
+ location = {
+ "locale": "en-US",
+ "market": "en-US",
+ "region": "US",
+ "locationHints": [
+ {
+ "country": "United States",
+ "state": "California",
+ "city": "Los Angeles",
+ "timezoneoffset": 8,
+ "countryConfidence": 8,
+ "Center": {"Latitude": 34.0536909, "Longitude": -118.242766},
+ "RegionType": 2,
+ "SourceType": 1,
+ }
+ ],
+ }
+
+
+def run(generator: AsyncGenerator[Any | str, Any]):
+ loop = asyncio.get_event_loop()
+ gen = generator.__aiter__()
+
+ while True:
+ try:
+ yield loop.run_until_complete(gen.__anext__())
+
+ except StopAsyncIteration:
+ break
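run() is the bridge that lets the synchronous create_completion generator drive the async websocket stream: it pumps __anext__ on the event loop until the generator is exhausted. A self-contained illustration with a dummy async generator, assuming run() above is in scope:

    import asyncio
    from typing import AsyncGenerator

    async def countdown() -> AsyncGenerator[int, None]:
        for i in (3, 2, 1):
            await asyncio.sleep(0)
            yield i

    # Consumed synchronously, exactly as Bing.create_completion
    # consumes stream_generate().
    for value in run(countdown()):
        print(value)  # 3, 2, 1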
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
new file mode 100644
index 00000000..53518f65
--- /dev/null
+++ b/g4f/Provider/ChatgptAi.py
@@ -0,0 +1,61 @@
+import re
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class ChatgptAi(BaseProvider):
+ url = "https://chatgpt.ai/gpt-4/"
+ working = True
+ supports_gpt_4 = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ chat = ""
+ for message in messages:
+ chat += "%s: %s\n" % (message["role"], message["content"])
+ chat += "assistant: "
+
+ response = requests.get("https://chatgpt.ai/")
+ nonce, post_id, _, bot_id = re.findall(
+ r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width',
+ response.text,
+ )[0]
+
+ headers = {
+ "authority": "chatgpt.ai",
+ "accept": "*/*",
+ "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "cache-control": "no-cache",
+ "origin": "https://chatgpt.ai",
+ "pragma": "no-cache",
+ "referer": "https://chatgpt.ai/gpt-4/",
+ "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Windows"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ }
+ data = {
+ "_wpnonce": nonce,
+ "post_id": post_id,
+ "url": "https://chatgpt.ai/gpt-4",
+ "action": "wpaicg_chat_shortcode_message",
+ "message": chat,
+ "bot_id": bot_id,
+ }
+
+ response = requests.post(
+ "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data
+ )
+ response.raise_for_status()
+ yield response.json()["data"]
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
new file mode 100644
index 00000000..05bd1262
--- /dev/null
+++ b/g4f/Provider/ChatgptLogin.py
@@ -0,0 +1,133 @@
+import base64
+import os
+import re
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class ChatgptLogin(BaseProvider):
+ url = "https://chatgptlogin.ac"
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = {
+ "authority": "chatgptlogin.ac",
+ "accept": "*/*",
+ "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "content-type": "application/json",
+ "origin": "https://chatgptlogin.ac",
+ "referer": "https://chatgptlogin.ac/use-chatgpt-free/",
+ "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Windows"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "x-wp-nonce": _get_nonce(),
+ }
+
+ conversation = _transform(messages)
+
+ json_data = {
+ "env": "chatbot",
+ "session": "N/A",
+ "prompt": "Converse as if you were an AI assistant. Be friendly, creative.",
+ "context": "Converse as if you were an AI assistant. Be friendly, creative.",
+ "messages": conversation,
+ "newMessage": messages[-1]["content"],
+ "userName": '<div class="mwai-name-text">User:</div>',
+ "aiName": '<div class="mwai-name-text">AI:</div>',
+ "model": "gpt-3.5-turbo",
+ "temperature": kwargs.get("temperature", 0.8),
+ "maxTokens": 1024,
+ "maxResults": 1,
+ "apiKey": "",
+ "service": "openai",
+ "embeddingsIndex": "",
+ "stop": "",
+ "clientId": os.urandom(6).hex(),
+ }
+
+ response = requests.post(
+ "https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat",
+ headers=headers,
+ json=json_data,
+ )
+ response.raise_for_status()
+ yield response.json()["reply"]
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def _get_nonce() -> str:
+ res = requests.get(
+ "https://chatgptlogin.ac/use-chatgpt-free/",
+ headers={
+ "Referer": "https://chatgptlogin.ac/use-chatgpt-free/",
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ },
+ )
+
+ result = re.search(
+ r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">',
+ res.text,
+ )
+ if result is None:
+ return ""
+
+ src = result.group(1)
+ decoded_string = base64.b64decode(src.split(",")[-1]).decode("utf-8")
+ result = re.search(r"let restNonce = '(.*?)';", decoded_string)
+
+ return "" if result is None else result.group(1)
+
+
+def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
+ return [
+ {
+ "id": os.urandom(6).hex(),
+ "role": message["role"],
+ "content": message["content"],
+ "who": "AI: " if message["role"] == "assistant" else "User: ",
+ "html": _html_encode(message["content"]),
+ }
+ for message in messages
+ ]
+
+
+def _html_encode(string: str) -> str:
+    # "&" must be escaped first, otherwise the "&" inside entities such as
+    # "&quot;" produced by the later replacements would be double-encoded.
+    table = {
+        "&": "&amp;",
+        '"': "&quot;",
+        "'": "&#39;",
+        ">": "&gt;",
+        "<": "&lt;",
+        "\n": "<br>",
+        "\t": "&nbsp;&nbsp;&nbsp;&nbsp;",
+        " ": "&nbsp;",
+    }
+
+    for key, value in table.items():
+        string = string.replace(key, value)
+
+    return string
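Because "&" is escaped before the other entities, _html_encode composes safely. A quick check, assuming the helper is importable:

    encoded = _html_encode('a < b & c\n"ok"')
    # -> a&nbsp;&lt;&nbsp;b&nbsp;&amp;&nbsp;c<br>&quot;ok&quot;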
diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py
new file mode 100644
index 00000000..01dc426d
--- /dev/null
+++ b/g4f/Provider/DeepAi.py
@@ -0,0 +1,60 @@
+import json
+
+import js2py
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class DeepAi(BaseProvider):
+ url = "https://deepai.org"
+ working = True
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ url = "https://api.deepai.org/make_me_a_pizza"
+ token_js = """
+var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
+var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
+h = Math.round(1E11 * Math.random()) + "";
+f = function () {
+ for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);
+
+ return function (t) {
+ var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
+ Z = [],
+ A = unescape(encodeURI(t)) + "\u0080",
+ z = A.length;
+ t = --z / 4 + 2 | 15;
+ for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
+ for (q = A = 0; q < t; q += 16) {
+ for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
+ for (A = 4; A;) ea[--A] += z[A]
+ }
+ for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
+ return t.split("").reverse().join("")
+ }
+}();
+
+"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
+"""
+
+ payload = {"chas_style": "chat", "chatHistory": json.dumps(messages)}
+ api_key = js2py.eval_js(token_js)
+ headers = {
+ "api-key": api_key,
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
+ }
+
+        response = requests.post(url, headers=headers, data=payload, stream=True)
+        # Check the status once up front rather than on every chunk.
+        response.raise_for_status()
+        for chunk in response.iter_content(chunk_size=None):
+            yield chunk.decode()
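The embedded JavaScript is an MD5 routine whose hex digest is reversed; the api-key is "tryit-<random>-<triple MD5 chain over the user agent>". A hedged pure-Python equivalent that would drop the js2py dependency, assuming the JS block is a faithful MD5 implementation (its structure strongly suggests so):

    import hashlib
    import random

    def _md5_reversed(text: str) -> str:
        # Mirrors f() in the JS above: MD5 hex digest, reversed.
        return hashlib.md5(text.encode()).hexdigest()[::-1]

    def _deepai_key(agent: str) -> str:
        h = str(random.randint(0, 10**11))  # JS: Math.round(1E11 * Math.random())
        return "tryit-%s-%s" % (
            h,
            _md5_reversed(agent + _md5_reversed(agent + _md5_reversed(agent + h + "x"))),
        )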
diff --git a/g4f/Provider/DfeHub.py b/g4f/Provider/DfeHub.py
new file mode 100644
index 00000000..4093d0e4
--- /dev/null
+++ b/g4f/Provider/DfeHub.py
@@ -0,0 +1,78 @@
+import json
+import re
+import time
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class DfeHub(BaseProvider):
+ url = "https://chat.dfehub.com/api/chat"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = {
+ "authority": "chat.dfehub.com",
+ "accept": "*/*",
+ "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "content-type": "application/json",
+ "origin": "https://chat.dfehub.com",
+ "referer": "https://chat.dfehub.com/",
+ "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"macOS"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "x-requested-with": "XMLHttpRequest",
+ }
+
+ json_data = {
+ "messages": messages,
+ "model": "gpt-3.5-turbo",
+ "temperature": kwargs.get("temperature", 0.5),
+ "presence_penalty": kwargs.get("presence_penalty", 0),
+ "frequency_penalty": kwargs.get("frequency_penalty", 0),
+ "top_p": kwargs.get("top_p", 1),
+ "stream": True,
+ }
+ response = requests.post(
+ "https://chat.dfehub.com/api/openai/v1/chat/completions",
+ headers=headers,
+ json=json_data,
+ )
+
+ for chunk in response.iter_lines():
+ if b"detail" in chunk:
+ delay = re.findall(r"\d+\.\d+", chunk.decode())
+ delay = float(delay[-1])
+ time.sleep(delay)
+ yield from DfeHub.create_completion(model, messages, stream, **kwargs)
+ if b"content" in chunk:
+ data = json.loads(chunk.decode().split("data: ")[1])
+ yield (data["choices"][0]["delta"]["content"])
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("presence_penalty", "int"),
+ ("frequency_penalty", "int"),
+ ("top_p", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
new file mode 100644
index 00000000..59c46ffa
--- /dev/null
+++ b/g4f/Provider/EasyChat.py
@@ -0,0 +1,83 @@
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class EasyChat(BaseProvider):
+ url = "https://free.easychat.work"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ active_servers = [
+ "https://chat10.fastgpt.me",
+ "https://chat9.fastgpt.me",
+ "https://chat1.fastgpt.me",
+ "https://chat2.fastgpt.me",
+ "https://chat3.fastgpt.me",
+ "https://chat4.fastgpt.me",
+ ]
+ server = active_servers[kwargs.get("active_server", 0)]
+ headers = {
+ "authority": f"{server}".replace("https://", ""),
+ "accept": "text/event-stream",
+ "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+ "content-type": "application/json",
+ "origin": f"{server}",
+ "referer": f"{server}/",
+ "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ "x-requested-with": "XMLHttpRequest",
+ }
+
+ json_data = {
+ "messages": messages,
+ "stream": stream,
+ "model": model,
+ "temperature": kwargs.get("temperature", 0.5),
+ "presence_penalty": kwargs.get("presence_penalty", 0),
+ "frequency_penalty": kwargs.get("frequency_penalty", 0),
+ "top_p": kwargs.get("top_p", 1),
+ }
+
+ session = requests.Session()
+ # init cookies from server
+ session.get(f"{server}/")
+
+ response = session.post(
+ f"{server}/api/openai/v1/chat/completions",
+ headers=headers,
+ json=json_data,
+ )
+
+ response.raise_for_status()
+ for chunk in response.iter_lines():
+ if b"content" in chunk:
+ data = json.loads(chunk.decode().split("data: ")[1])
+ yield data["choices"][0]["delta"]["content"]
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("presence_penalty", "int"),
+ ("frequency_penalty", "int"),
+ ("top_p", "int"),
+ ("active_server", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
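Because the mirror pool is indexed by the active_server kwarg, a caller can pin a specific server. A usage sketch under the same assumptions as the other providers:

    for chunk in EasyChat.create_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
        stream=True,
        active_server=2,  # index into active_servers -> https://chat1.fastgpt.me
    ):
        print(chunk, end="")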
diff --git a/g4f/Provider/Forefront.py b/g4f/Provider/Forefront.py
new file mode 100644
index 00000000..76f6c780
--- /dev/null
+++ b/g4f/Provider/Forefront.py
@@ -0,0 +1,41 @@
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Forefront(BaseProvider):
+ url = "https://forefront.com"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ json_data = {
+ "text": messages[-1]["content"],
+ "action": "noauth",
+ "id": "",
+ "parentId": "",
+ "workspaceId": "",
+ "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
+ "model": "gpt-4",
+ "messages": messages[:-1] if len(messages) > 1 else [],
+ "internetMode": "auto",
+ }
+
+ response = requests.post(
+ "https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
+ json=json_data,
+ stream=True,
+ )
+ response.raise_for_status()
+ for token in response.iter_lines():
+ if b"delta" in token:
+ yield json.loads(token.decode().split("data: ")[1])["delta"]
diff --git a/g4f/Provider/GetGpt.py b/g4f/Provider/GetGpt.py
new file mode 100644
index 00000000..fb581ecb
--- /dev/null
+++ b/g4f/Provider/GetGpt.py
@@ -0,0 +1,87 @@
+import json
+import os
+import uuid
+
+import requests
+from Crypto.Cipher import AES
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class GetGpt(BaseProvider):
+ url = "https://chat.getgpt.world/"
+ supports_stream = True
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = {
+ "Content-Type": "application/json",
+ "Referer": "https://chat.getgpt.world/",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ }
+ data = json.dumps(
+ {
+ "messages": messages,
+ "frequency_penalty": kwargs.get("frequency_penalty", 0),
+ "max_tokens": kwargs.get("max_tokens", 4000),
+ "model": "gpt-3.5-turbo",
+ "presence_penalty": kwargs.get("presence_penalty", 0),
+ "temperature": kwargs.get("temperature", 1),
+ "top_p": kwargs.get("top_p", 1),
+ "stream": True,
+ "uuid": str(uuid.uuid4()),
+ }
+ )
+
+ res = requests.post(
+ "https://chat.getgpt.world/api/chat/stream",
+ headers=headers,
+ json={"signature": _encrypt(data)},
+ stream=True,
+ )
+
+ res.raise_for_status()
+ for line in res.iter_lines():
+ if b"content" in line:
+ line_json = json.loads(line.decode("utf-8").split("data: ")[1])
+ yield (line_json["choices"][0]["delta"]["content"])
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("presence_penalty", "int"),
+ ("frequency_penalty", "int"),
+ ("top_p", "int"),
+ ("max_tokens", "int"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def _encrypt(e: str):
+ t = os.urandom(8).hex().encode("utf-8")
+ n = os.urandom(8).hex().encode("utf-8")
+ r = e.encode("utf-8")
+ cipher = AES.new(t, AES.MODE_CBC, n)
+ ciphertext = cipher.encrypt(_pad_data(r))
+ return ciphertext.hex() + t.decode("utf-8") + n.decode("utf-8")
+
+
+def _pad_data(data: bytes) -> bytes:
+ block_size = AES.block_size
+ padding_size = block_size - len(data) % block_size
+ padding = bytes([padding_size] * padding_size)
+ return data + padding
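_pad_data is plain PKCS#7 padding, and _encrypt appends the hex-encoded AES key and IV (the last 32 characters of the signature) to the ciphertext so the receiving side can decrypt. A roundtrip sketch, illustrative only and assuming the helpers above are in scope:

    from Crypto.Cipher import AES

    signature = _encrypt('{"hello": "world"}')
    ciphertext, key, iv = signature[:-32], signature[-32:-16], signature[-16:]
    padded = AES.new(key.encode(), AES.MODE_CBC, iv.encode()).decrypt(
        bytes.fromhex(ciphertext)
    )
    print(padded[: -padded[-1]].decode())  # {"hello": "world"}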
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
new file mode 100644
index 00000000..c2492e59
--- /dev/null
+++ b/g4f/Provider/H2o.py
@@ -0,0 +1,98 @@
+import json
+import uuid
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class H2o(BaseProvider):
+ url = "https://gpt-gm.h2o.ai"
+ working = True
+ supports_stream = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ conversation = ""
+ for message in messages:
+ conversation += "%s: %s\n" % (message["role"], message["content"])
+ conversation += "assistant: "
+
+ session = requests.Session()
+
+ headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"}
+ data = {
+ "ethicsModalAccepted": "true",
+ "shareConversationsWithModelAuthors": "true",
+ "ethicsModalAcceptedAt": "",
+ "activeModel": model,
+ "searchEnabled": "true",
+ }
+ session.post(
+ "https://gpt-gm.h2o.ai/settings",
+ headers=headers,
+ data=data,
+ )
+
+ headers = {"Referer": "https://gpt-gm.h2o.ai/"}
+ data = {"model": model}
+
+ response = session.post(
+ "https://gpt-gm.h2o.ai/conversation",
+ headers=headers,
+ json=data,
+ )
+ conversation_id = response.json()["conversationId"]
+
+ data = {
+ "inputs": conversation,
+ "parameters": {
+ "temperature": kwargs.get("temperature", 0.4),
+ "truncate": kwargs.get("truncate", 2048),
+ "max_new_tokens": kwargs.get("max_new_tokens", 1024),
+ "do_sample": kwargs.get("do_sample", True),
+ "repetition_penalty": kwargs.get("repetition_penalty", 1.2),
+ "return_full_text": kwargs.get("return_full_text", False),
+ },
+ "stream": True,
+ "options": {
+ "id": kwargs.get("id", str(uuid.uuid4())),
+ "response_id": kwargs.get("response_id", str(uuid.uuid4())),
+ "is_retry": False,
+ "use_cache": False,
+ "web_search_id": "",
+ },
+ }
+
+ response = session.post(
+ f"https://gpt-gm.h2o.ai/conversation/{conversation_id}",
+ headers=headers,
+ json=data,
+ )
+ generated_text = response.text.replace("\n", "").split("data:")
+ generated_text = json.loads(generated_text[-1])
+
+ yield generated_text["generated_text"]
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ("truncate", "int"),
+ ("max_new_tokens", "int"),
+ ("do_sample", "bool"),
+ ("repetition_penalty", "float"),
+ ("return_full_text", "bool"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
new file mode 100644
index 00000000..a969b643
--- /dev/null
+++ b/g4f/Provider/Liaobots.py
@@ -0,0 +1,73 @@
+import uuid
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Liaobots(BaseProvider):
+ url = "https://liaobots.com"
+ supports_stream = True
+ needs_auth = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = {
+ "authority": "liaobots.com",
+ "content-type": "application/json",
+ "origin": "https://liaobots.com",
+ "referer": "https://liaobots.com/",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
+ "x-auth-code": str(kwargs.get("auth")),
+ }
+ models = {
+ "gpt-4": {
+ "id": "gpt-4",
+ "name": "GPT-4",
+ "maxLength": 24000,
+ "tokenLimit": 8000,
+ },
+ "gpt-3.5-turbo": {
+ "id": "gpt-3.5-turbo",
+ "name": "GPT-3.5",
+ "maxLength": 12000,
+ "tokenLimit": 4000,
+ },
+ }
+ json_data = {
+ "conversationId": str(uuid.uuid4()),
+ "model": models[model],
+ "messages": messages,
+ "key": "",
+ "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+ }
+
+ response = requests.post(
+ "https://liaobots.com/api/chat",
+ headers=headers,
+ json=json_data,
+ stream=True,
+ )
+ response.raise_for_status()
+ for token in response.iter_content(chunk_size=2046):
+ yield token.decode("utf-8")
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("auth", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Lockchat.py b/g4f/Provider/Lockchat.py
new file mode 100644
index 00000000..974d1331
--- /dev/null
+++ b/g4f/Provider/Lockchat.py
@@ -0,0 +1,66 @@
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Lockchat(BaseProvider):
+ url = "http://supertest.lockchat.app"
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ temperature = float(kwargs.get("temperature", 0.7))
+ payload = {
+ "temperature": temperature,
+ "messages": messages,
+ "model": model,
+ "stream": True,
+ }
+
+ headers = {
+ "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
+ }
+ response = requests.post(
+ "http://supertest.lockchat.app/v1/chat/completions",
+ json=payload,
+ headers=headers,
+ stream=True,
+ )
+ response.raise_for_status()
+ for token in response.iter_lines():
+ if b"The model: `gpt-4` does not exist" in token:
+ print("error, retrying...")
+                yield from Lockchat.create_completion(
+                    model=model,
+                    messages=messages,
+                    stream=stream,
+                    temperature=temperature,
+                    **kwargs,
+                )
+                return
+ if b"content" in token:
+ token = json.loads(token.decode("utf-8").split("data: ")[1])
+ token = token["choices"][0]["delta"].get("content")
+ if token:
+ yield (token)
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("temperature", "float"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Opchatgpts.py b/g4f/Provider/Opchatgpts.py
new file mode 100644
index 00000000..9daa0ed9
--- /dev/null
+++ b/g4f/Provider/Opchatgpts.py
@@ -0,0 +1,62 @@
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Opchatgpts(BaseProvider):
+ url = "https://opchatgpts.net"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ temperature = kwargs.get("temperature", 0.8)
+ max_tokens = kwargs.get("max_tokens", 1024)
+ system_prompt = kwargs.get(
+ "system_prompt",
+ "Converse as if you were an AI assistant. Be friendly, creative.",
+ )
+ payload = _create_payload(
+ messages=messages,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ system_prompt=system_prompt,
+ )
+
+ response = requests.post(
+ "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload
+ )
+ response.raise_for_status()
+ yield response.json()["reply"]
+
+
+def _create_payload(
+ messages: list[dict[str, str]],
+ temperature: float,
+ max_tokens: int,
+ system_prompt: str,
+):
+ return {
+ "env": "chatbot",
+ "session": "N/A",
+ "prompt": "\n",
+ "context": system_prompt,
+ "messages": messages,
+ "newMessage": messages[::-1][0]["content"],
+ "userName": '<div class="mwai-name-text">User:</div>',
+ "aiName": '<div class="mwai-name-text">AI:</div>',
+ "model": "gpt-3.5-turbo",
+ "temperature": temperature,
+ "maxTokens": max_tokens,
+ "maxResults": 1,
+ "apiKey": "",
+ "service": "openai",
+ "embeddingsIndex": "",
+ "stop": "",
+ }
diff --git a/g4f/Provider/Provider.py b/g4f/Provider/Provider.py
deleted file mode 100644
index f123becd..00000000
--- a/g4f/Provider/Provider.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import os
-from ..typing import sha256, Dict, get_type_hints
-
-url = None
-model = None
-supports_stream = False
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- return
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/AItianhu.py b/g4f/Provider/Providers/AItianhu.py
deleted file mode 100644
index 0bdaa09a..00000000
--- a/g4f/Provider/Providers/AItianhu.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os, requests
-from ...typing import sha256, Dict, get_type_hints
-import json
-
-url = "https://www.aitianhu.com/api/chat-process"
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-working = True
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
-
- headers = {
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
- }
- data = {
- "prompt": base,
- "options": {},
- "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
- "temperature": kwargs.get("temperature", 0.8),
- "top_p": kwargs.get("top_p", 1)
- }
- response = requests.post(url, headers=headers, json=data)
- if response.status_code == 200:
- lines = response.text.strip().split('\n')
- res = json.loads(lines[-1])
- yield res['text']
- else:
- print(f"Error Occurred::{response.status_code}")
- return None
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Acytoo.py b/g4f/Provider/Providers/Acytoo.py
deleted file mode 100644
index 4f40eac2..00000000
--- a/g4f/Provider/Providers/Acytoo.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os, requests
-from ...typing import sha256, Dict, get_type_hints
-import json
-
-url = "https://chat.acytoo.com/api/completions"
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
-
- headers = {
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
- }
- data = {
- "key": "",
- "model": "gpt-3.5-turbo",
- "messages": [
- {
- "role": "user",
- "content": base,
- "createdAt": 1688518523500
- }
- ],
- "temperature": 1,
- "password": ""
- }
-
- response = requests.post(url, headers=headers, data=json.dumps(data))
- if response.status_code == 200:
- yield response.text
- else:
- print(f"Error Occurred::{response.status_code}")
- return None
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/AiService.py b/g4f/Provider/Providers/AiService.py
deleted file mode 100644
index 0f9d9c47..00000000
--- a/g4f/Provider/Providers/AiService.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os,sys
-import requests
-from ...typing import get_type_hints
-
-url = "https://aiservice.vercel.app/api/chat/answer"
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-working = True
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
-
- headers = {
- "accept": "*/*",
- "content-type": "text/plain;charset=UTF-8",
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "Referer": "https://aiservice.vercel.app/chat",
- }
- data = {
- "input": base
- }
- response = requests.post(url, headers=headers, json=data)
- if response.status_code == 200:
- _json = response.json()
- yield _json['data']
- else:
- print(f"Error Occurred::{response.status_code}")
- return None
-
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
-        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Aichat.py b/g4f/Provider/Providers/Aichat.py
deleted file mode 100644
index 919486f2..00000000
--- a/g4f/Provider/Providers/Aichat.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import os, requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://chat-gpt.org/chat'
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-working = True
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
-
-
- headers = {
- 'authority': 'chat-gpt.org',
- 'accept': '*/*',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': 'https://chat-gpt.org',
- 'pragma': 'no-cache',
- 'referer': 'https://chat-gpt.org/chat',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'message':base,
- 'temperature': 1,
- 'presence_penalty': 0,
- 'top_p': 1,
- 'frequency_penalty': 0
- }
-
- response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
- yield response.json()['message']
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Ails.py b/g4f/Provider/Providers/Ails.py
deleted file mode 100644
index d681dadb..00000000
--- a/g4f/Provider/Providers/Ails.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import os
-import time
-import json
-import uuid
-import random
-import hashlib
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-from datetime import datetime
-
-url: str = 'https://ai.ls'
-model: str = 'gpt-3.5-turbo'
-supports_stream = True
-needs_auth = False
-working = True
-
-
-class Utils:
- def hash(json_data: Dict[str, str]) -> sha256:
-
- secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
- 35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
-
- base_string: str = '%s:%s:%s:%s' % (
- json_data['t'],
- json_data['m'],
- 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf',
- len(json_data['m'])
- )
-
- return hashlib.sha256(base_string.encode()).hexdigest()
-
- def format_timestamp(timestamp: int) -> str:
-
- e = timestamp
- n = e % 10
- r = n + 1 if n % 2 == 0 else n
- return str(e - n + r)
- def getV():
- crossref = requests.get("https://ai.ls"+ requests.get("https://ai.ls/?chat=1").text.split('crossorigin href="')[1].split('"')[0]).text.split('G4="')[1].split('"')[0]
- return crossref
-
-def _create_completion(model: str, messages: list, stream: bool = False, temperature: float = 0.6, **kwargs):
-
- headers = {
- 'authority': 'api.caipacity.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'authorization': 'Bearer free',
- 'client-id': str(uuid.uuid4()),
- 'client-v': Utils.getV(),
- 'content-type': 'application/json',
- 'origin': 'https://ai.ls',
- 'referer': 'https://ai.ls/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- params = {
- 'full': 'false',
- }
-
- timestamp = Utils.format_timestamp(int(time.time() * 1000))
-
- sig = {
- 'd': datetime.now().strftime('%Y-%m-%d'),
- 't': timestamp,
- 's': Utils.hash({
- 't': timestamp,
- 'm': messages[-1]['content']})}
-
- json_data = json.dumps(separators=(',', ':'), obj={
- 'model': 'gpt-3.5-turbo',
- 'temperature': temperature,
- 'stream': True,
- 'messages': messages} | sig)
-
- response = requests.post('https://api.caipacity.com/v1/chat/completions',
- headers=headers, data=json_data, stream=True)
-
- for token in response.iter_lines():
- if b'content' in token:
- completion_chunk = json.loads(token.decode().replace('data: ', ''))
- token = completion_chunk['choices'][0]['delta'].get('content')
- if token is not None:
- yield token
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/Bard.py b/g4f/Provider/Providers/Bard.py
deleted file mode 100644
index 0d007a10..00000000
--- a/g4f/Provider/Providers/Bard.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os, requests, json, browser_cookie3, re, random
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://bard.google.com'
-model = ['Palm2']
-supports_stream = False
-needs_auth = True
-working = True
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
- domain_name='.google.com')}['__Secure-1PSID']
-
- formatted = '\n'.join([
- '%s: %s' % (message['role'], message['content']) for message in messages
- ])
- prompt = f'{formatted}\nAssistant:'
-
- proxy = kwargs.get('proxy', False)
- if not proxy:
- print('Warning: no proxy was given. Google Bard is blocked in many countries, so the request may fail.')
-
- conversation_id = None
- response_id = None
- choice_id = None
-
- client = requests.Session()
- client.proxies = {
- 'http': f'http://{proxy}',
- 'https': f'http://{proxy}'} if proxy else None
-
- client.headers = {
- 'authority': 'bard.google.com',
- 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
- 'origin': 'https://bard.google.com',
- 'referer': 'https://bard.google.com/',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
- 'x-same-domain': '1',
- 'cookie': f'__Secure-1PSID={psid}'
- }
-
- # SNlM0e is the anti-CSRF token embedded in the Bard homepage; it must be echoed back as the 'at' field.
- snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
- client.get('https://bard.google.com/').text).group(1)
-
- params = {
- 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
- '_reqid': random.randint(1111, 9999),
- 'rt': 'c'
- }
-
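- # f.req is Bard's batch-RPC envelope: a doubly JSON-encoded array carrying the prompt
- # plus the (conversation, response, choice) id triple used to continue a session.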
- data = {
- 'at': snlm0e,
- 'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
-
- intents = '.'.join([
- 'assistant',
- 'lamda',
- 'BardFrontendService'
- ])
-
- response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
- data=data, params=params)
-
- chat_data = json.loads(response.content.splitlines()[3])[0][2]
- if chat_data:
- json_chat_data = json.loads(chat_data)
-
- yield json_chat_data[0][0]
-
- else:
- yield 'error'
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Bing.py b/g4f/Provider/Providers/Bing.py
deleted file mode 100644
index 5e290f91..00000000
--- a/g4f/Provider/Providers/Bing.py
+++ /dev/null
@@ -1,352 +0,0 @@
-import os
-import json
-import random
-import uuid
-import ssl
-import certifi
-import aiohttp
-import asyncio
-
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://bing.com/chat'
-model = ['gpt-4']
-supports_stream = True
-needs_auth = False
-working = True
-
-ssl_context = ssl.create_default_context()
-ssl_context.load_verify_locations(certifi.where())
-
-
-class optionsSets:
- optionSet: dict = {
- 'tone': str,
- 'optionsSets': list
- }
-
- jailbreak: dict = {
- "optionsSets": [
- 'saharasugg',
- 'enablenewsfc',
- 'clgalileo',
- 'gencontentv3',
- "nlu_direct_response_filter",
- "deepleo",
- "disable_emoji_spoken_text",
- "responsible_ai_policy_235",
- "enablemm",
- "h3precise"
- # "harmonyv3",
- "dtappid",
- "cricinfo",
- "cricinfov2",
- "dv3sugg",
- "nojbfedge"
- ]
- }
-
-
-class Defaults:
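- # delimiter: ASCII record separator (0x1e) used to frame each SignalR JSON message.
- # ip_address: randomized 13.x.x.x address sent via x-forwarded-for, apparently to sidestep region checks.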
- delimiter = '\x1e'
- ip_address = f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
-
- allowedMessageTypes = [
- 'Chat',
- 'Disengaged',
- 'AdsQuery',
- 'SemanticSerp',
- 'GenerateContentQuery',
- 'SearchQuery',
- 'ActionRequest',
- 'Context',
- 'Progress',
- 'AdsQuery',
- 'SemanticSerp'
- ]
-
- sliceIds = [
-
- # "222dtappid",
- # "225cricinfo",
- # "224locals0"
-
- 'winmuid3tf',
- 'osbsdusgreccf',
- 'ttstmout',
- 'crchatrev',
- 'winlongmsgtf',
- 'ctrlworkpay',
- 'norespwtf',
- 'tempcacheread',
- 'temptacache',
- '505scss0',
- '508jbcars0',
- '515enbotdets0',
- '5082tsports',
- '515vaoprvs',
- '424dagslnv1s0',
- 'kcimgattcf',
- '427startpms0'
- ]
-
- location = {
- 'locale': 'en-US',
- 'market': 'en-US',
- 'region': 'US',
- 'locationHints': [
- {
- 'country': 'United States',
- 'state': 'California',
- 'city': 'Los Angeles',
- 'timezoneoffset': 8,
- 'countryConfidence': 8,
- 'Center': {
- 'Latitude': 34.0536909,
- 'Longitude': -118.242766
- },
- 'RegionType': 2,
- 'SourceType': 1
- }
- ],
- }
-
-
-def _format(msg: dict) -> str:
- return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
-
-
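-# Fetches a fresh (conversationId, clientId, conversationSignature) triple from Bing, retrying up to five times.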
-async def create_conversation():
- for _ in range(5):
- create = requests.get('https://www.bing.com/turing/conversation/create',
- headers={
- 'authority': 'edgeservices.bing.com',
- 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
- 'accept-language': 'en-US,en;q=0.9',
- 'cache-control': 'max-age=0',
- 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
- 'sec-ch-ua-arch': '"x86"',
- 'sec-ch-ua-bitness': '"64"',
- 'sec-ch-ua-full-version': '"110.0.1587.69"',
- 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-model': '""',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-ch-ua-platform-version': '"15.0.0"',
- 'sec-fetch-dest': 'document',
- 'sec-fetch-mode': 'navigate',
- 'sec-fetch-site': 'none',
- 'sec-fetch-user': '?1',
- 'upgrade-insecure-requests': '1',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
- 'x-edge-shopping-flag': '1',
- 'x-forwarded-for': Defaults.ip_address
- })
-
- conversationId = create.json().get('conversationId')
- clientId = create.json().get('clientId')
- conversationSignature = create.json().get('conversationSignature')
-
- # Return as soon as all three identifiers are present; fall through to retry otherwise.
- if conversationId and clientId and conversationSignature:
- return conversationId, clientId, conversationSignature
-
- # All five attempts failed.
- raise Exception('Failed to create conversation.')
-
-
-async def stream_generate(prompt: str, mode: optionsSets.optionSet = optionsSets.jailbreak, context=False):
- timeout = aiohttp.ClientTimeout(total=900)
- session = aiohttp.ClientSession(timeout=timeout)
-
- conversationId, clientId, conversationSignature = await create_conversation()
-
- wss = await session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', ssl=ssl_context, autoping=False,
- headers={
- 'accept': 'application/json',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
- 'sec-ch-ua-arch': '"x86"',
- 'sec-ch-ua-bitness': '"64"',
- 'sec-ch-ua-full-version': '"109.0.1518.78"',
- 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-model': '',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-ch-ua-platform-version': '"15.0.0"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'x-ms-client-request-id': str(uuid.uuid4()),
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- 'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
- 'Referrer-Policy': 'origin-when-cross-origin',
- 'x-forwarded-for': Defaults.ip_address
- })
-
- await wss.send_str(_format({'protocol': 'json', 'version': 1}))
- await wss.receive(timeout=900)
-
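- # Handshake acknowledged; the type-4 invocation below calls the 'chat' target with the full session arguments.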
- struct = {
- 'arguments': [
- {
- **mode,
- 'source': 'cib',
- 'allowedMessageTypes': Defaults.allowedMessageTypes,
- 'sliceIds': Defaults.sliceIds,
- 'traceId': os.urandom(16).hex(),
- 'isStartOfSession': True,
- 'message': Defaults.location | {
- 'author': 'user',
- 'inputMethod': 'Keyboard',
- 'text': prompt,
- 'messageType': 'Chat'
- },
- 'conversationSignature': conversationSignature,
- 'participant': {
- 'id': clientId
- },
- 'conversationId': conversationId
- }
- ],
- 'invocationId': '0',
- 'target': 'chat',
- 'type': 4
- }
-
- if context:
- struct['arguments'][0]['previousMessages'] = [
- {
- "author": "user",
- "description": context,
- "contextType": "WebPage",
- "messageType": "Context",
- "messageId": "discover-web--page-ping-mriduna-----"
- }
- ]
-
- await wss.send_str(_format(struct))
-
- final = False
- draw = False
- resp_txt = ''
- result_text = ''
- resp_txt_no_link = ''
- cache_text = ''
-
- while not final:
- msg = await wss.receive(timeout=900)
- objects = msg.data.split(Defaults.delimiter)
-
- for obj in objects:
- if obj is None or not obj:
- continue
-
- response = json.loads(obj)
- if response.get('type') == 1 and response['arguments'][0].get('messages'):
- if not draw:
- if response['arguments'][0]['messages'][0]['contentOrigin'] != 'Apology':
- resp_txt = result_text + \
- response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
- 'text', '')
- resp_txt_no_link = result_text + \
- response['arguments'][0]['messages'][0].get(
- 'text', '')
-
- if response['arguments'][0]['messages'][0].get('messageType',):
- resp_txt = (
- resp_txt
- + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
- + '\n'
- )
- result_text = (
- result_text
- + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
- + '\n'
- )
-
- if cache_text.endswith(' '):
- final = True
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
- yield (resp_txt.replace(cache_text, ''))
- cache_text = resp_txt
-
- elif response.get('type') == 2:
- if response['item']['result'].get('error'):
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
- raise Exception(
- f"{response['item']['result']['value']}: {response['item']['result']['message']}")
-
- if draw:
- cache = response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text']
- response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text'] = (
- cache + resp_txt)
-
- if (response['item']['messages'][-1]['contentOrigin'] == 'Apology' and resp_txt):
- response['item']['messages'][-1]['text'] = resp_txt_no_link
- response['item']['messages'][-1]['adaptiveCards'][0]['body'][0]['text'] = resp_txt
-
- # print('Preserved the message from being deleted', file=sys.stderr)
-
- final = True
- if wss and not wss.closed:
- await wss.close()
- if session and not session.closed:
- await session.close()
-
-
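-# Drives the async generator on a private event loop, exposing it as an ordinary synchronous generator.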
-def run(generator):
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- gen = generator.__aiter__()
-
- while True:
- try:
- next_val = loop.run_until_complete(gen.__anext__())
- yield next_val
-
- except StopAsyncIteration:
- break
- #print('Done')
-
-
-
-def convert(messages):
- context = ""
-
- for message in messages:
- context += "[%s](#message)\n%s\n\n" % (message['role'],
- message['content'])
-
- return context
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- if len(messages) < 2:
- prompt = messages[0]['content']
- context = False
-
- else:
- prompt = messages[-1]['content']
- context = convert(messages[:-1])
-
- response = run(stream_generate(prompt, optionsSets.jailbreak, context))
- for token in response:
- yield (token)
-
- #print('Done')
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/BingHuan.py b/g4f/Provider/Providers/BingHuan.py
deleted file mode 100644
index 64b67e4b..00000000
--- a/g4f/Provider/Providers/BingHuan.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os,sys
-import json
-import subprocess
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://b.ai-huan.xyz'
-model = ['gpt-3.5-turbo', 'gpt-4']
-supports_stream = True
-needs_auth = False
-working = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- path = os.path.dirname(os.path.realpath(__file__))
- config = json.dumps({
- 'messages': messages,
- 'model': model}, separators=(',', ':'))
- cmd = ['python', f'{path}/helpers/binghuan.py', config]
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- for line in iter(p.stdout.readline, b''):
- yield line.decode('cp1252')
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/ChatgptAi.py b/g4f/Provider/Providers/ChatgptAi.py
deleted file mode 100644
index 1f9ead0e..00000000
--- a/g4f/Provider/Providers/ChatgptAi.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-import requests, re
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://chatgpt.ai/gpt-4/'
-model = ['gpt-4']
-supports_stream = False
-needs_auth = False
-working = True
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- chat = ''
- for message in messages:
- chat += '%s: %s\n' % (message['role'], message['content'])
- chat += 'assistant: '
-
- response = requests.get('https://chatgpt.ai/')
- nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
-
- headers = {
- 'authority': 'chatgpt.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'origin': 'https://chatgpt.ai',
- 'pragma': 'no-cache',
- 'referer': 'https://chatgpt.ai/gpt-4/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
- data = {
- '_wpnonce': nonce,
- 'post_id': post_id,
- 'url': 'https://chatgpt.ai/gpt-4',
- 'action': 'wpaicg_chat_shortcode_message',
- 'message': chat,
- 'bot_id': bot_id
- }
-
- response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php',
- headers=headers, data=data)
-
- yield (response.json()['data'])
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/ChatgptLogin.py b/g4f/Provider/Providers/ChatgptLogin.py
deleted file mode 100644
index 0fdbab8e..00000000
--- a/g4f/Provider/Providers/ChatgptLogin.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import os
-from ...typing import sha256, Dict, get_type_hints
-import requests
-import re
-import base64
-
-url = 'https://chatgptlogin.ac'
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- def get_nonce():
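- # The WordPress REST nonce is hidden in a base64 data-URI script tag on the page;
- # decode it and pull `restNonce` out for the x-wp-nonce header.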
- res = requests.get('https://chatgptlogin.ac/use-chatgpt-free/', headers={
- "Referer": "https://chatgptlogin.ac/use-chatgpt-free/",
- "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
- })
-
- src = re.search(r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">', res.text).group(1)
- decoded_string = base64.b64decode(src.split(",")[-1]).decode('utf-8')
- return re.search(r"let restNonce = '(.*?)';", decoded_string).group(1)
-
- def transform(messages: list) -> list:
- def html_encode(string: str) -> str:
- # Escape '&' first so the ampersands introduced by later substitutions are not double-escaped.
- table = {
- '&': '&amp;',
- '"': '&quot;',
- "'": '&#39;',
- '>': '&gt;',
- '<': '&lt;',
- '\n': '<br>',
- '\t': '&nbsp;&nbsp;&nbsp;&nbsp;',
- ' ': '&nbsp;'
- }
-
- for key in table:
- string = string.replace(key, table[key])
-
- return string
-
- return [{
- 'id': os.urandom(6).hex(),
- 'role': message['role'],
- 'content': message['content'],
- 'who': 'AI: ' if message['role'] == 'assistant' else 'User: ',
- 'html': html_encode(message['content'])} for message in messages]
-
- headers = {
- 'authority': 'chatgptlogin.ac',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'content-type': 'application/json',
- 'origin': 'https://chatgptlogin.ac',
- 'referer': 'https://chatgptlogin.ac/use-chatgpt-free/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- 'x-wp-nonce': get_nonce()
- }
-
- conversation = transform(messages)
-
- json_data = {
- 'env': 'chatbot',
- 'session': 'N/A',
- 'prompt': 'Converse as if you were an AI assistant. Be friendly, creative.',
- 'context': 'Converse as if you were an AI assistant. Be friendly, creative.',
- 'messages': conversation,
- 'newMessage': messages[-1]['content'],
- 'userName': '<div class="mwai-name-text">User:</div>',
- 'aiName': '<div class="mwai-name-text">AI:</div>',
- 'model': 'gpt-3.5-turbo',
- 'temperature': kwargs.get('temperature', 0.8),
- 'maxTokens': 1024,
- 'maxResults': 1,
- 'apiKey': '',
- 'service': 'openai',
- 'embeddingsIndex': '',
- 'stop': '',
- 'clientId': os.urandom(6).hex()
- }
-
- response = requests.post('https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat',
- headers=headers, json=json_data)
-
- return response.json()['reply']
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/DeepAi.py b/g4f/Provider/Providers/DeepAi.py
deleted file mode 100644
index 27618cbb..00000000
--- a/g4f/Provider/Providers/DeepAi.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import json
-import os
-import requests
-import js2py
-from ...typing import sha256, Dict, get_type_hints
-
-
-url = "https://api.deepai.org/"
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-working = True
-
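-# Browser api-key generator: what appears to be an obfuscated MD5-style routine hashes the user agent
-# into a 'tryit-<random>-<digest chain>' token, evaluated below with js2py.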
-token_js = """
-var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
-var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
-h = Math.round(1E11 * Math.random()) + "";
-f = function () {
- for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);
-
- return function (t) {
- var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
- Z = [],
- A = unescape(encodeURI(t)) + "\u0080",
- z = A.length;
- t = --z / 4 + 2 | 15;
- for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
- for (q = A = 0; q < t; q += 16) {
- for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
- for (A = 4; A;) ea[--A] += z[A]
- }
- for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
- return t.split("").reverse().join("")
- }
-}();
-
-"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
-"""
-
-uuid4_js = """
-function uuidv4() {
- for (var a = [], b = 0; 36 > b; b++) a[b] = "0123456789abcdef".substr(Math.floor(16 * Math.random()), 1);
- a[14] = "4";
- a[19] = "0123456789abcdef".substr(a[19] & 3 | 8, 1);
- a[8] = a[13] = a[18] = a[23] = "-";
- return a.join("")
-}
-uuidv4();"""
-
-def create_session():
- url = "https://api.deepai.org/save_chat_session"
-
- payload = {'uuid': js2py.eval_js(uuid4_js), "title":"", "chat_style": "chat", "messages": '[]'}
- headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
-
- response = requests.request("POST", url, headers=headers, data=payload)
- return response
-
-def _create_completion(model: str, messages:list, stream: bool = True, **kwargs):
- create_session()
- url = "https://api.deepai.org/make_me_a_pizza"
-
- # 'chas_style' looks like a typo for the 'chat_style' key used in create_session,
- # but it is kept as-is since this is what the endpoint was called with.
- payload = {'chas_style': "chat", "chatHistory": json.dumps(messages)}
- api_key = js2py.eval_js(token_js)
- headers = {"api-key": api_key, "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"}
-
- response = requests.post(url, headers=headers, data=payload, stream=True)
- response.raise_for_status()  # fail before streaming rather than on every chunk
- for chunk in response.iter_content(chunk_size=None):
- yield chunk.decode()
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/DfeHub.py b/g4f/Provider/Providers/DfeHub.py
deleted file mode 100644
index e3ff8045..00000000
--- a/g4f/Provider/Providers/DfeHub.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import os, requests
-from ...typing import sha256, Dict, get_type_hints
-import json
-import re
-import time
-
-url = "https://chat.dfehub.com/api/chat"
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-working = True
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- headers = {
- 'authority': 'chat.dfehub.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'content-type': 'application/json',
- 'origin': 'https://chat.dfehub.com',
- 'referer': 'https://chat.dfehub.com/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- 'x-requested-with': 'XMLHttpRequest',
- }
-
- json_data = {
- 'messages': messages,
- 'model': 'gpt-3.5-turbo',
- 'temperature': kwargs.get('temperature', 0.5),
- 'presence_penalty': kwargs.get('presence_penalty', 0),
- 'frequency_penalty': kwargs.get('frequency_penalty', 0),
- 'top_p': kwargs.get('top_p', 1),
- "stream": True,
- }
- response = requests.post('https://chat.dfehub.com/api/openai/v1/chat/completions',
- headers=headers, json=json_data)
-
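- # Rate-limit errors arrive as a 'detail' message carrying the wait time in seconds;
- # sleep that long, then retry and stream the retried completion.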
- for chunk in response.iter_lines():
- if b'detail' in chunk:
- delay = re.findall(r"\d+\.\d+", chunk.decode())
- delay = float(delay[-1])
- print(f"Provider.DfeHub::Rate Limit Reached::Waiting {delay} seconds")
- time.sleep(delay)
- yield from _create_completion(model, messages, stream, **kwargs)
- if b'content' in chunk:
- data = json.loads(chunk.decode().split('data: ')[1])
- yield (data['choices'][0]['delta']['content'])
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/EasyChat.py b/g4f/Provider/Providers/EasyChat.py
deleted file mode 100644
index c6621f4e..00000000
--- a/g4f/Provider/Providers/EasyChat.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os, requests
-from ...typing import sha256, Dict, get_type_hints
-import json
-
-url = "https://free.easychat.work"
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-working = True
-active_servers = [
- "https://chat10.fastgpt.me",
- "https://chat9.fastgpt.me",
- "https://chat1.fastgpt.me",
- "https://chat2.fastgpt.me",
- "https://chat3.fastgpt.me",
- "https://chat4.fastgpt.me"
-]
-
-# Switch to another server if the current one stops working.
-server = active_servers[0]
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- req = requests.Session()
-
- headers = {
- 'authority': server.replace('https://', ''),
- 'accept': 'text/event-stream',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa;q=0.2',
- 'content-type': 'application/json',
- 'origin': f'{server}',
- 'referer': f'{server}/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- 'x-requested-with': 'XMLHttpRequest',
- }
-
- json_data = {
- 'messages': messages,
- 'stream': True,
- 'model': "gpt-3.5-turbo",
- 'temperature': kwargs.get('temperature', 0.5),
- 'presence_penalty': kwargs.get('presence_penalty', 0),
- 'frequency_penalty': kwargs.get('frequency_penalty', 0),
- 'top_p': kwargs.get('top_p', 1),
- }
-
- # init cookies from server
- req.get(f'{server}/')
-
- response = req.post(f'{server}/api/openai/v1/chat/completions',
- headers=headers, json=json_data)
-
- for chunk in response.iter_lines():
- if b'content' in chunk:
- data = json.loads(chunk.decode().split('data: ')[1])
- yield (data['choices'][0]['delta']['content'])
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/Forefront.py b/g4f/Provider/Providers/Forefront.py
deleted file mode 100644
index 70ea6725..00000000
--- a/g4f/Provider/Providers/Forefront.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://forefront.com'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-working = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- json_data = {
- 'text': messages[-1]['content'],
- 'action': 'noauth',
- 'id': '',
- 'parentId': '',
- 'workspaceId': '',
- 'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
- 'model': 'gpt-4',
- 'messages': messages[:-1] if len(messages) > 1 else [],
- 'internetMode': 'auto'
- }
- response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
- json=json_data, stream=True)
- for token in response.iter_lines():
- if b'delta' in token:
- token = json.loads(token.decode().split('data: ')[1])['delta']
- yield (token)
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/GetGpt.py b/g4f/Provider/Providers/GetGpt.py
deleted file mode 100644
index bafc0ce8..00000000
--- a/g4f/Provider/Providers/GetGpt.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os
-import json
-import uuid
-import requests
-from Crypto.Cipher import AES
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://chat.getgpt.world/'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-working = True
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- def encrypt(e):
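- # AES-128-CBC with a random 16-byte key (t) and IV (n); both are appended in
- # plaintext after the hex ciphertext so the server can recover them and decrypt.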
- t = os.urandom(8).hex().encode('utf-8')
- n = os.urandom(8).hex().encode('utf-8')
- r = e.encode('utf-8')
- cipher = AES.new(t, AES.MODE_CBC, n)
- ciphertext = cipher.encrypt(pad_data(r))
- return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
-
- def pad_data(data: bytes) -> bytes:
- block_size = AES.block_size
- padding_size = block_size - len(data) % block_size
- padding = bytes([padding_size] * padding_size)
- return data + padding
-
- headers = {
- 'Content-Type': 'application/json',
- 'Referer': 'https://chat.getgpt.world/',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
- }
-
- data = json.dumps({
- 'messages': messages,
- 'frequency_penalty': kwargs.get('frequency_penalty', 0),
- 'max_tokens': kwargs.get('max_tokens', 4000),
- 'model': 'gpt-3.5-turbo',
- 'presence_penalty': kwargs.get('presence_penalty', 0),
- 'temperature': kwargs.get('temperature', 1),
- 'top_p': kwargs.get('top_p', 1),
- 'stream': True,
- 'uuid': str(uuid.uuid4())
- })
-
- res = requests.post('https://chat.getgpt.world/api/chat/stream',
- headers=headers, json={'signature': encrypt(data)}, stream=True)
-
- for line in res.iter_lines():
- if b'content' in line:
- line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- yield (line_json['choices'][0]['delta']['content'])
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/H2o.py b/g4f/Provider/Providers/H2o.py
deleted file mode 100644
index 92043026..00000000
--- a/g4f/Provider/Providers/H2o.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from uuid import uuid4
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://gpt-gm.h2o.ai'
-model = ['falcon-40b', 'falcon-7b', 'llama-13b']
-supports_stream = True
-needs_auth = False
-working = True
-
-models = {
- 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
- 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
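- # Note: despite supports_stream above, the reply is read in full and returned as one string.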
-
- conversation = ''
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
-
- conversation += 'assistant: '
- session = requests.Session()
-
- response = session.get("https://gpt-gm.h2o.ai/")
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
- "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
- "Content-Type": "application/x-www-form-urlencoded",
- "Upgrade-Insecure-Requests": "1",
- "Sec-Fetch-Dest": "document",
- "Sec-Fetch-Mode": "navigate",
- "Sec-Fetch-Site": "same-origin",
- "Sec-Fetch-User": "?1",
- "Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"
- }
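- # Accept the ethics modal and select the active model; required before a conversation can be opened.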
- data = {
- "ethicsModalAccepted": "true",
- "shareConversationsWithModelAuthors": "true",
- "ethicsModalAcceptedAt": "",
- "activeModel": "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
- "searchEnabled": "true"
- }
- response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)
-
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
- "Accept": "*/*",
- "Accept-Language": "ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3",
- "Content-Type": "application/json",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "Referer": "https://gpt-gm.h2o.ai/"
- }
- data = {
- "model": models[model]
- }
-
- conversation_id = session.post("https://gpt-gm.h2o.ai/conversation", headers=headers, json=data)
- data = {
- "inputs": conversation,
- "parameters": {
- "temperature": kwargs.get('temperature', 0.4),
- "truncate": kwargs.get('truncate', 2048),
- "max_new_tokens": kwargs.get('max_new_tokens', 1024),
- "do_sample": kwargs.get('do_sample', True),
- "repetition_penalty": kwargs.get('repetition_penalty', 1.2),
- "return_full_text": kwargs.get('return_full_text', False)
- },
- "stream": True,
- "options": {
- "id": kwargs.get('id', str(uuid4())),
- "response_id": kwargs.get('response_id', str(uuid4())),
- "is_retry": False,
- "use_cache": False,
- "web_search_id": ""
- }
- }
-
- response = session.post(f"https://gpt-gm.h2o.ai/conversation/{conversation_id.json()['conversationId']}", headers=headers, json=data)
- generated_text = response.text.replace("\n", "").split("data:")
- generated_text = json.loads(generated_text[-1])
-
- return generated_text["generated_text"]
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Liaobots.py b/g4f/Provider/Providers/Liaobots.py
deleted file mode 100644
index 75746c03..00000000
--- a/g4f/Provider/Providers/Liaobots.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import os, uuid, requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://liaobots.com'
-model = ['gpt-3.5-turbo', 'gpt-4']
-supports_stream = True
-needs_auth = True
-working = False
-
-models = {
- 'gpt-4': {
- "id":"gpt-4",
- "name":"GPT-4",
- "maxLength":24000,
- "tokenLimit":8000
- },
- 'gpt-3.5-turbo': {
- "id":"gpt-3.5-turbo",
- "name":"GPT-3.5",
- "maxLength":12000,
- "tokenLimit":4000
- },
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
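- # needs_auth: an access code must be passed as auth=...; it is forwarded in the x-auth-code header.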
- print(kwargs)
-
- headers = {
- 'authority': 'liaobots.com',
- 'content-type': 'application/json',
- 'origin': 'https://liaobots.com',
- 'referer': 'https://liaobots.com/',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- 'x-auth-code': kwargs.get('auth')
- }
-
- json_data = {
- 'conversationId': str(uuid.uuid4()),
- 'model': models[model],
- 'messages': messages,
- 'key': '',
- 'prompt': "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
- }
-
- response = requests.post('https://liaobots.com/api/chat',
- headers=headers, json=json_data, stream=True)
-
- for token in response.iter_content(chunk_size=2046):
- yield (token.decode('utf-8'))
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Lockchat.py b/g4f/Provider/Providers/Lockchat.py
deleted file mode 100644
index dd1edb84..00000000
--- a/g4f/Provider/Providers/Lockchat.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import requests
-import os
-import json
-from ...typing import sha256, Dict, get_type_hints
-url = 'http://supertest.lockchat.app'
-model = ['gpt-4', 'gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
-
- payload = {
- "temperature": temperature,
- "messages": messages,
- "model": model,
- "stream": True,
- }
- headers = {
- "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
- }
- response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
- json=payload, headers=headers, stream=True)
- for token in response.iter_lines():
- if b'The model: `gpt-4` does not exist' in token:
- print('error, retrying...')
- # re-issue the request and stream its tokens instead of the failed response
- yield from _create_completion(model=model, messages=messages, stream=stream, temperature=temperature, **kwargs)
- return
- if b"content" in token:
- token = json.loads(token.decode('utf-8').split('data: ')[1])['choices'][0]['delta'].get('content')
- if token: yield (token)
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Theb.py b/g4f/Provider/Providers/Theb.py
deleted file mode 100644
index a78fb51f..00000000
--- a/g4f/Provider/Providers/Theb.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://theb.ai'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
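- # Delegates to helpers/theb.py in a subprocess and streams its stdout line by line.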
-
- path = os.path.dirname(os.path.realpath(__file__))
- config = json.dumps({
- 'messages': messages,
- 'model': model}, separators=(',', ':'))
-
- cmd = ['python3', f'{path}/helpers/theb.py', config]
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- for line in iter(p.stdout.readline, b''):
- yield line.decode('utf-8')
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Vercel.py b/g4f/Provider/Providers/Vercel.py
deleted file mode 100644
index f9331bfc..00000000
--- a/g4f/Provider/Providers/Vercel.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import os
-import json
-import base64
-import execjs
-import queue
-import threading
-
-from curl_cffi import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://play.vercel.ai'
-supports_stream = True
-needs_auth = False
-working = False
-
-models = {
- 'claude-instant-v1': 'anthropic:claude-instant-v1',
- 'claude-v1': 'anthropic:claude-v1',
- 'alpaca-7b': 'replicate:replicate/alpaca-7b',
- 'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b',
- 'bloom': 'huggingface:bigscience/bloom',
- 'bloomz': 'huggingface:bigscience/bloomz',
- 'flan-t5-xxl': 'huggingface:google/flan-t5-xxl',
- 'flan-ul2': 'huggingface:google/flan-ul2',
- 'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b',
- 'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
- 'santacoder': 'huggingface:bigcode/santacoder',
- 'command-medium-nightly': 'cohere:command-medium-nightly',
- 'command-xlarge-nightly': 'cohere:command-xlarge-nightly',
- 'code-cushman-001': 'openai:code-cushman-001',
- 'code-davinci-002': 'openai:code-davinci-002',
- 'gpt-3.5-turbo': 'openai:gpt-3.5-turbo',
- 'text-ada-001': 'openai:text-ada-001',
- 'text-babbage-001': 'openai:text-babbage-001',
- 'text-curie-001': 'openai:text-curie-001',
- 'text-davinci-002': 'openai:text-davinci-002',
- 'text-davinci-003': 'openai:text-davinci-003'
-}
-model = models.keys()
-
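-# Model metadata as served by the sdk.vercel.ai playground, keyed by provider-qualified model id.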
-vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': {
- 'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
-
-
-# import requests
-# import execjs
-# import ubox
-# import json
-# import re
-
-
-# html = requests.get('https://sdk.vercel.ai/').text
-# paths_regex = r'static\/chunks.+?\.js'
-# separator_regex = r'"\]\)<\/script><script>self\.__next_f\.push\(\[.,"'
-
-# paths = re.findall(paths_regex, html)
-# for i in range(len(paths)):
-# paths[i] = re.sub(separator_regex, "", paths[i])
-# paths = list(set(paths))
-# print(paths)
-
-# scripts = []
-# threads = []
-
-# print(f"Downloading and parsing scripts...")
-# def download_thread(path):
-# script_url = f"{self.base_url}/_next/{path}"
-# script = self.session.get(script_url).text
-# scripts.append(script)
-
-# for path in paths:
-# thread = threading.Thread(target=download_thread, args=(path,), daemon=True)
-# thread.start()
-# threads.append(thread)
-
-# for thread in threads:
-# thread.join()
-
-# for script in scripts:
-# models_regex = r'let .="\\n\\nHuman:\",r=(.+?),.='
-# matches = re.findall(models_regex, script)
-
-# if matches:
-# models_str = matches[0]
-# stop_sequences_regex = r'(?<=stopSequences:{value:\[)\D(?<!\])'
-# models_str = re.sub(stop_sequences_regex, re.escape('"\\n\\nHuman:"'), models_str)
-
-# context = quickjs.Context()
-# json_str = context.eval(f"({models_str})").json()
-# #return json.loads(json_str)
-
-# quit()
-# headers = {
-# 'authority': 'sdk.vercel.ai',
-# 'accept': '*/*',
-# 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-# 'content-type': 'application/json',
-# 'origin': 'https://sdk.vercel.ai',
-# 'referer': 'https://sdk.vercel.ai/',
-# 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-# 'sec-ch-ua-mobile': '?0',
-# 'sec-ch-ua-platform': '"macOS"',
-# 'sec-fetch-dest': 'empty',
-# 'sec-fetch-mode': 'cors',
-# 'sec-fetch-site': 'same-origin',
-# 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
-# }
-
-# response = requests.get('https://sdk.vercel.ai/openai.jpeg', headers=headers)
-
-# data = (json.loads(ubox.b64dec(response.text)))
-
-# script = 'globalThis={data: "sentinel"};a=()=>{return (%s)(%s)}' % (data['c'], data['a'])
-
-# token_data = execjs.compile(script).call('a')
-# print(token_data)
-
-# token = {
-# 'r': token_data,
-# 't': data["t"]
-# }
-
-# botToken = ubox.b64enc(json.dumps(token, separators=(',', ':')))
-# print(botToken)
-
-# import requests
-
-# headers['custom-encoding'] = botToken
-
-# json_data = {
-# 'messages': [
-# {
-# 'role': 'user',
-# 'content': 'hello',
-# },
-# ],
-# 'playgroundId': ubox.uuid4(),
-# 'chatIndex': 0,
-# 'model': 'openai:gpt-3.5-turbo',
-# 'temperature': 0.7,
-# 'maxTokens': 500,
-# 'topK': 1,
-# 'topP': 1,
-# 'frequencyPenalty': 1,
-# 'presencePenalty': 1,
-# 'stopSequences': []
-# }
-
-# response = requests.post('https://sdk.vercel.ai/api/generate',
-# headers=headers, json=json_data, stream=True)
-
-# for token in response.iter_content(chunk_size=2046):
-# print(token)
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
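- # Marked not working: the stub returns immediately; see the commented-out token exploration above.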
- return
- # conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
-
- # for message in messages:
- # conversation += '%s: %s\n' % (message['role'], message['content'])
-
- # conversation += 'assistant: '
-
- # completion = Client().generate(model, conversation)
-
- # for token in completion:
- # yield token
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Wewordle.py b/g4f/Provider/Providers/Wewordle.py
deleted file mode 100644
index 116ebb85..00000000
--- a/g4f/Provider/Providers/Wewordle.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os,sys
-import requests
-import json
-import random
-import time
-import string
-from ...typing import sha256, Dict, get_type_hints
-
-url = "https://wewordle.org/gptapi/v1/android/turbo"
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
- # randomize user id and app id
- _user_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=16))
- _app_id = ''.join(random.choices(f'{string.ascii_lowercase}{string.digits}', k=31))
- # make current date with format utc
- _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
- headers = {
- 'accept': '*/*',
- 'pragma': 'no-cache',
- 'Content-Type': 'application/json',
- 'Connection':'keep-alive'
- # user agent android client
- # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
-
- }
- data = {
- "user": _user_id,
- "messages": [
- {"role": "user", "content": base}
- ],
- "subscriber": {
- "originalPurchaseDate": None,
- "originalApplicationVersion": None,
- "allPurchaseDatesMillis": {},
- "entitlements": {
- "active": {},
- "all": {}
- },
- "allPurchaseDates": {},
- "allExpirationDatesMillis": {},
- "allExpirationDates": {},
- "originalAppUserId": f"$RCAnonymousID:{_app_id}",
- "latestExpirationDate": None,
- "requestDate": _request_date,
- "latestExpirationDateMillis": None,
- "nonSubscriptionTransactions": [],
- "originalPurchaseDateMillis": None,
- "managementURL": None,
- "allPurchasedProductIdentifiers": [],
- "firstSeen": _request_date,
- "activeSubscriptions": []
- }
- }
- response = requests.post(url, headers=headers, data=json.dumps(data))
- if response.status_code == 200:
- _json = response.json()
- if 'message' in _json:
- yield _json['message']['content']
- else:
- print(f"Error Occurred::{response.status_code}")
- return None
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/You.py b/g4f/Provider/Providers/You.py
deleted file mode 100644
index 3c321118..00000000
--- a/g4f/Provider/Providers/You.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://you.com'
-model = 'gpt-3.5-turbo'
-supports_stream = True
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
- path = os.path.dirname(os.path.realpath(__file__))
- config = json.dumps({
- 'messages': messages}, separators=(',', ':'))
-
- cmd = ['python3', f'{path}/helpers/you.py', config]
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- for line in iter(p.stdout.readline, b''):
- yield line.decode('utf-8') #[:-1] \ No newline at end of file
diff --git a/g4f/Provider/Providers/Yqcloud.py b/g4f/Provider/Providers/Yqcloud.py
deleted file mode 100644
index fae44682..00000000
--- a/g4f/Provider/Providers/Yqcloud.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import time
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-url = 'https://chat9.yqcloud.top/'
-model = [
- 'gpt-3.5-turbo',
-]
-supports_stream = True
-needs_auth = False
-working = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
- headers = {
- 'authority': 'api.aichatos.cloud',
- 'origin': 'https://chat9.yqcloud.top',
- 'referer': 'https://chat9.yqcloud.top/',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'prompt': 'always respond in english | %s' % messages[-1]['content'],
- 'userId': f'#/chat/{int(time.time() * 1000)}',
- 'network': True,
- 'apikey': '',
- 'system': '',
- 'withoutContext': False,
- }
-
- response = requests.post('https://api.aichatos.cloud/api/generateStream', headers=headers, json=json_data, stream=True)
- for token in response.iter_content(chunk_size=2046):
- if not b'always respond in english' in token:
- yield (token.decode('utf-8'))
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/__init__.py b/g4f/Provider/Providers/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/g4f/Provider/Providers/__init__.py
+++ /dev/null
diff --git a/g4f/Provider/Providers/helpers/binghuan.py b/g4f/Provider/Providers/helpers/binghuan.py
deleted file mode 100644
index 203bbe45..00000000
--- a/g4f/Provider/Providers/helpers/binghuan.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Original Code From : https://gitler.moe/g4f/gpt4free
-# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py
-import sys
-import ssl
-import uuid
-import json
-import time
-import random
-import asyncio
-import certifi
-# import requests
-from curl_cffi import requests
-import websockets
-import browser_cookie3
-
-config = json.loads(sys.argv[1])
-
-ssl_context = ssl.create_default_context()
-ssl_context.load_verify_locations(certifi.where())
-
-
-
-conversationstyles = {
- 'gpt-4': [ #'precise'
- "nlu_direct_response_filter",
- "deepleo",
- "disable_emoji_spoken_text",
- "responsible_ai_policy_235",
- "enablemm",
- "h3precise",
- "rcsprtsalwlst",
- "dv3sugg",
- "autosave",
- "clgalileo",
- "gencontentv3"
- ],
- 'balanced': [
- "nlu_direct_response_filter",
- "deepleo",
- "disable_emoji_spoken_text",
- "responsible_ai_policy_235",
- "enablemm",
- "harmonyv3",
- "rcsprtsalwlst",
- "dv3sugg",
- "autosave"
- ],
- 'gpt-3.5-turbo': [ #'precise'
- "nlu_direct_response_filter",
- "deepleo",
- "disable_emoji_spoken_text",
- "responsible_ai_policy_235",
- "enablemm",
- "h3imaginative",
- "rcsprtsalwlst",
- "dv3sugg",
- "autosave",
- "gencontentv3"
- ]
-}
-
-def format(msg: dict) -> str:
- return json.dumps(msg) + '\x1e'
-
-def get_token():
- return
-
- try:
- cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')}
- return cookies['_U']
- except:
- print('Error: could not find bing _U cookie in edge browser.')
- exit(1)
-
-class AsyncCompletion:
- async def create(
- prompt : str = None,
- optionSets : list = None,
- token : str = None): # No auth required anymore
-
- create = None
- for _ in range(5):
- try:
- create = requests.get('https://b.ai-huan.xyz/turing/conversation/create',
- headers = {
- 'host': 'b.ai-huan.xyz',
- 'accept-encoding': 'gzip, deflate, br',
- 'connection': 'keep-alive',
- 'authority': 'b.ai-huan.xyz',
- 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
- 'accept-language': 'en-US,en;q=0.9',
- 'cache-control': 'max-age=0',
- 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
- 'sec-ch-ua-arch': '"x86"',
- 'sec-ch-ua-bitness': '"64"',
- 'sec-ch-ua-full-version': '"110.0.1587.69"',
- 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-model': '""',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-ch-ua-platform-version': '"15.0.0"',
- 'sec-fetch-dest': 'document',
- 'sec-fetch-mode': 'navigate',
- 'sec-fetch-site': 'none',
- 'sec-fetch-user': '?1',
- 'upgrade-insecure-requests': '1',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
- 'x-edge-shopping-flag': '1',
- 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
- }
- )
-
- conversationId = create.json()['conversationId']
- clientId = create.json()['clientId']
- conversationSignature = create.json()['conversationSignature']
-
- except Exception as e:
- time.sleep(0.5)
- continue
-
- if create == None: raise Exception('Failed to create conversation.')
-
- wss: websockets.WebSocketClientProtocol or None = None
-
- wss = await websockets.connect('wss://sydney.vcanbb.chat/sydney/ChatHub', max_size = None, ssl = ssl_context,
- extra_headers = {
- 'accept': 'application/json',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
- 'sec-ch-ua-arch': '"x86"',
- 'sec-ch-ua-bitness': '"64"',
- 'sec-ch-ua-full-version': '"109.0.1518.78"',
- 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-model': "",
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-ch-ua-platform-version': '"15.0.0"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'x-ms-client-request-id': str(uuid.uuid4()),
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- 'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx',
- 'Referrer-Policy': 'origin-when-cross-origin',
- 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
- }
- )
-
- await wss.send(format({'protocol': 'json', 'version': 1}))
- await wss.recv()
-
- struct = {
- 'arguments': [
- {
- 'source': 'cib',
- 'optionsSets': optionSets,
- 'isStartOfSession': True,
- 'message': {
- 'author': 'user',
- 'inputMethod': 'Keyboard',
- 'text': prompt,
- 'messageType': 'Chat'
- },
- 'conversationSignature': conversationSignature,
- 'participant': {
- 'id': clientId
- },
- 'conversationId': conversationId
- }
- ],
- 'invocationId': '0',
- 'target': 'chat',
- 'type': 4
- }
-
- await wss.send(format(struct))
-
- base_string = ''
-
- final = False
- while not final:
- objects = str(await wss.recv()).split('\x1e')
- for obj in objects:
- if obj is None or obj == '':
- continue
-
- response = json.loads(obj)
- #print(response, flush=True, end='')
- if response.get('type') == 1 and response['arguments'][0].get('messages',):
- response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
-
- yield (response_text.replace(base_string, ''))
- base_string = response_text
-
- elif response.get('type') == 2:
- final = True
-
- await wss.close()
-
-# I think Bing really doesn't understand multi-message prompts (based on the prompt template)
-def convert(messages):
- context = ""
- for message in messages:
- context += "[%s](#message)\n%s\n\n" % (message['role'],
- message['content'])
- return context
-
-async def run(optionSets, messages):
- prompt = messages[-1]['content']
- if(len(messages) > 1):
- prompt = convert(messages)
- async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets):
- try:
- print(value, flush=True, end='')
- except UnicodeEncodeError as e:
- # emoji encoding problem
- print(value.encode('utf-8'), flush=True, end='')
-
-optionSet = conversationstyles[config['model']]
-asyncio.run(run(optionSet, config['messages'])) \ No newline at end of file
diff --git a/g4f/Provider/Providers/helpers/theb.py b/g4f/Provider/Providers/helpers/theb.py
deleted file mode 100644
index 71cfd23f..00000000
--- a/g4f/Provider/Providers/helpers/theb.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import json
-import sys
-from re import findall
-from curl_cffi import requests
-
-config = json.loads(sys.argv[1])
-prompt = config['messages'][-1]['content']
-
-headers = {
- 'authority': 'chatbot.theb.ai',
- 'accept': 'application/json, text/plain, */*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'content-type': 'application/json',
- 'origin': 'https://chatbot.theb.ai',
- 'referer': 'https://chatbot.theb.ai/',
- 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
-}
-
-json_data = {
- 'prompt': prompt,
- 'options': {}
-}
-
-def format(chunk):
- try:
- completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
- print(completion_chunk, flush=True, end='')
-
- except Exception as e:
-        print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
- return
-
-while True:
- try:
- response = requests.post('https://chatbot.theb.ai/api/chat-process',
- headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
-
- exit(0)
-
- except Exception as e:
-        print('[ERROR] an error occurred, retrying... |', e, flush=True)
- continue \ No newline at end of file
diff --git a/g4f/Provider/Providers/helpers/you.py b/g4f/Provider/Providers/helpers/you.py
deleted file mode 100644
index 02985ed1..00000000
--- a/g4f/Provider/Providers/helpers/you.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import sys
-import json
-import urllib.parse
-
-from curl_cffi import requests
-
-config = json.loads(sys.argv[1])
-messages = config['messages']
-prompt = ''
-
-
-def transform(messages: list) -> list:
- result = []
- i = 0
-
- while i < len(messages):
- if messages[i]['role'] == 'user':
- question = messages[i]['content']
- i += 1
-
- if i < len(messages) and messages[i]['role'] == 'assistant':
- answer = messages[i]['content']
- i += 1
- else:
- answer = ''
-
- result.append({'question': question, 'answer': answer})
-
- elif messages[i]['role'] == 'assistant':
- result.append({'question': '', 'answer': messages[i]['content']})
- i += 1
-
- elif messages[i]['role'] == 'system':
- result.append({'question': messages[i]['content'], 'answer': ''})
- i += 1
-
- return result
-
-headers = {
- 'Content-Type': 'application/x-www-form-urlencoded',
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
- 'Sec-Fetch-Site': 'same-origin',
- 'Accept-Language': 'en-GB,en;q=0.9',
- 'Sec-Fetch-Mode': 'navigate',
- 'Host': 'you.com',
- 'Origin': 'https://you.com',
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
- 'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
- 'Connection': 'keep-alive',
- 'Sec-Fetch-Dest': 'document',
- 'Priority': 'u=0, i',
-}
-
-if messages[-1]['role'] == 'user':
- prompt = messages[-1]['content']
- messages = messages[:-1]
-
-params = urllib.parse.urlencode({
- 'q': prompt,
- 'domain': 'youchat',
- 'chat': transform(messages)
-})
-
-def output(chunk):
- if b'"youChatToken"' in chunk:
- chunk_json = json.loads(chunk.decode().split('data: ')[1])
-
- print(chunk_json['youChatToken'], flush=True, end = '')
-
-while True:
- try:
- response = requests.get(f'https://you.com/api/streamingSearch?{params}',
- headers=headers, content_callback=output, impersonate='safari15_5')
-
- exit(0)
-
- except Exception as e:
-        print('an error occurred, retrying... |', e, flush=True)
- continue \ No newline at end of file
diff --git a/g4f/Provider/Providers/opchatgpts.py b/g4f/Provider/Providers/opchatgpts.py
deleted file mode 100644
index 0ff652fb..00000000
--- a/g4f/Provider/Providers/opchatgpts.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://opchatgpts.net'
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-working = True
-
-def _create_completion(model: str, messages: list, stream: bool = False, temperature: float = 0.8, max_tokens: int = 1024, system_prompt: str = "Converse as if you were an AI assistant. Be friendly, creative.", **kwargs):
-
- data = {
- 'env': 'chatbot',
- 'session': 'N/A',
- 'prompt': "\n",
- 'context': system_prompt,
- 'messages': messages,
- 'newMessage': messages[::-1][0]["content"],
- 'userName': '<div class="mwai-name-text">User:</div>',
- 'aiName': '<div class="mwai-name-text">AI:</div>',
- 'model': 'gpt-3.5-turbo',
- 'temperature': temperature,
- 'maxTokens': max_tokens,
- 'maxResults': 1,
- 'apiKey': '',
- 'service': 'openai',
- 'embeddingsIndex': '',
- 'stop': ''
- }
-
- response = requests.post('https://opchatgpts.net/wp-json/ai-chatbot/v1/chat', json=data).json()
-
- if response["success"]:
-
- return response["reply"] # `yield (response["reply"])` doesn't work
-
- raise Exception("Request failed: " + response)
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Raycast.py b/g4f/Provider/Raycast.py
new file mode 100644
index 00000000..9441f629
--- /dev/null
+++ b/g4f/Provider/Raycast.py
@@ -0,0 +1,56 @@
+import json
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Raycast(BaseProvider):
+ url = "https://backend.raycast.com/api/v1/ai/chat_completions"
+ working = True
+ supports_stream = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ needs_auth = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ auth = kwargs.get("auth")
+
+ headers = {
+ "Accept": "application/json",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Authorization": f"Bearer {auth}",
+ "Content-Type": "application/json",
+ "User-Agent": "Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0",
+ }
+ parsed_messages: list[dict[str, Any]] = []
+ for message in messages:
+ parsed_messages.append(
+ {"author": message["role"], "content": {"text": message["content"]}}
+ )
+ data = {
+ "debug": False,
+ "locale": "en-CN",
+ "messages": parsed_messages,
+ "model": model,
+ "provider": "openai",
+ "source": "ai_chat",
+ "system_instruction": "markdown",
+ "temperature": 0.5,
+ }
+ url = "https://backend.raycast.com/api/v1/ai/chat_completions"
+ response = requests.post(url, headers=headers, json=data, stream=True)
+        for line in response.iter_lines():
+            if b"data: " not in line:
+                continue
+            completion_chunk = json.loads(line.decode().replace("data: ", ""))
+            token = completion_chunk["text"]
+            if token is not None:
+                yield token
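Reviewer note: Raycast sets needs_auth, so callers must supply a bearer token through kwargs. A minimal usage sketch under that assumption (the auth value is a placeholder, not a real credential):

# Sketch: consuming the new Raycast provider; the auth value is hypothetical.
from g4f.Provider import Raycast

for chunk in Raycast.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    stream=True,
    auth="<raycast-bearer-token>",  # required: needs_auth = True
):
    print(chunk, end="")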
diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py
new file mode 100644
index 00000000..c6fd6f19
--- /dev/null
+++ b/g4f/Provider/Theb.py
@@ -0,0 +1,39 @@
+import json
+
+from curl_cffi import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Theb(BaseProvider):
+ url = "https://theb.ai"
+ working = False
+ supports_stream = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ prompt = messages[-1]["content"]
+
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "content-type": "application/json",
+ }
+
+ json_data: dict[str, Any] = {"prompt": prompt, "options": {}}
+ response = requests.post(
+ "https://chatbot.theb.ai/api/chat-process",
+ headers=headers,
+ json=json_data,
+ impersonate="chrome110",
+ )
+ response.raise_for_status()
+ line = response.text.splitlines()[-1]
+ text = json.loads(line)["text"]
+ yield text
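Reviewer note: although Theb keeps supports_stream = True, this rewrite buffers the whole chat-process response and yields a single chunk parsed from the last line of the body. A sketch of the line format the parser assumes (the payload below is illustrative, not captured traffic):

# Each line of the response body is assumed to be a JSON snapshot;
# the last line holds the complete text.
import json

body = '{"text": "Hel"}\n{"text": "Hello there"}'  # illustrative
final = json.loads(body.splitlines()[-1])["text"]
assert final == "Hello there"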
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
new file mode 100644
index 00000000..186662c4
--- /dev/null
+++ b/g4f/Provider/Vercel.py
@@ -0,0 +1,337 @@
+import base64
+import json
+import uuid
+
+import quickjs
+from curl_cffi import requests
+
+from ..typing import Any, CreateResult, TypedDict
+from .base_provider import BaseProvider
+
+
+class Vercel(BaseProvider):
+ url = "https://play.vercel.ai"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ if model in ["gpt-3.5-turbo", "gpt-4"]:
+ model = "openai:" + model
+ yield _chat(model_id=model, messages=messages)
+
+
+def _chat(model_id: str, messages: list[dict[str, str]]) -> str:
+ session = requests.Session(impersonate="chrome107")
+
+ url = "https://sdk.vercel.ai/api/generate"
+ header = _create_header(session)
+ payload = _create_payload(model_id, messages)
+
+ response = session.post(url=url, headers=header, json=payload)
+ response.raise_for_status()
+ return response.text
+
+
+def _create_payload(model_id: str, messages: list[dict[str, str]]) -> dict[str, Any]:
+ default_params = model_info[model_id]["default_params"]
+ return {
+ "messages": messages,
+ "playgroundId": str(uuid.uuid4()),
+ "chatIndex": 0,
+ "model": model_id,
+ } | default_params
+
+
+def _create_header(session: requests.Session):
+ custom_encoding = _get_custom_encoding(session)
+ return {"custom-encoding": custom_encoding}
+
+
+# based on https://github.com/ading2210/vercel-llm-api
+def _get_custom_encoding(session: requests.Session):
+ url = "https://sdk.vercel.ai/openai.jpeg"
+ response = session.get(url=url)
+
+ data = json.loads(base64.b64decode(response.text, validate=True))
+ script = """
+ String.prototype.fontcolor = function() {{
+ return `<font>${{this}}</font>`
+ }}
+ var globalThis = {{marker: "mark"}};
+ ({script})({key})
+ """.format(
+ script=data["c"], key=data["a"]
+ )
+
+ context = quickjs.Context() # type: ignore
+ token_data = json.loads(context.eval(script).json()) # type: ignore
+ token_data[2] = "mark"
+ token = {"r": token_data, "t": data["t"]}
+ token_str = json.dumps(token, separators=(",", ":")).encode("utf-16le")
+ return base64.b64encode(token_str).decode()
+
+
+class ModelInfo(TypedDict):
+ id: str
+ default_params: dict[str, Any]
+
+
+model_info: dict[str, ModelInfo] = {
+ "anthropic:claude-instant-v1": {
+ "id": "anthropic:claude-instant-v1",
+ "default_params": {
+ "temperature": 1,
+ "maxTokens": 200,
+ "topP": 1,
+ "topK": 1,
+ "presencePenalty": 1,
+ "frequencyPenalty": 1,
+ "stopSequences": ["\n\nHuman:"],
+ },
+ },
+ "anthropic:claude-v1": {
+ "id": "anthropic:claude-v1",
+ "default_params": {
+ "temperature": 1,
+ "maxTokens": 200,
+ "topP": 1,
+ "topK": 1,
+ "presencePenalty": 1,
+ "frequencyPenalty": 1,
+ "stopSequences": ["\n\nHuman:"],
+ },
+ },
+ "anthropic:claude-v2": {
+ "id": "anthropic:claude-v2",
+ "default_params": {
+ "temperature": 1,
+ "maxTokens": 200,
+ "topP": 1,
+ "topK": 1,
+ "presencePenalty": 1,
+ "frequencyPenalty": 1,
+ "stopSequences": ["\n\nHuman:"],
+ },
+ },
+ "replicate:a16z-infra/llama7b-v2-chat": {
+ "id": "replicate:a16z-infra/llama7b-v2-chat",
+ "default_params": {
+ "temperature": 0.75,
+ "maxTokens": 500,
+ "topP": 1,
+ "repetitionPenalty": 1,
+ },
+ },
+ "replicate:a16z-infra/llama13b-v2-chat": {
+ "id": "replicate:a16z-infra/llama13b-v2-chat",
+ "default_params": {
+ "temperature": 0.75,
+ "maxTokens": 500,
+ "topP": 1,
+ "repetitionPenalty": 1,
+ },
+ },
+ "huggingface:bigscience/bloom": {
+ "id": "huggingface:bigscience/bloom",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 0.95,
+ "topK": 4,
+ "repetitionPenalty": 1.03,
+ },
+ },
+ "huggingface:google/flan-t5-xxl": {
+ "id": "huggingface:google/flan-t5-xxl",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 0.95,
+ "topK": 4,
+ "repetitionPenalty": 1.03,
+ },
+ },
+ "huggingface:EleutherAI/gpt-neox-20b": {
+ "id": "huggingface:EleutherAI/gpt-neox-20b",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 0.95,
+ "topK": 4,
+ "repetitionPenalty": 1.03,
+ "stopSequences": [],
+ },
+ },
+ "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5": {
+ "id": "huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+ "default_params": {"maxTokens": 200, "typicalP": 0.2, "repetitionPenalty": 1},
+ },
+ "huggingface:OpenAssistant/oasst-sft-1-pythia-12b": {
+ "id": "huggingface:OpenAssistant/oasst-sft-1-pythia-12b",
+ "default_params": {"maxTokens": 200, "typicalP": 0.2, "repetitionPenalty": 1},
+ },
+ "huggingface:bigcode/santacoder": {
+ "id": "huggingface:bigcode/santacoder",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 0.95,
+ "topK": 4,
+ "repetitionPenalty": 1.03,
+ },
+ },
+ "cohere:command-light-nightly": {
+ "id": "cohere:command-light-nightly",
+ "default_params": {
+ "temperature": 0.9,
+ "maxTokens": 200,
+ "topP": 1,
+ "topK": 0,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "cohere:command-nightly": {
+ "id": "cohere:command-nightly",
+ "default_params": {
+ "temperature": 0.9,
+ "maxTokens": 200,
+ "topP": 1,
+ "topK": 0,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:gpt-4": {
+ "id": "openai:gpt-4",
+ "default_params": {
+ "temperature": 0.7,
+ "maxTokens": 500,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:gpt-4-0613": {
+ "id": "openai:gpt-4-0613",
+ "default_params": {
+ "temperature": 0.7,
+ "maxTokens": 500,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:code-davinci-002": {
+ "id": "openai:code-davinci-002",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:gpt-3.5-turbo": {
+ "id": "openai:gpt-3.5-turbo",
+ "default_params": {
+ "temperature": 0.7,
+ "maxTokens": 500,
+ "topP": 1,
+ "topK": 1,
+ "presencePenalty": 1,
+ "frequencyPenalty": 1,
+ "stopSequences": [],
+ },
+ },
+ "openai:gpt-3.5-turbo-16k": {
+ "id": "openai:gpt-3.5-turbo-16k",
+ "default_params": {
+ "temperature": 0.7,
+ "maxTokens": 500,
+ "topP": 1,
+ "topK": 1,
+ "presencePenalty": 1,
+ "frequencyPenalty": 1,
+ "stopSequences": [],
+ },
+ },
+ "openai:gpt-3.5-turbo-16k-0613": {
+ "id": "openai:gpt-3.5-turbo-16k-0613",
+ "default_params": {
+ "temperature": 0.7,
+ "maxTokens": 500,
+ "topP": 1,
+ "topK": 1,
+ "presencePenalty": 1,
+ "frequencyPenalty": 1,
+ "stopSequences": [],
+ },
+ },
+ "openai:text-ada-001": {
+ "id": "openai:text-ada-001",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:text-babbage-001": {
+ "id": "openai:text-babbage-001",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:text-curie-001": {
+ "id": "openai:text-curie-001",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:text-davinci-002": {
+ "id": "openai:text-davinci-002",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+ "openai:text-davinci-003": {
+ "id": "openai:text-davinci-003",
+ "default_params": {
+ "temperature": 0.5,
+ "maxTokens": 200,
+ "topP": 1,
+ "presencePenalty": 0,
+ "frequencyPenalty": 0,
+ "stopSequences": [],
+ },
+ },
+}
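Reviewer note: this file replaces the commented-out prototype deleted from the old Providers/Vercel.py above. The anti-bot flow in _get_custom_encoding fetches /openai.jpeg, base64-decodes a JSON challenge, evaluates its JS payload in quickjs, and re-encodes the result. A sketch of just the final encoding step, with dummy token data standing in for the quickjs output:

# Sketch of the header-encoding step; the token contents are dummies,
# real values come from evaluating the JS challenge in quickjs.
import base64
import json

token = {"r": [0, 0, "mark"], "t": "dummy-t"}  # illustrative
token_str = json.dumps(token, separators=(",", ":")).encode("utf-16le")
custom_encoding = base64.b64encode(token_str).decode()
print({"custom-encoding": custom_encoding})  # header sent to /api/generate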
diff --git a/g4f/Provider/Wewordle.py b/g4f/Provider/Wewordle.py
new file mode 100644
index 00000000..f7f47ee0
--- /dev/null
+++ b/g4f/Provider/Wewordle.py
@@ -0,0 +1,75 @@
+import json
+import random
+import string
+import time
+
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Wewordle(BaseProvider):
+ url = "https://wewordle.org/gptapi/v1/android/turbo"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ base = ""
+
+ for message in messages:
+ base += "%s: %s\n" % (message["role"], message["content"])
+ base += "assistant:"
+ # randomize user id and app id
+ _user_id = "".join(
+ random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)
+ )
+ _app_id = "".join(
+ random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)
+ )
+        # build the current UTC timestamp in ISO 8601 format
+ _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
+ headers = {
+ "accept": "*/*",
+ "pragma": "no-cache",
+ "Content-Type": "application/json",
+ "Connection": "keep-alive"
+ # user agent android client
+ # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
+ }
+ data: dict[str, Any] = {
+ "user": _user_id,
+ "messages": [{"role": "user", "content": base}],
+ "subscriber": {
+ "originalPurchaseDate": None,
+ "originalApplicationVersion": None,
+ "allPurchaseDatesMillis": {},
+ "entitlements": {"active": {}, "all": {}},
+ "allPurchaseDates": {},
+ "allExpirationDatesMillis": {},
+ "allExpirationDates": {},
+ "originalAppUserId": f"$RCAnonymousID:{_app_id}",
+ "latestExpirationDate": None,
+ "requestDate": _request_date,
+ "latestExpirationDateMillis": None,
+ "nonSubscriptionTransactions": [],
+ "originalPurchaseDateMillis": None,
+ "managementURL": None,
+ "allPurchasedProductIdentifiers": [],
+ "firstSeen": _request_date,
+ "activeSubscriptions": [],
+ },
+ }
+
+ url = "https://wewordle.org/gptapi/v1/android/turbo"
+ response = requests.post(url, headers=headers, data=json.dumps(data))
+ response.raise_for_status()
+ _json = response.json()
+ if "message" in _json:
+ yield _json["message"]["content"]
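Reviewer note: the rewrite keeps the old behavior of flattening the whole conversation into one pseudo-transcript sent as a single user message; only the error handling changed (raise_for_status replaces the silent status-code print). A quick sketch of the flattening:

# The conversation is collapsed into one "role: content" transcript.
messages = [
    {"role": "system", "content": "be brief"},
    {"role": "user", "content": "hi"},
]
base = "".join("%s: %s\n" % (m["role"], m["content"]) for m in messages) + "assistant:"
print(base)
# system: be brief
# user: hi
# assistant: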
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
new file mode 100644
index 00000000..0d8114a8
--- /dev/null
+++ b/g4f/Provider/You.py
@@ -0,0 +1,59 @@
+import re
+import urllib.parse
+
+from curl_cffi import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class You(BaseProvider):
+ url = "https://you.com"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ url_param = _create_url_param(messages)
+ headers = _create_header()
+ url = f"https://you.com/api/streamingSearch?{url_param}"
+ response = requests.get(
+ url,
+ headers=headers,
+ impersonate="chrome107",
+ )
+ response.raise_for_status()
+ yield _parse_output(response.text)
+
+
+def _create_url_param(messages: list[dict[str, str]]):
+ prompt = messages.pop()["content"]
+ chat = _convert_chat(messages)
+ param = {"q": prompt, "domain": "youchat", "chat": chat}
+ return urllib.parse.urlencode(param)
+
+
+def _convert_chat(messages: list[dict[str, str]]):
+ message_iter = iter(messages)
+ return [
+ {"question": user["content"], "answer": assistant["content"]}
+ for user, assistant in zip(message_iter, message_iter)
+ ]
+
+
+def _create_header():
+ return {
+ "accept": "text/event-stream",
+ "referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
+ }
+
+
+def _parse_output(output: str) -> str:
+ regex = r"^data:\s{\"youChatToken\": \"(.*)\"}$"
+    tokens = re.findall(regex, output, re.MULTILINE)
+ return "".join(tokens)
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
new file mode 100644
index 00000000..3709e53d
--- /dev/null
+++ b/g4f/Provider/Yqcloud.py
@@ -0,0 +1,44 @@
+import requests
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
+class Yqcloud(BaseProvider):
+ url = "https://chat9.yqcloud.top/"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ headers = _create_header()
+ payload = _create_payload(messages)
+
+ url = "https://api.aichatos.cloud/api/generateStream"
+ response = requests.post(url=url, headers=headers, json=payload)
+ response.raise_for_status()
+ yield response.text
+
+
+def _create_header():
+ return {
+ "accept": "application/json, text/plain, */*",
+ "content-type": "application/json",
+ "origin": "https://chat9.yqcloud.top",
+ }
+
+
+def _create_payload(messages: list[dict[str, str]]):
+ prompt = messages[-1]["content"]
+ return {
+ "prompt": prompt,
+ "network": True,
+ "system": "",
+ "withoutContext": False,
+ "stream": False,
+ }
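Reviewer note: compared with the deleted Providers/Yqcloud.py, this version drops the 'always respond in english |' prompt prefix and the userId field, and no longer streams: it posts with "stream": False and yields the whole body once. Usage sketch:

# Sketch: the rewritten provider yields the full response text as one chunk.
from g4f.Provider import Yqcloud

for text in Yqcloud.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    stream=False,
):
    print(text)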
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 9d0442d0..21db1af5 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,30 +1,53 @@
-from . import Provider
-from .Providers import (
- Ails,
- You,
- Bing,
- Yqcloud,
- Theb,
- Aichat,
- Bard,
- Vercel,
- Forefront,
- Lockchat,
- Liaobots,
- H2o,
- ChatgptLogin,
- DeepAi,
- GetGpt,
- AItianhu,
- EasyChat,
- Acytoo,
- DfeHub,
- AiService,
- BingHuan,
- Wewordle,
- ChatgptAi,
- opchatgpts,
- Raycast,
-)
+from .Acytoo import Acytoo
+from .Aichat import Aichat
+from .Ails import Ails
+from .AiService import AiService
+from .AItianhu import AItianhu
+from .Bard import Bard
+from .base_provider import BaseProvider
+from .Bing import Bing
+from .ChatgptAi import ChatgptAi
+from .ChatgptLogin import ChatgptLogin
+from .DeepAi import DeepAi
+from .DfeHub import DfeHub
+from .EasyChat import EasyChat
+from .Forefront import Forefront
+from .GetGpt import GetGpt
+from .H2o import H2o
+from .Liaobots import Liaobots
+from .Lockchat import Lockchat
+from .Opchatgpts import Opchatgpts
+from .Raycast import Raycast
+from .Theb import Theb
+from .Vercel import Vercel
+from .Wewordle import Wewordle
+from .You import You
+from .Yqcloud import Yqcloud
-Palm = Bard
+__all__ = [
+ "BaseProvider",
+ "Acytoo",
+ "Aichat",
+ "Ails",
+ "AiService",
+ "AItianhu",
+ "Bard",
+ "Bing",
+ "ChatgptAi",
+ "ChatgptLogin",
+ "DeepAi",
+ "DfeHub",
+ "EasyChat",
+ "Forefront",
+ "GetGpt",
+ "H2o",
+ "Liaobots",
+ "Lockchat",
+ "Opchatgpts",
+ "Raycast",
+ "Theb",
+ "Vercel",
+ "Wewordle",
+ "You",
+ "Yqcloud",
+]
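Reviewer note: the nested g4f.Provider.Providers namespace is gone, each provider is now a class importable from the package root, and the Palm = Bard alias is dropped. Downstream imports migrate like this:

# Migration sketch for downstream code (old path removed by this commit):
# before: from g4f.Provider.Providers import Vercel
from g4f.Provider import BaseProvider, Vercel, You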
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
new file mode 100644
index 00000000..92636fda
--- /dev/null
+++ b/g4f/Provider/base_provider.py
@@ -0,0 +1,33 @@
+from abc import ABC, abstractmethod
+
+from ..typing import Any, CreateResult
+
+
+class BaseProvider(ABC):
+ url: str
+ working = False
+ needs_auth = False
+ supports_stream = False
+ supports_gpt_35_turbo = False
+ supports_gpt_4 = False
+
+ @staticmethod
+ @abstractmethod
+ def create_completion(
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool,
+ **kwargs: Any,
+ ) -> CreateResult:
+ raise NotImplementedError()
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"