summaryrefslogtreecommitdiffstats
path: root/g4f
diff options
context:
space:
mode:
authorHeiner Lohaus <heiner@lohaus.eu>2023-10-05 05:13:37 +0200
committerHeiner Lohaus <heiner@lohaus.eu>2023-10-05 05:13:37 +0200
commit88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9 (patch)
tree7e6e3b6c179cf1a695f9d80026d544a4fd3c5203 /g4f
parent~ | g4f v-0.1.4.8 - Fixed `g4f.Provider.Bing` (diff)
downloadgpt4free-88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9.tar
gpt4free-88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9.tar.gz
gpt4free-88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9.tar.bz2
gpt4free-88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9.tar.lz
gpt4free-88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9.tar.xz
gpt4free-88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9.tar.zst
gpt4free-88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9.zip
Diffstat (limited to 'g4f')
-rw-r--r--g4f/Provider/AItianhu.py17
-rw-r--r--g4f/Provider/AItianhuSpace.py33
-rw-r--r--g4f/Provider/AiAsk.py43
-rw-r--r--g4f/Provider/Aibn.py3
-rw-r--r--g4f/Provider/Aichat.py3
-rw-r--r--g4f/Provider/Aivvm.py8
-rw-r--r--g4f/Provider/Bing.py89
-rw-r--r--g4f/Provider/ChatForAi.py3
-rw-r--r--g4f/Provider/Chatgpt4Online.py39
-rw-r--r--g4f/Provider/ChatgptAi.py62
-rw-r--r--g4f/Provider/ChatgptDemo.py62
-rw-r--r--g4f/Provider/ChatgptDuo.py8
-rw-r--r--g4f/Provider/ChatgptX.py66
-rw-r--r--g4f/Provider/DeepAi.py11
-rw-r--r--g4f/Provider/FreeGpt.py3
-rw-r--r--g4f/Provider/GptGo.py3
-rw-r--r--g4f/Provider/Liaobots.py35
-rw-r--r--g4f/Provider/Myshell.py3
-rw-r--r--g4f/Provider/Vitalentum.py3
-rw-r--r--g4f/Provider/Ylokh.py4
-rw-r--r--g4f/Provider/You.py21
-rw-r--r--g4f/Provider/Yqcloud.py3
-rw-r--r--g4f/Provider/__init__.py61
-rw-r--r--g4f/Provider/deprecated/__init__.py1
-rw-r--r--g4f/Provider/needs_auth/OpenaiChat.py3
-rw-r--r--g4f/Provider/retry_provider.py9
-rw-r--r--g4f/Provider/unfinished/Komo.py44
-rw-r--r--g4f/Provider/unfinished/MikuChat.py97
-rw-r--r--g4f/Provider/unfinished/PerplexityAi.py (renamed from g4f/Provider/deprecated/PerplexityAi.py)1
-rw-r--r--g4f/Provider/unfinished/__init__.py3
-rw-r--r--g4f/__init__.py7
-rw-r--r--g4f/debug.py1
-rw-r--r--g4f/models.py31
33 files changed, 603 insertions, 177 deletions
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index 1d8653b7..c6e4dbad 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -4,7 +4,7 @@ import json
from ..typing import AsyncGenerator
from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
class AItianhu(AsyncGeneratorProvider):
@@ -18,8 +18,12 @@ class AItianhu(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ cookies: dict = None,
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
+ if not cookies:
+ cookies = get_cookies("www.aitianhu.com")
data = {
"prompt": format_prompt(messages),
"options": {},
@@ -34,12 +38,19 @@ class AItianhu(AsyncGeneratorProvider):
"Origin": cls.url,
"Referer": f"{cls.url}/"
}
- async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ timeout=timeout,
+ proxies={"https": proxy},
+ impersonate="chrome107",
+ verify=False
+ ) as session:
async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
response.raise_for_status()
async for line in response.iter_lines():
if line == b"<script>":
- raise RuntimeError("Solve Challenge")
+ raise RuntimeError("Solve challenge and pass cookies")
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
index 27f2b1fa..78cdf657 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -4,11 +4,11 @@ import random, json
from ..typing import AsyncGenerator
from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
domains = {
- "gpt-3.5-turbo": ".aitianhu.space",
- "gpt-4": ".aitianhu.website",
+ "gpt-3.5-turbo": "aitianhu.space",
+ "gpt-4": "aitianhu.website",
}
class AItianhuSpace(AsyncGeneratorProvider):
@@ -21,20 +21,31 @@ class AItianhuSpace(AsyncGeneratorProvider):
cls,
model: str,
messages: list[dict[str, str]],
- stream: bool = True,
+ proxy: str = None,
+ domain: str = None,
+ cookies: dict = None,
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
if not model:
model = "gpt-3.5-turbo"
elif not model in domains:
raise ValueError(f"Model are not supported: {model}")
+ if not domain:
+ chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
+ rand = ''.join(random.choice(chars) for _ in range(6))
+ domain = f"{rand}.{domains[model]}"
+ if not cookies:
+ cookies = get_cookies(domain)
- chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
- rand = ''.join(random.choice(chars) for _ in range(6))
- domain = domains[model]
- url = f'https://{rand}{domain}'
-
- async with StreamSession(impersonate="chrome110", verify=False) as session:
+ url = f'https://{domain}'
+ async with StreamSession(
+ proxies={"https": proxy},
+ cookies=cookies,
+ timeout=timeout,
+ impersonate="chrome110",
+ verify=False
+ ) as session:
data = {
"prompt": format_prompt(messages),
"options": {},
@@ -53,7 +64,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
response.raise_for_status()
async for line in response.iter_lines():
if line == b"<script>":
- raise RuntimeError("Solve Challenge")
+ raise RuntimeError("Solve challenge and pass cookies and a fixed domain")
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line)
diff --git a/g4f/Provider/AiAsk.py b/g4f/Provider/AiAsk.py
new file mode 100644
index 00000000..906b5575
--- /dev/null
+++ b/g4f/Provider/AiAsk.py
@@ -0,0 +1,43 @@
+from aiohttp import ClientSession
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+
+class AiAsk(AsyncGeneratorProvider):
+ url = "https://e.aiask.me"
+ supports_gpt_35_turbo = True
+ working = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ timeout: int = 30,
+ **kwargs
+ ) -> AsyncGenerator:
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "origin": cls.url,
+ "referer": f"{cls.url}/chat",
+ }
+ async with ClientSession(headers=headers, timeout=timeout) as session:
+ data = {
+ "continuous": True,
+ "id": "fRMSQtuHl91A4De9cCvKD",
+ "list": messages,
+ "models": "0",
+ "prompt": "",
+ "temperature": kwargs.get("temperature", 0.5),
+ "title": "",
+ }
+ buffer = ""
+ rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
+ async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ buffer += chunk.decode()
+ if not rate_limit.startswith(buffer):
+ yield buffer
+ buffer = ""
+ elif buffer == rate_limit:
+ raise RuntimeError("Rate limit reached") \ No newline at end of file
diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/Aibn.py
index df56189b..3399d613 100644
--- a/g4f/Provider/Aibn.py
+++ b/g4f/Provider/Aibn.py
@@ -18,9 +18,10 @@ class Aibn(AsyncGeneratorProvider):
cls,
model: str,
messages: list[dict[str, str]],
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107") as session:
+ async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
timestamp = int(time.time())
data = {
"messages": messages,
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
index 8edd17e2..41496765 100644
--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -15,6 +15,7 @@ class Aichat(AsyncProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ timeout: int = 30,
**kwargs
) -> str:
headers = {
@@ -33,7 +34,7 @@ class Aichat(AsyncProvider):
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}
async with ClientSession(
- headers=headers
+ headers=headers, timeout=timeout
) as session:
json_data = {
"message": format_prompt(messages),
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index c4ec677c..1e780953 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -29,6 +29,7 @@ class Aivvm(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
stream: bool,
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
if not model:
@@ -43,7 +44,12 @@ class Aivvm(AsyncGeneratorProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
- async with StreamSession(impersonate="chrome107") as session:
+ headers = {
+ "Accept": "*/*",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/",
+ }
+ async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index e333f132..b669d800 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -4,6 +4,7 @@ import random
import uuid
import json
import os
+import uuid
import urllib.parse
from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncGenerator
@@ -14,6 +15,15 @@ class Tones():
balanced = "Balanced"
precise = "Precise"
+default_cookies = {
+ 'SRCHD' : 'AF=NOFORM',
+ 'PPLState' : '1',
+ 'KievRPSSecAuth': '',
+ 'SUID' : '',
+ 'SRCHUSR' : '',
+ 'SRCHHPGUSR' : '',
+}
+
class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat"
working = True
@@ -27,7 +37,6 @@ class Bing(AsyncGeneratorProvider):
tone: str = Tones.creative,
**kwargs
) -> AsyncGenerator:
-
if len(messages) < 2:
prompt = messages[0]["content"]
context = None
@@ -36,14 +45,7 @@ class Bing(AsyncGeneratorProvider):
context = create_context(messages[:-1])
if not cookies or "SRCHD" not in cookies:
- cookies = {
- 'SRCHD' : 'AF=NOFORM',
- 'PPLState' : '1',
- 'KievRPSSecAuth': '',
- 'SUID' : '',
- 'SRCHUSR' : '',
- 'SRCHHPGUSR' : '',
- }
+ cookies = default_cookies
return stream_generate(prompt, tone, context, cookies)
def create_context(messages: list[dict[str, str]]):
@@ -58,51 +60,18 @@ class Conversation():
self.conversationSignature = conversationSignature
async def create_conversation(session: ClientSession) -> Conversation:
- url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1055.6'
- headers = {
- 'authority': 'www.bing.com',
- 'accept': 'application/json',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'cache-control': 'no-cache',
- 'pragma': 'no-cache',
- 'referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1',
- 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
- 'sec-ch-ua-arch': '"arm"',
- 'sec-ch-ua-bitness': '"64"',
- 'sec-ch-ua-full-version': '"117.0.5938.132"',
- 'sec-ch-ua-full-version-list': '"Google Chrome";v="117.0.5938.132", "Not;A=Brand";v="8.0.0.0", "Chromium";v="117.0.5938.132"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-model': '""',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-ch-ua-platform-version': '"14.0.0"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
- 'x-ms-client-request-id': str(uuid.uuid4()),
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.12.0 OS/macOS',
- }
-
- async with await session.get(url, headers=headers) as response:
- conversationSignature = response.headers.get('X-Sydney-EncryptedConversationSignature', '')
-
- response = await response.json()
- conversationId = response.get('conversationId')
- clientId = response.get('clientId')
-
- if not conversationId or not clientId:
+ url = 'https://www.bing.com/turing/conversation/create'
+ async with await session.get(url) as response:
+ data = await response.json()
+ conversationId = data.get('conversationId')
+ clientId = data.get('clientId')
+ conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
+
+ if not conversationId or not clientId or not conversationSignature:
raise Exception('Failed to create conversation.')
return Conversation(conversationId, clientId, conversationSignature)
-async def retry_conversation(session: ClientSession) -> Conversation:
- for _ in range(5):
- try:
- return await create_conversation(session)
- except:
- session.cookie_jar.clear()
- return await create_conversation(session)
-
async def list_conversations(session: ClientSession) -> list:
url = "https://www.bing.com/turing/conversation/chats"
async with session.get(url) as response:
@@ -223,30 +192,34 @@ def format_message(msg: dict) -> str:
return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str:
+ request_id = str(uuid.uuid4())
struct = {
'arguments': [
{
- 'optionsSets': Defaults.optionsSets,
'source': 'cib',
+ 'optionsSets': Defaults.optionsSets,
'allowedMessageTypes': Defaults.allowedMessageTypes,
'sliceIds': Defaults.sliceIds,
'traceId': os.urandom(16).hex(),
'isStartOfSession': True,
+ 'requestId': request_id,
'message': Defaults.location | {
'author': 'user',
'inputMethod': 'Keyboard',
'text': prompt,
- 'messageType': 'Chat'
+ 'messageType': 'Chat',
+ 'requestId': request_id,
+ 'messageId': request_id,
},
'tone': tone,
- 'conversationSignature': conversation.conversationSignature,
+ 'spokenTextMode': 'None',
+ 'conversationId': conversation.conversationId,
'participant': {
'id': conversation.clientId
},
- 'conversationId': conversation.conversationId
}
],
- 'invocationId': '0',
+ 'invocationId': '1',
'target': 'chat',
'type': 4
}
@@ -272,16 +245,16 @@ async def stream_generate(
cookies=cookies,
headers=Defaults.headers,
) as session:
- conversation = await retry_conversation(session)
+ conversation = await create_conversation(session)
try:
async with session.ws_connect(
f'wss://sydney.bing.com/sydney/ChatHub?sec_access_token={urllib.parse.quote_plus(conversation.conversationSignature)}',
autoping=False,
+ params={'sec_access_token': conversation.conversationSignature}
) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
- msg = await wss.receive(timeout=900)
-
+ await wss.receive(timeout=900)
await wss.send_str(create_message(conversation, prompt, tone, context))
response_txt = ''
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index 779799cf..47ac46f0 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -17,9 +17,10 @@ class ChatForAi(AsyncGeneratorProvider):
cls,
model: str,
messages: list[dict[str, str]],
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107") as session:
+ async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
conversation_id = f"id_{int(time.time())}"
prompt = messages[-1]["content"]
timestamp = int(time.time())
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
new file mode 100644
index 00000000..b9631429
--- /dev/null
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+
+
+class Chatgpt4Online(AsyncGeneratorProvider):
+ url = "https://chatgpt4online.org"
+ supports_gpt_35_turbo = True
+ working = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> AsyncGenerator:
+ async with ClientSession() as session:
+ data = {
+ "botId": "default",
+ "customId": None,
+ "session": "N/A",
+ "chatId": "",
+ "contextId": 58,
+ "messages": messages,
+ "newMessage": messages[-1]["content"],
+ "stream": True
+ }
+ async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ line = json.loads(line[6:])
+ if line["type"] == "live":
+ yield line["data"] \ No newline at end of file
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index e6416cc3..0ae0fa38 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -1,28 +1,28 @@
from __future__ import annotations
import re
-import html
-import json
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncProvider, format_prompt
-class ChatgptAi(AsyncGeneratorProvider):
+class ChatgptAi(AsyncProvider):
url: str = "https://chatgpt.ai/"
working = True
supports_gpt_35_turbo = True
- _system_data = None
+ _nonce = None
+ _post_id = None
+ _bot_id = None
@classmethod
- async def create_async_generator(
+ async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ timeout: int = 30,
**kwargs
- ) -> AsyncGenerator:
+ ) -> str:
headers = {
"authority" : "chatgpt.ai",
"accept" : "*/*",
@@ -40,36 +40,36 @@ class ChatgptAi(AsyncGeneratorProvider):
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}
async with ClientSession(
- headers=headers
+ headers=headers, timeout=timeout
) as session:
- if not cls._system_data:
+ if not cls._nonce:
async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status()
- match = re.findall(r"data-system='([^']+)'", await response.text())
- if not match:
- raise RuntimeError("No system data")
- cls._system_data = json.loads(html.unescape(match[0]))
+ text = await response.text()
+ result = re.search(r'data-nonce="(.*?)"', text)
+ if result:
+ cls._nonce = result.group(1)
+ result = re.search(r'data-post-id="(.*?)"', text)
+ if result:
+ cls._post_id = result.group(1)
+ result = re.search(r'data-bot-id="(.*?)"', text)
+ if result:
+ cls._bot_id = result.group(1)
+ if not cls._nonce or not cls._post_id or not cls._bot_id:
+ raise RuntimeError("Nonce, post-id or bot-id not found")
data = {
- "botId": cls._system_data["botId"],
- "clientId": "",
- "contextId": cls._system_data["contextId"],
- "id": cls._system_data["id"],
- "messages": messages[:-1],
- "newMessage": messages[-1]["content"],
- "session": cls._system_data["sessionId"],
- "stream": True
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": "https://chatgpt.ai",
+ "action": "wpaicg_chat_shortcode_message",
+ "message": format_prompt(messages),
+ "bot_id": cls._bot_id
}
async with session.post(
- "https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit",
+ "https://chatgpt.ai/wp-admin/admin-ajax.php",
proxy=proxy,
- json=data
+ data=data
) as response:
response.raise_for_status()
- start = "data: "
- async for line in response.content:
- line = line.decode('utf-8')
- if line.startswith(start):
- line = json.loads(line[len(start):-1])
- if line["type"] == "live":
- yield line["data"] \ No newline at end of file
+ return (await response.json())["data"] \ No newline at end of file
diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/ChatgptDemo.py
new file mode 100644
index 00000000..fdd7730b
--- /dev/null
+++ b/g4f/Provider/ChatgptDemo.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import time, json, re
+from aiohttp import ClientSession
+from typing import AsyncGenerator
+
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+class ChatgptDemo(AsyncGeneratorProvider):
+ url = "https://chat.chatgptdemo.net"
+ supports_gpt_35_turbo = True
+ working = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ timeout: int = 30,
+ **kwargs
+ ) -> AsyncGenerator:
+ headers = {
+ "authority": "chat.chatgptdemo.net",
+ "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
+ "origin": "https://chat.chatgptdemo.net",
+ "referer": "https://chat.chatgptdemo.net/",
+ "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers, timeout=timeout) as session:
+ async with session.get(f"{cls.url}/", proxy=proxy) as response:
+ response.raise_for_status()
+ response = await response.text()
+ result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
+ if not result:
+ raise RuntimeError("No user id found")
+ user_id = result.group(1)
+ async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
+ response.raise_for_status()
+ chat_id = (await response.json())["id_"]
+ if not chat_id:
+ raise RuntimeError("Could not create new chat")
+ data = {
+ "question": format_prompt(messages),
+ "chat_id": chat_id,
+ "timestamp": int(time.time()*1000),
+ }
+ async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ line = json.loads(line[6:-1])
+ chunk = line["choices"][0]["delta"].get("content")
+ if chunk:
+ yield chunk \ No newline at end of file
diff --git a/g4f/Provider/ChatgptDuo.py b/g4f/Provider/ChatgptDuo.py
index abed8a3c..119ff16b 100644
--- a/g4f/Provider/ChatgptDuo.py
+++ b/g4f/Provider/ChatgptDuo.py
@@ -14,9 +14,15 @@ class ChatgptDuo(AsyncProvider):
cls,
model: str,
messages: list[dict[str, str]],
+ proxy: str = None,
+ timeout: int = 30,
**kwargs
) -> str:
- async with AsyncSession(impersonate="chrome107") as session:
+ async with AsyncSession(
+ impersonate="chrome107",
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
prompt = format_prompt(messages),
data = {
"prompt": prompt,
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
new file mode 100644
index 00000000..eb62ddc9
--- /dev/null
+++ b/g4f/Provider/ChatgptX.py
@@ -0,0 +1,66 @@
+import re
+from aiohttp import ClientSession
+
+from .base_provider import AsyncProvider
+from .helper import format_prompt
+
+class ChatgptX(AsyncProvider):
+ url = "https://chatgptx.de"
+ supports_gpt_35_turbo = True
+ working = True
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> str:
+ headers = {
+ 'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
+ 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': 'Linux',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
+ }
+ async with ClientSession(headers=headers) as session:
+ async with session.get(f"{cls.url}/") as response:
+ response = await response.text()
+ result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
+ if result:
+ csrf_token = result.group(1)
+ result = re.search(r"openconversions\('(.*?)'\)", response)
+ if result:
+ chat_id = result.group(1)
+ result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', response)
+ if result:
+ user_id = result.group(1)
+
+ if not csrf_token or not chat_id or not user_id:
+ raise RuntimeError("Missing csrf_token, chat_id or user_id")
+
+ data = {
+ '_token': csrf_token,
+ 'user_id': user_id,
+ 'chats_id': chat_id,
+ 'prompt': format_prompt(messages),
+ 'current_model': "gpt3"
+ }
+ headers = {
+ 'authority': 'chatgptx.de',
+ 'accept': 'application/json, text/javascript, */*; q=0.01',
+ 'origin': cls.url,
+ 'referer': f'{cls.url}/',
+ 'x-csrf-token': csrf_token,
+ 'x-requested-with': 'XMLHttpRequest'
+ }
+ async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
+ response.raise_for_status()
+ data = await response.json()
+ if "message" in data:
+ return data["message"]
+ elif "messages" in data:
+ raise RuntimeError(f'Response: {data["messages"]}') \ No newline at end of file
diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py
index 7239f6ac..bac3e3fe 100644
--- a/g4f/Provider/DeepAi.py
+++ b/g4f/Provider/DeepAi.py
@@ -2,6 +2,8 @@ from __future__ import annotations
import json
import js2py
+import random
+import hashlib
from aiohttp import ClientSession
from ..typing import AsyncGenerator
@@ -64,3 +66,12 @@ f = function () {
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
+
+
+def get_api_key(user_agent: str):
+ e = str(round(1E11 * random.random()))
+
+ def hash(data: str):
+ return hashlib.md5(data.encode()).hexdigest()[::-1]
+
+ return f"tryit-{e}-" + hash(user_agent + hash(user_agent + hash(user_agent + e + "x"))) \ No newline at end of file
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 092e1bb6..73b8acea 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -21,9 +21,10 @@ class FreeGpt(AsyncGeneratorProvider):
cls,
model: str,
messages: list[dict[str, str]],
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107") as session:
+ async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index 7db8fb0d..19484291 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -18,6 +18,7 @@ class GptGo(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
headers = {
@@ -31,7 +32,7 @@ class GptGo(AsyncGeneratorProvider):
"Sec-Fetch-Site" : "same-origin",
}
async with ClientSession(
- headers=headers
+ headers=headers, timeout=timeout
) as session:
async with session.get(
"https://gptgo.ai/action_get_token.php",
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index ea3e0d45..e65c3a0d 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -30,8 +30,8 @@ models = {
}
class Liaobots(AsyncGeneratorProvider):
- url = "https://liaobots.com"
- working = False
+ url = "https://liaobots.site"
+ working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
_auth_code = None
@@ -43,6 +43,7 @@ class Liaobots(AsyncGeneratorProvider):
messages: list[dict[str, str]],
auth: str = None,
proxy: str = None,
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
model = model if model in models else "gpt-3.5-turbo"
@@ -54,13 +55,25 @@ class Liaobots(AsyncGeneratorProvider):
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
}
async with ClientSession(
- headers=headers
+ headers=headers, timeout=timeout
) as session:
- auth_code = auth if isinstance(auth, str) else cls._auth_code
- if not auth_code:
- async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response:
+ cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
+ if not cls._auth_code:
+ async with session.post(
+ "https://liaobots.work/recaptcha/api/login",
+ proxy=proxy,
+ data={"token": "abcdefghijklmnopqrst"},
+ verify_ssl=False
+ ) as response:
response.raise_for_status()
- auth_code = cls._auth_code = json.loads(await response.text())["authCode"]
+ async with session.post(
+ "https://liaobots.work/api/user",
+ proxy=proxy,
+ json={"authcode": ""},
+ verify_ssl=False
+ ) as response:
+ response.raise_for_status()
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
data = {
"conversationId": str(uuid.uuid4()),
"model": models[model],
@@ -68,7 +81,13 @@ class Liaobots(AsyncGeneratorProvider):
"key": "",
"prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
}
- async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
+ async with session.post(
+ "https://liaobots.work/api/chat",
+ proxy=proxy,
+ json=data,
+ headers={"x-auth-code": cls._auth_code},
+ verify_ssl=False
+ ) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
diff --git a/g4f/Provider/Myshell.py b/g4f/Provider/Myshell.py
index 0ddd3029..da170fa3 100644
--- a/g4f/Provider/Myshell.py
+++ b/g4f/Provider/Myshell.py
@@ -28,6 +28,7 @@ class Myshell(AsyncGeneratorProvider):
cls,
model: str,
messages: list[dict[str, str]],
+ timeout: int = 90,
**kwargs
) -> AsyncGenerator:
if not model:
@@ -46,7 +47,7 @@ class Myshell(AsyncGeneratorProvider):
async with session.ws_connect(
"wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
autoping=False,
- timeout=90
+ timeout=timeout
) as wss:
# Send and receive hello message
await wss.receive_str()
diff --git a/g4f/Provider/Vitalentum.py b/g4f/Provider/Vitalentum.py
index d5265428..9125c285 100644
--- a/g4f/Provider/Vitalentum.py
+++ b/g4f/Provider/Vitalentum.py
@@ -18,6 +18,7 @@ class Vitalentum(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
headers = {
@@ -40,7 +41,7 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs
}
async with ClientSession(
- headers=headers
+ headers=headers, timeout=timeout
) as session:
async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status()
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index 3c8b32dd..a15f7894 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -19,6 +19,7 @@ class Ylokh(AsyncGeneratorProvider):
messages: list[dict[str, str]],
stream: bool = True,
proxy: str = None,
+ timeout: int = 30,
**kwargs
) -> AsyncGenerator:
model = model if model else "gpt-3.5-turbo"
@@ -39,7 +40,8 @@ class Ylokh(AsyncGeneratorProvider):
}
async with StreamSession(
headers=headers,
- proxies={"https": proxy}
+ proxies={"https": proxy},
+ timeout=timeout
) as session:
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
response.raise_for_status()
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 4f49f15e..4afe1ef6 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -2,8 +2,7 @@ from __future__ import annotations
import json
-from curl_cffi.requests import AsyncSession
-
+from ..requests import StreamSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -12,7 +11,6 @@ class You(AsyncGeneratorProvider):
url = "https://you.com"
working = True
supports_gpt_35_turbo = True
- supports_stream = False
@classmethod
@@ -21,20 +19,21 @@ class You(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ timeout: int = 30,
**kwargs,
) -> AsyncGenerator:
- async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
+ async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
headers = {
"Accept": "text/event-stream",
"Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
}
- response = await session.get(
+ async with session.get(
"https://you.com/api/streamingSearch",
params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
headers=headers
- )
- response.raise_for_status()
- start = 'data: {"youChatToken": '
- for line in response.text.splitlines():
- if line.startswith(start):
- yield json.loads(line[len(start): -1]) \ No newline at end of file
+ ) as response:
+ response.raise_for_status()
+ start = b'data: {"youChatToken": '
+ async for line in response.iter_lines():
+ if line.startswith(start):
+ yield json.loads(line[len(start):-1]) \ No newline at end of file
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
index ac93315c..1b22de9e 100644
--- a/g4f/Provider/Yqcloud.py
+++ b/g4f/Provider/Yqcloud.py
@@ -16,10 +16,11 @@ class Yqcloud(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ timeout: int = 30,
**kwargs,
) -> AsyncGenerator:
async with ClientSession(
- headers=_create_header()
+ headers=_create_header(), timeout=timeout
) as session:
payload = _create_payload(messages)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 72a459c2..f3236474 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,34 +1,39 @@
from __future__ import annotations
-from .Acytoo import Acytoo
-from .Aibn import Aibn
-from .Aichat import Aichat
-from .Ails import Ails
-from .AItianhu import AItianhu
-from .AItianhuSpace import AItianhuSpace
-from .Aivvm import Aivvm
-from .Bing import Bing
-from .ChatBase import ChatBase
-from .ChatForAi import ChatForAi
-from .ChatgptAi import ChatgptAi
-from .ChatgptDuo import ChatgptDuo
-from .ChatgptLogin import ChatgptLogin
-from .DeepAi import DeepAi
-from .FreeGpt import FreeGpt
-from .GptGo import GptGo
-from .H2o import H2o
-from .Liaobots import Liaobots
-from .Myshell import Myshell
-from .Phind import Phind
-from .Vercel import Vercel
-from .Vitalentum import Vitalentum
-from .Ylokh import Ylokh
-from .You import You
-from .Yqcloud import Yqcloud
+from .Acytoo import Acytoo
+from .AiAsk import AiAsk
+from .Aibn import Aibn
+from .Aichat import Aichat
+from .Ails import Ails
+from .AItianhu import AItianhu
+from .AItianhuSpace import AItianhuSpace
+from .Aivvm import Aivvm
+from .Bing import Bing
+from .ChatBase import ChatBase
+from .ChatForAi import ChatForAi
+from .Chatgpt4Online import Chatgpt4Online
+from .ChatgptAi import ChatgptAi
+from .ChatgptDemo import ChatgptDemo
+from .ChatgptDuo import ChatgptDuo
+from .ChatgptLogin import ChatgptLogin
+from .ChatgptX import ChatgptX
+from .DeepAi import DeepAi
+from .FreeGpt import FreeGpt
+from .GptGo import GptGo
+from .H2o import H2o
+from .Liaobots import Liaobots
+from .Myshell import Myshell
+from .Phind import Phind
+from .Vercel import Vercel
+from .Vitalentum import Vitalentum
+from .Ylokh import Ylokh
+from .You import You
+from .Yqcloud import Yqcloud
from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
from .retry_provider import RetryProvider
from .deprecated import *
from .needs_auth import *
+from .unfinished import *
__all__ = [
'BaseProvider',
@@ -36,6 +41,7 @@ __all__ = [
'AsyncGeneratorProvider',
'RetryProvider',
'Acytoo',
+ 'AiAsk',
'Aibn',
'Aichat',
'Ails',
@@ -47,9 +53,12 @@ __all__ = [
'Bing',
'ChatBase',
'ChatForAi',
+ 'Chatgpt4Online',
'ChatgptAi',
+ 'ChatgptDemo',
'ChatgptDuo',
'ChatgptLogin',
+ 'ChatgptX',
'CodeLinkAva',
'DeepAi',
'DfeHub',
@@ -80,4 +89,4 @@ __all__ = [
'FastGpt',
'Wuguokai',
'V50'
-]
+] \ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 8d22a3c3..e4528d02 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -6,7 +6,6 @@ from .Forefront import Forefront
from .GetGpt import GetGpt
from .Opchatgpts import Opchatgpts
from .Lockchat import Lockchat
-from .PerplexityAi import PerplexityAi
from .Wewordle import Wewordle
from .Equing import Equing
from .Wuguokai import Wuguokai
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index c57692d3..c41909e3 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -22,6 +22,7 @@ class OpenaiChat(AsyncGeneratorProvider):
proxy: str = None,
access_token: str = None,
cookies: dict = None,
+ timeout: int = 30,
**kwargs: dict
) -> AsyncGenerator:
proxies = {"https": proxy}
@@ -31,7 +32,7 @@ class OpenaiChat(AsyncGeneratorProvider):
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
}
- async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107") as session:
+ async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
messages = [
{
"id": str(uuid.uuid4()),
diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py
index e1a9cd1f..c1672aba 100644
--- a/g4f/Provider/retry_provider.py
+++ b/g4f/Provider/retry_provider.py
@@ -4,6 +4,7 @@ import random
from ..typing import CreateResult
from .base_provider import BaseProvider, AsyncProvider
+from ..debug import logging
class RetryProvider(AsyncProvider):
@@ -41,6 +42,8 @@ class RetryProvider(AsyncProvider):
started = False
for provider in providers:
try:
+ if logging:
+ print(f"Using {provider.__name__} provider")
for token in provider.create_completion(model, messages, stream, **kwargs):
yield token
started = True
@@ -48,6 +51,8 @@ class RetryProvider(AsyncProvider):
return
except Exception as e:
self.exceptions[provider.__name__] = e
+ if logging:
+ print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
break
@@ -59,7 +64,7 @@ class RetryProvider(AsyncProvider):
messages: list[dict[str, str]],
**kwargs
) -> str:
- providers = [provider for provider in self.providers if issubclass(provider, AsyncProvider)]
+ providers = [provider for provider in self.providers]
if self.shuffle:
random.shuffle(providers)
@@ -69,6 +74,8 @@ class RetryProvider(AsyncProvider):
return await provider.create_async(model, messages, **kwargs)
except Exception as e:
self.exceptions[provider.__name__] = e
+ if logging:
+ print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
self.raise_exceptions()
diff --git a/g4f/Provider/unfinished/Komo.py b/g4f/Provider/unfinished/Komo.py
new file mode 100644
index 00000000..84d8d634
--- /dev/null
+++ b/g4f/Provider/unfinished/Komo.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+import json
+
+from ...requests import StreamSession
+from ...typing import AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, format_prompt
+
+class Komo(AsyncGeneratorProvider):
+ url = "https://komo.ai/api/ask"
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> AsyncGenerator:
+ async with StreamSession(impersonate="chrome107") as session:
+ prompt = format_prompt(messages)
+ data = {
+ "query": prompt,
+ "FLAG_URLEXTRACT": "false",
+ "token": "",
+ "FLAG_MODELA": "1",
+ }
+ headers = {
+ 'authority': 'komo.ai',
+ 'accept': 'text/event-stream',
+ 'cache-control': 'no-cache',
+ 'referer': 'https://komo.ai/',
+ }
+
+ async with session.get(cls.url, params=data, headers=headers) as response:
+ response.raise_for_status()
+ next = False
+ async for line in response.iter_lines():
+ if line == b"event: line":
+ next = True
+ elif next and line.startswith(b"data: "):
+ yield json.loads(line[6:])
+ next = False
+
diff --git a/g4f/Provider/unfinished/MikuChat.py b/g4f/Provider/unfinished/MikuChat.py
new file mode 100644
index 00000000..bf19631f
--- /dev/null
+++ b/g4f/Provider/unfinished/MikuChat.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+import random, json
+from datetime import datetime
+from ...requests import StreamSession
+
+from ...typing import AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider
+
+
+class MikuChat(AsyncGeneratorProvider):
+ url = "https://ai.okmiku.com"
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs
+ ) -> AsyncGenerator:
+ if not model:
+ model = "gpt-3.5-turbo"
+ headers = {
+ "authority": "api.catgpt.cc",
+ "accept": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/chat/",
+ 'x-app-version': 'undefined',
+ 'x-date': get_datetime(),
+ 'x-fingerprint': get_fingerprint(),
+ 'x-platform': 'web'
+ }
+ async with StreamSession(headers=headers, impersonate="chrome107") as session:
+ data = {
+ "model": model,
+ "top_p": 0.8,
+ "temperature": 0.5,
+ "presence_penalty": 1,
+ "frequency_penalty": 0,
+ "max_tokens": 2000,
+ "stream": True,
+ "messages": messages,
+ }
+ async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
+ print(await response.text())
+ response.raise_for_status()
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ line = json.loads(line[6:])
+ chunk = line["choices"][0]["delta"].get("content")
+ if chunk:
+ yield chunk
+
+def k(e: str, t: int):
+ a = len(e) & 3
+ s = len(e) - a
+ i = t
+ c = 3432918353
+ o = 461845907
+ n = 0
+ r = 0
+ while n < s:
+ r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
+ n += 4
+ r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
+ r = (r << 15) | (r >> 17)
+ r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
+ i ^= r
+ i = (i << 13) | (i >> 19)
+ l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295
+ i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16)
+
+ if a == 3:
+ r ^= (ord(e[n + 2]) & 255) << 16
+ elif a == 2:
+ r ^= (ord(e[n + 1]) & 255) << 8
+ elif a == 1:
+ r ^= ord(e[n]) & 255
+ r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
+ r = (r << 15) | (r >> 17)
+ r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
+ i ^= r
+
+ i ^= len(e)
+ i ^= i >> 16
+ i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295
+ i ^= i >> 13
+ i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295
+ i ^= i >> 16
+ return i & 0xFFFFFFFF
+
+def get_fingerprint() -> str:
+ return str(k(str(int(random.random() * 100000)), 256))
+
+def get_datetime() -> str:
+ return datetime.now().strftime("%Y-%m-%d %H:%M:%S") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/PerplexityAi.py b/g4f/Provider/unfinished/PerplexityAi.py
index f4f71712..3e096808 100644
--- a/g4f/Provider/deprecated/PerplexityAi.py
+++ b/g4f/Provider/unfinished/PerplexityAi.py
@@ -10,7 +10,6 @@ from ..base_provider import AsyncProvider, format_prompt, get_cookies
class PerplexityAi(AsyncProvider):
url = "https://www.perplexity.ai"
- working = False
supports_gpt_35_turbo = True
_sources = []
diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py
new file mode 100644
index 00000000..8330b5e4
--- /dev/null
+++ b/g4f/Provider/unfinished/__init__.py
@@ -0,0 +1,3 @@
+from .MikuChat import MikuChat
+from .PerplexityAi import PerplexityAi
+from .Komo import Komo \ No newline at end of file
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 9fb6f672..dc5e1fdd 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,12 +1,15 @@
from __future__ import annotations
+
from g4f import models
-from .Provider import BaseProvider, AsyncProvider
-from .typing import Any, CreateResult, Union
+from .Provider import BaseProvider
+from .typing import CreateResult, Union
+from .debug import logging
from requests import get
logging = False
version = '0.1.4.8'
+
def check_pypi_version():
try:
response = get(f"https://pypi.org/pypi/g4f/json").json()
diff --git a/g4f/debug.py b/g4f/debug.py
new file mode 100644
index 00000000..558a2428
--- /dev/null
+++ b/g4f/debug.py
@@ -0,0 +1 @@
+logging = False \ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index 4a6c9faf..48c19f9b 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -3,27 +3,28 @@ from dataclasses import dataclass
from .typing import Union
from .Provider import BaseProvider, RetryProvider
from .Provider import (
- AItianhuSpace,
ChatgptLogin,
- PerplexityAi,
+ ChatgptDemo,
ChatgptDuo,
+ Vitalentum,
ChatgptAi,
ChatForAi,
- ChatBase,
- AItianhu,
- Wewordle,
+ ChatBase,
+ Liaobots,
Yqcloud,
Myshell,
FreeGpt,
Vercel,
- DeepAi,
+ DeepAi,
Aichat,
+ AiAsk,
Aivvm,
GptGo,
Ylokh,
Bard,
Aibn,
Bing,
+ You,
H2o,
)
@@ -33,19 +34,25 @@ class Model:
base_provider: str
best_provider: Union[type[BaseProvider], RetryProvider] = None
-# Config for HuggingChat, OpenAssistant
-# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model(
name = "",
base_provider = "",
best_provider = RetryProvider([
Bing, # Not fully GPT 3 or 4
- PerplexityAi, # Adds references to sources
- Wewordle, # Responds with markdown
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
ChatgptDuo, # Include search results
- DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
+ Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
+ ])
+)
+
+# GPT-3.5 too, but all providers support long responses and custom timeouts
+gpt_35_long = Model(
+ name = 'gpt-3.5-turbo',
+ base_provider = 'openai',
+ best_provider = RetryProvider([
+ AiAsk, Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
+ FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud
])
)
@@ -54,7 +61,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
+ Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
])
)