Diffstat
-rw-r--r--  g4f/Provider/ChatForAi.py                                 2
-rw-r--r--  g4f/Provider/ChatgptAi.py                                72
-rw-r--r--  g4f/Provider/FakeGpt.py                                  94
-rw-r--r--  g4f/Provider/FreeGpt.py                                   5
-rw-r--r--  g4f/Provider/Geekgpt.py                                  27
-rw-r--r--  g4f/Provider/Hashnode.py                                 79
-rw-r--r--  g4f/Provider/Liaobots.py                                  2
-rw-r--r--  g4f/Provider/MyShell.py                                  89
-rw-r--r--  g4f/Provider/NoowAi.py                                    2
-rw-r--r--  g4f/Provider/Phind.py                                     4
-rw-r--r--  g4f/Provider/Vercel.py                                    8
-rw-r--r--  g4f/Provider/Yqcloud.py                                  14
-rw-r--r--  g4f/Provider/__init__.py                                 10
-rw-r--r--  g4f/Provider/deprecated/Myshell.py (renamed from g4f/Provider/Myshell.py)  48
-rw-r--r--  g4f/Provider/deprecated/__init__.py                       3
-rw-r--r--  g4f/Provider/retry_provider.py                           13
-rw-r--r--  g4f/models.py                                            33
17 files changed, 368 insertions, 137 deletions
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index 0ccc8444..718affeb 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -10,7 +10,7 @@ from .base_provider import AsyncGeneratorProvider
class ChatForAi(AsyncGeneratorProvider):
url = "https://chatforai.store"
- working = False
+ working = True
supports_gpt_35_turbo = True
@classmethod
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index cf45909c..d5fd5bff 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -1,36 +1,34 @@
from __future__ import annotations
-import re
+import re, html, json, string, random
from aiohttp import ClientSession
-from ..typing import Messages
-from .base_provider import AsyncProvider, format_prompt
+from ..typing import Messages, AsyncResult
+from .base_provider import AsyncGeneratorProvider
-class ChatgptAi(AsyncProvider):
- url: str = "https://chatgpt.ai/"
+class ChatgptAi(AsyncGeneratorProvider):
+ url: str = "https://chatgpt.ai"
working = True
supports_gpt_35_turbo = True
- _nonce = None
- _post_id = None
- _bot_id = None
+ _system = None
@classmethod
- async def create_async(
+ async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
- ) -> str:
+ ) -> AsyncResult:
headers = {
"authority" : "chatgpt.ai",
"accept" : "*/*",
- "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+ "accept-language" : "en-US",
"cache-control" : "no-cache",
- "origin" : "https://chatgpt.ai",
+ "origin" : cls.url,
"pragma" : "no-cache",
- "referer" : cls.url,
+ "referer" : f"{cls.url}/",
"sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform" : '"Windows"',
@@ -42,34 +40,40 @@ class ChatgptAi(AsyncProvider):
async with ClientSession(
headers=headers
) as session:
- if not cls._nonce:
+ if not cls._system:
async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status()
text = await response.text()
- result = re.search(r'data-nonce="(.*?)"', text)
+ result = re.search(r"data-system='(.*?)'", text)
if result:
- cls._nonce = result.group(1)
- result = re.search(r'data-post-id="(.*?)"', text)
- if result:
- cls._post_id = result.group(1)
- result = re.search(r'data-bot-id="(.*?)"', text)
- if result:
- cls._bot_id = result.group(1)
- if not cls._nonce or not cls._post_id or not cls._bot_id:
- raise RuntimeError("Nonce, post-id or bot-id not found")
-
+ cls._system = json.loads(html.unescape(result.group(1)))
+ if not cls._system:
+ raise RuntimeError("System args not found")
+
data = {
- "_wpnonce": cls._nonce,
- "post_id": cls._post_id,
- "url": "https://chatgpt.ai",
- "action": "wpaicg_chat_shortcode_message",
- "message": format_prompt(messages),
- "bot_id": cls._bot_id
+ "botId": cls._system["botId"],
+ "customId": cls._system["customId"],
+ "session": cls._system["sessionId"],
+ "chatId": "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=11)),
+ "contextId": cls._system["contextId"],
+ "messages": messages,
+ "newMessage": messages[-1]["content"],
+ "stream": True
}
async with session.post(
- "https://chatgpt.ai/wp-admin/admin-ajax.php",
+ f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
proxy=proxy,
- data=data
+ json=data
) as response:
response.raise_for_status()
-            return (await response.json())["data"]
\ No newline at end of file
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ try:
+ line = json.loads(line[6:])
+ assert "type" in line
+ except:
+ raise RuntimeError(f"Broken line: {line.decode()}")
+ if line["type"] == "live":
+ yield line["data"]
+ elif line["type"] == "end":
+                        break
\ No newline at end of file
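The rewritten ChatgptAi provider scrapes the page's data-system attribute for session arguments, then streams /wp-json/mwai-ui/v1/chats/submit as server-sent events. A minimal sketch of the same "data:" line handling in isolation (the sample stream below is fabricated for illustration):

    import json

    def parse_mwai_stream(lines):
        # Yield "live" payloads and stop at the "end" marker,
        # mirroring the loop in create_async_generator above.
        for line in lines:
            if not line.startswith(b"data: "):
                continue
            event = json.loads(line[6:])
            if event["type"] == "live":
                yield event["data"]
            elif event["type"] == "end":
                break

    sample = [b'data: {"type": "live", "data": "Hel"}',
              b'data: {"type": "live", "data": "lo"}',
              b'data: {"type": "end", "data": ""}']
    assert "".join(parse_mwai_stream(sample)) == "Hello"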
diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/FakeGpt.py
new file mode 100644
index 00000000..5bce1280
--- /dev/null
+++ b/g4f/Provider/FakeGpt.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import uuid, time, random, string, json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class FakeGpt(AsyncGeneratorProvider):
+ url = "https://chat-shared2.zhile.io"
+ supports_gpt_35_turbo = True
+ working = True
+ _access_token = None
+ _cookie_jar = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Accept-Language": "en-US",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
+ "Referer": "https://chat-shared2.zhile.io/?v=2",
+ "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-ua-mobile": "?0",
+ }
+ async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
+ if not cls._access_token:
+ async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
+ response.raise_for_status()
+ list = (await response.json())["loads"]
+ token_ids = [t["token_id"] for t in list if t["count"] == 0]
+ data = {
+ "token_key": random.choice(token_ids),
+ "session_password": random_string()
+ }
+ async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
+ response.raise_for_status()
+ cls._access_token = (await response.json())["accessToken"]
+ cls._cookie_jar = session.cookie_jar
+ headers = {
+ "Content-Type": "application/json",
+ "Accept": "text/event-stream",
+ "X-Authorization": f"Bearer {cls._access_token}",
+ }
+ prompt = format_prompt(messages)
+ data = {
+ "action": "next",
+ "messages": [
+ {
+ "id": str(uuid.uuid4()),
+ "author": {"role": "user"},
+ "content": {"content_type": "text", "parts": [prompt]},
+ "metadata": {},
+ }
+ ],
+ "parent_message_id": str(uuid.uuid4()),
+ "model": "text-davinci-002-render-sha",
+ "plugin_ids": [],
+ "timezone_offset_min": -120,
+ "suggestions": [],
+ "history_and_training_disabled": True,
+ "arkose_token": "",
+ "force_paragen": False,
+ }
+ last_message = ""
+ async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ line = line[6:]
+ if line == b"[DONE]":
+ break
+ try:
+ line = json.loads(line)
+ if line["message"]["metadata"]["message_type"] == "next":
+ new_message = line["message"]["content"]["parts"][0]
+ yield new_message[len(last_message):]
+ last_message = new_message
+ except:
+ continue
+ if not last_message:
+ raise RuntimeError("No valid response")
+
+def random_string(length: int = 10):
+    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
\ No newline at end of file
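FakeGpt's event stream carries the full message text in every chunk, so the provider yields only the suffix that grew since the last chunk. The delta logic in isolation (the sample snapshots are made up):

    def deltas(snapshots):
        # Each snapshot is the complete message so far; emit only the new tail.
        last = ""
        for snapshot in snapshots:
            yield snapshot[len(last):]
            last = snapshot

    assert list(deltas(["He", "Hello", "Hello!"])) == ["He", "llo", "!"]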
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 6638b67f..a6d31644 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -41,7 +41,10 @@ class FreeGpt(AsyncGeneratorProvider):
async with session.post(f"{url}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
- yield chunk.decode()
+ chunk = chunk.decode()
+ if chunk == "当前地区当日额度已消耗完":
+ raise RuntimeError("Rate limit reached")
+ yield chunk
@classmethod
@property
diff --git a/g4f/Provider/Geekgpt.py b/g4f/Provider/Geekgpt.py
index 1a82757c..3c577cf8 100644
--- a/g4f/Provider/Geekgpt.py
+++ b/g4f/Provider/Geekgpt.py
@@ -14,19 +14,23 @@ class GeekGpt(BaseProvider):
supports_gpt_4 = True
@classmethod
- def create_completion(cls,
- model: str,
- messages: Messages,
- stream: bool, **kwargs) -> CreateResult:
-
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+ if not model:
+ model = "gpt-3.5-turbo"
json_data = {
'messages': messages,
- 'model': model,
- 'temperature': kwargs.get('temperature', 0.9),
- 'presence_penalty': kwargs.get('presence_penalty', 0),
- 'top_p': kwargs.get('top_p', 1),
- 'frequency_penalty': kwargs.get('frequency_penalty', 0),
- 'stream': True
+ 'model': model,
+ 'temperature': kwargs.get('temperature', 0.9),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'stream': True
}
data = dumps(json_data, separators=(',', ':'))
@@ -61,7 +65,6 @@ class GeekGpt(BaseProvider):
try:
content = json.loads(json_data)["choices"][0]["delta"].get("content")
-
except Exception as e:
raise RuntimeError(f'error | {e} :', json_data)
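GeekGpt consumes OpenAI-style streaming chunks, pulling choices[0].delta.content out of each data record. A hedged sketch of that extraction (the chunk below is a fabricated example of the wire format):

    import json

    chunk = '{"choices": [{"delta": {"content": "Hi"}}]}'  # fabricated example
    content = json.loads(chunk)["choices"][0]["delta"].get("content")
    assert content == "Hi"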
diff --git a/g4f/Provider/Hashnode.py b/g4f/Provider/Hashnode.py
new file mode 100644
index 00000000..7f308d7e
--- /dev/null
+++ b/g4f/Provider/Hashnode.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import secrets
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+class SearchTypes():
+ quick = "quick"
+ code = "code"
+ websearch = "websearch"
+
+class Hashnode(AsyncGeneratorProvider):
+ url = "https://hashnode.com"
+ supports_gpt_35_turbo = True
+ working = True
+ _sources = []
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ search_type: str = SearchTypes.websearch,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/rix",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[-1]["content"]
+ cls._sources = []
+ if search_type == "websearch":
+ async with session.post(
+ f"{cls.url}/api/ai/rix/search",
+ json={"prompt": prompt},
+ proxy=proxy,
+ ) as response:
+ response.raise_for_status()
+ cls._sources = (await response.json())["result"]
+ data = {
+ "chatId": secrets.token_hex(16).zfill(32),
+ "history": messages,
+ "prompt": prompt,
+ "searchType": search_type,
+ "urlToScan": None,
+ "searchResults": cls._sources,
+ }
+ async with session.post(
+ f"{cls.url}/api/ai/rix/completion",
+ json=data,
+ proxy=proxy,
+ ) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
+
+ @classmethod
+ def get_sources(cls) -> list:
+ return [{
+ "title": source["name"],
+ "url": source["url"]
+    } for source in cls._sources]
\ No newline at end of file
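When search_type is "websearch", Hashnode first posts the prompt to /api/ai/rix/search and caches the results for both the completion request and get_sources(). One small observation on the chatId above: secrets.token_hex(16) already returns 32 hex characters, so the .zfill(32) is effectively a no-op:

    import secrets

    chat_id = secrets.token_hex(16)  # 16 random bytes -> 32 hex chars
    assert len(chat_id) == 32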
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 72731728..740be856 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -30,7 +30,7 @@ models = {
class Liaobots(AsyncGeneratorProvider):
url = "https://liaobots.site"
- working = False
+ working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
_auth_code = None
diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py
new file mode 100644
index 00000000..fe8604bb
--- /dev/null
+++ b/g4f/Provider/MyShell.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import time, random, json
+
+from ..requests import StreamSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+class MyShell(AsyncGeneratorProvider):
+ url = "https://app.myshell.ai/chat"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ **kwargs
+ ) -> AsyncResult:
+ user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
+ headers = {
+ "User-Agent": user_agent,
+ "Myshell-Service-Name": "organics-api",
+ "Visitor-Id": generate_visitor_id(user_agent)
+ }
+ async with StreamSession(
+ impersonate="chrome107",
+ proxies={"https": proxy},
+ timeout=timeout,
+ headers=headers
+ ) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "botId": "1",
+ "conversation_scenario": 3,
+ "message": prompt,
+ "messageType": 1
+ }
+ async with session.post("https://api.myshell.ai/v1/bot/chat/send_message", json=data) as response:
+ response.raise_for_status()
+ event = None
+ async for line in response.iter_lines():
+ if line.startswith(b"event: "):
+ event = line[7:]
+ elif event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT":
+ if line.startswith(b"data: "):
+ yield json.loads(line[6:])["content"]
+ if event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT_STREAM_PUSH_FINISHED":
+ break
+
+
+def xor_hash(B: str):
+ r = []
+ i = 0
+
+ def o(e, t):
+ o_val = 0
+ for i in range(len(t)):
+ o_val |= r[i] << (8 * i)
+ return e ^ o_val
+
+ for e in range(len(B)):
+ t = ord(B[e])
+ r.insert(0, 255 & t)
+
+ if len(r) >= 4:
+ i = o(i, r)
+ r = []
+
+ if len(r) > 0:
+ i = o(i, r)
+
+ return hex(i)[2:]
+
+def performance() -> str:
+ t = int(time.time() * 1000)
+ e = 0
+ while t == int(time.time() * 1000):
+ e += 1
+ return hex(t)[2:] + hex(e)[2:]
+
+def generate_visitor_id(user_agent: str) -> str:
+ f = performance()
+ r = hex(int(random.random() * (16**16)))[2:-2]
+ d = xor_hash(user_agent)
+ e = hex(1080 * 1920)[2:]
+    return f"{f}-{r}-{d}-{e}-{f}"
\ No newline at end of file
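The visitor id is five dash-joined hex fields: a timestamp from performance(), a random component, an XOR hash of the user agent, the hard-coded screen area, and the timestamp repeated. A usage sketch, assuming the module path this diff introduces (and that the package's import-time dependencies are installed):

    # Assumes g4f.Provider.MyShell exists as added above.
    from g4f.Provider.MyShell import generate_visitor_id

    ua = ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
          "(KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36")
    parts = generate_visitor_id(ua).split("-")
    assert len(parts) == 5 and parts[0] == parts[-1]
    assert parts[3] == hex(1080 * 1920)[2:]  # "1fa400", the screen area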
diff --git a/g4f/Provider/NoowAi.py b/g4f/Provider/NoowAi.py
index 93748258..9dc26d35 100644
--- a/g4f/Provider/NoowAi.py
+++ b/g4f/Provider/NoowAi.py
@@ -61,6 +61,8 @@ class NoowAi(AsyncGeneratorProvider):
yield line["data"]
elif line["type"] == "end":
break
+ elif line["type"] == "error":
+ raise RuntimeError(line["data"])
def random_string(length: int = 10):
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
\ No newline at end of file
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index d7c6f7c7..0e698cba 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-import random
+import random, string
from datetime import datetime
from ..typing import AsyncResult, Messages
@@ -22,7 +22,7 @@ class Phind(AsyncGeneratorProvider):
timeout: int = 120,
**kwargs
) -> AsyncResult:
- chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
+ chars = string.ascii_lowercase + string.digits
user_id = ''.join(random.choice(chars) for _ in range(24))
data = {
"question": format_prompt(messages),
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 10130320..7c3b8c55 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -4,7 +4,6 @@ import json, base64, requests, execjs, random, uuid
from ..typing import Messages, TypedDict, CreateResult, Any
from .base_provider import BaseProvider
-from abc import abstractmethod
from ..debug import logging
@@ -15,12 +14,13 @@ class Vercel(BaseProvider):
supports_stream = True
@staticmethod
- @abstractmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
- proxy: str = None, **kwargs) -> CreateResult:
+ proxy: str = None,
+ **kwargs
+ ) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
@@ -65,7 +65,7 @@ class Vercel(BaseProvider):
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
try:
response.raise_for_status()
- except Exception:
+ except:
continue
for token in response.iter_content(chunk_size=None):
yield token.decode()
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
index d6ce21a9..2829c5bf 100644
--- a/g4f/Provider/Yqcloud.py
+++ b/g4f/Provider/Yqcloud.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import random
-from aiohttp import ClientSession
+from ..requests import StreamSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
- working = False
+ working = True
supports_gpt_35_turbo = True
@staticmethod
@@ -17,15 +17,16 @@ class Yqcloud(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
+ timeout: int = 120,
**kwargs,
) -> AsyncResult:
- async with ClientSession(
- headers=_create_header()
+ async with StreamSession(
+ headers=_create_header(), proxies={"https": proxy}, timeout=timeout
) as session:
payload = _create_payload(messages, **kwargs)
- async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
+ async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
response.raise_for_status()
- async for chunk in response.content.iter_any():
+ async for chunk in response.iter_content():
if chunk:
chunk = chunk.decode()
if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
@@ -38,6 +39,7 @@ def _create_header():
"accept" : "application/json, text/plain, */*",
"content-type" : "application/json",
"origin" : "https://chat9.yqcloud.top",
+ "referer" : "https://chat9.yqcloud.top/"
}
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 1dfa6a8d..653b6026 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -17,15 +17,17 @@ from .ChatgptFree import ChatgptFree
from .ChatgptLogin import ChatgptLogin
from .ChatgptX import ChatgptX
from .Cromicle import Cromicle
+from .FakeGpt import FakeGpt
from .FreeGpt import FreeGpt
from .GPTalk import GPTalk
from .GptChatly import GptChatly
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
+from .Hashnode import Hashnode
from .Liaobots import Liaobots
from .Llama2 import Llama2
-from .Myshell import Myshell
+from .MyShell import MyShell
from .NoowAi import NoowAi
from .Opchatgpts import Opchatgpts
from .Phind import Phind
@@ -73,6 +75,7 @@ class ProviderUtils:
'Equing': Equing,
'FastGpt': FastGpt,
'Forefront': Forefront,
+ 'FakeGpt': FakeGpt,
'FreeGpt': FreeGpt,
'GPTalk': GPTalk,
'GptChatly': GptChatly,
@@ -80,6 +83,7 @@ class ProviderUtils:
'GptForLove': GptForLove,
'GptGo': GptGo,
'GptGod': GptGod,
+ 'Hashnode': Hashnode,
'H2o': H2o,
'HuggingChat': HuggingChat,
'Komo': Komo,
@@ -88,6 +92,7 @@ class ProviderUtils:
'Lockchat': Lockchat,
'MikuChat': MikuChat,
'Myshell': Myshell,
+ 'MyShell': MyShell,
'NoowAi': NoowAi,
'Opchatgpts': Opchatgpts,
'OpenAssistant': OpenAssistant,
@@ -143,6 +148,7 @@ __all__ = [
'DfeHub',
'EasyChat',
'Forefront',
+ 'FakeGpt',
'FreeGpt',
'GPTalk',
'GptChatly',
@@ -150,12 +156,14 @@ __all__ = [
'GetGpt',
'GptGo',
'GptGod',
+ 'Hashnode',
'H2o',
'HuggingChat',
'Liaobots',
'Llama2',
'Lockchat',
'Myshell',
+ 'MyShell',
'NoowAi',
'Opchatgpts',
'Raycast',
diff --git a/g4f/Provider/Myshell.py b/g4f/Provider/deprecated/Myshell.py
index 096545f9..85731325 100644
--- a/g4f/Provider/Myshell.py
+++ b/g4f/Provider/deprecated/Myshell.py
@@ -8,8 +8,8 @@ from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
models = {
@@ -174,46 +174,4 @@ def generate_visitor_id(user_agent: str) -> str:
r = hex(int(random.random() * (16**16)))[2:-2]
d = xor_hash(user_agent)
e = hex(1080 * 1920)[2:]
- return f"{f}-{r}-{d}-{e}-{f}"
-
-
-
-# update
-# from g4f.requests import StreamSession
-
-# async def main():
-# headers = {
-# 'authority': 'api.myshell.ai',
-# 'accept': 'application/json',
-# 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-# 'content-type': 'application/json',
-# 'myshell-service-name': 'organics-api',
-# 'origin': 'https://app.myshell.ai',
-# 'referer': 'https://app.myshell.ai/',
-# 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
-# 'sec-ch-ua-mobile': '?0',
-# 'sec-ch-ua-platform': '"macOS"',
-# 'sec-fetch-dest': 'empty',
-# 'sec-fetch-mode': 'cors',
-# 'sec-fetch-site': 'same-site',
-# 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
-# 'visitor-id': '18ae8fe5d916d3-0213f29594b17f-18525634-157188-18ae8fe5d916d3',
-# }
-
-# json_data = {
-# 'conversation_scenario': 3,
-# 'botId': '4738',
-# 'message': 'hi',
-# 'messageType': 1,
-# }
-
-# async with StreamSession(headers=headers, impersonate="chrome110") as session:
-# async with session.post(f'https://api.myshell.ai/v1/bot/chat/send_message',
-# json=json_data) as response:
-
-# response.raise_for_status()
-# async for chunk in response.iter_content():
-# print(chunk.decode("utf-8"))
-
-# import asyncio
-# asyncio.run(main()) \ No newline at end of file
+    return f"{f}-{r}-{d}-{e}-{f}"
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index db48c3fb..f8e35b37 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -12,4 +12,5 @@ from .V50 import V50
from .FastGpt import FastGpt
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
-from .H2o import H2o
\ No newline at end of file
+from .H2o import H2o
+from .Myshell import Myshell
\ No newline at end of file
diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py
index ee342315..39d61c35 100644
--- a/g4f/Provider/retry_provider.py
+++ b/g4f/Provider/retry_provider.py
@@ -71,11 +71,10 @@ class RetryProvider(AsyncProvider):
self.exceptions: Dict[str, Exception] = {}
for provider in providers:
try:
- return await asyncio.wait_for(provider.create_async(model, messages, **kwargs), timeout=60)
- except asyncio.TimeoutError as e:
- self.exceptions[provider.__name__] = e
- if self.logging:
- print(f"{provider.__name__}: TimeoutError: {e}")
+ return await asyncio.wait_for(
+ provider.create_async(model, messages, **kwargs),
+ timeout=kwargs.get("timeout", 60)
+ )
except Exception as e:
self.exceptions[provider.__name__] = e
if self.logging:
@@ -85,8 +84,8 @@ class RetryProvider(AsyncProvider):
def raise_exceptions(self) -> None:
if self.exceptions:
- raise RuntimeError("\n".join(["All providers failed:"] + [
+ raise RuntimeError("\n".join(["RetryProvider failed:"] + [
f"{p}: {self.exceptions[p].__class__.__name__}: {self.exceptions[p]}" for p in self.exceptions
]))
-        raise RuntimeError("No provider found")
\ No newline at end of file
+        raise RuntimeError("RetryProvider: No provider found")
\ No newline at end of file
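The retry loop now forwards a caller-supplied timeout to asyncio.wait_for instead of special-casing TimeoutError, and the failure messages name the RetryProvider. The same pattern with stub providers (all names below are hypothetical):

    import asyncio

    async def first_success(providers, timeout=60):
        # Try providers in order, remembering why each one failed.
        exceptions = {}
        for provider in providers:
            try:
                return await asyncio.wait_for(provider(), timeout=timeout)
            except Exception as e:
                exceptions[provider.__name__] = e
        raise RuntimeError("\n".join(["RetryProvider failed:"] + [
            f"{name}: {e.__class__.__name__}: {e}" for name, e in exceptions.items()
        ]))

    async def flaky():   # hypothetical stub that always fails
        raise ValueError("boom")

    async def steady():  # hypothetical stub that succeeds
        return "ok"

    assert asyncio.run(first_success([flaky, steady])) == "ok"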
diff --git a/g4f/models.py b/g4f/models.py
index 0ebe7395..7eee917a 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -3,20 +3,14 @@ from dataclasses import dataclass
from .typing import Union
from .Provider import BaseProvider, RetryProvider
from .Provider import (
- ChatgptLogin,
- ChatgptDemo,
- ChatgptDuo,
GptForLove,
- Opchatgpts,
ChatgptAi,
GptChatly,
- Liaobots,
ChatgptX,
- Yqcloud,
+ ChatBase,
GeekGpt,
- Myshell,
+ FakeGpt,
FreeGpt,
- Cromicle,
NoowAi,
Vercel,
Aichat,
@@ -24,15 +18,10 @@ from .Provider import (
AiAsk,
GptGo,
Phind,
- Ylokh,
Bard,
- Aibn,
Bing,
You,
H2o,
-
- ChatForAi,
- ChatBase
)
@dataclass(unsafe_hash=True)
@@ -50,9 +39,8 @@ default = Model(
base_provider = "",
best_provider = RetryProvider([
Bing, # Not fully GPT 3 or 4
- Yqcloud, # Answers short questions in chinese
- ChatgptDuo, # Include search results
- Aibn, Aichat, ChatgptAi, ChatgptLogin, FreeGpt, GptGo, Myshell, Ylokh, GeekGpt
+ AiAsk, Aichat, ChatgptAi, FreeGpt, GptGo, GeekGpt,
+ Phind, You
])
)
@@ -61,9 +49,10 @@ gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You,
- GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts,
- NoowAi, GeekGpt, Phind
+ AiAsk, Aichat, FreeGpt, You,
+ GptChatly, GptForLove,
+ NoowAi, GeekGpt, Phind,
+ FakeGpt
])
)
@@ -72,8 +61,8 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider=RetryProvider([
- ChatgptX, ChatgptDemo, GptGo, You,
- NoowAi, GPTalk, GptForLove, Phind, ChatBase, Cromicle
+ ChatgptX, GptGo, You,
+ NoowAi, GPTalk, GptForLove, Phind, ChatBase
])
)
@@ -81,7 +70,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
- Bing, GeekGpt, Liaobots, Phind
+ Bing, GeekGpt, Phind
])
)
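With the pruned provider lists, the default and gpt-3.5 models retry across the remaining working providers in order. A usage sketch, assuming the package-level API of this g4f release:

    import g4f

    # RetryProvider walks the model's best_provider list until one succeeds.
    response = g4f.ChatCompletion.create(
        model=g4f.models.gpt_35_turbo,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)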