author     Heiner Lohaus <heiner@lohaus.eu>   2023-10-22 01:22:25 +0200
committer  Heiner Lohaus <heiner@lohaus.eu>   2023-10-22 01:22:25 +0200
commit     a3af9fac3ee152399ba031e2124149fdcf47bc33 (patch)
tree       88b95e2b4edb5b6744bdcc4794be4a94c4d503ca /g4f/Provider
parent     ~ | Merge pull request #1112 from lategege/main (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--   g4f/Provider/FakeGpt.py    94
-rw-r--r--   g4f/Provider/__init__.py    3
2 files changed, 97 insertions, 0 deletions
diff --git a/g4f/Provider/FakeGpt.py b/g4f/Provider/FakeGpt.py
new file mode 100644
index 00000000..43298a4c
--- /dev/null
+++ b/g4f/Provider/FakeGpt.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import uuid, time, random, string, json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class FakeGpt(AsyncGeneratorProvider):
+    url = "https://chat-shared2.zhile.io"
+    supports_gpt_35_turbo = True
+    working = True
+    _access_token = None
+    _cookie_jar = None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Accept-Language": "en-US",
+            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
+            "Referer": "https://chat-shared2.zhile.io/?v=2",
+            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-ch-ua-mobile": "?0",
+        }
+        async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
+            if not cls._access_token:
+                async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
+                    response.raise_for_status()
+                    list = (await response.json())["loads"]
+                    token_ids = [t["token_id"] for t in list if t["count"] == 0]
+                data = {
+                    "token_key": random.choice(token_ids),
+                    "session_password": random_string()
+                }
+                async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
+                    response.raise_for_status()
+                async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
+                    response.raise_for_status()
+                    cls._access_token = (await response.json())["accessToken"]
+                    cls._cookie_jar = session.cookie_jar
+            headers = {
+                "Content-Type": "application/json",
+                "Accept": "text/event-stream",
+                "X-Authorization": f"Bearer {cls._access_token}",
+            }
+            prompt = format_prompt(messages)
+            data = {
+                "action": "next",
+                "messages": [
+                    {
+                        "id": str(uuid.uuid4()),
+                        "author": {"role": "user"},
+                        "content": {"content_type": "text", "parts": [prompt]},
+                        "metadata": {},
+                    }
+                ],
+                "parent_message_id": str(uuid.uuid4()),
+                "model": "text-davinci-002-render-sha",
+                "plugin_ids": [],
+                "timezone_offset_min": -120,
+                "suggestions": [],
+                "history_and_training_disabled": True,
+                "arkose_token": "",
+                "force_paragen": False,
+            }
+            last_message = ""
+            async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
+                async for line in response.content:
+                    if line.startswith(b"data: "):
+                        line = line[6:]
+                        if line == b"[DONE]":
+                            break
+                        try:
+                            line = json.loads(line)
+                            if line["message"]["metadata"]["message_type"] == "next":
+                                new_message = line["message"]["content"]["parts"][0]
+                                yield new_message[len(last_message):]
+                                last_message = new_message
+                        except:
+                            continue
+            if not last_message:
+                raise RuntimeError("No valid response")
+
+def random_string(length: int = 10):
+    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
\ No newline at end of file
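
For anyone wanting to exercise the new provider on its own before it is wired into the package exports below, a minimal smoke-test sketch follows. It relies only on what the diff above defines (FakeGpt.create_async_generator is an async generator classmethod that yields text fragments); the model string and prompt are illustrative, and the call succeeds only while the chat-shared2.zhile.io token pool is reachable.

import asyncio

from g4f.Provider.FakeGpt import FakeGpt

async def main() -> None:
    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    # create_async_generator contains a yield, so calling it returns an async
    # generator that can be iterated directly; each item is the newly streamed
    # piece of the reply (the provider yields deltas, not the full message).
    async for chunk in FakeGpt.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)
    print()

if __name__ == "__main__":
    asyncio.run(main())
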
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 1dfa6a8d..a465b428 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -17,6 +17,7 @@ from .ChatgptFree import ChatgptFree
 from .ChatgptLogin import ChatgptLogin
 from .ChatgptX import ChatgptX
 from .Cromicle import Cromicle
+from .FakeGpt import FakeGpt
 from .FreeGpt import FreeGpt
 from .GPTalk import GPTalk
 from .GptChatly import GptChatly
@@ -73,6 +74,7 @@ class ProviderUtils:
         'Equing': Equing,
         'FastGpt': FastGpt,
         'Forefront': Forefront,
+        'FakeGpt': FakeGpt,
         'FreeGpt': FreeGpt,
         'GPTalk': GPTalk,
         'GptChatly': GptChatly,
@@ -143,6 +145,7 @@ __all__ = [
     'DfeHub',
     'EasyChat',
     'Forefront',
+    'FakeGpt',
     'FreeGpt',
     'GPTalk',
     'GptChatly',
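
Once FakeGpt is registered in g4f/Provider/__init__.py as shown above, it can also be selected through the library's high-level entry point. The sketch below assumes the g4f.ChatCompletion.create interface documented in the project README at this point in the repository's history (optional provider= and stream= arguments); it is an illustration, not part of this commit.

import g4f
from g4f.Provider import FakeGpt

# Non-streaming call: returns the complete reply as a single string.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Explain what an async generator is."}],
    provider=FakeGpt,
)
print(response)

# Streaming call: iterates over text chunks as the provider yields them.
for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write one sentence about aiohttp."}],
    provider=FakeGpt,
    stream=True,
):
    print(chunk, end="", flush=True)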