author    Heiner Lohaus <heiner@lohaus.eu>    2023-10-02 02:04:22 +0200
committer Heiner Lohaus <heiner@lohaus.eu>    2023-10-02 02:04:22 +0200
commit    eb0e2c6a93c3f21937457d13220ce2b7fca1f04a (patch)
tree      e0139cd399052db29326d502f9d738d1943c7bce /g4f
parent    Change event loop policy on windows (diff)
Diffstat (limited to 'g4f')
 g4f/Provider/AItianhuSpace.py |   8
 g4f/Provider/Aibn.py          |   6
 g4f/Provider/Aivvm.py         |   6
 g4f/Provider/ChatForAi.py     |   6
 g4f/Provider/ChatgptDuo.py    |  20
 g4f/Provider/FreeGpt.py       |   6
 g4f/Provider/Ylokh.py         |  13
 g4f/models.py                 |   9
 g4f/requests.py               | 121
 9 files changed, 119 insertions(+), 76 deletions(-)
diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
index eb072db5..8805b1c0 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import random, json
-from g4f.requests import AsyncSession
+from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt
domains = {
@@ -33,7 +33,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
domain = domains[model]
url = f'https://{rand}{domain}'
- async with AsyncSession(impersonate="chrome110", verify=False) as session:
+ async with StreamSession(impersonate="chrome110", verify=False) as session:
data = {
"prompt": format_prompt(messages),
"options": {},
@@ -50,10 +50,10 @@ class AItianhuSpace(AsyncGeneratorProvider):
}
async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
response.raise_for_status()
- async for line in response.content:
+ async for line in response.iter_lines():
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
- line = json.loads(line.rstrip())
+ line = json.loads(line)
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:
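
Note: this is the recurring migration in this commit. Providers switch from the aiohttp-backed AsyncSession wrapper to the new StreamSession, whose responses expose a requests-style iter_lines() instead of an aiohttp StreamReader at response.content. A minimal sketch of the resulting pattern (the URL, payload, and function name are placeholders, not part of the commit):

    from g4f.requests import StreamSession

    async def stream_chat(data: dict):
        # impersonate/verify are forwarded to curl_cffi
        async with StreamSession(impersonate="chrome110", verify=False) as session:
            async with session.post("https://example.com/api/chat-process", json=data) as response:
                response.raise_for_status()
                # iter_lines() yields complete lines without the trailing
                # newline, so the old line.rstrip() before json.loads()
                # is no longer needed
                async for line in response.iter_lines():
                    yield line
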
diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/Aibn.py
index 1ef928be..fe278f84 100644
--- a/g4f/Provider/Aibn.py
+++ b/g4f/Provider/Aibn.py
@@ -4,7 +4,7 @@ import time
import hashlib
from ..typing import AsyncGenerator
-from g4f.requests import AsyncSession
+from ..requests import StreamRequest
from .base_provider import AsyncGeneratorProvider
@@ -20,7 +20,7 @@ class Aibn(AsyncGeneratorProvider):
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
- async with AsyncSession(impersonate="chrome107") as session:
+ async with StreamRequest(impersonate="chrome107") as session:
timestamp = int(time.time())
data = {
"messages": messages,
@@ -30,7 +30,7 @@ class Aibn(AsyncGeneratorProvider):
}
async with session.post(f"{cls.url}/api/generate", json=data) as response:
response.raise_for_status()
- async for chunk in response.content.iter_any():
+ async for chunk in response.iter_content():
yield chunk.decode()
@classmethod
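
For the chunk-based providers (Aibn, Aivvm, ChatForAi, FreeGpt below), iter_content() replaces aiohttp's response.content.iter_any(). A sketch of the shared pattern (placeholder URL and names; per the new requests.py later in this diff, chunk_size is ignored and decode_unicode is unimplemented, so chunks are decoded manually):

    from g4f.requests import StreamSession

    async def generate(data: dict):
        async with StreamSession(impersonate="chrome107") as session:
            async with session.post("https://example.com/api/generate", json=data) as response:
                response.raise_for_status()
                # iter_content() yields whatever curl delivers; chunk
                # boundaries are not configurable from this side
                async for chunk in response.iter_content():
                    yield chunk.decode()
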
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 5cd91546..c4ec677c 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from ..requests import AsyncSession
+from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
@@ -43,10 +43,10 @@ class Aivvm(AsyncGeneratorProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
- async with AsyncSession(impersonate="chrome107") as session:
+ async with StreamSession(impersonate="chrome107") as session:
async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
response.raise_for_status()
- async for chunk in response.content.iter_any():
+ async for chunk in response.iter_content():
yield chunk.decode()
@classmethod
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index efb5478e..779799cf 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import time, hashlib
from ..typing import AsyncGenerator
-from g4f.requests import AsyncSession
+from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
@@ -19,7 +19,7 @@ class ChatForAi(AsyncGeneratorProvider):
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
- async with AsyncSession(impersonate="chrome107") as session:
+ async with StreamSession(impersonate="chrome107") as session:
conversation_id = f"id_{int(time.time())}"
prompt = messages[-1]["content"]
timestamp = int(time.time())
@@ -43,7 +43,7 @@ class ChatForAi(AsyncGeneratorProvider):
}
async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
response.raise_for_status()
- async for chunk in response.content.iter_any():
+ async for chunk in response.iter_content():
yield chunk.decode()
@classmethod
diff --git a/g4f/Provider/ChatgptDuo.py b/g4f/Provider/ChatgptDuo.py
index 07f4c16c..abed8a3c 100644
--- a/g4f/Provider/ChatgptDuo.py
+++ b/g4f/Provider/ChatgptDuo.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from g4f.requests import AsyncSession
+from curl_cffi.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt
@@ -23,17 +23,17 @@ class ChatgptDuo(AsyncProvider):
"search": prompt,
"purpose": "ask",
}
- async with session.post(f"{cls.url}/", data=data) as response:
- response.raise_for_status()
- data = await response.json()
+ response = await session.post(f"{cls.url}/", data=data)
+ response.raise_for_status()
+ data = response.json()
- cls._sources = [{
- "title": source["title"],
- "url": source["link"],
- "snippet": source["snippet"]
- } for source in data["results"]]
+ cls._sources = [{
+ "title": source["title"],
+ "url": source["link"],
+ "snippet": source["snippet"]
+ } for source in data["results"]]
- return data["answer"]
+ return data["answer"]
@classmethod
def get_sources(cls):
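
ChatgptDuo moves the other way: it drops the wrapper entirely and uses curl_cffi's own AsyncSession, where post() is awaited directly and returns a finished Response, and json() is a plain method rather than a coroutine. A minimal sketch (the URL and impersonate value are illustrative; the field names come from the diff):

    from curl_cffi.requests import AsyncSession

    async def ask(prompt: str) -> str:
        async with AsyncSession(impersonate="chrome107") as session:
            # no `async with session.post(...)` here: curl_cffi returns
            # the complete response from a single awaited call
            response = await session.post("https://example.com/", data={"search": prompt, "purpose": "ask"})
            response.raise_for_status()
            return response.json()["answer"]
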
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 534b69a5..092e1bb6 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import time, hashlib, random
from ..typing import AsyncGenerator
-from g4f.requests import AsyncSession
+from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
domains = [
@@ -23,7 +23,7 @@ class FreeGpt(AsyncGeneratorProvider):
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
- async with AsyncSession(impersonate="chrome107") as session:
+ async with StreamSession(impersonate="chrome107") as session:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
@@ -35,7 +35,7 @@ class FreeGpt(AsyncGeneratorProvider):
url = random.choice(domains)
async with session.post(f"{url}/api/generate", json=data) as response:
response.raise_for_status()
- async for chunk in response.content.iter_any():
+ async for chunk in response.iter_content():
yield chunk.decode()
@classmethod
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index 2187eb78..3c8b32dd 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import json
-from ..requests import AsyncSession
+from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
@@ -37,18 +37,19 @@ class Ylokh(AsyncGeneratorProvider):
"stream": stream,
**kwargs
}
- async with AsyncSession(
- headers=headers
+ async with StreamSession(
+ headers=headers,
+ proxies={"https": proxy}
) as session:
- async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response:
+ async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
response.raise_for_status()
if stream:
- async for line in response.content:
+ async for line in response.iter_lines():
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
- line = json.loads(line[6:-1])
+ line = json.loads(line[6:])
content = line["choices"][0]["delta"].get("content")
if content:
yield content
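
Two behavior changes in Ylokh are worth noting: the proxy moves from a per-request proxy= keyword to a session-level proxies dict (StreamSession no longer translates proxy= itself, see the requests.py diff below), and the SSE payload slice changes from line[6:-1] to line[6:] because iter_lines() already strips the trailing newline that the old aiohttp reader left in place. A sketch of the combined pattern (function name and header are illustrative):

    import json
    from typing import Optional

    from g4f.requests import StreamSession

    async def completions(data: dict, proxy: Optional[str] = None):
        async with StreamSession(
            headers={"Accept": "text/event-stream"},
            proxies={"https": proxy},
        ) as session:
            async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
                response.raise_for_status()
                async for line in response.iter_lines():
                    line = line.decode()
                    if line.startswith("data: [DONE]"):
                        break
                    if line.startswith("data: "):
                        # drop only the 6-char "data: " prefix; there is
                        # no trailing newline left to slice off
                        yield json.loads(line[6:])
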
diff --git a/g4f/models.py b/g4f/models.py
index cca9e850..6b27645a 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -8,16 +8,19 @@ from .Provider import (
PerplexityAi,
ChatgptDuo,
ChatgptAi,
+ ChatForAi,
ChatBase,
AItianhu,
Wewordle,
Yqcloud,
Myshell,
+ FreeGpt,
Vercel,
DeepAi,
Aichat,
Aivvm,
GptGo,
+ Ylokh,
Bard,
Aibn,
Bing,
@@ -42,7 +45,7 @@ default = Model(
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
ChatgptDuo, # Include search results
- DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn,
+ DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
])
)
@@ -51,7 +54,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn,
+ DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
])
)
@@ -59,7 +62,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
- Myshell, AItianhuSpace,
+ Myshell, Ylokh,
])
)
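
The three new providers are appended to the RetryProvider fallback chains for the default and gpt-3.5-turbo models, and Ylokh replaces AItianhuSpace for gpt-4. RetryProvider tries the listed providers in turn until one succeeds, so callers keep using the plain model name. A usage sketch, assuming the package's usual entry point:

    import g4f

    # the RetryProvider above walks the list (now including ChatForAi,
    # FreeGpt and Ylokh) until one provider returns a response
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)
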
diff --git a/g4f/requests.py b/g4f/requests.py
index 367bafa0..78acb9de 100644
--- a/g4f/requests.py
+++ b/g4f/requests.py
@@ -1,25 +1,24 @@
from __future__ import annotations
-import json, sys, asyncio
-from functools import partialmethod
+import warnings, json, asyncio
-from aiohttp import StreamReader
-from aiohttp.base_protocol import BaseProtocol
+from functools import partialmethod
+from asyncio import Future, Queue
+from typing import AsyncGenerator
-from curl_cffi.requests import AsyncSession as BaseSession
-from curl_cffi.requests import Response
+from curl_cffi.requests import AsyncSession, Response
import curl_cffi
-is_newer_0_5_8 = hasattr(BaseSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl")
+is_newer_0_5_8 = hasattr(AsyncSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl")
is_newer_0_5_9 = hasattr(curl_cffi.AsyncCurl, "remove_handle")
-is_newer_0_5_10 = hasattr(BaseSession, "release_curl")
+is_newer_0_5_10 = hasattr(AsyncSession, "release_curl")
class StreamResponse:
- def __init__(self, inner: Response, content: StreamReader, request):
+ def __init__(self, inner: Response, queue: Queue):
self.inner = inner
- self.content = content
- self.request = request
+ self.queue = queue
+ self.request = inner.request
self.status_code = inner.status_code
self.reason = inner.reason
self.ok = inner.ok
@@ -27,7 +26,7 @@ class StreamResponse:
self.cookies = inner.cookies
async def text(self) -> str:
- content = await self.content.read()
+ content = await self.read()
return content.decode()
def raise_for_status(self):
@@ -35,36 +34,74 @@ class StreamResponse:
raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}")
async def json(self, **kwargs):
- return json.loads(await self.content.read(), **kwargs)
+ return json.loads(await self.read(), **kwargs)
+
+ async def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None) -> AsyncGenerator[bytes]:
+ """
+ Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/
+ which is under the License: Apache 2.0
+ """
+ pending = None
+
+ async for chunk in self.iter_content(
+ chunk_size=chunk_size, decode_unicode=decode_unicode
+ ):
+ if pending is not None:
+ chunk = pending + chunk
+ if delimiter:
+ lines = chunk.split(delimiter)
+ else:
+ lines = chunk.splitlines()
+ if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
+ pending = lines.pop()
+ else:
+ pending = None
+
+ for line in lines:
+ yield line
+
+ if pending is not None:
+ yield pending
+
+ async def iter_content(self, chunk_size=None, decode_unicode=False) -> AsyncGenerator[bytes]:
+ if chunk_size:
+ warnings.warn("chunk_size is ignored, there is no way to tell curl that.")
+ if decode_unicode:
+ raise NotImplementedError()
+ while True:
+ chunk = await self.queue.get()
+ if chunk is None:
+ return
+ yield chunk
+
+ async def read(self) -> bytes:
+ return b"".join([chunk async for chunk in self.iter_content()])
class StreamRequest:
def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
self.session = session
self.loop = session.loop if session.loop else asyncio.get_running_loop()
- self.content = StreamReader(
- BaseProtocol(session.loop),
- sys.maxsize,
- loop=session.loop
- )
+ self.queue = Queue()
self.method = method
self.url = url
- if "proxy" in kwargs:
- proxy = kwargs.pop("proxy")
- if proxy:
- kwargs["proxies"] = {"http": proxy, "https": proxy}
self.options = kwargs
+ self.handle = None
- def on_content(self, data):
+ def _on_content(self, data):
if not self.enter.done():
self.enter.set_result(None)
- self.content.feed_data(data)
+ self.queue.put_nowait(data)
- def on_done(self, task):
+ def _on_done(self, task: Future):
if not self.enter.done():
self.enter.set_result(None)
- self.content.feed_eof()
+ self.queue.put_nowait(None)
- async def __aenter__(self) -> StreamResponse:
+ self.loop.call_soon(self.session.release_curl, self.curl)
+
+ async def fetch(self) -> StreamResponse:
+ if self.handle:
+ raise RuntimeError("Request already started")
self.curl = await self.session.pop_curl()
self.enter = self.loop.create_future()
if is_newer_0_5_10:
@@ -72,7 +109,7 @@ class StreamRequest:
self.curl,
self.method,
self.url,
- content_callback=self.on_content,
+ content_callback=self._on_content,
**self.options
)
else:
@@ -80,7 +117,7 @@ class StreamRequest:
self.curl,
self.method,
self.url,
- content_callback=self.on_content,
+ content_callback=self._on_content,
**self.options
)
if is_newer_0_5_9:
@@ -88,8 +125,12 @@ class StreamRequest:
else:
await self.session.acurl.add_handle(self.curl, False)
self.handle = self.session.acurl._curl2future[self.curl]
- self.handle.add_done_callback(self.on_done)
+ self.handle.add_done_callback(self._on_done)
+ # Wait for headers
await self.enter
+ # Raise exceptions
+ if self.handle.done():
+ self.handle.result()
if is_newer_0_5_8:
response = self.session._parse_response(self.curl, _, header_buffer)
response.request = request
@@ -97,18 +138,16 @@ class StreamRequest:
response = self.session._parse_response(self.curl, request, _, header_buffer)
return StreamResponse(
response,
- self.content,
- request
+ self.queue
)
-
- async def __aexit__(self, exc_type, exc, tb):
- if not self.handle.done():
- self.session.acurl.set_result(self.curl)
- self.curl.clean_after_perform()
- self.curl.reset()
- self.session.push_curl(self.curl)
-
-class AsyncSession(BaseSession):
+
+ async def __aenter__(self) -> StreamResponse:
+ return await self.fetch()
+
+ async def __aexit__(self, *args):
+ self.session.release_curl(self.curl)
+
+class StreamSession(AsyncSession):
def request(
self,
method: str,