author    Heiner Lohaus <heiner@lohaus.eu>  2023-10-07 19:00:45 +0200
committer Heiner Lohaus <heiner@lohaus.eu>  2023-10-07 19:00:45 +0200
commit    dfdb759639479da640701fe0db716d4455b7ae38
tree      607297dd731568653d11421038b23861b5a9a4fa /g4f
parent    Improve code by AI
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/helper.py  25
-rw-r--r--  g4f/requests.py         29
2 files changed, 32 insertions(+), 22 deletions(-)
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index c127f241..5a9a9329 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -1,8 +1,10 @@
 from __future__ import annotations
 
-import asyncio, sys
+import asyncio
+import sys
 from asyncio import AbstractEventLoop
 from os import path
+from typing import Dict, List
 
 import browser_cookie3
 # Change event loop policy on windows
@@ -13,7 +15,7 @@ if sys.platform == 'win32':
         asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
 
 # Local Cookie Storage
-_cookies: dict[str, dict[str, str]] = {}
+_cookies: Dict[str, Dict[str, str]] = {}
 
 # If event loop is already running, handle nested event loops
 # If "nest_asyncio" is installed, patch the event loop.
@@ -34,11 +36,13 @@ def get_event_loop() -> AbstractEventLoop:
         return event_loop
     except ImportError:
         raise RuntimeError(
-            'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.')
+            'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.'
+        )
+
 
-# Load cookies for a domain from all supported browser.
-# Cache the results in the "_cookies" variable
-def get_cookies(cookie_domain: str) -> dict:
+# Load cookies for a domain from all supported browsers.
+# Cache the results in the "_cookies" variable.
+def get_cookies(cookie_domain: str) -> Dict[str, str]:
     if cookie_domain not in _cookies:
         _cookies[cookie_domain] = {}
         try:
@@ -49,15 +53,18 @@ def get_cookies(cookie_domain: str) -> dict:
     return _cookies[cookie_domain]
 
 
-def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
+def format_prompt(messages: List[Dict[str, str]], add_special_tokens=False) -> str:
     if add_special_tokens or len(messages) > 1:
         formatted = "\n".join(
-            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+            [
+                "%s: %s" % ((message["role"]).capitalize(), message["content"])
+                for message in messages
+            ]
         )
         return f"{formatted}\nAssistant:"
     else:
         return messages[0]["content"]
 
-
+
 def get_browser(user_data_dir: str = None):
     from undetected_chromedriver import Chrome
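For orientation, a minimal usage sketch of the two helpers this file's diff touches. It is illustrative only: the domain string and message list are made-up inputs, and the import path simply mirrors the file location g4f/Provider/helper.py.

# Illustrative sketch only -- made-up inputs, not part of the commit.
from g4f.Provider.helper import get_cookies, format_prompt

# Cookies for a domain are loaded from the local browsers once, then
# served from the module-level "_cookies" cache on repeated calls.
cookies = get_cookies(".example.com")

# With more than one message, each entry becomes "Role: content" and an
# "Assistant:" line is appended to cue the model's reply.
prompt = format_prompt([
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi!"},
])
# prompt == "System: You are helpful.\nUser: Hi!\nAssistant:"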
diff --git a/g4f/requests.py b/g4f/requests.py
index 3a4a3f54..f238062e 100644
--- a/g4f/requests.py
+++ b/g4f/requests.py
@@ -5,7 +5,7 @@ import json
 import asyncio
 from functools import partialmethod
 from asyncio import Future, Queue
-from typing import AsyncGenerator
+from typing import AsyncGenerator, Union, Optional
 
 from curl_cffi.requests import AsyncSession, Response
 import curl_cffi
@@ -37,7 +37,14 @@ class StreamResponse:
 
     async def json(self, **kwargs) -> dict:
         return json.loads(await self.read(), **kwargs)
 
-    async def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None) -> AsyncGenerator[bytes, None]:
+    async def iter_lines(
+        self, chunk_size: Optional[int] = None, decode_unicode: bool = False, delimiter: Optional[str] = None
+    ) -> AsyncGenerator[bytes, None]:
+        """
+        Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/
+        which is under the License: Apache 2.0
+        """
+
         pending: bytes = None
         async for chunk in self.iter_content(
@@ -60,7 +67,9 @@ class StreamResponse:
         if pending is not None:
             yield pending
 
-    async def iter_content(self, chunk_size=None, decode_unicode=False) -> AsyncGenerator[bytes, None]:
+    async def iter_content(
+        self, chunk_size: Optional[int] = None, decode_unicode: bool = False
+    ) -> AsyncGenerator[bytes, None]:
         if chunk_size:
             warnings.warn("chunk_size is ignored, there is no way to tell curl that.")
         if decode_unicode:
@@ -76,14 +85,14 @@
 
 
 class StreamRequest:
-    def __init__(self, session: AsyncSession, method: str, url: str, **kwargs) -> None:
+    def __init__(self, session: AsyncSession, method: str, url: str, **kwargs: Union[bool, int, str]) -> None:
         self.session: AsyncSession = session
         self.loop: asyncio.AbstractEventLoop = session.loop if session.loop else asyncio.get_running_loop()
         self.queue: Queue[bytes] = Queue()
         self.method: str = method
         self.url: str = url
         self.options: dict = kwargs
-        self.handle: curl_cffi.AsyncCurl = None
+        self.handle: Optional[curl_cffi.AsyncCurl] = None
 
     def _on_content(self, data: bytes) -> None:
         if not self.enter.done():
@@ -134,10 +143,7 @@ class StreamRequest:
             response.request = request
         else:
             response = self.session._parse_response(self.curl, request, _, header_buffer)
-        return StreamResponse(
-            response,
-            self.queue
-        )
+        return StreamResponse(response, self.queue)
 
     async def __aenter__(self) -> StreamResponse:
         return await self.fetch()
@@ -163,10 +169,7 @@
 
 
 class StreamSession(AsyncSession):
     def request(
-        self,
-        method: str,
-        url: str,
-        **kwargs
+        self, method: str, url: str, **kwargs
     ) -> StreamRequest:
         return StreamRequest(self, method, url, **kwargs)
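To show how the pieces above fit together: StreamSession.request returns a StreamRequest, whose __aenter__ fetches a StreamResponse that streams body chunks through the internal Queue. A minimal sketch, assuming a reachable streaming endpoint (the URL is a placeholder and error handling is omitted):

# Illustrative sketch only -- placeholder URL, not part of the commit.
import asyncio
from g4f.requests import StreamSession

async def main() -> None:
    session = StreamSession()
    # "async with" enters the StreamRequest, which performs the request
    # and yields a StreamResponse backed by the chunk queue.
    async with session.request("GET", "https://example.com/stream") as response:
        async for line in response.iter_lines():  # yields the body line by line, as bytes
            print(line.decode(errors="replace"))

asyncio.run(main())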