author     H Lohaus <hlohaus@users.noreply.github.com>  2024-02-23 03:08:30 +0100
committer  GitHub <noreply@github.com>  2024-02-23 03:08:30 +0100
commit     cf87d467c927d4b93916a42e7139f4e801172d62 (patch)
tree       175b39f8486ce72be00a40cc345834abe859c84c /g4f
parent     ~ (diff)
parent     Fix unittests, use Union typing (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/Phind.py                                                            47
-rw-r--r--  g4f/Provider/You.py                                                               4
-rw-r--r--  g4f/Provider/__init__.py                                                         12
-rw-r--r--  g4f/Provider/base_provider.py                                                   283
-rw-r--r--  g4f/Provider/bing/create_images.py                                                4
-rw-r--r--  g4f/Provider/helper.py                                                           64
-rw-r--r--  g4f/__init__.py                                                                   4
-rw-r--r--  g4f/api/__init__.py                                                             199
-rw-r--r--  g4f/client.py                                                                    54
-rw-r--r--  g4f/debug.py                                                                      2
-rw-r--r--  g4f/gui/server/backend.py                                                         6
-rw-r--r--  g4f/gui/server/internet.py                                                       18
-rw-r--r--  g4f/image.py                                                                     23
-rw-r--r--  g4f/models.py                                                                     2
-rw-r--r--  g4f/providers/base_provider.py                                                  280
-rw-r--r--  g4f/providers/create_images.py (renamed from g4f/Provider/create_images.py)       3
-rw-r--r--  g4f/providers/helper.py                                                          61
-rw-r--r--  g4f/providers/retry_provider.py (renamed from g4f/Provider/retry_provider.py)     3
-rw-r--r--  g4f/providers/types.py (renamed from g4f/base_provider.py)                        6
-rw-r--r--  g4f/requests/__init__.py (renamed from g4f/requests.py)                           8
-rw-r--r--  g4f/requests/aiohttp.py (renamed from g4f/requests_aiohttp.py)                    2
-rw-r--r--  g4f/requests/curl_cffi.py (renamed from g4f/requests_curl_cffi.py)                2
-rw-r--r--  g4f/requests/defaults.py (renamed from g4f/defaults.py)                           0
-rw-r--r--  g4f/stubs.py                                                                     89
-rw-r--r--  g4f/version.py                                                                   31
25 files changed, 611 insertions, 596 deletions
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index 746dcbcc..096cdd29 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+import re
+import json
from urllib import parse
from datetime import datetime
@@ -32,10 +34,18 @@ class Phind(AsyncGeneratorProvider):
"Sec-Fetch-Site": "same-origin",
}
async with StreamSession(
- impersonate="chrome110",
+ headers=headers,
+ impersonate="chrome",
proxies={"https": proxy},
timeout=timeout
) as session:
+ url = "https://www.phind.com/search?home=true"
+ async with session.get(url) as response:
+ text = await response.text()
+ match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(?P<json>[\S\s]+?)</script>', text)
+ data = json.loads(match.group("json"))
+ challenge_seeds = data["props"]["pageProps"]["challengeSeeds"]
+
prompt = messages[-1]["content"]
data = {
"question": prompt,
@@ -51,14 +61,13 @@ class Phind(AsyncGeneratorProvider):
"language": "en-US",
"detailed": True,
"anonUserId": "",
- "answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind Model",
+ "answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind-34B",
"creativeMode": creative_mode,
"customLinks": []
},
"context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
}
- data["challenge"] = generate_challenge(data)
-
+ data["challenge"] = generate_challenge(data, **challenge_seeds)
async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response:
new_line = False
async for line in response.iter_lines():
@@ -101,6 +110,18 @@ def deterministic_stringify(obj):
items = sorted(obj.items(), key=lambda x: x[0])
return ','.join([f'{k}:{handle_value(v)}' for k, v in items if handle_value(v) is not None])
+def prng_general(seed, multiplier, addend, modulus):
+ a = seed * multiplier + addend
+ if a < 0:
+ return ((a%modulus)-modulus)/modulus
+ else:
+ return a%modulus/modulus
+
+def generate_challenge_seed(l):
+ I = deterministic_stringify(l)
+ d = parse.quote(I, safe='')
+ return simple_hash(d)
+
def simple_hash(s):
d = 0
for char in s:
@@ -111,16 +132,8 @@ def simple_hash(s):
d -= 0x100000000 # Subtract 2**32
return d
-def generate_challenge(obj):
- deterministic_str = deterministic_stringify(obj)
- encoded_str = parse.quote(deterministic_str, safe='')
-
- c = simple_hash(encoded_str)
- a = (9301 * c + 49297)
- b = 233280
-
- # If negativ, we need a special logic
- if a < 0:
- return ((a%b)-b)/b
- else:
- return a%b/b \ No newline at end of file
+def generate_challenge(obj, **kwargs):
+ return prng_general(
+ seed=generate_challenge_seed(obj),
+ **kwargs
+ ) \ No newline at end of file
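
Note on the change above: the challenge constants (multiplier 9301, addend
49297, modulus 233280) were previously hardcoded in generate_challenge; the
commit now scrapes them from the "challengeSeeds" entry in Phind's
__NEXT_DATA__ payload and forwards them as keyword arguments. A minimal
sketch of the resulting flow, using the old constants purely as example
values since the real seeds are fetched from the page at runtime:

    def prng_general(seed: int, multiplier: int, addend: int, modulus: int) -> float:
        # Linear congruential step, normalized to a float; mirrors the diff above.
        a = seed * multiplier + addend
        if a < 0:
            return ((a % modulus) - modulus) / modulus
        return a % modulus / modulus

    # Example with the previously hardcoded constants:
    challenge = prng_general(seed=12345, multiplier=9301, addend=49297, modulus=233280)
    # -> ~0.41316 for this seed
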
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index ece1d340..34130c47 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -7,9 +7,9 @@ from aiohttp import ClientSession, FormData
from ..typing import AsyncGenerator, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider
-from .helper import get_connector, format_prompt
+from ..providers.helper import get_connector, format_prompt
from ..image import to_bytes
-from ..defaults import DEFAULT_HEADERS
+from ..requests.defaults import DEFAULT_HEADERS
class You(AsyncGeneratorProvider):
url = "https://you.com"
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 68b62fd9..bad77e9b 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,9 +1,10 @@
from __future__ import annotations
-from ..base_provider import BaseProvider, ProviderType
-from .retry_provider import RetryProvider
-from .base_provider import AsyncProvider, AsyncGeneratorProvider
-from .create_images import CreateImagesProvider
+from ..providers.types import BaseProvider, ProviderType
+from ..providers.retry_provider import RetryProvider
+from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
+from ..providers.create_images import CreateImagesProvider
+
from .deprecated import *
from .selenium import *
from .needs_auth import *
@@ -15,6 +16,7 @@ from .AItianhu import AItianhu
from .Aura import Aura
from .Bestim import Bestim
from .Bing import Bing
+from .BingCreateImages import BingCreateImages
from .ChatAnywhere import ChatAnywhere
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
@@ -53,8 +55,6 @@ from .Vercel import Vercel
from .Ylokh import Ylokh
from .You import You
-from .BingCreateImages import BingCreateImages
-
import sys
__modules__: list = [
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 8659f506..8e761dba 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,281 +1,2 @@
-from __future__ import annotations
-
-import sys
-import asyncio
-from asyncio import AbstractEventLoop
-from concurrent.futures import ThreadPoolExecutor
-from abc import abstractmethod
-from inspect import signature, Parameter
-from .helper import get_cookies, format_prompt
-from ..typing import CreateResult, AsyncResult, Messages, Union
-from ..base_provider import BaseProvider
-from ..errors import NestAsyncioError, ModelNotSupportedError
-from .. import debug
-
-if sys.version_info < (3, 10):
- NoneType = type(None)
-else:
- from types import NoneType
-
-# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
-if sys.platform == 'win32':
- if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-
-def get_running_loop() -> Union[AbstractEventLoop, None]:
- try:
- loop = asyncio.get_running_loop()
- if not hasattr(loop.__class__, "_nest_patched"):
- raise NestAsyncioError(
- 'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
- )
- return loop
- except RuntimeError:
- pass
-
-class AbstractProvider(BaseProvider):
- """
- Abstract class for providing asynchronous functionality to derived classes.
- """
-
- @classmethod
- async def create_async(
- cls,
- model: str,
- messages: Messages,
- *,
- loop: AbstractEventLoop = None,
- executor: ThreadPoolExecutor = None,
- **kwargs
- ) -> str:
- """
- Asynchronously creates a result based on the given model and messages.
-
- Args:
- cls (type): The class on which this method is called.
- model (str): The model to use for creation.
- messages (Messages): The messages to process.
- loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
- executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
- **kwargs: Additional keyword arguments.
-
- Returns:
- str: The created result as a string.
- """
- loop = loop or asyncio.get_running_loop()
-
- def create_func() -> str:
- return "".join(cls.create_completion(model, messages, False, **kwargs))
-
- return await asyncio.wait_for(
- loop.run_in_executor(executor, create_func),
- timeout=kwargs.get("timeout")
- )
-
- @classmethod
- @property
- def params(cls) -> str:
- """
- Returns the parameters supported by the provider.
-
- Args:
- cls (type): The class on which this property is called.
-
- Returns:
- str: A string listing the supported parameters.
- """
- sig = signature(
- cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
- cls.create_async if issubclass(cls, AsyncProvider) else
- cls.create_completion
- )
-
- def get_type_name(annotation: type) -> str:
- return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
-
- args = ""
- for name, param in sig.parameters.items():
- if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
- continue
- args += f"\n {name}"
- args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
- args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
-
- return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
-
-
-class AsyncProvider(AbstractProvider):
- """
- Provides asynchronous functionality for creating completions.
- """
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- **kwargs
- ) -> CreateResult:
- """
- Creates a completion result synchronously.
-
- Args:
- cls (type): The class on which this method is called.
- model (str): The model to use for creation.
- messages (Messages): The messages to process.
- stream (bool): Indicates whether to stream the results. Defaults to False.
- loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
- **kwargs: Additional keyword arguments.
-
- Returns:
- CreateResult: The result of the completion creation.
- """
- get_running_loop()
- yield asyncio.run(cls.create_async(model, messages, **kwargs))
-
- @staticmethod
- @abstractmethod
- async def create_async(
- model: str,
- messages: Messages,
- **kwargs
- ) -> str:
- """
- Abstract method for creating asynchronous results.
-
- Args:
- model (str): The model to use for creation.
- messages (Messages): The messages to process.
- **kwargs: Additional keyword arguments.
-
- Raises:
- NotImplementedError: If this method is not overridden in derived classes.
-
- Returns:
- str: The created result as a string.
- """
- raise NotImplementedError()
-
-
-class AsyncGeneratorProvider(AsyncProvider):
- """
- Provides asynchronous generator functionality for streaming results.
- """
- supports_stream = True
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool = True,
- **kwargs
- ) -> CreateResult:
- """
- Creates a streaming completion result synchronously.
-
- Args:
- cls (type): The class on which this method is called.
- model (str): The model to use for creation.
- messages (Messages): The messages to process.
- stream (bool): Indicates whether to stream the results. Defaults to True.
- loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
- **kwargs: Additional keyword arguments.
-
- Returns:
- CreateResult: The result of the streaming completion creation.
- """
- loop = get_running_loop()
- new_loop = False
- if not loop:
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- new_loop = True
-
- generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
- gen = generator.__aiter__()
-
- # Fix for RuntimeError: async generator ignored GeneratorExit
- async def await_callback(callback):
- return await callback()
-
- try:
- while True:
- yield loop.run_until_complete(await_callback(gen.__anext__))
- except StopAsyncIteration:
- ...
- # Fix for: ResourceWarning: unclosed event loop
- finally:
- if new_loop:
- loop.close()
- asyncio.set_event_loop(None)
-
- @classmethod
- async def create_async(
- cls,
- model: str,
- messages: Messages,
- **kwargs
- ) -> str:
- """
- Asynchronously creates a result from a generator.
-
- Args:
- cls (type): The class on which this method is called.
- model (str): The model to use for creation.
- messages (Messages): The messages to process.
- **kwargs: Additional keyword arguments.
-
- Returns:
- str: The created result as a string.
- """
- return "".join([
- chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
- if not isinstance(chunk, Exception)
- ])
-
- @staticmethod
- @abstractmethod
- async def create_async_generator(
- model: str,
- messages: Messages,
- stream: bool = True,
- **kwargs
- ) -> AsyncResult:
- """
- Abstract method for creating an asynchronous generator.
-
- Args:
- model (str): The model to use for creation.
- messages (Messages): The messages to process.
- stream (bool): Indicates whether to stream the results. Defaults to True.
- **kwargs: Additional keyword arguments.
-
- Raises:
- NotImplementedError: If this method is not overridden in derived classes.
-
- Returns:
- AsyncResult: An asynchronous generator yielding results.
- """
- raise NotImplementedError()
-
-class ProviderModelMixin:
- default_model: str
- models: list[str] = []
- model_aliases: dict[str, str] = {}
-
- @classmethod
- def get_models(cls) -> list[str]:
- return cls.models
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if not model:
- model = cls.default_model
- elif model in cls.model_aliases:
- model = cls.model_aliases[model]
- elif model not in cls.get_models():
- raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
- debug.last_model = model
- return model \ No newline at end of file
+from ..providers.base_provider import *
+from .helper import get_cookies, format_prompt \ No newline at end of file
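
The old module body moves to g4f/providers/base_provider.py (added later in
this diff); the Provider package keeps only a two-line re-export shim, so
both import paths keep resolving:

    # Either path yields the same class after this commit:
    from g4f.Provider.base_provider import AsyncGeneratorProvider   # via the shim
    from g4f.providers.base_provider import AsyncGeneratorProvider  # canonical location
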
diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py
index 7b82dc56..f6a8a372 100644
--- a/g4f/Provider/bing/create_images.py
+++ b/g4f/Provider/bing/create_images.py
@@ -17,9 +17,9 @@ try:
except ImportError:
has_requirements = False
-from ..create_images import CreateImagesProvider
+from ...providers.create_images import CreateImagesProvider
from ..helper import get_connector
-from ...base_provider import ProviderType
+from ...providers.types import ProviderType
from ...errors import MissingRequirementsError
from ...webdriver import WebDriver, get_driver_cookies, get_browser
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 35480255..da5b99f6 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -1,62 +1,2 @@
-from __future__ import annotations
-
-import random
-import secrets
-import string
-from aiohttp import BaseConnector
-
-from ..typing import Messages, Optional
-from ..errors import MissingRequirementsError
-from ..cookies import get_cookies
-
-def format_prompt(messages: Messages, add_special_tokens=False) -> str:
- """
- Format a series of messages into a single string, optionally adding special tokens.
-
- Args:
- messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
- add_special_tokens (bool): Whether to add special formatting tokens.
-
- Returns:
- str: A formatted string containing all messages.
- """
- if not add_special_tokens and len(messages) <= 1:
- return messages[0]["content"]
- formatted = "\n".join([
- f'{message["role"].capitalize()}: {message["content"]}'
- for message in messages
- ])
- return f"{formatted}\nAssistant:"
-
-def get_random_string(length: int = 10) -> str:
- """
- Generate a random string of specified length, containing lowercase letters and digits.
-
- Args:
- length (int, optional): Length of the random string to generate. Defaults to 10.
-
- Returns:
- str: A random string of the specified length.
- """
- return ''.join(
- random.choice(string.ascii_lowercase + string.digits)
- for _ in range(length)
- )
-
-def get_random_hex() -> str:
- """
- Generate a random hexadecimal string of a fixed length.
-
- Returns:
- str: A random hexadecimal string of 32 characters (16 bytes).
- """
- return secrets.token_hex(16).zfill(32)
-
-def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]:
- if proxy and not connector:
- try:
- from aiohttp_socks import ProxyConnector
- connector = ProxyConnector.from_url(proxy)
- except ImportError:
- raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
- return connector \ No newline at end of file
+from ..providers.helper import *
+from ..cookies import get_cookies \ No newline at end of file
diff --git a/g4f/__init__.py b/g4f/__init__.py
index ec4a1743..6c8e100e 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -8,8 +8,8 @@ from .Provider import AsyncGeneratorProvider, ProviderUtils
from .typing import Messages, CreateResult, AsyncResult, Union
from .cookies import get_cookies, set_cookies
from . import debug, version
-from .base_provider import BaseRetryProvider, ProviderType
-from .Provider.base_provider import ProviderModelMixin
+from .providers.types import BaseRetryProvider, ProviderType
+from .providers.base_provider import ProviderModelMixin
def get_model_and_provider(model : Union[Model, str],
provider : Union[ProviderType, str, None],
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 3f0778a1..d1e8539f 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -1,21 +1,27 @@
-import ast
import logging
-import time
import json
-import random
-import string
import uvicorn
import nest_asyncio
from fastapi import FastAPI, Response, Request
-from fastapi.responses import StreamingResponse
-from typing import List, Union, Any, Dict, AnyStr
-#from ._tokenizer import tokenize
+from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
+from pydantic import BaseModel
+from typing import List, Union
import g4f
-from .. import debug
-
-debug.logging = True
+import g4f.debug
+from g4f.client import Client
+from g4f.typing import Messages
+
+class ChatCompletionsConfig(BaseModel):
+ messages: Messages
+ model: str
+ provider: Union[str, None]
+ stream: bool = False
+ temperature: Union[float, None]
+ max_tokens: int = None
+ stop: Union[list[str], str, None]
+ access_token: Union[str, None]
class Api:
def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
@@ -25,169 +31,82 @@ class Api:
self.sentry = sentry
self.list_ignored_providers = list_ignored_providers
- self.app = FastAPI()
+ if debug:
+ g4f.debug.logging = True
+ self.client = Client()
+
nest_asyncio.apply()
+ self.app = FastAPI()
- JSONObject = Dict[AnyStr, Any]
- JSONArray = List[Any]
- JSONStructure = Union[JSONArray, JSONObject]
+ self.routes()
+ def routes(self):
@self.app.get("/")
async def read_root():
- return Response(content=json.dumps({"info": "g4f API"}, indent=4), media_type="application/json")
+ return RedirectResponse("/v1", 302)
@self.app.get("/v1")
async def read_root_v1():
- return Response(content=json.dumps({"info": "Go to /v1/chat/completions or /v1/models."}, indent=4), media_type="application/json")
+ return HTMLResponse('g4f API: Go to '
+ '<a href="/v1/chat/completions">chat/completions</a> '
+ 'or <a href="/v1/models">models</a>.')
@self.app.get("/v1/models")
async def models():
- model_list = []
- for model in g4f.Model.__all__():
- model_info = (g4f.ModelUtils.convert[model])
- model_list.append({
- 'id': model,
+ model_list = dict(
+ (model, g4f.ModelUtils.convert[model])
+ for model in g4f.Model.__all__()
+ )
+ model_list = [{
+ 'id': model_id,
'object': 'model',
'created': 0,
- 'owned_by': model_info.base_provider}
- )
- return Response(content=json.dumps({
- 'object': 'list',
- 'data': model_list}, indent=4), media_type="application/json")
+ 'owned_by': model.base_provider
+ } for model_id, model in model_list.items()]
+ return JSONResponse(model_list)
@self.app.get("/v1/models/{model_name}")
async def model_info(model_name: str):
try:
- model_info = (g4f.ModelUtils.convert[model_name])
-
- return Response(content=json.dumps({
+ model_info = g4f.ModelUtils.convert[model_name]
+ return JSONResponse({
'id': model_name,
'object': 'model',
'created': 0,
'owned_by': model_info.base_provider
- }, indent=4), media_type="application/json")
+ })
except:
- return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json")
+ return JSONResponse({"error": "The model does not exist."})
@self.app.post("/v1/chat/completions")
- async def chat_completions(request: Request, item: JSONStructure = None):
- item_data = {
- 'model': 'gpt-3.5-turbo',
- 'stream': False,
- }
-
- # item contains byte keys, and dict.get suppresses error
- item_data.update({
- key.decode('utf-8') if isinstance(key, bytes) else key: str(value)
- for key, value in (item or {}).items()
- })
- # messages is str, need dict
- if isinstance(item_data.get('messages'), str):
- item_data['messages'] = ast.literal_eval(item_data.get('messages'))
-
- model = item_data.get('model')
- stream = True if item_data.get("stream") == "True" else False
- messages = item_data.get('messages')
- provider = item_data.get('provider', '').replace('g4f.Provider.', '')
- provider = provider if provider and provider != "Auto" else None
- temperature = item_data.get('temperature')
-
+ async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
try:
- response = g4f.ChatCompletion.create(
- model=model,
- stream=stream,
- messages=messages,
- temperature = temperature,
- provider = provider,
+ config.provider = provider if config.provider is None else config.provider
+ if config.access_token is None and request is not None:
+ auth_header = request.headers.get("Authorization")
+ if auth_header is not None:
+ config.access_token = auth_header.split(None, 1)[-1]
+
+ response = self.client.chat.completions.create(
+ **dict(config),
ignored=self.list_ignored_providers
)
except Exception as e:
logging.exception(e)
- content = json.dumps({
- "error": {"message": f"An error occurred while generating the response:\n{e}"},
- "model": model,
- "provider": g4f.get_last_provider(True)
- })
- return Response(content=content, status_code=500, media_type="application/json")
- completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
- completion_timestamp = int(time.time())
-
- if not stream:
- #prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
- #completion_tokens, _ = tokenize(response)
-
- json_data = {
- 'id': f'chatcmpl-{completion_id}',
- 'object': 'chat.completion',
- 'created': completion_timestamp,
- 'model': model,
- 'provider': g4f.get_last_provider(True),
- 'choices': [
- {
- 'index': 0,
- 'message': {
- 'role': 'assistant',
- 'content': response,
- },
- 'finish_reason': 'stop',
- }
- ],
- 'usage': {
- 'prompt_tokens': 0, #prompt_tokens,
- 'completion_tokens': 0, #completion_tokens,
- 'total_tokens': 0, #prompt_tokens + completion_tokens,
- },
- }
-
- return Response(content=json.dumps(json_data, indent=4), media_type="application/json")
+ return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
+
+ if not config.stream:
+ return JSONResponse(response.to_json())
def streaming():
try:
for chunk in response:
- completion_data = {
- 'id': f'chatcmpl-{completion_id}',
- 'object': 'chat.completion.chunk',
- 'created': completion_timestamp,
- 'model': model,
- 'provider': g4f.get_last_provider(True),
- 'choices': [
- {
- 'index': 0,
- 'delta': {
- 'role': 'assistant',
- 'content': chunk,
- },
- 'finish_reason': None,
- }
- ],
- }
- yield f'data: {json.dumps(completion_data)}\n\n'
- time.sleep(0.03)
- end_completion_data = {
- 'id': f'chatcmpl-{completion_id}',
- 'object': 'chat.completion.chunk',
- 'created': completion_timestamp,
- 'model': model,
- 'provider': g4f.get_last_provider(True),
- 'choices': [
- {
- 'index': 0,
- 'delta': {},
- 'finish_reason': 'stop',
- }
- ],
- }
- yield f'data: {json.dumps(end_completion_data)}\n\n'
+ yield f"data: {json.dumps(chunk.to_json())}\n\n"
except GeneratorExit:
pass
except Exception as e:
logging.exception(e)
- content = json.dumps({
- "error": {"message": f"An error occurred while generating the response:\n{e}"},
- "model": model,
- "provider": g4f.get_last_provider(True),
- })
- yield f'data: {content}'
+ yield f'data: {format_exception(e, config)}'
return StreamingResponse(streaming(), media_type="text/event-stream")
@@ -198,3 +117,11 @@ class Api:
def run(self, ip):
split_ip = ip.split(":")
uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False)
+
+def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
+ last_provider = g4f.get_last_provider(True)
+ return json.dumps({
+ "error": {"message": f"ChatCompletionsError: {e.__class__.__name__}: {e}"},
+ "model": last_provider.get("model") if last_provider else config.model,
+ "provider": last_provider.get("name") if last_provider else config.provider
+ }) \ No newline at end of file
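
With this rewrite the endpoint validates a typed Pydantic body
(ChatCompletionsConfig) instead of hand-parsing raw JSON, delegates to
g4f.client.Client, and emits SSE chunks built from ChatCompletionChunk.to_json().
A request sketch; the address is an assumption here, use whatever ip:port you
pass to Api.run():

    import requests

    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
    }
    resp = requests.post("http://localhost:1337/v1/chat/completions", json=payload)
    print(resp.json()["choices"][0]["message"]["content"])
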
diff --git a/g4f/client.py b/g4f/client.py
index a1494d47..023d53f6 100644
--- a/g4f/client.py
+++ b/g4f/client.py
@@ -1,17 +1,23 @@
from __future__ import annotations
import re
+import os
+import time
+import random
+import string
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .typing import Union, Generator, Messages, ImageType
-from .base_provider import BaseProvider, ProviderType
+from .providers.types import BaseProvider, ProviderType
from .image import ImageResponse as ImageProviderResponse
-from .Provider import BingCreateImages, Gemini, OpenaiChat
+from .Provider.BingCreateImages import BingCreateImages
+from .Provider.needs_auth import Gemini, OpenaiChat
from .errors import NoImageResponseError
-from . import get_model_and_provider
+from . import get_model_and_provider, get_last_provider
ImageProvider = Union[BaseProvider, object]
Proxies = Union[dict, str]
+IterResponse = Generator[Union[ChatCompletion, ChatCompletionChunk], None, None]
def read_json(text: str) -> dict:
"""
@@ -29,21 +35,19 @@ def read_json(text: str) -> dict:
return text
def iter_response(
- response: iter,
+ response: iter[str],
stream: bool,
response_format: dict = None,
max_tokens: int = None,
stop: list = None
-) -> Generator:
+) -> IterResponse:
content = ""
finish_reason = None
- last_chunk = None
+ completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
for idx, chunk in enumerate(response):
- if last_chunk is not None:
- yield ChatCompletionChunk(last_chunk, finish_reason)
content += str(chunk)
if max_tokens is not None and idx + 1 >= max_tokens:
- finish_reason = "max_tokens"
+ finish_reason = "length"
first = -1
word = None
if stop is not None:
@@ -61,16 +65,25 @@ def iter_response(
if first != -1:
finish_reason = "stop"
if stream:
- last_chunk = chunk
+ yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
if finish_reason is not None:
break
- if last_chunk is not None:
- yield ChatCompletionChunk(last_chunk, finish_reason)
- if not stream:
+ finish_reason = "stop" if finish_reason is None else finish_reason
+ if stream:
+ yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
+ else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
- response = read_json(response)
- yield ChatCompletion(content, finish_reason)
+ content = read_json(content)
+ yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
+
+def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
+ last_provider = None
+ for chunk in response:
+ last_provider = get_last_provider(True) if last_provider is None else last_provider
+ chunk.model = last_provider.get("model")
+ chunk.provider = last_provider.get("name")
+ yield chunk
class Client():
proxies: Proxies = None
@@ -89,13 +102,14 @@ class Client():
self.proxies: Proxies = proxies
def get_proxy(self) -> Union[str, None]:
- if isinstance(self.proxies, str) or self.proxies is None:
+ if isinstance(self.proxies, str):
return self.proxies
+ elif self.proxies is None:
+ return os.environ.get("G4F_PROXY")
elif "all" in self.proxies:
return self.proxies["all"]
elif "https" in self.proxies:
return self.proxies["https"]
- return None
class Completions():
def __init__(self, client: Client, provider: ProviderType = None):
@@ -110,7 +124,7 @@ class Completions():
stream: bool = False,
response_format: dict = None,
max_tokens: int = None,
- stop: Union[list. str] = None,
+ stop: Union[list[str], str] = None,
**kwargs
) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]:
if max_tokens is not None:
@@ -123,9 +137,9 @@ class Completions():
stream,
**kwargs
)
- response = provider.create_completion(model, messages, stream=stream, **kwargs)
+ response = provider.create_completion(model, messages, stream=stream, proxy=self.client.get_proxy(), **kwargs)
stop = [stop] if isinstance(stop, str) else stop
- response = iter_response(response, stream, response_format, max_tokens, stop)
+ response = iter_append_model_and_provider(iter_response(response, stream, response_format, max_tokens, stop))
return response if stream else next(response)
class Chat():
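
A short usage sketch of the refactored client path, relying only on the
wiring the API module above already uses:

    from g4f.client import Client

    client = Client()
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hi"}],
    )
    # iter_append_model_and_provider fills these in from the last provider:
    print(completion.provider, completion.model)
    print(completion.choices[0].message.content)
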
diff --git a/g4f/debug.py b/g4f/debug.py
index 050bb0b2..69f7d55c 100644
--- a/g4f/debug.py
+++ b/g4f/debug.py
@@ -1,4 +1,4 @@
-from .base_provider import ProviderType
+from .providers.types import ProviderType
logging: bool = False
version_check: bool = True
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 6847be34..454ed1c6 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -8,8 +8,6 @@ from g4f.image import is_allowed_extension, to_image
from g4f.errors import VersionNotFoundError
from g4f.Provider import __providers__
from g4f.Provider.bing.create_images import patch_provider
-from .internet import get_search_message
-
class Backend_Api:
"""
@@ -97,7 +95,7 @@ class Backend_Api:
current_version = None
return {
"version": current_version,
- "latest_version": version.get_latest_version(),
+ "latest_version": version.utils.latest_version,
}
def generate_title(self):
@@ -157,6 +155,8 @@ class Backend_Api:
if provider == "Bing":
kwargs['web_search'] = True
else:
+ # ResourceWarning: unclosed event loop
+ from .internet import get_search_message
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = json_data.get('model')
diff --git a/g4f/gui/server/internet.py b/g4f/gui/server/internet.py
index a6bfc885..e784e52d 100644
--- a/g4f/gui/server/internet.py
+++ b/g4f/gui/server/internet.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
try:
- from duckduckgo_search import DDGS
+ from duckduckgo_search.duckduckgo_search_async import AsyncDDGS
from bs4 import BeautifulSoup
has_requirements = True
except ImportError:
@@ -30,7 +30,10 @@ class SearchResults():
search += result.snippet
search += f"\n\nSource: [[{idx}]]({result.url})"
return search
-
+
+ def __len__(self) -> int:
+ return len(self.results)
+
class SearchResultEntry():
def __init__(self, title: str, url: str, snippet: str, text: str = None):
self.title = title
@@ -96,21 +99,20 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No
async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text: bool = True) -> SearchResults:
if not has_requirements:
raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
- with DDGS() as ddgs:
+ async with AsyncDDGS() as ddgs:
results = []
- for result in ddgs.text(
+ async for result in ddgs.text(
query,
region="wt-wt",
safesearch="moderate",
timelimit="y",
+ max_results=n_results
):
results.append(SearchResultEntry(
result["title"],
result["href"],
result["body"]
))
- if len(results) >= n_results:
- break
if add_text:
requests = []
@@ -136,7 +138,6 @@ async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text
return SearchResults(formatted_results)
-
def get_search_message(prompt) -> str:
try:
search_results = asyncio.run(search(prompt))
@@ -146,7 +147,6 @@ def get_search_message(prompt) -> str:
Instruction: Using the provided web search results, to write a comprehensive reply to the user request.
Make sure to add the sources of cites using [[Number]](Url) notation after the reference. Example: [[0]](http://google.com)
-If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
User request:
{prompt}
@@ -154,4 +154,4 @@ User request:
return message
except Exception as e:
print("Couldn't do web search:", e)
- return prompt
+ return prompt \ No newline at end of file
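
The search helper switches from the synchronous DDGS loop with a manual break
at n_results to AsyncDDGS with its native max_results parameter. An isolated
sketch of the new call shape, using only the calls visible in the diff:

    import asyncio
    from duckduckgo_search.duckduckgo_search_async import AsyncDDGS

    async def demo(query: str, n_results: int = 5) -> None:
        async with AsyncDDGS() as ddgs:
            async for result in ddgs.text(
                query, region="wt-wt", safesearch="moderate",
                timelimit="y", max_results=n_results
            ):
                print(result["title"], result["href"])

    asyncio.run(demo("gpt4free"))
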
diff --git a/g4f/image.py b/g4f/image.py
index 01d6ae50..6370a06f 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -11,7 +11,7 @@ try:
has_requirements = True
except ImportError:
has_requirements = False
-
+
from .errors import MissingRequirementsError
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
@@ -28,9 +28,11 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
if not has_requirements:
raise MissingRequirementsError('Install "pillow" package for images')
+
if isinstance(image, str):
is_data_uri_an_image(image)
image = extract_data_uri(image)
+
if is_svg:
try:
import cairosvg
@@ -41,6 +43,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
buffer = BytesIO()
cairosvg.svg2png(image, write_to=buffer)
return open_image(buffer)
+
if isinstance(image, bytes):
is_accepted_format(image)
return open_image(BytesIO(image))
@@ -48,6 +51,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
image = open_image(image)
image.load()
return image
+
return image
def is_allowed_extension(filename: str) -> bool:
@@ -200,17 +204,16 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
str: The formatted markdown string.
"""
if isinstance(images, str):
- images = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
+ result = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
else:
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
- images = [
+ result = "\n".join(
f"[![#{idx+1} {alt}]({preview[idx]})]({image})" for idx, image in enumerate(images)
- ]
- images = "\n".join(images)
+ )
start_flag = "<!-- generated images start -->\n"
end_flag = "<!-- generated images end -->\n"
- return f"\n{start_flag}{images}\n{end_flag}\n"
+ return f"\n{start_flag}{result}\n{end_flag}\n"
def to_bytes(image: ImageType) -> bytes:
"""
@@ -245,19 +248,19 @@ class ImageResponse:
self.images = images
self.alt = alt
self.options = options
-
+
def __str__(self) -> str:
return format_images_markdown(self.images, self.alt, self.get("preview"))
-
+
def get(self, key: str):
return self.options.get(key)
-
+
class ImageRequest:
def __init__(
self,
options: dict = {}
):
self.options = options
-
+
def get(self, key: str):
return self.options.get(key) \ No newline at end of file
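
The format_images_markdown fix above only stops reassigning the images
parameter to unrelated types; the output is unchanged. An illustrative call
(URLs are made up, and preview is assumed to default to None):

    from g4f.image import format_images_markdown

    print(format_images_markdown(["https://example.com/1.png"], alt="demo"))
    # <!-- generated images start -->
    # [![#1 demo](https://example.com/1.png)](https://example.com/1.png)
    # <!-- generated images end -->
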
diff --git a/g4f/models.py b/g4f/models.py
index 3b4ca468..f5951a29 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+
from dataclasses import dataclass
+
from .Provider import RetryProvider, ProviderType
from .Provider import (
Chatgpt4Online,
diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py
new file mode 100644
index 00000000..b8649ba5
--- /dev/null
+++ b/g4f/providers/base_provider.py
@@ -0,0 +1,280 @@
+from __future__ import annotations
+
+import sys
+import asyncio
+from asyncio import AbstractEventLoop
+from concurrent.futures import ThreadPoolExecutor
+from abc import abstractmethod
+from inspect import signature, Parameter
+from ..typing import CreateResult, AsyncResult, Messages, Union
+from .types import BaseProvider
+from ..errors import NestAsyncioError, ModelNotSupportedError
+from .. import debug
+
+if sys.version_info < (3, 10):
+ NoneType = type(None)
+else:
+ from types import NoneType
+
+# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
+if sys.platform == 'win32':
+ if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+
+def get_running_loop() -> Union[AbstractEventLoop, None]:
+ try:
+ loop = asyncio.get_running_loop()
+ if not hasattr(loop.__class__, "_nest_patched"):
+ raise NestAsyncioError(
+ 'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
+ )
+ return loop
+ except RuntimeError:
+ pass
+
+class AbstractProvider(BaseProvider):
+ """
+ Abstract class for providing asynchronous functionality to derived classes.
+ """
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ *,
+ loop: AbstractEventLoop = None,
+ executor: ThreadPoolExecutor = None,
+ **kwargs
+ ) -> str:
+ """
+ Asynchronously creates a result based on the given model and messages.
+
+ Args:
+ cls (type): The class on which this method is called.
+ model (str): The model to use for creation.
+ messages (Messages): The messages to process.
+ loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
+ executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
+ **kwargs: Additional keyword arguments.
+
+ Returns:
+ str: The created result as a string.
+ """
+ loop = loop or asyncio.get_running_loop()
+
+ def create_func() -> str:
+ return "".join(cls.create_completion(model, messages, False, **kwargs))
+
+ return await asyncio.wait_for(
+ loop.run_in_executor(executor, create_func),
+ timeout=kwargs.get("timeout")
+ )
+
+ @classmethod
+ @property
+ def params(cls) -> str:
+ """
+ Returns the parameters supported by the provider.
+
+ Args:
+ cls (type): The class on which this property is called.
+
+ Returns:
+ str: A string listing the supported parameters.
+ """
+ sig = signature(
+ cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
+ cls.create_async if issubclass(cls, AsyncProvider) else
+ cls.create_completion
+ )
+
+ def get_type_name(annotation: type) -> str:
+ return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
+
+ args = ""
+ for name, param in sig.parameters.items():
+ if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
+ continue
+ args += f"\n {name}"
+ args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
+ args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
+
+ return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
+
+
+class AsyncProvider(AbstractProvider):
+ """
+ Provides asynchronous functionality for creating completions.
+ """
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ """
+ Creates a completion result synchronously.
+
+ Args:
+ cls (type): The class on which this method is called.
+ model (str): The model to use for creation.
+ messages (Messages): The messages to process.
+ stream (bool): Indicates whether to stream the results. Defaults to False.
+ loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
+ **kwargs: Additional keyword arguments.
+
+ Returns:
+ CreateResult: The result of the completion creation.
+ """
+ get_running_loop()
+ yield asyncio.run(cls.create_async(model, messages, **kwargs))
+
+ @staticmethod
+ @abstractmethod
+ async def create_async(
+ model: str,
+ messages: Messages,
+ **kwargs
+ ) -> str:
+ """
+ Abstract method for creating asynchronous results.
+
+ Args:
+ model (str): The model to use for creation.
+ messages (Messages): The messages to process.
+ **kwargs: Additional keyword arguments.
+
+ Raises:
+ NotImplementedError: If this method is not overridden in derived classes.
+
+ Returns:
+ str: The created result as a string.
+ """
+ raise NotImplementedError()
+
+
+class AsyncGeneratorProvider(AsyncProvider):
+ """
+ Provides asynchronous generator functionality for streaming results.
+ """
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ **kwargs
+ ) -> CreateResult:
+ """
+ Creates a streaming completion result synchronously.
+
+ Args:
+ cls (type): The class on which this method is called.
+ model (str): The model to use for creation.
+ messages (Messages): The messages to process.
+ stream (bool): Indicates whether to stream the results. Defaults to True.
+ loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
+ **kwargs: Additional keyword arguments.
+
+ Returns:
+ CreateResult: The result of the streaming completion creation.
+ """
+ loop = get_running_loop()
+ new_loop = False
+ if not loop:
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ new_loop = True
+
+ generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
+ gen = generator.__aiter__()
+
+ # Fix for RuntimeError: async generator ignored GeneratorExit
+ async def await_callback(callback):
+ return await callback()
+
+ try:
+ while True:
+ yield loop.run_until_complete(await_callback(gen.__anext__))
+ except StopAsyncIteration:
+ ...
+ # Fix for: ResourceWarning: unclosed event loop
+ finally:
+ if new_loop:
+ loop.close()
+ asyncio.set_event_loop(None)
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ **kwargs
+ ) -> str:
+ """
+ Asynchronously creates a result from a generator.
+
+ Args:
+ cls (type): The class on which this method is called.
+ model (str): The model to use for creation.
+ messages (Messages): The messages to process.
+ **kwargs: Additional keyword arguments.
+
+ Returns:
+ str: The created result as a string.
+ """
+ return "".join([
+ chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
+ if not isinstance(chunk, Exception)
+ ])
+
+ @staticmethod
+ @abstractmethod
+ async def create_async_generator(
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ **kwargs
+ ) -> AsyncResult:
+ """
+ Abstract method for creating an asynchronous generator.
+
+ Args:
+ model (str): The model to use for creation.
+ messages (Messages): The messages to process.
+ stream (bool): Indicates whether to stream the results. Defaults to True.
+ **kwargs: Additional keyword arguments.
+
+ Raises:
+ NotImplementedError: If this method is not overridden in derived classes.
+
+ Returns:
+ AsyncResult: An asynchronous generator yielding results.
+ """
+ raise NotImplementedError()
+
+class ProviderModelMixin:
+ default_model: str
+ models: list[str] = []
+ model_aliases: dict[str, str] = {}
+
+ @classmethod
+ def get_models(cls) -> list[str]:
+ return cls.models
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if not model:
+ model = cls.default_model
+ elif model in cls.model_aliases:
+ model = cls.model_aliases[model]
+ elif model not in cls.get_models():
+ raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
+ debug.last_model = model
+ return model \ No newline at end of file
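
For orientation, a hypothetical minimal provider built on the relocated base
classes; EchoProvider and everything about it are illustrative, not part of
the commit:

    from g4f.providers.base_provider import AsyncGeneratorProvider
    from g4f.typing import AsyncResult, Messages

    class EchoProvider(AsyncGeneratorProvider):
        url = "https://example.invalid"  # hypothetical
        working = True

        @staticmethod
        async def create_async_generator(
            model: str, messages: Messages, stream: bool = True, **kwargs
        ) -> AsyncResult:
            # Echo the last message back word by word.
            for word in messages[-1]["content"].split():
                yield word + " "

    # The inherited create_completion drives the async generator synchronously:
    chunks = EchoProvider.create_completion("any-model", [{"role": "user", "content": "hello world"}])
    print("".join(chunks))
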
diff --git a/g4f/Provider/create_images.py b/g4f/providers/create_images.py
index 2ca92432..29a2a041 100644
--- a/g4f/Provider/create_images.py
+++ b/g4f/providers/create_images.py
@@ -2,9 +2,10 @@ from __future__ import annotations
import re
import asyncio
+
from .. import debug
from ..typing import CreateResult, Messages
-from ..base_provider import BaseProvider, ProviderType
+from .types import BaseProvider, ProviderType
system_message = """
You can generate images, pictures, photos or img with the DALL-E 3 image generator.
diff --git a/g4f/providers/helper.py b/g4f/providers/helper.py
new file mode 100644
index 00000000..49d033d1
--- /dev/null
+++ b/g4f/providers/helper.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import random
+import secrets
+import string
+from aiohttp import BaseConnector
+
+from ..typing import Messages, Optional
+from ..errors import MissingRequirementsError
+
+def format_prompt(messages: Messages, add_special_tokens=False) -> str:
+ """
+ Format a series of messages into a single string, optionally adding special tokens.
+
+ Args:
+ messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
+ add_special_tokens (bool): Whether to add special formatting tokens.
+
+ Returns:
+ str: A formatted string containing all messages.
+ """
+ if not add_special_tokens and len(messages) <= 1:
+ return messages[0]["content"]
+ formatted = "\n".join([
+ f'{message["role"].capitalize()}: {message["content"]}'
+ for message in messages
+ ])
+ return f"{formatted}\nAssistant:"
+
+def get_random_string(length: int = 10) -> str:
+ """
+ Generate a random string of specified length, containing lowercase letters and digits.
+
+ Args:
+ length (int, optional): Length of the random string to generate. Defaults to 10.
+
+ Returns:
+ str: A random string of the specified length.
+ """
+ return ''.join(
+ random.choice(string.ascii_lowercase + string.digits)
+ for _ in range(length)
+ )
+
+def get_random_hex() -> str:
+ """
+ Generate a random hexadecimal string of a fixed length.
+
+ Returns:
+ str: A random hexadecimal string of 32 characters (16 bytes).
+ """
+ return secrets.token_hex(16).zfill(32)
+
+def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]:
+ if proxy and not connector:
+ try:
+ from aiohttp_socks import ProxyConnector
+ connector = ProxyConnector.from_url(proxy)
+ except ImportError:
+ raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
+ return connector \ No newline at end of file
diff --git a/g4f/Provider/retry_provider.py b/g4f/providers/retry_provider.py
index 9cc026fc..a7ab2881 100644
--- a/g4f/Provider/retry_provider.py
+++ b/g4f/providers/retry_provider.py
@@ -2,8 +2,9 @@ from __future__ import annotations
import asyncio
import random
+
from ..typing import CreateResult, Messages
-from ..base_provider import BaseRetryProvider
+from .types import BaseRetryProvider
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError
diff --git a/g4f/base_provider.py b/g4f/providers/types.py
index cc3451a2..7b11ec43 100644
--- a/g4f/base_provider.py
+++ b/g4f/providers/types.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Union, List, Dict, Type
-from .typing import Messages, CreateResult
+from ..typing import Messages, CreateResult
class BaseProvider(ABC):
"""
@@ -81,7 +81,7 @@ class BaseProvider(ABC):
Dict[str, str]: A dictionary with provider's details.
"""
return {'name': cls.__name__, 'url': cls.url}
-
+
class BaseRetryProvider(BaseProvider):
"""
Base class for a provider that implements retry logic.
@@ -113,5 +113,5 @@ class BaseRetryProvider(BaseProvider):
self.working = True
self.exceptions: Dict[str, Exception] = {}
self.last_provider: Type[BaseProvider] = None
-
+
ProviderType = Union[Type[BaseProvider], BaseRetryProvider] \ No newline at end of file
diff --git a/g4f/requests.py b/g4f/requests/__init__.py
index d7b5996b..d278ffaf 100644
--- a/g4f/requests.py
+++ b/g4f/requests/__init__.py
@@ -4,15 +4,15 @@ from urllib.parse import urlparse
try:
from curl_cffi.requests import Session
- from .requests_curl_cffi import StreamResponse, StreamSession
+ from .curl_cffi import StreamResponse, StreamSession
has_curl_cffi = True
except ImportError:
from typing import Type as Session
- from .requests_aiohttp import StreamResponse, StreamSession
+ from .aiohttp import StreamResponse, StreamSession
has_curl_cffi = False
-from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
-from .errors import MissingRequirementsError
+from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
+from ..errors import MissingRequirementsError
from .defaults import DEFAULT_HEADERS
def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict:
diff --git a/g4f/requests_aiohttp.py b/g4f/requests/aiohttp.py
index 0da8973b..d9bd6541 100644
--- a/g4f/requests_aiohttp.py
+++ b/g4f/requests/aiohttp.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from aiohttp import ClientSession, ClientResponse, ClientTimeout
from typing import AsyncGenerator, Any
-from .Provider.helper import get_connector
+from ..providers.helper import get_connector
from .defaults import DEFAULT_HEADERS
class StreamResponse(ClientResponse):
diff --git a/g4f/requests_curl_cffi.py b/g4f/requests/curl_cffi.py
index 64e41d65..cfcdd63b 100644
--- a/g4f/requests_curl_cffi.py
+++ b/g4f/requests/curl_cffi.py
@@ -44,7 +44,7 @@ class StreamResponse:
inner: Response = await self.inner
self.inner = inner
self.request = inner.request
- self.status_code: int = inner.status_code
+ self.status: int = inner.status_code
self.reason: str = inner.reason
self.ok: bool = inner.ok
self.headers = inner.headers
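
Renaming status_code to status aligns the curl_cffi wrapper with aiohttp's
ClientResponse, so callers can check a single attribute regardless of which
backend g4f.requests picked. Sketch, assuming StreamSession works without
arguments:

    import asyncio
    from g4f.requests import StreamSession

    async def check(url: str) -> int:
        async with StreamSession() as session:
            async with session.get(url) as response:
                return response.status  # was response.status_code on curl_cffi

    print(asyncio.run(check("https://example.com")))
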
diff --git a/g4f/defaults.py b/g4f/requests/defaults.py
index 6ae6d7eb..6ae6d7eb 100644
--- a/g4f/defaults.py
+++ b/g4f/requests/defaults.py
diff --git a/g4f/stubs.py b/g4f/stubs.py
index 1cbbb134..49cf8a88 100644
--- a/g4f/stubs.py
+++ b/g4f/stubs.py
@@ -1,35 +1,98 @@
from __future__ import annotations
+from typing import Union
+
class Model():
- def __getitem__(self, item):
- return getattr(self, item)
+ ...
class ChatCompletion(Model):
- def __init__(self, content: str, finish_reason: str):
- self.choices = [ChatCompletionChoice(ChatCompletionMessage(content, finish_reason))]
+ def __init__(
+ self,
+ content: str,
+ finish_reason: str,
+ completion_id: str = None,
+ created: int = None
+ ):
+ self.id: str = f"chatcmpl-{completion_id}" if completion_id else None
+ self.object: str = "chat.completion"
+ self.created: int = created
+ self.model: str = None
+ self.provider: str = None
+ self.choices = [ChatCompletionChoice(ChatCompletionMessage(content), finish_reason)]
+ self.usage: dict[str, int] = {
+ "prompt_tokens": 0, #prompt_tokens,
+ "completion_tokens": 0, #completion_tokens,
+ "total_tokens": 0, #prompt_tokens + completion_tokens,
+ }
+
+ def to_json(self):
+ return {
+ **self.__dict__,
+ "choices": [choice.to_json() for choice in self.choices]
+ }
class ChatCompletionChunk(Model):
- def __init__(self, content: str, finish_reason: str):
- self.choices = [ChatCompletionDeltaChoice(ChatCompletionDelta(content, finish_reason))]
+ def __init__(
+ self,
+ content: str,
+ finish_reason: str,
+ completion_id: str = None,
+ created: int = None
+ ):
+ self.id: str = f"chatcmpl-{completion_id}" if completion_id else None
+ self.object: str = "chat.completion.chunk"
+ self.created: int = created
+ self.model: str = None
+ self.provider: str = None
+ self.choices = [ChatCompletionDeltaChoice(ChatCompletionDelta(content), finish_reason)]
+
+ def to_json(self):
+ return {
+ **self.__dict__,
+ "choices": [choice.to_json() for choice in self.choices]
+ }
class ChatCompletionMessage(Model):
- def __init__(self, content: str, finish_reason: str):
+ def __init__(self, content: Union[str, None]):
+ self.role = "assistant"
self.content = content
- self.finish_reason = finish_reason
+
+ def to_json(self):
+ return self.__dict__
class ChatCompletionChoice(Model):
- def __init__(self, message: ChatCompletionMessage):
+ def __init__(self, message: ChatCompletionMessage, finish_reason: str):
+ self.index = 0
self.message = message
+ self.finish_reason = finish_reason
+
+ def to_json(self):
+ return {
+ **self.__dict__,
+ "message": self.message.to_json()
+ }
class ChatCompletionDelta(Model):
- def __init__(self, content: str, finish_reason: str):
- self.content = content
- self.finish_reason = finish_reason
+ content: Union[str, None] = None
+
+ def __init__(self, content: Union[str, None]):
+ if content is not None:
+ self.content = content
+
+ def to_json(self):
+ return self.__dict__
class ChatCompletionDeltaChoice(Model):
- def __init__(self, delta: ChatCompletionDelta):
+ def __init__(self, delta: ChatCompletionDelta, finish_reason: Union[str, None]):
self.delta = delta
+ self.finish_reason = finish_reason
+
+ def to_json(self):
+ return {
+ **self.__dict__,
+ "delta": self.delta.to_json()
+ }
class Image(Model):
url: str
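
The reworked stubs now mirror the OpenAI response schema closely enough for
the API layer to serialize them directly. Shape of a streamed chunk, with
illustrative values:

    chunk = ChatCompletionChunk("Hel", None, completion_id="abc123", created=1708650000)
    chunk.to_json()
    # {'id': 'chatcmpl-abc123', 'object': 'chat.completion.chunk',
    #  'created': 1708650000, 'model': None, 'provider': None,
    #  'choices': [{'delta': {'content': 'Hel'}, 'finish_reason': None}]}
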
diff --git a/g4f/version.py b/g4f/version.py
index 63941baf..0d85a7f5 100644
--- a/g4f/version.py
+++ b/g4f/version.py
@@ -7,6 +7,9 @@ from importlib.metadata import version as get_package_version, PackageNotFoundEr
from subprocess import check_output, CalledProcessError, PIPE
from .errors import VersionNotFoundError
+PACKAGE_NAME = "g4f"
+GITHUB_REPOSITORY = "xtekky/gpt4free"
+
def get_pypi_version(package_name: str) -> str:
"""
Retrieves the latest version of a package from PyPI.
@@ -45,25 +48,6 @@ def get_github_version(repo: str) -> str:
except requests.RequestException as e:
raise VersionNotFoundError(f"Failed to get GitHub release version: {e}")
-def get_latest_version() -> str:
- """
- Retrieves the latest release version of the 'g4f' package from PyPI or GitHub.
-
- Returns:
- str: The latest release version of 'g4f'.
-
- Note:
- The function first tries to fetch the version from PyPI. If the package is not found,
- it retrieves the version from the GitHub repository.
- """
- try:
- # Is installed via package manager?
- get_package_version("g4f")
- return get_pypi_version("g4f")
- except PackageNotFoundError:
- # Else use Github version:
- return get_github_version("xtekky/gpt4free")
-
class VersionUtils:
"""
Utility class for managing and comparing package versions of 'g4f'.
@@ -82,7 +66,7 @@ class VersionUtils:
"""
# Read from package manager
try:
- return get_package_version("g4f")
+ return get_package_version(PACKAGE_NAME)
except PackageNotFoundError:
pass
@@ -108,7 +92,12 @@ class VersionUtils:
Returns:
str: The latest version of 'g4f'.
"""
- return get_latest_version()
+ # Is installed via package manager?
+ try:
+ get_package_version(PACKAGE_NAME)
+ except PackageNotFoundError:
+ return get_github_version(GITHUB_REPOSITORY)
+ return get_pypi_version(PACKAGE_NAME)
def check_version(self) -> None:
"""