author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-04-07 10:36:13 +0200
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-04-07 10:36:13 +0200
commit     b35dfcd1b01c575b65e0299ef71d285dc8f41459 (patch)
tree       cfe5f4a390af62fafefd1d27ca2c82a23cdcab49
parent     Update Gemini.py (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Aura.py                        2
-rw-r--r--  g4f/Provider/DeepInfra.py                  70
-rw-r--r--  g4f/Provider/GeminiPro.py                   2
-rw-r--r--  g4f/Provider/Local.py                      42
-rw-r--r--  g4f/Provider/You.py                         8
-rw-r--r--  g4f/Provider/__init__.py                    1
-rw-r--r--  g4f/Provider/deprecated/OpenAssistant.py (renamed from g4f/Provider/needs_auth/OpenAssistant.py)  1
-rw-r--r--  g4f/Provider/deprecated/__init__.py         3
-rw-r--r--  g4f/Provider/needs_auth/Gemini.py           2
-rw-r--r--  g4f/Provider/needs_auth/Openai.py          96
-rw-r--r--  g4f/Provider/needs_auth/ThebApi.py         57
-rw-r--r--  g4f/Provider/needs_auth/__init__.py         1
-rw-r--r--  g4f/Provider/you/__init__.py                0
-rw-r--r--  g4f/Provider/you/har_file.py               70
14 files changed, 227 insertions, 128 deletions
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index 4501df2c..7e2b2831 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -18,7 +18,7 @@ class Aura(AsyncGeneratorProvider):
messages: Messages,
proxy: str = None,
temperature: float = 0.5,
- max_tokens: int = 8192.
+ max_tokens: int = 8192,
webdriver: WebDriver = None,
**kwargs
) -> AsyncResult:
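The one-character fix above matters more than it looks: with the trailing period, 8192. parses as a float literal and the next parameter name becomes stray syntax, so the whole module fails to import. A minimal reproduction (hypothetical function name):

# SyntaxError: a float literal followed by a bare name in a parameter list.
# def f(max_tokens: int = 8192. webdriver=None): ...
def f(max_tokens: int = 8192, webdriver=None):   # fixed: comma, not period
    ...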
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 6cf52694..53c8d6b9 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -1,42 +1,41 @@
from __future__ import annotations
-import json
import requests
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests import StreamSession, raise_for_status
+from .needs_auth.Openai import Openai
-class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
+class DeepInfra(Openai):
url = "https://deepinfra.com"
working = True
+ needs_auth = False
supports_stream = True
supports_message_history = True
default_model = 'meta-llama/Llama-2-70b-chat-hf'
-
+
@classmethod
def get_models(cls):
if not cls.models:
url = 'https://api.deepinfra.com/models/featured'
models = requests.get(url).json()
- cls.models = [model['model_name'] for model in models]
+ cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
return cls.models
@classmethod
- async def create_async_generator(
+ def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool,
- proxy: str = None,
- timeout: int = 120,
- auth: str = None,
+ api_base: str = "https://api.deepinfra.com/v1/openai",
+ temperature: float = 0.7,
+ max_tokens: int = 1028,
**kwargs
) -> AsyncResult:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
'Connection': 'keep-alive',
- 'Content-Type': 'application/json',
+ 'Content-Type': None,
'Origin': 'https://deepinfra.com',
'Referer': 'https://deepinfra.com/',
'Sec-Fetch-Dest': 'empty',
@@ -44,46 +43,17 @@ class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
'X-Deepinfra-Source': 'web-embed',
- 'accept': 'text/event-stream',
+ 'Accept': None,
'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
- if auth:
- headers['Authorization'] = f"bearer {auth}"
-
- async with StreamSession(headers=headers,
- timeout=timeout,
- proxies={"https": proxy},
- impersonate="chrome110"
- ) as session:
- json_data = {
- 'model' : cls.get_model(model),
- 'messages': messages,
- 'temperature': kwargs.get("temperature", 0.7),
- 'max_tokens': kwargs.get("max_tokens", 512),
- 'stop': kwargs.get("stop", []),
- 'stream' : True
- }
- async with session.post('https://api.deepinfra.com/v1/openai/chat/completions',
- json=json_data) as response:
- await raise_for_status(response)
- first = True
- async for line in response.iter_lines():
- if not line.startswith(b"data: "):
- continue
- try:
- json_line = json.loads(line[6:])
- choices = json_line.get("choices", [{}])
- finish_reason = choices[0].get("finish_reason")
- if finish_reason:
- break
- token = choices[0].get("delta", {}).get("content")
- if token:
- if first:
- token = token.lstrip()
- if token:
- first = False
- yield token
- except Exception:
- raise RuntimeError(f"Response: {line}")
+ return super().create_async_generator(
+ model, messages,
+ stream=stream,
+ api_base=api_base,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ headers=headers,
+ **kwargs
+ )
\ No newline at end of file
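DeepInfra is now a thin subclass of the OpenAI-compatible Openai provider, so payload construction, streaming, and error handling live in one place. A minimal usage sketch, assuming the package layout in this commit and that the default model is still featured on DeepInfra:

import asyncio
from g4f.Provider import DeepInfra

async def main():
    async for chunk in DeepInfra.create_async_generator(
        model="meta-llama/Llama-2-70b-chat-hf",
        messages=[{"role": "user", "content": "Say hello."}],
        stream=True,                 # forwarded to Openai.create_async_generator
    ):
        if isinstance(chunk, str):   # the generator also yields a FinishReason
            print(chunk, end="")

asyncio.run(main())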
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/GeminiPro.py
index 4378a18c..214b7383 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/GeminiPro.py
@@ -76,7 +76,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
if not response.ok:
data = await response.json()
data = data[0] if isinstance(data, list) else data
- raise RuntimeError(f"Response {response.status}: {data["error"]["message"]}")
+ raise RuntimeError(f"Response {response.status}: {data['error']['message']}")
if stream:
lines = []
async for chunk in response.content:
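The quote swap above is a compatibility fix, not a style choice: before Python 3.12 (PEP 701), an f-string expression may not reuse the outer quote character, so the removed line was a SyntaxError on 3.11 and earlier. Illustration:

data = {"error": {"message": "quota exceeded"}}
# f"Response: {data["error"]["message"]}"       # SyntaxError on Python < 3.12
print(f"Response: {data['error']['message']}")  # parses on all supported versions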
diff --git a/g4f/Provider/Local.py b/g4f/Provider/Local.py
new file mode 100644
index 00000000..b4d096de
--- /dev/null
+++ b/g4f/Provider/Local.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+from ..locals.models import get_models
+try:
+ from ..locals.provider import LocalProvider
+ has_requirements = True
+except ModuleNotFoundError:
+ has_requirements = False
+
+from ..typing import Messages, CreateResult
+from ..providers.base_provider import AbstractProvider, ProviderModelMixin
+from ..errors import MissingRequirementsError
+
+class Local(AbstractProvider, ProviderModelMixin):
+ working = True
+ supports_message_history = True
+ supports_system_message = True
+ supports_stream = True
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ cls.models = list(get_models())
+ cls.default_model = cls.models[0]
+ return cls.models
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+ if not has_requirements:
+ raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
+ return LocalProvider.create_completion(
+ cls.get_model(model),
+ messages,
+ stream,
+ **kwargs
+ )
\ No newline at end of file
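The new Local provider is a thin wrapper that defers to LocalProvider and surfaces a clear error when the optional dependency is missing. A usage sketch, assuming the gpt4all extra is installed and at least one local model is available:

from g4f.Provider import Local

model = Local.get_models()[0]   # also sets Local.default_model as a side effect
for token in Local.create_completion(
    model=model,
    messages=[{"role": "user", "content": "Hi"}],
    stream=True,
):
    print(token, end="")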
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 9b040367..231f953f 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -17,6 +17,8 @@ from ..image import to_bytes, ImageResponse
from ..requests import StreamSession, raise_for_status
from ..errors import MissingRequirementsError
+from .you.har_file import get_dfp_telemetry_id
+
class You(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://you.com"
working = True
@@ -45,6 +47,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
+ stream: bool = True,
image: ImageType = None,
image_name: str = None,
proxy: str = None,
@@ -56,7 +59,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
if image is not None:
chat_mode = "agent"
elif not model or model == cls.default_model:
- chat_mode = "default"
+ ...
elif model.startswith("dall-e"):
chat_mode = "create"
else:
@@ -108,7 +111,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
data = json.loads(line[6:])
if event == "youChatToken" and event in data:
yield data[event]
- elif event == "youChatUpdate" and "t" in data:
+ elif event == "youChatUpdate" and "t" in data and data["t"] is not None:
match = re.search(r"!\[fig\]\((.+?)\)", data["t"])
if match:
yield ImageResponse(match.group(1), messages[-1]["content"])
@@ -177,6 +180,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
"X-SDK-Parent-Host": cls.url
},
json={
+ "dfp_telemetry_id": await get_dfp_telemetry_id(),
"email": f"{user_uuid}@gmail.com",
"password": f"{user_uuid}#{user_uuid}",
"session_duration_minutes": 129600
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 50a5da31..1db29e19 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -33,6 +33,7 @@ from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
from .Llama2 import Llama2
+from .Local import Local
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Vercel import Vercel
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/deprecated/OpenAssistant.py
index e549b517..80cae3c2 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/deprecated/OpenAssistant.py
@@ -8,7 +8,6 @@ from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
-
class OpenAssistant(AsyncGeneratorProvider):
url = "https://open-assistant.io/chat"
needs_auth = True
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index f6b4a1d9..408f3913 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -31,4 +31,5 @@ from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
-from .Ylokh import Ylokh
\ No newline at end of file
+from .Ylokh import Ylokh
+from .OpenAssistant import OpenAssistant
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 9013a4f8..fc9d9575 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -19,7 +19,7 @@ except ImportError:
from ...typing import Messages, Cookies, ImageType, AsyncResult
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
-from requests.raise_for_status import raise_for_status
+from ...requests.raise_for_status import raise_for_status
from ...errors import MissingAuthError, MissingRequirementsError
from ...image import to_bytes, ImageResponse
from ...webdriver import get_browser, get_driver_cookies
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/Openai.py
index b876cd0b..6cd2cf86 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/Openai.py
@@ -3,10 +3,10 @@ from __future__ import annotations
import json
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
-from ...typing import AsyncResult, Messages
+from ...typing import Union, Optional, AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ...requests import StreamSession
-from ...errors import MissingAuthError
+from ...errors import MissingAuthError, ResponseError
class Openai(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://openai.com"
@@ -27,48 +27,82 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
temperature: float = None,
max_tokens: int = None,
top_p: float = None,
- stop: str = None,
+ stop: Union[str, list[str]] = None,
stream: bool = False,
+ headers: dict = None,
+ extra_data: dict = {},
**kwargs
) -> AsyncResult:
- if api_key is None:
+ if cls.needs_auth and api_key is None:
raise MissingAuthError('Add a "api_key"')
async with StreamSession(
proxies={"all": proxy},
- headers=cls.get_headers(api_key),
+ headers=cls.get_headers(stream, api_key, headers),
timeout=timeout
) as session:
- data = {
- "messages": messages,
- "model": cls.get_model(model),
- "temperature": temperature,
- "max_tokens": max_tokens,
- "top_p": top_p,
- "stop": stop,
- "stream": stream,
- }
+ data = filter_none(
+ messages=messages,
+ model=cls.get_model(model),
+ temperature=temperature,
+ max_tokens=max_tokens,
+ top_p=top_p,
+ stop=stop,
+ stream=stream,
+ **extra_data
+ )
async with session.post(f"{api_base.rstrip('/')}/chat/completions", json=data) as response:
await raise_for_status(response)
- async for line in response.iter_lines():
- if line.startswith(b"data: ") or not stream:
- async for chunk in cls.read_line(line[6:] if stream else line, stream):
- yield chunk
+ if not stream:
+ data = await response.json()
+ choice = data["choices"][0]
+ if "content" in choice["message"]:
+ yield choice["message"]["content"].strip()
+ finish = cls.read_finish_reason(choice)
+ if finish is not None:
+ yield finish
+ else:
+ first = True
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk == b"[DONE]":
+ break
+ data = json.loads(chunk)
+ if "error_message" in data:
+ raise ResponseError(data["error_message"])
+ choice = data["choices"][0]
+ if "content" in choice["delta"] and choice["delta"]["content"]:
+ delta = choice["delta"]["content"]
+ if first:
+ delta = delta.lstrip()
+ if delta:
+ first = False
+ yield delta
+ finish = cls.read_finish_reason(choice)
+ if finish is not None:
+ yield finish
@staticmethod
- async def read_line(line: str, stream: bool):
- if line == b"[DONE]":
- return
- choice = json.loads(line)["choices"][0]
- if stream and "content" in choice["delta"] and choice["delta"]["content"]:
- yield choice["delta"]["content"]
- elif not stream and "content" in choice["message"]:
- yield choice["message"]["content"]
+ def read_finish_reason(choice: dict) -> Optional[FinishReason]:
if "finish_reason" in choice and choice["finish_reason"] is not None:
- yield FinishReason(choice["finish_reason"])
+ return FinishReason(choice["finish_reason"])
- @staticmethod
- def get_headers(api_key: str) -> dict:
+ @classmethod
+ def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
return {
- "Authorization": f"Bearer {api_key}",
+ "Accept": "text/event-stream" if stream else "application/json",
"Content-Type": "application/json",
- }
\ No newline at end of file
+ **(
+ {"Authorization": f"Bearer {api_key}"}
+ if cls.needs_auth and api_key is not None
+ else {}
+ ),
+ **({} if headers is None else headers)
+ }
+
+def filter_none(**kwargs) -> dict:
+ return {
+ key: value
+ for key, value in kwargs.items()
+ if value is not None
+ }
\ No newline at end of file
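filter_none keeps unset sampling parameters out of the request body, so OpenAI-compatible backends that reject explicit nulls only ever see fields the caller actually set. For example:

payload = filter_none(
    model="gpt-3.5-turbo",
    temperature=None,   # unset -> omitted from the JSON body
    top_p=None,
    stream=True,
)
assert payload == {"model": "gpt-3.5-turbo", "stream": True}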
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 1c7baf8d..48879bcb 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -1,10 +1,7 @@
from __future__ import annotations
-import requests
-
-from ...typing import Any, CreateResult, Messages
-from ..base_provider import AbstractProvider, ProviderModelMixin
-from ...errors import MissingAuthError
+from ...typing import CreateResult, Messages
+from .Openai import Openai
models = {
"theb-ai": "TheB.AI",
@@ -30,7 +27,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}
-class ThebApi(AbstractProvider, ProviderModelMixin):
+class ThebApi(Openai):
url = "https://theb.ai"
working = True
needs_auth = True
@@ -38,44 +35,26 @@ class ThebApi(AbstractProvider, ProviderModelMixin):
models = list(models)
@classmethod
- def create_completion(
+ def create_async_generator(
cls,
model: str,
messages: Messages,
- stream: bool,
- auth: str = None,
- proxy: str = None,
+ api_base: str = "https://api.theb.ai/v1",
+ temperature: float = 1,
+ top_p: float = 1,
**kwargs
) -> CreateResult:
- if not auth:
- raise MissingAuthError("Missing auth")
- headers = {
- 'accept': 'application/json',
- 'authorization': f'Bearer {auth}',
- 'content-type': 'application/json',
- }
- # response = requests.get("https://api.baizhi.ai/v1/models", headers=headers).json()["data"]
- # models = dict([(m["id"], m["name"]) for m in response])
- # print(json.dumps(models, indent=4))
- data: dict[str, Any] = {
- "model": cls.get_model(model),
- "messages": messages,
- "stream": False,
+ if "auth" in kwargs:
+ kwargs["api_key"] = kwargs["auth"]
+ system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+ if not system_message:
+ system_message = "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."
+ messages = [message for message in messages if message["role"] != "system"]
+ data = {
"model_params": {
- "system_prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."),
- "temperature": 1,
- "top_p": 1,
- **kwargs
+ "system_prompt": system_message,
+ "temperature": temperature,
+ "top_p": top_p,
}
}
- response = requests.post(
- "https://api.theb.ai/v1/chat/completions",
- headers=headers,
- json=data,
- proxies={"https": proxy}
- )
- try:
- response.raise_for_status()
- yield response.json()["choices"][0]["message"]["content"]
- except:
- raise RuntimeError(f"Response: {next(response.iter_lines()).decode()}") \ No newline at end of file
+ return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs) \ No newline at end of file
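ThebApi now rides on the shared Openai implementation and folds any system messages into TheB's model_params.system_prompt field while stripping them from the message list. A small sketch of that transformation:

messages = [
    {"role": "system", "content": "Be brief."},
    {"role": "user", "content": "Hello"},
]
system_message = "\n".join(m["content"] for m in messages if m["role"] == "system")
messages = [m for m in messages if m["role"] != "system"]
assert system_message == "Be brief."
assert messages == [{"role": "user", "content": "Hello"}]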
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 92fa165b..581335e1 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -3,7 +3,6 @@ from .Raycast import Raycast
from .Theb import Theb
from .ThebApi import ThebApi
from .OpenaiChat import OpenaiChat
-from .OpenAssistant import OpenAssistant
from .Poe import Poe
from .Openai import Openai
from .Groq import Groq
\ No newline at end of file
diff --git a/g4f/Provider/you/__init__.py b/g4f/Provider/you/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/g4f/Provider/you/__init__.py
diff --git a/g4f/Provider/you/har_file.py b/g4f/Provider/you/har_file.py
new file mode 100644
index 00000000..59e1ff94
--- /dev/null
+++ b/g4f/Provider/you/har_file.py
@@ -0,0 +1,70 @@
+import json
+import os
+import random
+import uuid
+
+from ...requests import StreamSession, raise_for_status
+
+class NoValidHarFileError(Exception):
+ ...
+
+class arkReq:
+ def __init__(self, arkURL, arkHeaders, arkBody, arkCookies, userAgent):
+ self.arkURL = arkURL
+ self.arkHeaders = arkHeaders
+ self.arkBody = arkBody
+ self.arkCookies = arkCookies
+ self.userAgent = userAgent
+
+arkPreURL = "https://telemetry.stytch.com/submit"
+chatArks: list = None
+
+def readHAR():
+ dirPath = "./"
+ harPath = []
+ chatArks = []
+ for root, dirs, files in os.walk(dirPath):
+ for file in files:
+ if file.endswith(".har"):
+ harPath.append(os.path.join(root, file))
+ if harPath:
+ break
+ if not harPath:
+ raise NoValidHarFileError("No .har file found")
+ for path in harPath:
+ with open(path, 'rb') as file:
+ try:
+ harFile = json.load(file)
+ except json.JSONDecodeError:
+ # Error: not a HAR file!
+ continue
+ for v in harFile['log']['entries']:
+ if arkPreURL in v['request']['url']:
+ chatArks.append(parseHAREntry(v))
+ if not chatArks:
+ raise NoValidHarFileError("No telemetry in .har files found")
+ return chatArks
+
+def parseHAREntry(entry) -> arkReq:
+ tmpArk = arkReq(
+ arkURL=entry['request']['url'],
+ arkHeaders={h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')},
+ arkBody=entry['request']['postData']['text'],
+ arkCookies={c['name']: c['value'] for c in entry['request']['cookies']},
+ userAgent=""
+ )
+ tmpArk.userAgent = tmpArk.arkHeaders.get('user-agent', '')
+ return tmpArk
+
+async def sendRequest(tmpArk: arkReq, proxy: str = None):
+ async with StreamSession(headers=tmpArk.arkHeaders, cookies=tmpArk.arkCookies, proxies={"all": proxy}) as session:
+ async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
+ await raise_for_status(response)
+ return await response.text()
+
+async def get_dfp_telemetry_id(proxy: str = None):
+ return str(uuid.uuid4())
+ global chatArks
+ if chatArks is None:
+ chatArks = readHAR()
+ return await sendRequest(random.choice(chatArks), proxy)
\ No newline at end of file
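As committed, get_dfp_telemetry_id returns a fresh random UUID on its first line, so the HAR-replay code below that return is unreachable; the helpers sketch the intended flow: scan the working directory for a .har capture, pick a recorded Stytch telemetry request, and replay it. A hedged sketch of that intended path (hypothetical wrapper name):

import asyncio, random

async def replay_telemetry(proxy: str = None) -> str:
    arks = readHAR()            # raises NoValidHarFileError if no .har is found
    return await sendRequest(random.choice(arks), proxy)

# asyncio.run(replay_telemetry())  # requires a captured .har file in ./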