From c6b33e527c9af9c72a615a7b23c8d40b5783862f Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 9 Oct 2023 13:33:20 +0200
Subject: Add Proxy Support and Create Provider to Readme

Add proxy support to many providers
---
 g4f/Provider/needs_auth/Bard.py          | 14 ++++++--------
 g4f/Provider/needs_auth/HuggingChat.py   |  9 +++++----
 g4f/Provider/needs_auth/OpenAssistant.py | 11 ++++++-----
 g4f/Provider/needs_auth/OpenaiChat.py    | 17 +++++++++++------
 g4f/Provider/needs_auth/Raycast.py       | 15 +++++++++++----
 g4f/Provider/needs_auth/Theb.py          | 26 +++++++++++++++-----------
 6 files changed, 54 insertions(+), 38 deletions(-)

(limited to 'g4f/Provider/needs_auth')

diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 7c42b680..fe9777db 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -6,7 +6,9 @@ import re
 
 from aiohttp import ClientSession
 
-from ..base_provider import AsyncProvider, format_prompt, get_cookies
+from ...typing import Messages
+from ..base_provider import AsyncProvider
+from ..helper import format_prompt, get_cookies
 
 
 class Bard(AsyncProvider):
@@ -19,25 +21,22 @@ class Bard(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
     ) -> str:
         prompt = format_prompt(messages)
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
         if not cookies:
             cookies = get_cookies(".google.com")
 
         headers = {
             'authority': 'bard.google.com',
-            'origin': 'https://bard.google.com',
-            'referer': 'https://bard.google.com/',
+            'origin': cls.url,
+            'referer': f'{cls.url}/',
             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
             'x-same-domain': '1',
         }
-
         async with ClientSession(
             cookies=cookies,
             headers=headers
@@ -67,7 +66,6 @@ class Bard(AsyncProvider):
             'lamda',
             'BardFrontendService'
         ])
-
         async with session.post(
             f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
             data=data,
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 1d500338..68c6713b 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -4,8 +4,9 @@ import json, uuid
 
 from aiohttp import ClientSession
 
-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies
 
 
 class HuggingChat(AsyncGeneratorProvider):
@@ -18,12 +19,12 @@ class HuggingChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool = True,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         model = model if model else cls.model
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/needs_auth/OpenAssistant.py
index 3b0e0424..de62636c 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/needs_auth/OpenAssistant.py
@@ -4,8 +4,9 @@ import json
 
 from aiohttp import ClientSession
 
-from ...typing import Any, AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies
 
 
 class OpenAssistant(AsyncGeneratorProvider):
@@ -18,11 +19,11 @@ class OpenAssistant(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
-        **kwargs: Any
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         if not cookies:
             cookies = get_cookies("open-assistant.io")
 
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index c41909e3..b4b4a670 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -4,7 +4,7 @@ import uuid, json, time
 
 from ..base_provider import AsyncGeneratorProvider
 from ..helper import get_browser, get_cookies, format_prompt
-from ...typing import AsyncGenerator
+from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
 
 class OpenaiChat(AsyncGeneratorProvider):
@@ -18,13 +18,13 @@ class OpenaiChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
+        timeout: int = 120,
         access_token: str = None,
         cookies: dict = None,
-        timeout: int = 30,
-        **kwargs: dict
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         proxies = {"https": proxy}
         if not access_token:
             access_token = await cls.get_access_token(cookies, proxies)
@@ -32,7 +32,12 @@ class OpenaiChat(AsyncGeneratorProvider):
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
         }
-        async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
+        async with StreamSession(
+            proxies=proxies,
+            headers=headers,
+            impersonate="chrome107",
+            timeout=timeout
+        ) as session:
             messages = [
                 {
                     "id": str(uuid.uuid4()),
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 619b217b..4b448985 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -4,7 +4,7 @@ import json
 
 import requests
 
-from ...typing import Any, CreateResult
+from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
 
 
@@ -19,9 +19,10 @@ class Raycast(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
-        **kwargs: Any,
+        proxy: str = None,
+        **kwargs,
     ) -> CreateResult:
         auth = kwargs.get('auth')
         headers = {
@@ -47,7 +48,13 @@ class Raycast(BaseProvider):
             "system_instruction": "markdown",
             "temperature": 0.5
         }
-        response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
+        response = requests.post(
+            "https://backend.raycast.com/api/v1/ai/chat_completions",
+            headers=headers,
+            json=data,
+            stream=True,
+            proxies={"https": proxy}
+        )
         for token in response.iter_lines():
             if b'data: ' not in token:
                 continue
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index c35ea592..9803db97 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -2,11 +2,11 @@ from __future__ import annotations
 
 import json
 import random
-
 import requests
 
-from ...typing import Any, CreateResult
+from ...typing import Any, CreateResult, Messages
 from ..base_provider import BaseProvider
+from ..helper import format_prompt
 
 
 class Theb(BaseProvider):
@@ -19,12 +19,11 @@ class Theb(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-
-        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
-        conversation += "\nassistant: "
-
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:
         auth = kwargs.get("auth", {
             "bearer_token":"free",
             "org_id":"theb",
@@ -54,7 +53,7 @@ class Theb(BaseProvider):
         req_rand = random.randint(100000000, 9999999999)
 
         json_data: dict[str, Any] = {
-            "text"   : conversation,
+            "text"   : format_prompt(messages),
             "category"   : "04f58f64a4aa4191a957b47290fee864",
             "model"      : "ee8d4f29cb7047f78cbe84313ed6ace8",
             "model_params": {
@@ -67,8 +66,13 @@ class Theb(BaseProvider):
             }
         }
 
-        response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
-            headers=headers, json=json_data, stream=True)
+        response = requests.post(
+            f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
+            headers=headers,
+            json=json_data,
+            stream=True,
+            proxies={"https": proxy}
+        )
         response.raise_for_status()
 
         content = ""
--
cgit v1.2.3
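
For reference, a minimal usage sketch (not part of the patch) of the `proxy` keyword this commit adds, calling one of the patched providers directly. The import path, proxy URL, and the idea of leaving `model` empty are illustrative assumptions, not taken from the diff.

# A minimal sketch, assuming Theb is re-exported from g4f.Provider.
from g4f.Provider import Theb

messages = [{"role": "user", "content": "Hello"}]

# With this patch, `proxy` is forwarded as requests.post(..., proxies={"https": proxy}).
for token in Theb.create_completion(
    model="",                        # the request body in the diff hard-codes its own model id
    messages=messages,
    stream=True,
    proxy="http://127.0.0.1:8080",   # hypothetical HTTP proxy address
):
    print(token, end="", flush=True)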