From 68c7a92ee269131e4aada81fdbc994466ab38b57 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 16 Dec 2024 00:58:45 +0100
Subject: Hide api_key fields from disabled providers

---
 g4f/Provider/Blackbox2.py             | 21 ++++++++++-----------
 g4f/Provider/needs_auth/Cerebras.py   | 18 ++++++++++--------
 g4f/Provider/needs_auth/OpenaiChat.py |  2 +-
 3 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/g4f/Provider/Blackbox2.py b/g4f/Provider/Blackbox2.py
index ce949b8b..f27a2559 100644
--- a/g4f/Provider/Blackbox2.py
+++ b/g4f/Provider/Blackbox2.py
@@ -6,7 +6,7 @@ import re
 import json
 from pathlib import Path
 from aiohttp import ClientSession
-from typing import AsyncGenerator
+from typing import AsyncIterator
 
 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
@@ -21,12 +21,12 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-70b": "https://www.blackbox.ai/api/improve-prompt",
         "flux": "https://www.blackbox.ai/api/image-generator"
     }
-    
+
     working = True
     supports_system_message = True
     supports_message_history = True
     supports_stream = False
-    
+
     default_model = 'llama-3.1-70b'
     chat_models = ['llama-3.1-70b']
     image_models = ['flux']
@@ -97,15 +97,14 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         prompt: str = None,
         proxy: str = None,
-        prompt: str = None,
         max_retries: int = 3,
         delay: int = 1,
         max_tokens: int = None,
         **kwargs
-    ) -> AsyncGenerator[str, None]:
+    ) -> AsyncResult:
         if not model:
             model = cls.default_model
-        
+
         if model in cls.chat_models:
             async for result in cls._generate_text(model, messages, proxy, max_retries, delay, max_tokens):
                 yield result
@@ -125,13 +124,13 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
         max_retries: int = 3,
         delay: int = 1,
         max_tokens: int = None,
-    ) -> AsyncGenerator[str, None]:
+    ) -> AsyncIterator[str]:
         headers = cls._get_headers()
 
         async with ClientSession(headers=headers) as session:
             license_key = await cls._get_license_key(session)
             api_endpoint = cls.api_endpoints[model]
-            
+
             data = {
                 "messages": messages,
                 "max_tokens": max_tokens,
@@ -162,7 +161,7 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         prompt: str,
         proxy: str = None
-    ) -> AsyncGenerator[ImageResponse, None]:
+    ) -> AsyncIterator[ImageResponse]:
         headers = cls._get_headers()
         api_endpoint = cls.api_endpoints[model]
 
@@ -170,11 +169,11 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
             data = {
                 "query": prompt
             }
-        
+
             async with session.post(api_endpoint, headers=headers, json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 response_data = await response.json()
-            
+
                 if 'markdown' in response_data:
                     image_url = response_data['markdown'].split('(')[1].split(')')[0]
                     yield ImageResponse(images=image_url, alt=prompt)
diff --git a/g4f/Provider/needs_auth/Cerebras.py b/g4f/Provider/needs_auth/Cerebras.py
index df34db0e..86b2dcbd 100644
--- a/g4f/Provider/needs_auth/Cerebras.py
+++ b/g4f/Provider/needs_auth/Cerebras.py
@@ -16,6 +16,7 @@ class Cerebras(OpenaiAPI):
     models = [
         "llama3.1-70b",
         "llama3.1-8b",
+        "llama-3.3-70b"
     ]
     model_aliases = {"llama-3.1-70b": "llama3.1-70b", "llama-3.1-8b": "llama3.1-8b"}
 
@@ -29,14 +30,15 @@ class Cerebras(OpenaiAPI):
         cookies: Cookies = None,
         **kwargs
     ) -> AsyncResult:
-        if api_key is None and cookies is None:
-            cookies = get_cookies(".cerebras.ai")
-            async with ClientSession(cookies=cookies) as session:
-                async with session.get("https://inference.cerebras.ai/api/auth/session") as response:
-                    raise_for_status(response)
-                    data = await response.json()
-                    if data:
-                        api_key = data.get("user", {}).get("demoApiKey")
+        if api_key is None:
+            if cookies is None:
+                cookies = get_cookies(".cerebras.ai")
+            async with ClientSession(cookies=cookies) as session:
+                async with session.get("https://inference.cerebras.ai/api/auth/session") as response:
+                    await raise_for_status(response)
+                    data = await response.json()
+                    if data:
+                        api_key = data.get("user", {}).get("demoApiKey")
         async for chunk in super().create_async_generator(
             model, messages,
             api_base=api_base,
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 01976198..0e25a28d 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -504,7 +504,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         await cls.login()
         async with StreamSession(
             impersonate="chrome",
-            timeout=900
+            timeout=0
         ) as session:
             async with session.get(
                 f"{cls.url}/backend-api/synthesize",
-- 
cgit v1.2.3