author    | Tekky <98614666+xtekky@users.noreply.github.com> | 2024-06-09 15:05:14 +0200
committer | GitHub <noreply@github.com>                      | 2024-06-09 15:05:14 +0200
commit    | c91fef3eb33fc6a5019e0ed925c426079ba91d98 (patch)
tree      | 27b7e42492ec5de1f0bca96983b99404f7a3f44d /g4f
parent    | Merge pull request #2044 from eienmojiki206/main (diff)
parent    | Update URL For GeminiProChat (diff)
Diffstat (limited to 'g4f')
-rw-r--r-- | g4f/Provider/GeminiProChat.py | 41
1 file changed, 26 insertions, 15 deletions
diff --git a/g4f/Provider/GeminiProChat.py b/g4f/Provider/GeminiProChat.py
index 679242f3..b2bf0ff6 100644
--- a/g4f/Provider/GeminiProChat.py
+++ b/g4f/Provider/GeminiProChat.py
@@ -2,16 +2,18 @@ from __future__ import annotations
 
 import time
 from hashlib import sha256
-from aiohttp import ClientSession, BaseConnector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from aiohttp import BaseConnector, ClientSession
+
 from ..errors import RateLimitError
 from ..requests import raise_for_status
 from ..requests.aiohttp import get_connector
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
 
 
 class GeminiProChat(AsyncGeneratorProvider):
-    url = "https://gemini-chatbot-sigma.vercel.app"
+    url = "https://www.chatgemini.net/"
     working = True
     supports_message_history = True
 
@@ -22,7 +24,7 @@ class GeminiProChat(AsyncGeneratorProvider):
         messages: Messages,
         proxy: str = None,
         connector: BaseConnector = None,
-        **kwargs
+        **kwargs,
     ) -> AsyncResult:
         headers = {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
@@ -38,26 +40,35 @@ class GeminiProChat(AsyncGeneratorProvider):
             "Connection": "keep-alive",
             "TE": "trailers",
         }
-        async with ClientSession(connector=get_connector(connector, proxy), headers=headers) as session:
+        async with ClientSession(
+            connector=get_connector(connector, proxy), headers=headers
+        ) as session:
             timestamp = int(time.time() * 1e3)
             data = {
-                "messages":[{
-                    "role": "model" if message["role"] == "assistant" else "user",
-                    "parts": [{"text": message["content"]}]
-                } for message in messages],
+                "messages": [
+                    {
+                        "role": "model" if message["role"] == "assistant" else "user",
+                        "parts": [{"text": message["content"]}],
+                    }
+                    for message in messages
+                ],
                 "time": timestamp,
                 "pass": None,
                 "sign": generate_signature(timestamp, messages[-1]["content"]),
             }
-            async with session.post(f"{cls.url}/api/generate", json=data, proxy=proxy) as response:
+            async with session.post(
+                f"{cls.url}/api/generate", json=data, proxy=proxy
+            ) as response:
                 if response.status == 500:
                     if "Quota exceeded" in await response.text():
-                        raise RateLimitError(f"Response {response.status}: Rate limit reached")
+                        raise RateLimitError(
+                            f"Response {response.status}: Rate limit reached"
+                        )
                 await raise_for_status(response)
                 async for chunk in response.content.iter_any():
                     yield chunk.decode(errors="ignore")
-
+
+
 def generate_signature(time: int, text: str, secret: str = ""):
-    message = f'{time}:{text}:{secret}';
+    message = f"{time}:{text}:{secret}"
     return sha256(message.encode()).hexdigest()
-
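For reference, the payload format and signature scheme used by this provider (and kept by the patch above) can be exercised outside the aiohttp session. The sketch below is a minimal, synchronous reproduction of the request body built in create_async_generator: the example messages are invented, the empty secret mirrors the default in generate_signature, and the endpoint path /api/generate is taken from the diff.

```python
# Minimal sketch of the request body GeminiProChat sends to f"{url}/api/generate".
# Example messages are made up; field names and the signature scheme come from the patch.
import time
from hashlib import sha256


def generate_signature(time: int, text: str, secret: str = "") -> str:
    # sha256 over "<timestamp>:<last message text>:<secret>", hex-encoded,
    # exactly as in the provider (the parameter shadows the time module only locally).
    message = f"{time}:{text}:{secret}"
    return sha256(message.encode()).hexdigest()


messages = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi, how can I help?"},
    {"role": "user", "content": "Explain SHA-256 briefly."},
]

timestamp = int(time.time() * 1e3)  # milliseconds, as in the provider
payload = {
    "messages": [
        {
            # the upstream API expects "model" where OpenAI-style messages say "assistant"
            "role": "model" if m["role"] == "assistant" else "user",
            "parts": [{"text": m["content"]}],
        }
        for m in messages
    ],
    "time": timestamp,
    "pass": None,
    # signed over the last message's text only
    "sign": generate_signature(timestamp, messages[-1]["content"]),
}

print(payload["sign"])
```

In the provider itself this payload is POSTed with aiohttp, a 500 response containing "Quota exceeded" is mapped to RateLimitError, and the response body is streamed back chunk by chunk.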