author    | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-11 08:33:30 +0200
committer | kqlio67 <kqlio67@users.noreply.github.com> | 2024-10-11 08:33:30 +0200
commit    | a9bc67362f2be529fe9165ebb13347195ba1ddcf (patch)
tree      | 1a91836eaa94f14c18ad5d55f687ff8a2118c357 /g4f/Provider/nexra/NexraChatGptV2.py
parent    | feat(g4f/Provider/__init__.py): add new providers and update imports (diff)
Diffstat (limited to 'g4f/Provider/nexra/NexraChatGptV2.py')
-rw-r--r-- | g4f/Provider/nexra/NexraChatGptV2.py | 93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/g4f/Provider/nexra/NexraChatGptV2.py b/g4f/Provider/nexra/NexraChatGptV2.py
new file mode 100644
index 00000000..c0faf93a
--- /dev/null
+++ b/g4f/Provider/nexra/NexraChatGptV2.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraChatGptV2(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra ChatGPT v2"
+    url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
+
+    default_model = 'chatgpt'
+    models = [default_model]
+
+    model_aliases = {
+        "gpt-4": "chatgpt",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        stream: bool = False,
+        markdown: bool = False,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            "Content-Type": "application/json"
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
+                ],
+                "stream": stream,
+                "markdown": markdown,
+                "model": model
+            }
+
+            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+
+                if stream:
+                    # Streamed response handling (stream=True)
+                    collected_message = ""
+                    async for chunk in response.content.iter_any():
+                        if chunk:
+                            decoded_chunk = chunk.decode().strip().split("\x1e")
+                            for part in decoded_chunk:
+                                if part:
+                                    message_data = json.loads(part)
+
+                                    # Collect messages until 'finish': true
+                                    if 'message' in message_data and message_data['message']:
+                                        collected_message = message_data['message']
+
+                                    # When finish is true, yield the final collected message
+                                    if message_data.get('finish', False):
+                                        yield collected_message
+                                        return
+                else:
+                    # Non-streamed response handling (stream=False)
+                    response_data = await response.json(content_type=None)
+
+                    # Yield the message directly from the response
+                    if 'message' in response_data and response_data['message']:
+                        yield response_data['message']
+                        return
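
A note on model resolution: get_model maps whatever name the caller passes onto the provider's single upstream model. A worked example of the three branches (illustration only, not part of the commit; the import path is inferred from the file path above):

    from g4f.Provider.nexra.NexraChatGptV2 import NexraChatGptV2  # path assumed from the diff

    NexraChatGptV2.get_model("chatgpt")  # listed in models         -> "chatgpt"
    NexraChatGptV2.get_model("gpt-4")    # mapped via model_aliases -> "chatgpt"
    NexraChatGptV2.get_model("mistral")  # unknown, falls back      -> "chatgpt" (default_model)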
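
The streaming branch assumes the API returns a sequence of JSON records separated by the ASCII record separator (0x1e), where each record's "message" field carries the full text accumulated so far and "finish": true marks the last record. A minimal standalone sketch of that parsing logic, using a hypothetical response body:

    import json

    # Hypothetical raw stream body, for illustration only: two JSON records
    # delimited by "\x1e", the framing the streaming branch above expects.
    raw = b'{"message": "Hel", "finish": false}\x1e{"message": "Hello!", "finish": true}'

    final_message = ""
    for part in raw.decode().strip().split("\x1e"):
        if part:
            record = json.loads(part)
            if record.get("message"):
                final_message = record["message"]  # each record replaces the previous text
            if record.get("finish", False):
                break

    print(final_message)  # -> "Hello!"

This is why the provider yields only once: intermediate records are superseded by later ones, and the final record's message is the complete response.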
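
For context, a minimal sketch of how the provider might be driven end to end, assuming the repository's package layout (g4f.Provider.nexra) and the usual g4f Messages shape of role/content dicts; neither is confirmed by this diff alone:

    import asyncio
    from g4f.Provider.nexra.NexraChatGptV2 import NexraChatGptV2  # path assumed from the diff

    async def main():
        messages = [{"role": "user", "content": "Say hello in one word."}]
        # stream=True exercises the "\x1e"-framed branch; the generator yields
        # the collected message once a record arrives with "finish": true.
        async for text in NexraChatGptV2.create_async_generator(
            model="gpt-4",  # resolved to "chatgpt" via model_aliases
            messages=messages,
            stream=True,
        ):
            print(text)

    asyncio.run(main())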