From 3430b04f870d982d7fba34e3b9d6e5cf3bd3b847 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sat, 7 Oct 2023 19:10:26 +0200
Subject: Remove Aivvm from working providers

---
 g4f/Provider/Aivvm.py               | 68 ------------------------------------
 g4f/Provider/__init__.py            |  1 -
 g4f/Provider/deprecated/Aivvm.py    | 66 +++++++++++++++++++++++++++++++++++
 g4f/Provider/deprecated/__init__.py |  3 +-
 g4f/models.py                       | 24 ++++++-------
 5 files changed, 78 insertions(+), 84 deletions(-)
 delete mode 100644 g4f/Provider/Aivvm.py
 create mode 100644 g4f/Provider/deprecated/Aivvm.py

diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
deleted file mode 100644
index 1e780953..00000000
--- a/g4f/Provider/Aivvm.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
-
-# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
-models = {
-    'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
-    'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
-    'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
-    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
-    'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
-    'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
-    'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
-    'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
-}
-
-class Aivvm(AsyncGeneratorProvider):
-    url = 'https://chat.aivvm.com'
-    supports_stream = True
-    working = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        stream: bool,
-        timeout: int = 30,
-        **kwargs
-    ) -> AsyncGenerator:
-        if not model:
-            model = "gpt-3.5-turbo"
-        elif model not in models:
-            raise ValueError(f"Model is not supported: {model}")
-
-        json_data = {
-            "model" : models[model],
-            "messages" : messages,
-            "key" : "",
-            "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
-            "temperature" : kwargs.get("temperature", 0.7)
-        }
-        headers = {
-            "Accept": "*/*",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/",
-        }
-        async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
-            async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
-                response.raise_for_status()
-                async for chunk in response.iter_content():
-                    yield chunk.decode()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ('model', 'str'),
-            ('messages', 'list[dict[str, str]]'),
-            ('stream', 'bool'),
-            ('temperature', 'float'),
-        ]
-        param = ', '.join([': '.join(p) for p in params])
-        return f'g4f.provider.{cls.__name__} supports: ({param})'
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 697f6185..7609744e 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -6,7 +6,6 @@ from .Aichat import Aichat
 from .Ails import Ails
 from .AItianhu import AItianhu
 from .AItianhuSpace import AItianhuSpace
-from .Aivvm import Aivvm
 from .Bing import Bing
 from .ChatBase import ChatBase
 from .ChatForAi import ChatForAi
diff --git a/g4f/Provider/deprecated/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
new file mode 100644
index 00000000..bceb6faf
--- /dev/null
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncGenerator
+
+# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
+models = {
+    'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
+    'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
+    'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
+    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
+    'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
+    'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
+    'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
+    'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
+}
+
+class Aivvm(AsyncGeneratorProvider):
+    url = 'https://chat.aivvm.com'
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool,
+        timeout: int = 30,
+        **kwargs
+    ) -> AsyncGenerator:
+        if not model:
+            model = "gpt-3.5-turbo"
+        elif model not in models:
+            raise ValueError(f"Model is not supported: {model}")
+
+        json_data = {
+            "model" : models[model],
+            "messages" : messages,
+            "key" : "",
+            "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
+            "temperature" : kwargs.get("temperature", 0.7)
+        }
+        headers = {
+            "Accept": "*/*",
+            "Origin": cls.url,
+            "Referer": f"{cls.url}/",
+        }
+        async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
+            async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    yield chunk.decode()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ('model', 'str'),
+            ('messages', 'list[dict[str, str]]'),
+            ('stream', 'bool'),
+            ('temperature', 'float'),
+        ]
+        param = ', '.join([': '.join(p) for p in params])
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 5c66c87f..d6b93e8d 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -11,4 +11,5 @@ from .Equing import Equing
 from .Wuguokai import Wuguokai
 from .V50 import V50
 from .FastGpt import FastGpt
-from .ChatgptLogin import ChatgptLogin
\ No newline at end of file
+from .ChatgptLogin import ChatgptLogin
+from .Aivvm import Aivvm
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index a91a3df4..ddd39993 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -20,7 +20,6 @@ from .Provider import (
     DeepAi,
     Aichat,
     AiAsk,
-    Aivvm,
     GptGo,
     Ylokh,
     Bard,
@@ -44,7 +43,7 @@ default = Model(
         Yqcloud, # Answers short questions in chinese
         ChatBase, # Don't want to answer creatively
         ChatgptDuo, # Include search results
-        Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
+        Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
     ])
 )
 
@@ -53,7 +52,7 @@ gpt_35_long = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        AiAsk, Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
+        AiAsk, Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
         FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud
     ])
 )
@@ -63,7 +62,7 @@ gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh
+        DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh
     ])
 )
 
@@ -167,26 +166,23 @@ gpt_35_turbo_16k_0613 = Model(
 
 gpt_35_turbo_0613 = Model(
     name = 'gpt-3.5-turbo-0613',
-    base_provider = 'openai',
-    best_provider = RetryProvider([
-        Aivvm, ChatgptLogin
-    ])
+    base_provider = 'openai'
 )
 
 gpt_4_0613 = Model(
     name = 'gpt-4-0613',
-    base_provider = 'openai',
-    best_provider = Aivvm)
+    base_provider = 'openai'
+)
 
 gpt_4_32k = Model(
     name = 'gpt-4-32k',
-    base_provider = 'openai',
-    best_provider = Aivvm)
+    base_provider = 'openai'
+)
 
 gpt_4_32k_0613 = Model(
     name = 'gpt-4-32k-0613',
-    base_provider = 'openai',
-    best_provider = Aivvm)
+    base_provider = 'openai'
+)
 
 text_ada_001 = Model(
     name = 'text-ada-001',
--
cgit v1.2.3
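
The moved module is unchanged apart from the dropped working and supports_stream flags and the adjusted relative imports, so it remains importable from the deprecated package. Below is a minimal usage sketch, not part of the patch, assuming the chat.aivvm.com endpoint still answers; the message content is illustrative:

    import asyncio

    from g4f.Provider.deprecated import Aivvm

    async def main():
        # create_async_generator is a classmethod; model, messages and stream
        # are the arguments required by the signature shown in the diff above.
        async for chunk in Aivvm.create_async_generator(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],  # illustrative message
            stream=True,
        ):
            print(chunk, end="")

    asyncio.run(main())

Because the provider was removed from every RetryProvider list in g4f/models.py, it is never selected automatically after this commit and has to be driven explicitly as above.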