author     kqlio67 <kqlio67@users.noreply.github.com>  2024-10-12 20:34:04 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-10-12 20:34:04 +0200
commit     3119b8e37e73c958157ce8277bd7801202ab6962 (patch)
tree       48c44c500d637a3843ebb49c976346f529b3225b /g4f
parent     feat(/Provider/AmigoChat.py): add retry mechanism for API requests (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/Airforce.py                |   2
-rw-r--r--  g4f/Provider/Binjie.py                  |  65
-rw-r--r--  g4f/Provider/Blackbox.py                |   2
-rw-r--r--  g4f/Provider/ChatifyAI.py               |  10
-rw-r--r--  g4f/Provider/LiteIcoding.py             | 132
-rw-r--r--  g4f/Provider/__init__.py                |   2
-rw-r--r--  g4f/Provider/nexra/NexraAnimagineXL.py  |  66
-rw-r--r--  g4f/Provider/nexra/NexraBing.py         |   2
-rw-r--r--  g4f/Provider/nexra/NexraGeminiPro.py    |   2
-rw-r--r--  g4f/Provider/nexra/NexraLLaMA31.py      |  10
-rw-r--r--  g4f/Provider/nexra/NexraMidjourney.py   |   2
-rw-r--r--  g4f/Provider/nexra/NexraProdiaAI.py     |   2
-rw-r--r--  g4f/Provider/nexra/NexraSD15.py         |   2
-rw-r--r--  g4f/Provider/nexra/NexraSD21.py         |   2
-rw-r--r--  g4f/Provider/nexra/NexraSDLora.py       |   2
-rw-r--r--  g4f/Provider/nexra/NexraSDTurbo.py      |   2
-rw-r--r--  g4f/Provider/nexra/__init__.py          |   1
-rw-r--r--  g4f/models.py                           | 236
18 files changed, 70 insertions(+), 472 deletions(-)
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 3ec08b99..e7907cec 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -37,7 +37,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
'gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-1106',
- 'llama-3-70b-chat',
+ default_model,
'llama-3-70b-chat-turbo',
'llama-3-8b-chat',
'llama-3-8b-chat-turbo',
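Aside (not part of the commit): the hunk above replaces a duplicated string literal with the class attribute. A minimal sketch of the result, assuming Airforce.default_model is 'llama-3-70b-chat' (its definition sits outside this hunk):

    class Airforce:
        default_model = 'llama-3-70b-chat'  # assumed value; defined above the hunk
        models = [
            'gpt-3.5-turbo',
            'gpt-3.5-turbo-0125',
            'gpt-3.5-turbo-1106',
            default_model,  # was the literal 'llama-3-70b-chat'
            'llama-3-70b-chat-turbo',
        ]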
diff --git a/g4f/Provider/Binjie.py b/g4f/Provider/Binjie.py
deleted file mode 100644
index 90f9ec3c..00000000
--- a/g4f/Provider/Binjie.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from __future__ import annotations
-
-import random
-from ..requests import StreamSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
-
-
-class Binjie(AsyncGeneratorProvider):
- url = "https://chat18.aichatos8.com"
- working = True
- supports_gpt_4 = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- @staticmethod
- async def create_async_generator(
- model: str,
- messages: Messages,
- proxy: str = None,
- timeout: int = 120,
- **kwargs,
- ) -> AsyncResult:
- async with StreamSession(
- headers=_create_header(), proxies={"https": proxy}, timeout=timeout
- ) as session:
- payload = _create_payload(messages, **kwargs)
- async with session.post("https://api.binjie.fun/api/generateStream", json=payload) as response:
- response.raise_for_status()
- async for chunk in response.iter_content():
- if chunk:
- chunk = chunk.decode()
- if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
- raise RuntimeError("IP address is blocked by abuse detection.")
- yield chunk
-
-
-def _create_header():
- return {
- "accept" : "application/json, text/plain, */*",
- "content-type" : "application/json",
- "origin" : "https://chat18.aichatos8.com",
- "referer" : "https://chat18.aichatos8.com/"
- }
-
-
-def _create_payload(
- messages: Messages,
- system_message: str = "",
- user_id: int = None,
- **kwargs
-):
- if not user_id:
- user_id = random.randint(1690000544336, 2093025544336)
- return {
- "prompt": format_prompt(messages),
- "network": True,
- "system": system_message,
- "withoutContext": False,
- "stream": True,
- "userId": f"#/chat/{user_id}"
- }
-
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 3bc49dc4..5989145b 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -80,7 +80,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-4o": "gpt-4o",
"gemini-pro": "gemini-pro",
'claude-sonnet-3.5': "claude-sonnet-3.5",
- 'blackboxai-pro': "blackboxai-pro",
}
model_prefixes = {
@@ -107,7 +106,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
model_referers = {
"blackboxai": f"{url}/?model=blackboxai",
- "blackboxai-pro": f"{url}/?model=blackboxai-pro",
"gpt-4o": f"{url}/?model=gpt-4o",
"gemini-pro": f"{url}/?model=gemini-pro",
"claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5"
diff --git a/g4f/Provider/ChatifyAI.py b/g4f/Provider/ChatifyAI.py
index c5b4a104..a999afac 100644
--- a/g4f/Provider/ChatifyAI.py
+++ b/g4f/Provider/ChatifyAI.py
@@ -17,10 +17,18 @@ class ChatifyAI(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'llama-3.1'
models = [default_model]
+ model_aliases = {
+ "llama-3.1-8b": "llama-3.1",
+ }
@classmethod
def get_model(cls, model: str) -> str:
- return cls.default_model
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases.get(model, cls.default_model)
+ else:
+ return cls.default_model
@classmethod
async def create_async_generator(
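Aside (not part of the commit): the new get_model above resolves aliases before falling back to the default; NexraLLaMA31 below receives the identical change. A self-contained sketch of the lookup behavior:

    class ChatifyAI:
        default_model = 'llama-3.1'
        models = [default_model]
        model_aliases = {"llama-3.1-8b": "llama-3.1"}

        @classmethod
        def get_model(cls, model: str) -> str:
            if model in cls.models:
                return model
            # Unknown names fall back to the default rather than raising.
            return cls.model_aliases.get(model, cls.default_model)

    assert ChatifyAI.get_model("llama-3.1") == "llama-3.1"
    assert ChatifyAI.get_model("llama-3.1-8b") == "llama-3.1"
    assert ChatifyAI.get_model("gpt-4o") == "llama-3.1"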
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
deleted file mode 100644
index f349c85e..00000000
--- a/g4f/Provider/LiteIcoding.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from __future__ import annotations
-import base64
-import re
-from aiohttp import ClientSession, ClientResponseError
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://lite.icoding.ink"
- api_endpoint = "/api/v1/gpt/message"
- working = True
- supports_gpt_4 = True
- default_model = "gpt-4o"
- models = [
- 'gpt-4o',
- 'gpt-4-turbo',
- 'claude-3',
- 'claude-3.5',
- 'gemini-1.5',
- ]
-
- model_aliases = {
- "gpt-4o-mini": "gpt-4o",
- "gemini-pro": "gemini-1.5",
- }
-
- bearer_tokens = [
- "NWQ2OWNkMjcxYjE0NDIyNmFjMTE5OWIzYzg0OWE1NjY=",
- ]
- current_token_index = 0
-
- @classmethod
- def decode_token(cls, encoded_token: str) -> str:
- return base64.b64decode(encoded_token).decode('utf-8')
-
- @classmethod
- def get_next_bearer_token(cls):
- encoded_token = cls.bearer_tokens[cls.current_token_index]
- cls.current_token_index = (cls.current_token_index + 1) % len(cls.bearer_tokens)
- return cls.decode_token(encoded_token)
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- bearer_token = cls.get_next_bearer_token()
- headers = {
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.9",
- "Authorization": f"Bearer {bearer_token}",
- "Connection": "keep-alive",
- "Content-Type": "application/json;charset=utf-8",
- "DNT": "1",
- "Origin": cls.url,
- "Referer": f"{cls.url}/",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- "User-Agent": (
- "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
- "Chrome/126.0.0.0 Safari/537.36"
- ),
- "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- }
-
- data = {
- "model": model,
- "chatId": "-1",
- "messages": [
- {
- "role": msg["role"],
- "content": msg["content"],
- "time": msg.get("time", ""),
- "attachments": msg.get("attachments", []),
- }
- for msg in messages
- ],
- "plugins": [],
- "systemPrompt": "",
- "temperature": 0.5,
- }
-
- async with ClientSession(headers=headers) as session:
- try:
- async with session.post(
- f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy
- ) as response:
- response.raise_for_status()
- buffer = ""
- full_response = ""
-
- def decode_content(data):
- bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
- return bytes_array.decode('utf-8')
-
- async for chunk in response.content.iter_any():
- if chunk:
- buffer += chunk.decode()
- while "\n\n" in buffer:
- part, buffer = buffer.split("\n\n", 1)
- if part.startswith("data: "):
- content = part[6:].strip()
- if content and content != "[DONE]":
- content = content.strip('"')
- decoded_content = decode_content(content)
- full_response += decoded_content
- full_response = (
- full_response.replace('""', '')
- .replace('" "', ' ')
- .replace("\\n\\n", "\n\n")
- .replace("\\n", "\n")
- .replace('\\"', '"')
- .strip()
- )
- filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
- cleaned_response = filtered_response.strip().strip('"')
- yield cleaned_response
-
- except ClientResponseError as e:
- raise RuntimeError(
- f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}"
- ) from e
-
- except Exception as e:
- raise RuntimeError(f"Unexpected error: {str(e)}") from e
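Aside (not part of the commit): the removed provider's decode_content reverses a simple obfuscation in which each byte of the UTF-8 text is XOR-ed with 255 and sent as space-separated hex. A standalone illustration:

    def decode_content(data: str) -> str:
        # Each token is one hex byte; XOR with 255 undoes the inversion.
        return bytes(int(b, 16) ^ 255 for b in data.split()).decode('utf-8')

    # 'H' is 0x48 and 0x48 ^ 0xFF == 0xB7; 'i' is 0x69 and 0x69 ^ 0xFF == 0x96,
    # so the wire form of "Hi" is "b7 96".
    assert decode_content("b7 96") == "Hi"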
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 73a17e10..3d6539fc 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -22,7 +22,6 @@ from .Airforce import Airforce
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
-from .Binjie import Binjie
from .Blackbox import Blackbox
from .ChatGot import ChatGot
from .ChatGpt import ChatGpt
@@ -50,7 +49,6 @@ from .HuggingChat import HuggingChat
from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
-from .LiteIcoding import LiteIcoding
from .Local import Local
from .MagickPen import MagickPen
from .MetaAI import MetaAI
diff --git a/g4f/Provider/nexra/NexraAnimagineXL.py b/g4f/Provider/nexra/NexraAnimagineXL.py
deleted file mode 100644
index d6fbc629..00000000
--- a/g4f/Provider/nexra/NexraAnimagineXL.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...image import ImageResponse
-
-
-class NexraAnimagineXL(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra Animagine XL"
- url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
-
- default_model = 'animagine-xl'
- models = [default_model]
-
- @classmethod
- def get_model(cls, model: str) -> str:
- return cls.default_model
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- response: str = "url", # base64 or url
- **kwargs
- ) -> AsyncResult:
- # Retrieve the correct model to use
- model = cls.get_model(model)
-
- # Format the prompt from the messages
- prompt = messages[0]['content']
-
- headers = {
- "Content-Type": "application/json"
- }
- payload = {
- "prompt": prompt,
- "model": model,
- "response": response
- }
-
- async with ClientSession(headers=headers) as session:
- async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
- response.raise_for_status()
- text_data = await response.text()
-
- try:
- # Parse the JSON response
- json_start = text_data.find('{')
- json_data = text_data[json_start:]
- data = json.loads(json_data)
-
- # Check if the response contains images
- if 'images' in data and len(data['images']) > 0:
- image_url = data['images'][0]
- yield ImageResponse(image_url, prompt)
- else:
- yield ImageResponse("No images found in the response.", prompt)
- except json.JSONDecodeError:
- yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
index 02f3724d..716e9254 100644
--- a/g4f/Provider/nexra/NexraBing.py
+++ b/g4f/Provider/nexra/NexraBing.py
@@ -13,7 +13,7 @@ class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Bing"
url = "https://nexra.aryahcr.cc/documentation/bing/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
+ working = False
supports_gpt_4 = False
supports_stream = False
diff --git a/g4f/Provider/nexra/NexraGeminiPro.py b/g4f/Provider/nexra/NexraGeminiPro.py
index 651f7cb4..fb0b096b 100644
--- a/g4f/Provider/nexra/NexraGeminiPro.py
+++ b/g4f/Provider/nexra/NexraGeminiPro.py
@@ -11,7 +11,7 @@ class NexraGeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Gemini PRO"
url = "https://nexra.aryahcr.cc/documentation/gemini-pro/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = True
+ working = False
supports_stream = True
default_model = 'gemini-pro'
diff --git a/g4f/Provider/nexra/NexraLLaMA31.py b/g4f/Provider/nexra/NexraLLaMA31.py
index c67febb3..d461f2b2 100644
--- a/g4f/Provider/nexra/NexraLLaMA31.py
+++ b/g4f/Provider/nexra/NexraLLaMA31.py
@@ -17,10 +17,18 @@ class NexraLLaMA31(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'llama-3.1'
models = [default_model]
+ model_aliases = {
+ "llama-3.1-8b": "llama-3.1",
+ }
@classmethod
def get_model(cls, model: str) -> str:
- return cls.default_model
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases.get(model, cls.default_model)
+ else:
+ return cls.default_model
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/nexra/NexraMidjourney.py b/g4f/Provider/nexra/NexraMidjourney.py
index 3d6a4960..e43cb164 100644
--- a/g4f/Provider/nexra/NexraMidjourney.py
+++ b/g4f/Provider/nexra/NexraMidjourney.py
@@ -12,7 +12,7 @@ class NexraMidjourney(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Midjourney"
url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
+ working = False
default_model = 'midjourney'
models = [default_model]
diff --git a/g4f/Provider/nexra/NexraProdiaAI.py b/g4f/Provider/nexra/NexraProdiaAI.py
index 262558fd..9d82ab9b 100644
--- a/g4f/Provider/nexra/NexraProdiaAI.py
+++ b/g4f/Provider/nexra/NexraProdiaAI.py
@@ -12,7 +12,7 @@ class NexraProdiaAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Prodia AI"
url = "https://nexra.aryahcr.cc/documentation/prodia/en"
api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
+ working = False
default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
models = [
diff --git a/g4f/Provider/nexra/NexraSD15.py b/g4f/Provider/nexra/NexraSD15.py
index 410947df..03b35013 100644
--- a/g4f/Provider/nexra/NexraSD15.py
+++ b/g4f/Provider/nexra/NexraSD15.py
@@ -12,7 +12,7 @@ class NexraSD15(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Stable Diffusion 1.5"
url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
+ working = False
default_model = 'stablediffusion-1.5'
models = [default_model]
diff --git a/g4f/Provider/nexra/NexraSD21.py b/g4f/Provider/nexra/NexraSD21.py
index fc5c90d9..46cd6611 100644
--- a/g4f/Provider/nexra/NexraSD21.py
+++ b/g4f/Provider/nexra/NexraSD21.py
@@ -12,7 +12,7 @@ class NexraSD21(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Stable Diffusion 2.1"
url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
+ working = False
default_model = 'stablediffusion-2.1'
models = [default_model]
diff --git a/g4f/Provider/nexra/NexraSDLora.py b/g4f/Provider/nexra/NexraSDLora.py
index ad986507..a33afa04 100644
--- a/g4f/Provider/nexra/NexraSDLora.py
+++ b/g4f/Provider/nexra/NexraSDLora.py
@@ -12,7 +12,7 @@ class NexraSDLora(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Stable Diffusion Lora"
url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
+ working = False
default_model = 'sdxl-lora'
models = [default_model]
diff --git a/g4f/Provider/nexra/NexraSDTurbo.py b/g4f/Provider/nexra/NexraSDTurbo.py
index feb59f0b..da1428b8 100644
--- a/g4f/Provider/nexra/NexraSDTurbo.py
+++ b/g4f/Provider/nexra/NexraSDTurbo.py
@@ -12,7 +12,7 @@ class NexraSDTurbo(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Stable Diffusion Turbo"
url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- working = True
+ working = False
default_model = 'sdxl-turbo'
models = [default_model]
diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py
index d8b9218f..c2e6b2f6 100644
--- a/g4f/Provider/nexra/__init__.py
+++ b/g4f/Provider/nexra/__init__.py
@@ -1,4 +1,3 @@
-from .NexraAnimagineXL import NexraAnimagineXL
from .NexraBing import NexraBing
from .NexraBlackbox import NexraBlackbox
from .NexraChatGPT import NexraChatGPT
diff --git a/g4f/models.py b/g4f/models.py
index 6708022e..be07c6a7 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -9,7 +9,6 @@ from .Provider import (
Allyfy,
AmigoChat,
Bing,
- Binjie,
Blackbox,
ChatGpt,
Chatgpt4Online,
@@ -35,11 +34,8 @@ from .Provider import (
HuggingFace,
Koala,
Liaobots,
- LiteIcoding,
MagickPen,
MetaAI,
- NexraAnimagineXL,
- NexraBing,
NexraBlackbox,
NexraChatGPT,
NexraChatGPT4o,
@@ -50,15 +46,8 @@ from .Provider import (
NexraDalleMini,
NexraEmi,
NexraFluxPro,
- NexraGeminiPro,
NexraLLaMA31,
- NexraMidjourney,
- NexraProdiaAI,
NexraQwen,
- NexraSD15,
- NexraSD21,
- NexraSDLora,
- NexraSDTurbo,
OpenaiChat,
PerplexityLabs,
Pi,
@@ -68,7 +57,6 @@ from .Provider import (
ReplicateHome,
TeachAnything,
Upstage,
- You,
)
@@ -102,11 +90,9 @@ default = Model(
ReplicateHome,
Upstage,
Blackbox,
- Binjie,
Free2GPT,
MagickPen,
DeepInfraChat,
- LiteIcoding,
Airforce,
ChatHub,
ChatGptEs,
@@ -133,58 +119,45 @@ gpt_3 = Model(
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- Allyfy, NexraChatGPT, Airforce, DarkAI, Liaobots,
- ])
+ best_provider = IterListProvider([Allyfy, NexraChatGPT, Airforce, DarkAI, Liaobots])
)
# gpt-4
gpt_4o = Model(
name = 'gpt-4o',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- NexraChatGPT4o, ChatGptEs, AmigoChat, DarkAI, Liaobots, Airforce,
- OpenaiChat
- ])
+ best_provider = IterListProvider([NexraChatGPT4o, ChatGptEs, AmigoChat, DarkAI, Liaobots, Airforce, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- DDG, ChatGptEs, You, FreeNetfly, Pizzagpt, LiteIcoding, MagickPen, AmigoChat, Liaobots, Airforce, ChatgptFree, Koala,
- OpenaiChat, ChatGpt
- ])
+ best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, MagickPen, AmigoChat, Liaobots, Airforce, ChatgptFree, Koala, OpenaiChat, ChatGpt])
)
gpt_4_turbo = Model(
name = 'gpt-4-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- Liaobots, Airforce, Bing
- ])
+ best_provider = IterListProvider([Liaobots, Airforce, Bing])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'OpenAI',
- best_provider = IterListProvider([
- NexraBing, NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Binjie, Airforce, Chatgpt4Online, Bing, OpenaiChat,
- gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
- ])
+ best_provider = IterListProvider([NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Airforce, Chatgpt4Online, Bing, OpenaiChat])
)
# o1
o1 = Model(
name = 'o1',
base_provider = 'OpenAI',
- best_provider = IterListProvider([AmigoChat])
+ best_provider = AmigoChat
)
o1_mini = Model(
name = 'o1-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([AmigoChat])
+ best_provider = AmigoChat
)
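Aside (not part of the commit): many models.py hunks in this diff replace a single-element IterListProvider wrapper with the bare provider class, as here for o1 and o1-mini. Assuming IterListProvider's job is simply to try its providers in order (the class itself is not shown in this diff, and first_working below is a hypothetical stand-in for that behavior), a one-element list offers no fallback, so the two spellings select the same provider:

    class AmigoChat:  # hypothetical stand-in for the real provider class
        working = True

    class IterListProvider:  # assumed behavior: iterate providers in order
        def __init__(self, providers):
            self.providers = providers

        def first_working(self):
            return next(p for p in self.providers if p.working)

    # With a single entry there is nothing to fall back to, so the wrapper
    # adds indirection without changing which provider is used.
    assert IterListProvider([AmigoChat]).first_working() is AmigoChat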
@@ -229,17 +202,11 @@ llama_3_70b = Model(
best_provider = IterListProvider([ReplicateHome, Airforce, DeepInfra, Replicate])
)
-llama_3 = Model(
- name = "llama-3",
- base_provider = "Meta Llama",
- best_provider = IterListProvider([llama_3_8b.best_provider, llama_3_70b.best_provider])
-)
-
# llama 3.1
llama_3_1_8b = Model(
name = "llama-3.1-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, Airforce, PerplexityLabs])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, ChatHub, Cloudflare, NexraLLaMA31, Airforce, PerplexityLabs])
)
llama_3_1_70b = Model(
@@ -254,23 +221,17 @@ llama_3_1_405b = Model(
best_provider = IterListProvider([DeepInfraChat, Blackbox, AmigoChat, DarkAI, Airforce])
)
-llama_3_1 = Model(
- name = "llama-3.1",
- base_provider = "Meta Llama",
- best_provider = IterListProvider([NexraLLaMA31, ChatifyAI, llama_3_1_8b.best_provider, llama_3_1_70b.best_provider, llama_3_1_405b.best_provider,])
-)
-
# llama 3.2
llama_3_2_1b = Model(
name = "llama-3.2-1b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Cloudflare])
+ best_provider = Cloudflare
)
llama_3_2_3b = Model(
name = "llama-3.2-3b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Cloudflare])
+ best_provider = Cloudflare
)
llama_3_2_11b = Model(
@@ -285,22 +246,18 @@ llama_3_2_90b = Model(
best_provider = IterListProvider([AmigoChat, Airforce])
)
-llama_3_2 = Model(
- name = "llama-3.2",
- base_provider = "Meta Llama",
- best_provider = IterListProvider([llama_3_2_1b.best_provider, llama_3_2_3b.best_provider, llama_3_2_11b.best_provider, llama_3_2_90b.best_provider])
-)
+
# llamaguard
llamaguard_7b = Model(
name = "llamaguard-7b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
llamaguard_2_8b = Model(
name = "llamaguard-2-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
@@ -334,13 +291,13 @@ mistral_nemo = Model(
mixtral_8x7b_dpo = Model(
name = "mixtral-8x7b-dpo",
base_provider = "NousResearch",
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
yi_34b = Model(
name = "yi-34b",
base_provider = "NousResearch",
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
hermes_3 = Model(
@@ -374,7 +331,7 @@ phi_3_5_mini = Model(
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([GeminiPro, LiteIcoding, Blackbox, AIChatFree, GPROChat, NexraGeminiPro, AmigoChat, Liaobots, Airforce])
+ best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, AmigoChat, Liaobots, Airforce])
)
gemini_flash = Model(
@@ -386,7 +343,7 @@ gemini_flash = Model(
gemini = Model(
name = 'gemini',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([Gemini, gemini_flash.best_provider, gemini_pro.best_provider])
+ best_provider = Gemini
)
# gemma
@@ -405,16 +362,13 @@ gemma_2b_27b = Model(
gemma_2b = Model(
name = 'gemma-2b',
base_provider = 'Google',
- best_provider = IterListProvider([
- ReplicateHome, Airforce,
- gemma_2b_9b.best_provider, gemma_2b_27b.best_provider,
- ])
+ best_provider = IterListProvider([ReplicateHome, Airforce])
)
gemma_7b = Model(
name = 'gemma-7b',
base_provider = 'Google',
- best_provider = IterListProvider([Cloudflare])
+ best_provider = Cloudflare
)
# gemma 2
@@ -427,10 +381,7 @@ gemma_2_27b = Model(
gemma_2 = Model(
name = 'gemma-2',
base_provider = 'Google',
- best_provider = IterListProvider([
- ChatHub,
- gemma_2_27b.best_provider,
- ])
+ best_provider = ChatHub
)
@@ -441,15 +392,6 @@ claude_2_1 = Model(
best_provider = Liaobots
)
-claude_2 = Model(
- name = 'claude-2',
- base_provider = 'Anthropic',
- best_provider = IterListProvider([
- You,
- claude_2_1.best_provider,
- ])
-)
-
# claude 3
claude_3_opus = Model(
name = 'claude-3-opus',
@@ -469,14 +411,6 @@ claude_3_haiku = Model(
best_provider = IterListProvider([DDG, Airforce, Liaobots])
)
-claude_3 = Model(
- name = 'claude-3',
- base_provider = 'Anthropic',
- best_provider = IterListProvider([
- claude_3_opus.best_provider, claude_3_sonnet.best_provider, claude_3_haiku.best_provider
- ])
-)
-
# claude 3.5
claude_3_5_sonnet = Model(
name = 'claude-3.5-sonnet',
@@ -484,16 +418,6 @@ claude_3_5_sonnet = Model(
best_provider = IterListProvider([Blackbox, Airforce, AmigoChat, Liaobots])
)
-claude_3_5 = Model(
- name = 'claude-3.5',
- base_provider = 'Anthropic',
- best_provider = IterListProvider([
- LiteIcoding,
- claude_3_5_sonnet.best_provider
- ])
-)
-
-
### Reka AI ###
reka_core = Model(
@@ -513,7 +437,7 @@ blackboxai = Model(
blackboxai_pro = Model(
name = 'blackboxai-pro',
base_provider = 'Blackbox AI',
- best_provider = IterListProvider([Blackbox])
+ best_provider = Blackbox
)
@@ -537,7 +461,7 @@ command_r_plus = Model(
sparkdesk_v1_1 = Model(
name = 'sparkdesk-v1.1',
base_provider = 'iFlytek',
- best_provider = IterListProvider([FreeChatgpt])
+ best_provider = FreeChatgpt
)
@@ -589,7 +513,7 @@ qwen_2_72b = Model(
qwen = Model(
name = 'qwen',
base_provider = 'Qwen',
- best_provider = IterListProvider([NexraQwen, qwen_1_5_0_5b.best_provider, qwen_1_5_14b.best_provider, qwen_1_5_72b.best_provider, qwen_1_5_110b.best_provider, qwen_1_5_1_8b.best_provider, qwen_2_72b.best_provider])
+ best_provider = NexraQwen
)
@@ -747,7 +671,7 @@ sonar_chat = Model(
mythomax_l2_13b = Model(
name = 'mythomax-l2-13b',
base_provider = 'Gryphe',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
@@ -755,7 +679,7 @@ mythomax_l2_13b = Model(
cosmosrp = Model(
name = 'cosmosrp',
base_provider = 'Pawan',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
@@ -763,7 +687,7 @@ cosmosrp = Model(
german_7b = Model(
name = 'german-7b',
base_provider = 'TheBloke',
- best_provider = IterListProvider([Cloudflare])
+ best_provider = Cloudflare
)
@@ -771,7 +695,7 @@ german_7b = Model(
tinyllama_1_1b = Model(
name = 'tinyllama-1.1b',
base_provider = 'Tinyllama',
- best_provider = IterListProvider([Cloudflare])
+ best_provider = Cloudflare
)
@@ -779,7 +703,7 @@ tinyllama_1_1b = Model(
cybertron_7b = Model(
name = 'cybertron-7b',
base_provider = 'Fblgit',
- best_provider = IterListProvider([Cloudflare])
+ best_provider = Cloudflare
)
@@ -789,64 +713,25 @@ cybertron_7b = Model(
#############
### Stability AI ###
-sdxl_lora = Model(
- name = 'sdxl-lora',
- base_provider = 'Stability AI',
- best_provider = IterListProvider([NexraSDLora])
-
-)
-
-sdxl_turbo = Model(
- name = 'sdxl-turbo',
- base_provider = 'Stability AI',
- best_provider = IterListProvider([NexraSDTurbo])
-
-)
-
sdxl = Model(
name = 'sdxl',
base_provider = 'Stability AI',
- best_provider = IterListProvider([
- ReplicateHome, NexraSD21, DeepInfraImage,
- sdxl_lora.best_provider, sdxl_turbo.best_provider,
- ])
-
-)
-
-sd_1_5 = Model(
- name = 'sd-1.5',
- base_provider = 'Stability AI',
- best_provider = IterListProvider([NexraSD15])
-
-)
-
-sd_2_1 = Model(
- name = 'sd-2.1',
- base_provider = 'Stability AI',
- best_provider = IterListProvider([NexraSD21])
+ best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
)
sd_3 = Model(
name = 'sd-3',
base_provider = 'Stability AI',
- best_provider = IterListProvider([ReplicateHome])
-
-)
-
-sd = Model(
- name = 'sd',
- base_provider = 'Stability AI',
- best_provider = IterListProvider([sd_1_5.best_provider, sd_2_1.best_provider, sd_3.best_provider])
+ best_provider = ReplicateHome
)
-
### Playground ###
playground_v2_5 = Model(
name = 'playground-v2.5',
base_provider = 'Playground AI',
- best_provider = IterListProvider([ReplicateHome])
+ best_provider = ReplicateHome
)
@@ -876,42 +761,42 @@ flux_realism = Model(
flux_anime = Model(
name = 'flux-anime',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
flux_3d = Model(
name = 'flux-3d',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
flux_disney = Model(
name = 'flux-disney',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
flux_pixel = Model(
name = 'flux-pixel',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
flux_4o = Model(
name = 'flux-4o',
base_provider = 'Flux AI',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
flux_schnell = Model(
name = 'flux-schnell',
base_provider = 'Flux AI',
- best_provider = IterListProvider([ReplicateHome])
+ best_provider = ReplicateHome
)
@@ -920,59 +805,43 @@ flux_schnell = Model(
dalle_2 = Model(
name = 'dalle-2',
base_provider = 'OpenAI',
- best_provider = IterListProvider([NexraDallE2])
+ best_provider = NexraDallE2
)
dalle_3 = Model(
name = 'dalle-3',
base_provider = 'OpenAI',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
dalle = Model(
name = 'dalle',
base_provider = 'OpenAI',
- best_provider = IterListProvider([NexraDallE, dalle_2.best_provider, dalle_3.best_provider])
+ best_provider = NexraDallE
)
dalle_mini = Model(
name = 'dalle-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([NexraDalleMini])
+ best_provider = NexraDalleMini
)
-### Cagliostro Research Lab ###
-animagine_xl = Model(
- name = 'animagine-xl',
- base_provider = 'Cagliostro Research Lab',
- best_provider = IterListProvider([NexraAnimagineXL])
-
-)
-
-### Midjourney ###
-midjourney = Model(
- name = 'midjourney',
- base_provider = 'Midjourney',
- best_provider = IterListProvider([NexraMidjourney])
-
-)
-
### Other ###
emi = Model(
name = 'emi',
base_provider = '',
- best_provider = IterListProvider([NexraEmi])
+ best_provider = NexraEmi
)
any_dark = Model(
name = 'any-dark',
base_provider = '',
- best_provider = IterListProvider([Airforce])
+ best_provider = Airforce
)
@@ -1015,18 +884,15 @@ class ModelUtils:
'llama-2-13b': llama_2_13b,
# llama-3
-'llama-3': llama_3,
'llama-3-8b': llama_3_8b,
'llama-3-70b': llama_3_70b,
# llama-3.1
-'llama-3.1': llama_3_1,
'llama-3.1-8b': llama_3_1_8b,
'llama-3.1-70b': llama_3_1_70b,
'llama-3.1-405b': llama_3_1_405b,
# llama-3.2
-'llama-3.2': llama_3_2,
'llama-3.2-1b': llama_3_2_1b,
'llama-3.2-3b': llama_3_2_3b,
'llama-3.2-11b': llama_3_2_11b,
@@ -1074,17 +940,14 @@ class ModelUtils:
### Anthropic ###
-'claude-2': claude_2,
'claude-2.1': claude_2_1,
# claude 3
-'claude-3': claude_3,
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-haiku': claude_3_haiku,
# claude 3.5
-'claude-3.5': claude_3_5,
'claude-3.5-sonnet': claude_3_5_sonnet,
@@ -1213,11 +1076,6 @@ class ModelUtils:
### Stability AI ###
'sdxl': sdxl,
-'sdxl-lora': sdxl_lora,
-'sdxl-turbo': sdxl_turbo,
-'sd': sd,
-'sd-1.5': sd_1_5,
-'sd-2.1': sd_2_1,
'sd-3': sd_3,
@@ -1244,14 +1102,6 @@ class ModelUtils:
'dalle-mini': dalle_mini,
-### Cagliostro Research Lab ###
-'animagine-xl': animagine_xl,
-
-
-### Midjourney ###
-'midjourney': midjourney,
-
-
### Other ###
'emi': emi,
'any-dark': any_dark,