summaryrefslogtreecommitdiffstats
path: root/g4f
diff options
context:
space:
mode:
authorabc <98614666+xtekky@users.noreply.github.com>2023-07-16 21:31:51 +0200
committerabc <98614666+xtekky@users.noreply.github.com>2023-07-16 21:31:51 +0200
commite9467297369dd4f429c084bd1d034f3b1b63e7c2 (patch)
treea8ef4184e60e226baf2dab207225ce8f52502c38 /g4f
parentminor fixes (diff)
downloadgpt4free-e9467297369dd4f429c084bd1d034f3b1b63e7c2.tar
gpt4free-e9467297369dd4f429c084bd1d034f3b1b63e7c2.tar.gz
gpt4free-e9467297369dd4f429c084bd1d034f3b1b63e7c2.tar.bz2
gpt4free-e9467297369dd4f429c084bd1d034f3b1b63e7c2.tar.lz
gpt4free-e9467297369dd4f429c084bd1d034f3b1b63e7c2.tar.xz
gpt4free-e9467297369dd4f429c084bd1d034f3b1b63e7c2.tar.zst
gpt4free-e9467297369dd4f429c084bd1d034f3b1b63e7c2.zip
Diffstat (limited to 'g4f')
-rw-r--r--g4f/Provider/Provider.py1
-rw-r--r--g4f/Provider/Providers/AItianhu.py2
-rw-r--r--g4f/Provider/Providers/Acytoo.py1
-rw-r--r--g4f/Provider/Providers/AiService.py1
-rw-r--r--g4f/Provider/Providers/Aichat.py1
-rw-r--r--g4f/Provider/Providers/Ails.py2
-rw-r--r--g4f/Provider/Providers/Bard.py2
-rw-r--r--g4f/Provider/Providers/Bing.py1
-rw-r--r--g4f/Provider/Providers/BingHuan.py2
-rw-r--r--g4f/Provider/Providers/ChatgptAi.py2
-rw-r--r--g4f/Provider/Providers/ChatgptLogin.py2
-rw-r--r--g4f/Provider/Providers/DFEHub.py62
-rw-r--r--g4f/Provider/Providers/DeepAi.py2
-rw-r--r--g4f/Provider/Providers/EasyChat.py63
-rw-r--r--g4f/Provider/Providers/Forefront.py2
-rw-r--r--g4f/Provider/Providers/GetGpt.py2
-rw-r--r--g4f/Provider/Providers/H2o.py4
-rw-r--r--g4f/Provider/Providers/Liaobots.py1
-rw-r--r--g4f/Provider/Providers/Lockchat.py1
-rw-r--r--g4f/Provider/Providers/Theb.py1
-rw-r--r--g4f/Provider/Providers/Vercel.py3
-rw-r--r--g4f/Provider/Providers/Wewordle.py2
-rw-r--r--g4f/Provider/Providers/You.py1
-rw-r--r--g4f/Provider/Providers/Yqcloud.py1
-rw-r--r--g4f/Provider/__init__.py2
-rw-r--r--g4f/__init__.py5
-rw-r--r--g4f/models.py31
27 files changed, 136 insertions, 64 deletions
diff --git a/g4f/Provider/Provider.py b/g4f/Provider/Provider.py
index 12c23333..f123becd 100644
--- a/g4f/Provider/Provider.py
+++ b/g4f/Provider/Provider.py
@@ -5,6 +5,7 @@ url = None
model = None
supports_stream = False
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
return
diff --git a/g4f/Provider/Providers/AItianhu.py b/g4f/Provider/Providers/AItianhu.py
index d3e6a45f..5a151e1e 100644
--- a/g4f/Provider/Providers/AItianhu.py
+++ b/g4f/Provider/Providers/AItianhu.py
@@ -6,6 +6,8 @@ url = "https://www.aitianhu.com/api/chat-process"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
base = ''
diff --git a/g4f/Provider/Providers/Acytoo.py b/g4f/Provider/Providers/Acytoo.py
index 06083eb5..4f40eac2 100644
--- a/g4f/Provider/Providers/Acytoo.py
+++ b/g4f/Provider/Providers/Acytoo.py
@@ -6,6 +6,7 @@ url = "https://chat.acytoo.com/api/completions"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
base = ''
diff --git a/g4f/Provider/Providers/AiService.py b/g4f/Provider/Providers/AiService.py
index 8d475118..0f9d9c47 100644
--- a/g4f/Provider/Providers/AiService.py
+++ b/g4f/Provider/Providers/AiService.py
@@ -6,6 +6,7 @@ url = "https://aiservice.vercel.app/api/chat/answer"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
+working = True
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/Providers/Aichat.py b/g4f/Provider/Providers/Aichat.py
index e4fde8c3..919486f2 100644
--- a/g4f/Provider/Providers/Aichat.py
+++ b/g4f/Provider/Providers/Aichat.py
@@ -5,6 +5,7 @@ url = 'https://chat-gpt.org/chat'
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
+working = True
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
base = ''
diff --git a/g4f/Provider/Providers/Ails.py b/g4f/Provider/Providers/Ails.py
index 54d9e066..f6174f85 100644
--- a/g4f/Provider/Providers/Ails.py
+++ b/g4f/Provider/Providers/Ails.py
@@ -13,6 +13,8 @@ url: str = 'https://ai.ls'
model: str = 'gpt-3.5-turbo'
supports_stream = True
needs_auth = False
+working = True
+
class Utils:
def hash(json_data: Dict[str, str]) -> sha256:
diff --git a/g4f/Provider/Providers/Bard.py b/g4f/Provider/Providers/Bard.py
index 4c37c4b7..0d007a10 100644
--- a/g4f/Provider/Providers/Bard.py
+++ b/g4f/Provider/Providers/Bard.py
@@ -5,6 +5,8 @@ url = 'https://bard.google.com'
model = ['Palm2']
supports_stream = False
needs_auth = True
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
diff --git a/g4f/Provider/Providers/Bing.py b/g4f/Provider/Providers/Bing.py
index 1d33cda5..94c1e21a 100644
--- a/g4f/Provider/Providers/Bing.py
+++ b/g4f/Provider/Providers/Bing.py
@@ -16,6 +16,7 @@ url = 'https://bing.com/chat'
model = ['gpt-4']
supports_stream = True
needs_auth = False
+working = True
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
diff --git a/g4f/Provider/Providers/BingHuan.py b/g4f/Provider/Providers/BingHuan.py
index 9703e0f1..64b67e4b 100644
--- a/g4f/Provider/Providers/BingHuan.py
+++ b/g4f/Provider/Providers/BingHuan.py
@@ -7,6 +7,8 @@ url = 'https://b.ai-huan.xyz'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = True
needs_auth = False
+working = False
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
path = os.path.dirname(os.path.realpath(__file__))
diff --git a/g4f/Provider/Providers/ChatgptAi.py b/g4f/Provider/Providers/ChatgptAi.py
index 929ec216..1f9ead0e 100644
--- a/g4f/Provider/Providers/ChatgptAi.py
+++ b/g4f/Provider/Providers/ChatgptAi.py
@@ -6,6 +6,8 @@ url = 'https://chatgpt.ai/gpt-4/'
model = ['gpt-4']
supports_stream = False
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
chat = ''
diff --git a/g4f/Provider/Providers/ChatgptLogin.py b/g4f/Provider/Providers/ChatgptLogin.py
index 9551d15d..3659ee17 100644
--- a/g4f/Provider/Providers/ChatgptLogin.py
+++ b/g4f/Provider/Providers/ChatgptLogin.py
@@ -8,7 +8,7 @@ url = 'https://chatgptlogin.ac'
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
-
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def get_nonce():
diff --git a/g4f/Provider/Providers/DFEHub.py b/g4f/Provider/Providers/DFEHub.py
index 1bbdd01e..d3c12ed0 100644
--- a/g4f/Provider/Providers/DFEHub.py
+++ b/g4f/Provider/Providers/DFEHub.py
@@ -4,41 +4,45 @@ import json
url = "https://chat.dfehub.com/api/chat"
model = ['gpt-3.5-turbo']
-supports_stream = False
+supports_stream = True
needs_auth = False
+working = True
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
headers = {
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
+ 'authority': 'chat.dfehub.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.dfehub.com',
+ 'referer': 'https://chat.dfehub.com/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ 'x-requested-with': 'XMLHttpRequest',
}
- data = {
- "model": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5",
- "maxLength": 12000,
- "tokenLimit": 4000
- },
- "messages": [
- {
- "role": "user",
- "content": base
- }
- ],
- "key": "",
- "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
- "temperature": 1
+
+ json_data = {
+ 'messages': messages,
+ 'model': 'gpt-3.5-turbo',
+ 'temperature': 0.5,
+ 'presence_penalty': 0,
+ 'frequency_penalty': 0,
+ 'top_p': 1,
}
- response = requests.post(url, headers=headers, data=json.dumps(data))
- if response.status_code == 200:
- yield response.text
- else:
- print(f"Error Occurred::{response.status_code}")
- return None
+
+ response = requests.post('https://chat.dfehub.com/api/openai/v1/chat/completions',
+ headers=headers, json=json_data)
+
+ for chunk in response.iter_lines():
+ if b'content' in chunk:
+ data = json.loads(chunk.decode().split('data: ')[1])
+ yield (data['choices'][0]['delta']['content'])
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/DeepAi.py b/g4f/Provider/Providers/DeepAi.py
index 02b08120..b34dd60d 100644
--- a/g4f/Provider/Providers/DeepAi.py
+++ b/g4f/Provider/Providers/DeepAi.py
@@ -10,6 +10,8 @@ url = 'https://deepai.org'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def md5(text: str) -> str:
diff --git a/g4f/Provider/Providers/EasyChat.py b/g4f/Provider/Providers/EasyChat.py
index 9f4aa7b2..a59ea072 100644
--- a/g4f/Provider/Providers/EasyChat.py
+++ b/g4f/Provider/Providers/EasyChat.py
@@ -2,42 +2,51 @@ import os, requests
from ...typing import sha256, Dict, get_type_hints
import json
-url = "https://free.easychat.work/api/openai/v1/chat/completions"
+url = "https://free.easychat.work"
model = ['gpt-3.5-turbo']
-supports_stream = False
+supports_stream = True
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- ''' limited to 240 messages/hour'''
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
-
headers = {
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+ 'authority': 'free.easychat.work',
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'content-type': 'application/json',
+ 'endpoint': '',
+ 'origin': 'https://free.easychat.work',
+ 'plugins': '0',
+ 'referer': 'https://free.easychat.work/',
+ 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+ 'usesearch': 'false',
+ 'x-requested-with': 'XMLHttpRequest',
}
- data = {
- "messages": [
- {"role": "system", "content": "You are ChatGPT, a large language model trained by OpenAI."},
- {"role": "user", "content": base}
- ],
- "stream": False,
- "model": "gpt-3.5-turbo",
- "temperature": 0.5,
- "presence_penalty": 0,
- "frequency_penalty": 0,
- "top_p": 1
+ json_data = {
+ 'messages': messages,
+ 'stream': True,
+ 'model': model,
+ 'temperature': 0.5,
+ 'presence_penalty': 0,
+ 'frequency_penalty': 0,
+ 'top_p': 1,
}
- response = requests.post(url, headers=headers, json=data)
- if response.status_code == 200:
- response = response.json()
- yield response['choices'][0]['message']['content']
- else:
- print(f"Error Occurred::{response.status_code}")
- return None
+ response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions',
+ headers=headers, json=json_data)
+
+ for chunk in response.iter_lines():
+ if b'content' in chunk:
+ data = json.loads(chunk.decode().split('data: ')[1])
+ yield (data['choices'][0]['delta']['content'])
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file
diff --git a/g4f/Provider/Providers/Forefront.py b/g4f/Provider/Providers/Forefront.py
index e7e89831..70ea6725 100644
--- a/g4f/Provider/Providers/Forefront.py
+++ b/g4f/Provider/Providers/Forefront.py
@@ -7,6 +7,8 @@ url = 'https://forefront.com'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
json_data = {
diff --git a/g4f/Provider/Providers/GetGpt.py b/g4f/Provider/Providers/GetGpt.py
index 56a121f6..bafc0ce8 100644
--- a/g4f/Provider/Providers/GetGpt.py
+++ b/g4f/Provider/Providers/GetGpt.py
@@ -9,6 +9,8 @@ url = 'https://chat.getgpt.world/'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = True
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
def encrypt(e):
diff --git a/g4f/Provider/Providers/H2o.py b/g4f/Provider/Providers/H2o.py
index 4400f3a9..92043026 100644
--- a/g4f/Provider/Providers/H2o.py
+++ b/g4f/Provider/Providers/H2o.py
@@ -10,6 +10,7 @@ url = 'https://gpt-gm.h2o.ai'
model = ['falcon-40b', 'falcon-7b', 'llama-13b']
supports_stream = True
needs_auth = False
+working = True
models = {
'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
@@ -18,6 +19,7 @@ models = {
}
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+
conversation = ''
for message in messages:
conversation += '%s: %s\n' % (message['role'], message['content'])
@@ -47,8 +49,6 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
}
response = session.post("https://gpt-gm.h2o.ai/settings", headers=headers, data=data)
-
-
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
"Accept": "*/*",
diff --git a/g4f/Provider/Providers/Liaobots.py b/g4f/Provider/Providers/Liaobots.py
index 76b13c31..75746c03 100644
--- a/g4f/Provider/Providers/Liaobots.py
+++ b/g4f/Provider/Providers/Liaobots.py
@@ -5,6 +5,7 @@ url = 'https://liaobots.com'
model = ['gpt-3.5-turbo', 'gpt-4']
supports_stream = True
needs_auth = True
+working = False
models = {
'gpt-4': {
diff --git a/g4f/Provider/Providers/Lockchat.py b/g4f/Provider/Providers/Lockchat.py
index 1bce7403..489356ce 100644
--- a/g4f/Provider/Providers/Lockchat.py
+++ b/g4f/Provider/Providers/Lockchat.py
@@ -6,6 +6,7 @@ url = 'http://supertest.lockchat.app'
model = ['gpt-4', 'gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
diff --git a/g4f/Provider/Providers/Theb.py b/g4f/Provider/Providers/Theb.py
index aa43ebc5..093ea886 100644
--- a/g4f/Provider/Providers/Theb.py
+++ b/g4f/Provider/Providers/Theb.py
@@ -9,6 +9,7 @@ url = 'https://theb.ai'
model = ['gpt-3.5-turbo']
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/Providers/Vercel.py b/g4f/Provider/Providers/Vercel.py
index e5df9cf0..e1e84f9a 100644
--- a/g4f/Provider/Providers/Vercel.py
+++ b/g4f/Provider/Providers/Vercel.py
@@ -11,6 +11,7 @@ from ...typing import sha256, Dict, get_type_hints
url = 'https://play.vercel.ai'
supports_stream = True
needs_auth = False
+working = False
models = {
'claude-instant-v1': 'anthropic:claude-instant-v1',
@@ -143,8 +144,6 @@ class Client:
index = len(lines) - 1
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- yield 'Vercel is currently not working.'
- return
conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
diff --git a/g4f/Provider/Providers/Wewordle.py b/g4f/Provider/Providers/Wewordle.py
index 95966fbd..116ebb85 100644
--- a/g4f/Provider/Providers/Wewordle.py
+++ b/g4f/Provider/Providers/Wewordle.py
@@ -10,7 +10,7 @@ url = "https://wewordle.org/gptapi/v1/android/turbo"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False
-
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
base = ''
diff --git a/g4f/Provider/Providers/You.py b/g4f/Provider/Providers/You.py
index 02a2774c..3c321118 100644
--- a/g4f/Provider/Providers/You.py
+++ b/g4f/Provider/Providers/You.py
@@ -9,6 +9,7 @@ url = 'https://you.com'
model = 'gpt-3.5-turbo'
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/Providers/Yqcloud.py b/g4f/Provider/Providers/Yqcloud.py
index 488951dd..fae44682 100644
--- a/g4f/Provider/Providers/Yqcloud.py
+++ b/g4f/Provider/Providers/Yqcloud.py
@@ -9,6 +9,7 @@ model = [
]
supports_stream = True
needs_auth = False
+working = False
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 8488cec1..b64e44f5 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -18,7 +18,7 @@ from .Providers import (
AItianhu,
EasyChat,
Acytoo,
- DFEHub,
+    DFEHub,
AiService,
BingHuan,
Wewordle,
diff --git a/g4f/__init__.py b/g4f/__init__.py
index a0b4bac6..09b24b55 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -2,11 +2,14 @@ import sys
from . import Provider
from g4f.models import Model, ModelUtils
+logging = False
class ChatCompletion:
@staticmethod
def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
kwargs['auth'] = auth
+ if provider and provider.working == False:
+ return f'{provider.__name__} is not working'
if provider and provider.needs_auth and not auth:
print(
@@ -27,7 +30,7 @@ class ChatCompletion:
f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
sys.exit(1)
- print(f'Using {engine.__name__} provider')
+ if logging: print(f'Using {engine.__name__} provider')
return (engine._create_completion(model.name, messages, stream, **kwargs)
if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
diff --git a/g4f/models.py b/g4f/models.py
index ecf18e6d..95be4849 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -152,11 +152,42 @@ class Model:
name: str = 'llama-13b'
base_provider: str = 'huggingface'
best_provider: Provider.Provider = Provider.H2o
+
+ class gpt_35_turbo_16k:
+ name: str = 'gpt-3.5-turbo-16k'
+ base_provider: str = 'openai'
+ best_provider: Provider.Provider = Provider.EasyChat
+
+ class gpt_35_turbo_0613:
+ name: str = 'gpt-3.5-turbo-0613'
+ base_provider: str = 'openai'
+ best_provider: Provider.Provider = Provider.EasyChat
+
+ class gpt_35_turbo_16k_0613:
+ name: str = 'gpt-3.5-turbo-16k-0613'
+ base_provider: str = 'openai'
+ best_provider: Provider.Provider = Provider.EasyChat
+
+ class gpt_4_32k:
+ name: str = 'gpt-4-32k'
+ base_provider: str = 'openai'
+ best_provider = None
+
+ class gpt_4_0613:
+ name: str = 'gpt-4-0613'
+ base_provider: str = 'openai'
+ best_provider = None
class ModelUtils:
convert: dict = {
'gpt-3.5-turbo': Model.gpt_35_turbo,
+    'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
+ 'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
+ 'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
+
'gpt-4': Model.gpt_4,
+ 'gpt-4-32k': Model.gpt_4_32k,
+ 'gpt-4-0613': Model.gpt_4_0613,
'claude-instant-v1-100k': Model.claude_instant_v1_100k,
'claude-v1-100k': Model.claude_v1_100k,