author     abc <98614666+xtekky@users.noreply.github.com>  2023-10-19 16:14:48 +0200
committer  abc <98614666+xtekky@users.noreply.github.com>  2023-10-19 16:14:48 +0200
commit     d4ab83a45bfef2910936dcabf3f875268631e65b
tree       fc8176d47b8074a8dc988b8a440b308b2784f42c  /g4f/models.py
parent     ~
Diffstat (limited to 'g4f/models.py')
-rw-r--r--  g4f/models.py  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/g4f/models.py b/g4f/models.py
index d7c29ccb..b34297f5 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -37,6 +37,10 @@ class Model:
     name: str
     base_provider: str
     best_provider: Union[type[BaseProvider], RetryProvider] = None
+
+    @staticmethod
+    def __all__() -> list[str]:
+        return _all_models
 
 default = Model(
     name = "",
@@ -231,6 +235,11 @@ llama7b_v2_chat = Model(
     base_provider = 'replicate',
     best_provider = Vercel)
 
+llama70b_v2_chat = Model(
+    name = 'replicate/llama70b-v2-chat',
+    base_provider = 'replicate',
+    best_provider = Vercel)
+
 class ModelUtils:
     convert: dict[str, Model] = {
@@ -260,9 +269,9 @@ class ModelUtils:
         'llama-13b' : llama_13b,
 
         # Vercel
-        'claude-instant-v1' : claude_instant_v1,
-        'claude-v1' : claude_v1,
-        'claude-v2' : claude_v2,
+        #'claude-instant-v1' : claude_instant_v1,
+        #'claude-v1' : claude_v1,
+        #'claude-v2' : claude_v2,
         'command-nightly' : command_nightly,
         'gpt-neox-20b' : gpt_neox_20b,
         'santacoder' : santacoder,
@@ -274,6 +283,7 @@ class ModelUtils:
         'text-curie-001' : text_curie_001,
         'text-davinci-002' : text_davinci_002,
         'text-davinci-003' : text_davinci_003,
+        'llama70b-v2-chat' : llama70b_v2_chat,
         'llama13b-v2-chat' : llama13b_v2_chat,
         'llama7b-v2-chat' : llama7b_v2_chat,
@@ -281,3 +291,5 @@ class ModelUtils:
         'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
         'command-light-nightly' : command_light_nightly,
     }
+
+_all_models = list(ModelUtils.convert.keys())
\ No newline at end of file
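
Note: a minimal usage sketch of what this change enables, assuming the g4f package from this repository is importable. The new Model.__all__() staticmethod just returns _all_models, i.e. every key registered in ModelUtils.convert, so callers can enumerate the supported model names.

    import g4f.models

    # List every model name registered in ModelUtils.convert
    # via the new Model.__all__() staticmethod.
    for model_name in g4f.models.Model.__all__():
        print(model_name)  # e.g. 'llama70b-v2-chat', 'llama13b-v2-chat', ...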