author     PD <56485898+pratham-darooka@users.noreply.github.com>    2024-04-19 09:27:33 +0200
committer  GitHub <noreply@github.com>                               2024-04-19 09:27:33 +0200
commit     5fd118f3c9d3d3a932695a3b413d8926d8ad58c0
tree       e9e13603587cea196e672ff32453ea0936ed1a8c /g4f
parent     Merge pull request #1854 from hlohaus/nem
Diffstat
-rw-r--r--  g4f/Provider/FlowGpt.py                                        2
-rw-r--r--  g4f/Provider/HuggingChat.py                                    3
-rw-r--r--  g4f/Provider/Llama.py (renamed from g4f/Provider/Llama2.py)    8
-rw-r--r--  g4f/Provider/__init__.py                                       2
-rw-r--r--  g4f/gui/client/index.html                                      1
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js                            2
-rw-r--r--  g4f/models.py                                                 22
7 files changed, 30 insertions, 10 deletions
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py
index 7edd6f19..6c2aa046 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/FlowGpt.py
@@ -99,4 +99,4 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
             if "event" not in message:
                 continue
             if message["event"] == "text":
-                yield message["data"]
\ No newline at end of file
+                yield message["data"]
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 882edb78..668ce4b1 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -19,7 +19,8 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'google/gemma-1.1-7b-it',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
-        'mistralai/Mistral-7B-Instruct-v0.2'
+        'mistralai/Mistral-7B-Instruct-v0.2',
+        'meta-llama/Meta-Llama-3-70B-Instruct'
     ]
     model_aliases = {
         "openchat/openchat_3.5": "openchat/openchat-3.5-0106",
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama.py
index 04b5aee0..4d19866e 100644
--- a/g4f/Provider/Llama2.py
+++ b/g4f/Provider/Llama.py
@@ -7,17 +7,21 @@ from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-class Llama2(AsyncGeneratorProvider, ProviderModelMixin):
+class Llama(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.llama2.ai"
     working = True
     supports_message_history = True
-    default_model = "meta/llama-2-70b-chat"
+    default_model = "meta/llama-3-70b-chat"
     models = [
         "meta/llama-2-7b-chat",
         "meta/llama-2-13b-chat",
         "meta/llama-2-70b-chat",
+        "meta/llama-3-8b-chat",
+        "meta/llama-3-70b-chat",
     ]
     model_aliases = {
+        "meta-llama/Meta-Llama-3-8b": "meta/llama-3-8b-chat",
+        "meta-llama/Meta-Llama-3-70b": "meta/llama-3-70b-chat",
         "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
         "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
         "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index d5913e3c..f761df5b 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -40,7 +40,7 @@ from .HuggingChat import HuggingChat
 from .HuggingFace import HuggingFace
 from .Koala import Koala
 from .Liaobots import Liaobots
-from .Llama2 import Llama2
+from .Llama import Llama
 from .Local import Local
 from .PerplexityLabs import PerplexityLabs
 from .Pi import Pi
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index a6c4909b..8668c21e 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -220,6 +220,7 @@
         <option value="gpt-4">gpt-4</option>
         <option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
         <option value="llama2-70b">llama2-70b</option>
+        <option value="llama3-70b">llama3-70b</option>
         <option value="gemini-pro">gemini-pro</option>
         <option value="">----</option>
     </select>
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index a17be16e..39027260 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -926,7 +926,7 @@ colorThemes.forEach((themeOption) => {
 function count_tokens(model, text) {
     if (model) {
         if (window.llamaTokenizer)
-        if (model.startsWith("llama2") || model.startsWith("codellama")) {
+        if (model.startsWith("llama") || model.startsWith("codellama")) {
             return llamaTokenizer.encode(text).length;
         }
         if (window.mistralTokenizer)
diff --git a/g4f/models.py b/g4f/models.py
index fe99958c..4af1c31e 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -16,7 +16,7 @@ from .Provider import (
     GigaChat,
     Liaobots,
     FreeGpt,
-    Llama2,
+    Llama,
     Vercel,
     Gemini,
     Koala,
@@ -117,19 +117,31 @@ gigachat_pro = Model(
 llama2_7b = Model(
     name = "meta-llama/Llama-2-7b-chat-hf",
     base_provider = 'meta',
-    best_provider = RetryProvider([Llama2, DeepInfra])
+    best_provider = RetryProvider([Llama, DeepInfra])
 )
 
 llama2_13b = Model(
     name = "meta-llama/Llama-2-13b-chat-hf",
     base_provider = 'meta',
-    best_provider = RetryProvider([Llama2, DeepInfra])
+    best_provider = RetryProvider([Llama, DeepInfra])
 )
 
 llama2_70b = Model(
     name = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+    best_provider = RetryProvider([Llama, DeepInfra, HuggingChat])
+)
+
+llama3_8b = Model(
+    name = "meta-llama/Meta-Llama-3-8b",
+    base_provider = "meta",
+    best_provider = RetryProvider([Llama])
+)
+
+llama3_70b = Model(
+    name = "meta-llama/Meta-Llama-3-70b",
+    base_provider = "meta",
+    best_provider = RetryProvider([Llama, HuggingChat])
 )
 
 codellama_34b_instruct = Model(
@@ -306,6 +318,8 @@ class ModelUtils:
         'llama2-7b' : llama2_7b,
         'llama2-13b': llama2_13b,
         'llama2-70b': llama2_70b,
+        'llama3-8b' : llama3_8b,
+        'llama3-70b': llama3_70b,
         'codellama-34b-instruct': codellama_34b_instruct,
         'codellama-70b-instruct': codellama_70b_instruct,
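
After this commit, the Llama 3 entries are reachable through the regular g4f model registry. A minimal sketch of calling one of them, assuming the g4f.ChatCompletion.create interface gpt4free shipped at the time; the "llama3-70b" key is the new ModelUtils entry added above, and the prompt text is illustrative only:

import g4f

# "llama3-70b" is the new ModelUtils key added in g4f/models.py above; it
# maps to meta-llama/Meta-Llama-3-70b, served by RetryProvider over the
# renamed Llama provider and HuggingChat.
response = g4f.ChatCompletion.create(
    model="llama3-70b",
    messages=[{"role": "user", "content": "Say hello from Llama 3."}],
)
print(response)

The existing "llama2-70b" key keeps working unchanged: the rename only touches the provider class (Llama2 -> Llama), not the registry keys.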