author     kqlio67 <kqlio67@users.noreply.github.com>  2024-09-25 07:01:39 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-09-25 07:01:39 +0200
commit     a6099ba48beeba7911a61dd4c2af2ba324ab23f0 (patch)
tree       44df349d73bf35d4b662eb45733b16dc269b2ff3 /g4f
parent     Returned provider 'g4f/Provider/ChatGot.py' updated documentation 'docs/providers-and-models.md'
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/Blackbox.py     | 3 +--
-rw-r--r--  g4f/Provider/HuggingChat.py  | 2 ++
-rw-r--r--  g4f/models.py                | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 40696c82..3e183076 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -154,7 +154,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 async for chunk in response.content.iter_any():
                     if chunk:
                         decoded_chunk = chunk.decode()
-                        # Remove the $@$v=v1.10-rv1$@$ prefix and other similar markers
                         decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
-                        if decoded_chunk.strip(): # Check whether any text remains after removing the prefix
+                        if decoded_chunk.strip():
                             yield decoded_chunk
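
Note: the hunk above drops the inline comments but keeps the prefix-stripping behaviour. A minimal standalone sketch of that filtering step follows; the sample chunk value is hypothetical.

import re

chunk = "$@$v=v1.10-rv1$@$Hello, world!"  # hypothetical raw chunk carrying a version marker
# Same pattern as in Blackbox.py: strip any $@$v=...$@$ marker from the decoded chunk.
cleaned = re.sub(r'\$@\$v=[^$]+\$@\$', '', chunk)
if cleaned.strip():   # only emit text that remains after stripping
    print(cleaned)    # -> Hello, world!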
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 06216ade..01490e2f 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -16,6 +16,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'CohereForAI/c4ai-command-r-plus-08-2024',
+        'Qwen/Qwen2.5-72B-Instruct',
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
         'mistralai/Mistral-7B-Instruct-v0.3',
@@ -25,6 +26,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     model_aliases = {
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
         "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
+        "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
         "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
         "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
diff --git a/g4f/models.py b/g4f/models.py
index f5980577..2f4405a3 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -443,7 +443,7 @@ qwen_1_5_110b = Model(
 qwen_2_72b = Model(
     name = 'qwen-2-72b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, Airforce])
+    best_provider = IterListProvider([DeepInfraChat, HuggingChat, Airforce, HuggingFace])
 )

 qwen_turbo = Model(
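
Note: with the change above, requesting "qwen-2-72b" lets IterListProvider try DeepInfraChat, HuggingChat, Airforce and HuggingFace in turn until one of them responds. A hedged usage sketch, assuming the g4f client interface of this period; the exact API may differ across versions.

from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="qwen-2-72b",  # resolved through model_aliases / models.py
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)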