summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/HuggingFace.py
diff options
context:
space:
mode:
authorHeiner Lohaus <hlohaus@users.noreply.github.com>2024-03-16 20:02:15 +0100
committerHeiner Lohaus <hlohaus@users.noreply.github.com>2024-03-16 20:02:15 +0100
commitf797ec828ce3b5965796b437878e14b6eec6235b (patch)
treecc39a3df291ad30f55df34f52d31d73afcd9a827 /g4f/Provider/HuggingFace.py
parentMerge pull request #1691 from hlohaus/retry (diff)
downloadgpt4free-f797ec828ce3b5965796b437878e14b6eec6235b.tar
gpt4free-f797ec828ce3b5965796b437878e14b6eec6235b.tar.gz
gpt4free-f797ec828ce3b5965796b437878e14b6eec6235b.tar.bz2
gpt4free-f797ec828ce3b5965796b437878e14b6eec6235b.tar.lz
gpt4free-f797ec828ce3b5965796b437878e14b6eec6235b.tar.xz
gpt4free-f797ec828ce3b5965796b437878e14b6eec6235b.tar.zst
gpt4free-f797ec828ce3b5965796b437878e14b6eec6235b.zip
Diffstat (limited to 'g4f/Provider/HuggingFace.py')
-rw-r--r--g4f/Provider/HuggingFace.py10
1 file changed, 4 insertions, 6 deletions
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index a73411ce..647780fd 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -7,6 +7,7 @@ from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..errors import RateLimitError, ModelNotFoundError
+from ..requests.raise_for_status import raise_for_status
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
@@ -44,12 +45,9 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
connector=get_connector(connector, proxy)
) as session:
async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
- if response.status == 429:
- raise RateLimitError("Rate limit reached. Set a api_key")
- elif response.status == 404:
+ if response.status == 404:
raise ModelNotFoundError(f"Model is not supported: {model}")
- elif response.status != 200:
- raise RuntimeError(f"Response {response.status}: {await response.text()}")
+ await raise_for_status(response)
if stream:
first = True
async for line in response.content:
@@ -68,7 +66,7 @@ def format_prompt(messages: Messages) -> str:
system_messages = [message["content"] for message in messages if message["role"] == "system"]
question = " ".join([messages[-1]["content"], *system_messages])
history = "".join([
- f"<s>[INST]{messages[idx-1]['content']} [/INST] {message}</s>"
+ f"<s>[INST]{messages[idx-1]['content']} [/INST] {message['content']}</s>"
for idx, message in enumerate(messages)
if message["role"] == "assistant"
])