| field     | value                                                                 | date                      |
|-----------|-----------------------------------------------------------------------|---------------------------|
| author    | Tekky <98614666+xtekky@users.noreply.github.com>                      | 2024-09-07 21:37:24 +0200 |
| committer | GitHub <noreply@github.com>                                           | 2024-09-07 21:37:24 +0200 |
| commit    | 07fa87b4d180259d1da86afb565e14ac3d60d50b (patch)                      |                           |
| tree      | d474a2bf8bd79cf94bfa48cbaca3917659dd19be /g4f/Provider/HuggingFace.py |                           |
| parent    | Merge pull request #2206 from Parthsadaria/patch-1 (diff)             |                           |
| parent    | g4f/models.py g4f/Provider/MagickPen.py (diff)                        |                           |
Diffstat (limited to 'g4f/Provider/HuggingFace.py')

| mode       | file                        | lines changed |
|------------|-----------------------------|---------------|
| -rw-r--r-- | g4f/Provider/HuggingFace.py | 12            |

1 file changed, 8 insertions(+), 4 deletions(-)
```diff
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index a3741196..74957862 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -1,11 +1,14 @@
 from __future__ import annotations
+
 import json
 from aiohttp import ClientSession, BaseConnector
+
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
 from ..errors import RateLimitError, ModelNotFoundError
 from ..requests.raise_for_status import raise_for_status
+
 class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
@@ -22,7 +25,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
-    
+
     model_aliases = {
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
         "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
@@ -76,7 +79,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         }
         if api_key is not None:
             headers["Authorization"] = f"Bearer {api_key}"
-        
+
         params = {
             "return_full_text": False,
             "max_new_tokens": max_new_tokens,
@@ -84,7 +87,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
             **kwargs
         }
         payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
-        
+
         async with ClientSession(
             headers=headers,
             connector=get_connector(connector, proxy)
@@ -106,6 +109,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
                             yield chunk
             else:
                 yield (await response.json())[0]["generated_text"].strip()
+
 def format_prompt(messages: Messages) -> str:
     system_messages = [message["content"] for message in messages if message["role"] == "system"]
     question = " ".join([messages[-1]["content"], *system_messages])
@@ -114,4 +118,4 @@ def format_prompt(messages: Messages) -> str:
         for idx, message in enumerate(messages)
         if message["role"] == "assistant"
     ])
-    return f"{history}<s>[INST] {question} [/INST]"
\ No newline at end of file
+    return f"{history}<s>[INST] {question} [/INST]"
```
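For context, the `format_prompt` helper touched by the last two hunks builds a Mistral-style `<s>[INST] ... [/INST]` prompt from the chat history. Below is a minimal, standalone sketch of what it produces; the opening of the `history` expression falls between the hunks shown above, so the f-string used here is a reconstruction from the visible context lines, not verbatim from the patch:

```python
# Standalone sketch of the patched format_prompt (Mistral [INST] format).
# ASSUMPTION: the history f-string below is reconstructed; the diff above
# only shows the tail of that expression.

def format_prompt(messages: list) -> str:
    # `messages` stands in for g4f.typing.Messages: a list of role/content dicts.
    system_messages = [message["content"] for message in messages if message["role"] == "system"]
    question = " ".join([messages[-1]["content"], *system_messages])
    # Pair each assistant reply with the user turn that preceded it (assumed).
    history = "".join([
        f"<s>[INST]{messages[idx-1]['content']} [/INST] {message['content']}</s>"
        for idx, message in enumerate(messages)
        if message["role"] == "assistant"
    ])
    return f"{history}<s>[INST] {question} [/INST]"

if __name__ == "__main__":
    demo = [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "What is 2+2?"},
    ]
    print(format_prompt(demo))
    # -> <s>[INST]Hi [/INST] Hello!</s><s>[INST] What is 2+2? [/INST]
```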
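Similarly, the `headers`/`params`/`payload` hunks can be read as one small request. A hedged sketch of the non-streaming path follows; the endpoint URL is an assumption, since the provider's URL construction is elided from this diff, with the public HF Inference API route used as a plausible stand-in:

```python
# Sketch of the request the patch assembles, non-streaming path only.
# ASSUMPTION: the api-inference URL below; the diff does not show how the
# provider builds its request URL.
import asyncio  # for the commented usage line at the bottom
from aiohttp import ClientSession

async def query(model: str, prompt: str, api_key: str | None = None) -> str:
    headers = {}
    if api_key is not None:
        headers["Authorization"] = f"Bearer {api_key}"  # same auth scheme as the patch
    params = {"return_full_text": False, "max_new_tokens": 1024}
    payload = {"inputs": prompt, "parameters": params, "stream": False}
    async with ClientSession(headers=headers) as session:
        url = f"https://api-inference.huggingface.co/models/{model}"  # assumed route
        async with session.post(url, json=payload) as response:
            response.raise_for_status()
            # Mirrors the patch's non-streaming branch: first generated_text, stripped.
            return (await response.json())[0]["generated_text"].strip()

# Usage (network call, so left commented):
# print(asyncio.run(query("mistralai/Mistral-7B-Instruct-v0.3", "<s>[INST] Hi [/INST]")))
```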