diff options
author | Commenter123321 <36051603+Commenter123321@users.noreply.github.com> | 2023-10-10 14:15:12 +0200 |
---|---|---|
committer | Commenter123321 <36051603+Commenter123321@users.noreply.github.com> | 2023-10-10 14:15:12 +0200 |
commit | 0e4297494dd46533738f786e4ac675541586177a (patch) | |
tree | 6c2999e14ed831f7f37cc60991571cd040772918 /g4f/Provider/Vercel.py | |
parent | add cool testing for gpt-3.5 and gpt-4 (diff) | |
parent | Update Aivvm.py (diff) | |
download | gpt4free-0e4297494dd46533738f786e4ac675541586177a.tar gpt4free-0e4297494dd46533738f786e4ac675541586177a.tar.gz gpt4free-0e4297494dd46533738f786e4ac675541586177a.tar.bz2 gpt4free-0e4297494dd46533738f786e4ac675541586177a.tar.lz gpt4free-0e4297494dd46533738f786e4ac675541586177a.tar.xz gpt4free-0e4297494dd46533738f786e4ac675541586177a.tar.zst gpt4free-0e4297494dd46533738f786e4ac675541586177a.zip |
Diffstat (limited to '')
-rw-r--r-- | g4f/Provider/Vercel.py | 14 |
1 file changed, 9 insertions, 5 deletions
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py index 2d20ca6a..2d856664 100644 --- a/g4f/Provider/Vercel.py +++ b/g4f/Provider/Vercel.py @@ -2,7 +2,7 @@ from __future__ import annotations import json, base64, requests, execjs, random, uuid -from ..typing import Any, TypedDict, CreateResult +from ..typing import Messages, TypedDict, CreateResult from .base_provider import BaseProvider from abc import abstractmethod @@ -17,8 +17,9 @@ class Vercel(BaseProvider): @abstractmethod def create_completion( model: str, - messages: list[dict[str, str]], + messages: Messages, stream: bool, + proxy: str = None, **kwargs ) -> CreateResult: if not model: @@ -52,15 +53,18 @@ class Vercel(BaseProvider): 'model' : model_info[model]['id'], 'messages' : messages, 'playgroundId': str(uuid.uuid4()), - 'chatIndex' : 0} | model_info[model]['default_params'] + 'chatIndex' : 0, + **model_info[model]['default_params'], + **kwargs + } max_retries = kwargs.get('max_retries', 20) for i in range(max_retries): response = requests.post('https://sdk.vercel.ai/api/generate', - headers=headers, json=json_data, stream=True) + headers=headers, json=json_data, stream=True, proxies={"https": proxy}) try: response.raise_for_status() - except: + except Exception: continue for token in response.iter_content(chunk_size=None): yield token.decode() |