summary | refs | log | tree | commit | diff | stats
path: root/g4f/client/client.py
diff options
context:
space:
mode:
Diffstat (limited to 'g4f/client/client.py')
-rw-r--r--  g4f/client/client.py  31
1 files changed, 5 insertions, 26 deletions
diff --git a/g4f/client/client.py b/g4f/client/client.py
index 2772f9bb..44d99d60 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -149,7 +149,6 @@ class Completions:
self,
messages: Messages,
model: str,
- system: str = None, # Added system parameter
provider: ProviderType = None,
stream: bool = False,
proxy: str = None,
@@ -162,12 +161,6 @@ class Completions:
ignore_stream: bool = False,
**kwargs
) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
- # If a system prompt is provided, prepend it to the messages
- if system:
- system_message = {"role": "system", "content": system}
- messages = [system_message] + messages
-
- # Existing implementation continues...
model, provider = get_model_and_provider(
model,
self.provider if provider is None else provider,
@@ -228,7 +221,6 @@ class Completions:
self,
messages: Messages,
model: str,
- system: str = None, # Added system parameter
provider: ProviderType = None,
stream: bool = False,
proxy: str = None,
@@ -241,12 +233,6 @@ class Completions:
ignore_stream: bool = False,
**kwargs
) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]:
- # If a system prompt is provided, prepend it to the messages
- if system:
- system_message = {"role": "system", "content": system}
- messages = [system_message] + messages
-
- # Existing implementation continues...
model, provider = get_model_and_provider(
model,
self.provider if provider is None else provider,
@@ -285,18 +271,16 @@ class Completions:
**kwargs
)
- # Handle streaming or non-streaming responses
+ # Removed 'await' here since 'async_iter_response' returns an async generator
+ response = async_iter_response(response, stream, response_format, max_tokens, stop)
+ response = async_iter_append_model_and_provider(response)
+
if stream:
- response = async_iter_response(response, stream, response_format, max_tokens, stop)
- response = async_iter_append_model_and_provider(response)
return response
else:
- response = async_iter_response(response, stream, response_format, max_tokens, stop)
- response = async_iter_append_model_and_provider(response)
async for result in response:
return result
-
class Chat:
completions: Completions
@@ -417,12 +401,6 @@ class Image:
def __repr__(self):
return f"Image(url={self.url}, b64_json={'<base64 data>' if self.b64_json else None})"
- def to_dict(self):
- return {
- "url": self.url,
- "b64_json": self.b64_json
- }
-
class ImagesResponse:
def __init__(self, data: list[Image]):
self.data = data
@@ -530,3 +508,4 @@ class Images:
async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs):
# Existing implementation, adjust if you want to support b64_json here as well
pass
+