author     kqlio67 <kqlio67@users.noreply.github.com>  2024-10-29 21:03:05 +0100
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-10-29 21:03:05 +0100
commit     e79c8b01f58d21502c962f38c804bf81196f89fb (patch)
tree       e8f0d3f94d7a8834d45173272bd1307c91ba06d4
parent     Update (g4f/gui/server/api.py) (diff)
-rw-r--r--  docs/async_client.md      |  4
-rw-r--r--  docs/client.md            |  7
-rw-r--r--  docs/interference-api.md  |  2
-rw-r--r--  g4f/client/client.py      | 31
4 files changed, 9 insertions(+), 35 deletions(-)
diff --git a/docs/async_client.md b/docs/async_client.md
index 357b0d86..0719a463 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -3,7 +3,7 @@ The G4F async client API is a powerful asynchronous interface for interacting wi
## Compatibility Note
-The G4F async client API is designed to be compatible with the OpenAI and Anthropic API, making it easy for developers familiar with OpenAI's or Anthropic's interface to transition to G4F.
+The G4F async client API is designed to be compatible with the OpenAI API, making it easy for developers familiar with OpenAI's interface to transition to G4F.
## Table of Contents
- [Introduction](#introduction)
@@ -57,7 +57,6 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = await async_client.chat.completions.create(
-    system="You are a helpful assistant.",
    model="gpt-3.5-turbo",
    messages=[
        {
@@ -70,7 +69,6 @@ response = await async_client.chat.completions.create(
```
**This example:**
- - Sets a system message to define the assistant's role
- Asks a specific question `Say this is a test`
- Configures various parameters like temperature and max_tokens for more control over the output
- Disables streaming for a complete response
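With the `system` keyword gone from `create`, the async example works by placing the system prompt directly in `messages`, as the OpenAI API does. Below is a minimal sketch of the replacement pattern, assuming the `AsyncClient` entry point; the model name and sampling parameters are illustrative, not prescribed by this commit:

```python
import asyncio

from g4f.client import AsyncClient  # assumed async entry point

async def main():
    async_client = AsyncClient()
    response = await async_client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            # The system prompt now travels inside the messages list,
            # replacing the removed system= keyword argument.
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Say this is a test"},
        ],
        temperature=0.7,   # illustrative parameters, as in the doc example
        max_tokens=100,
        stream=False,
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```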
diff --git a/docs/client.md b/docs/client.md
index b4f351d3..388b2e4b 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -23,7 +23,7 @@
## Introduction
-Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI or Anthropic client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI and Anthropic API.
+Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI API.
## Getting Started
### Switching to G4F Client
@@ -43,7 +43,7 @@ from g4f.client import Client as OpenAI
-The G4F Client preserves the same familiar API interface as OpenAI or Anthropic, ensuring a smooth transition process.
+The G4F Client preserves the same familiar API interface as OpenAI, ensuring a smooth transition process.
## Initializing the Client
To utilize the G4F Client, create a new instance. **Below is an example showcasing custom providers:**
@@ -62,7 +62,6 @@ client = Client(
**Here’s an improved example of creating chat completions:**
```python
response = client.chat.completions.create(
-    system="You are a helpful assistant.",
    model="gpt-3.5-turbo",
    messages=[
        {
@@ -75,7 +74,6 @@ response = client.chat.completions.create(
```
**This example:**
- - Sets a system message to define the assistant's role
- Asks a specific question `Say this is a test`
- Configures various parameters like temperature and max_tokens for more control over the output
- Disables streaming for a complete response
@@ -313,7 +311,6 @@ while True:
    try:
        # Get GPT's response
        response = client.chat.completions.create(
-            system="You are a helpful assistant.",
            messages=messages,
            model=g4f.models.default,
        )
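Because `create` no longer prepends a system prompt, the chat-loop example can seed its history once, before the loop; the system message then rides along on every call since it stays at the front of the list. A hedged sketch of that pattern, with the loop body assumed from the surrounding doc context:

```python
import g4f
from g4f.client import Client

client = Client()

# Seed the history once; the system prompt stays at the front of
# the messages list for every subsequent call.
messages = [{"role": "system", "content": "You are a helpful assistant."}]

while True:
    user_input = input("You: ")
    if user_input.lower() == "exit":
        break
    messages.append({"role": "user", "content": user_input})

    # Get GPT's response
    response = client.chat.completions.create(
        messages=messages,
        model=g4f.models.default,
    )
    reply = response.choices[0].message.content
    messages.append({"role": "assistant", "content": reply})
    print(f"Assistant: {reply}")
```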
diff --git a/docs/interference-api.md b/docs/interference-api.md
index 1e51ba60..2e18e7b5 100644
--- a/docs/interference-api.md
+++ b/docs/interference-api.md
@@ -15,7 +15,7 @@
## Introduction
-The G4F Interference API is a powerful tool that allows you to serve other OpenAI integrations using G4F (Gpt4free). It acts as a proxy, translating requests intended for the OpenAI and Anthropic API into requests compatible with G4F providers. This guide will walk you through the process of setting up, running, and using the Interference API effectively.
+The G4F Interference API is a powerful tool that allows you to serve other OpenAI integrations using G4F (Gpt4free). It acts as a proxy, translating requests intended for the OpenAI API into requests compatible with G4F providers. This guide will walk you through the process of setting up, running, and using the Interference API effectively.
## Running the Interference API
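Since the Interference API exposes the OpenAI wire format, the stock `openai` Python package can talk to it by overriding the base URL. A sketch under the assumption that the server listens on the project's usual `http://localhost:1337/v1`; adjust the address to your deployment:

```python
from openai import OpenAI

# Assumed local endpoint for the Interference API; the api_key is a
# placeholder because the proxy does not validate it.
client = OpenAI(
    base_url="http://localhost:1337/v1",
    api_key="secret",
)

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say this is a test"}],
)
print(response.choices[0].message.content)
```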
diff --git a/g4f/client/client.py b/g4f/client/client.py
index 2772f9bb..44d99d60 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -149,7 +149,6 @@ class Completions:
        self,
        messages: Messages,
        model: str,
-        system: str = None,  # Added system parameter
        provider: ProviderType = None,
        stream: bool = False,
        proxy: str = None,
@@ -162,12 +161,6 @@ class Completions:
        ignore_stream: bool = False,
        **kwargs
    ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
-        # If a system prompt is provided, prepend it to the messages
-        if system:
-            system_message = {"role": "system", "content": system}
-            messages = [system_message] + messages
-
-        # Existing implementation continues...
        model, provider = get_model_and_provider(
            model,
            self.provider if provider is None else provider,
@@ -228,7 +221,6 @@ class Completions:
        self,
        messages: Messages,
        model: str,
-        system: str = None,  # Added system parameter
        provider: ProviderType = None,
        stream: bool = False,
        proxy: str = None,
@@ -241,12 +233,6 @@ class Completions:
        ignore_stream: bool = False,
        **kwargs
    ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]:
-        # If a system prompt is provided, prepend it to the messages
-        if system:
-            system_message = {"role": "system", "content": system}
-            messages = [system_message] + messages
-
-        # Existing implementation continues...
        model, provider = get_model_and_provider(
            model,
            self.provider if provider is None else provider,
@@ -285,18 +271,16 @@ class Completions:
            **kwargs
        )
-        # Handle streaming or non-streaming responses
+        # Removed 'await' here since 'async_iter_response' returns an async generator
+        response = async_iter_response(response, stream, response_format, max_tokens, stop)
+        response = async_iter_append_model_and_provider(response)
+
        if stream:
-            response = async_iter_response(response, stream, response_format, max_tokens, stop)
-            response = async_iter_append_model_and_provider(response)
            return response
        else:
-            response = async_iter_response(response, stream, response_format, max_tokens, stop)
-            response = async_iter_append_model_and_provider(response)
            async for result in response:
                return result
-
class Chat:
    completions: Completions
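The new comment in the hunk above states why the `await` was dropped: calling an async-generator function only constructs the generator object; suspension happens during iteration, not at the call site. A standalone illustration of that rule (the names are illustrative, unrelated to the g4f code):

```python
import asyncio
from typing import AsyncIterator

async def numbers() -> AsyncIterator[int]:
    # An async-generator function: calling it returns an async
    # generator object immediately; no body code runs yet.
    for i in range(3):
        yield i

async def main():
    gen = numbers()       # no await here: this is not a coroutine
    async for n in gen:   # awaiting happens per item during iteration
        print(n)

asyncio.run(main())
```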
@@ -417,12 +401,6 @@ class Image:
    def __repr__(self):
        return f"Image(url={self.url}, b64_json={'<base64 data>' if self.b64_json else None})"
-    def to_dict(self):
-        return {
-            "url": self.url,
-            "b64_json": self.b64_json
-        }
-
class ImagesResponse:
    def __init__(self, data: list[Image]):
        self.data = data
@@ -530,3 +508,4 @@ class Images:
    async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs):
        # Existing implementation, adjust if you want to support b64_json here as well
        pass
+