author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-01-29 18:14:46 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-01-29 18:14:46 +0100
commit     a28bab938704a15c825c1b45a8983c72e8c90ace (patch)
tree       0a3ee26bb1f9e7bf7c1a5a739ed59015248acbfb /g4f/Provider
parent     Merge pull request #1523 from u66u/which-webdriver (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Bing.py                    4
-rw-r--r--  g4f/Provider/bing/create_images.py      9
-rw-r--r--  g4f/Provider/bing/upload_image.py       2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  31
4 files changed, 21 insertions, 25 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 32879fa6..40a42bf5 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -288,8 +288,6 @@ async def stream_generate(
) as session:
conversation = await create_conversation(session)
image_request = await upload_image(session, image, tone) if image else None
- if image_request:
- yield image_request
try:
async with session.ws_connect(
@@ -327,7 +325,7 @@ async def stream_generate(
elif message.get('contentType') == "IMAGE":
prompt = message.get('text')
try:
- image_response = ImageResponse(await create_images(session, prompt), prompt)
+ image_response = ImageResponse(await create_images(session, prompt), prompt, {"preview": "{image}?w=200&h=200"})
except:
response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
final = True
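
Note on the Bing.py change above: ImageResponse now receives a third options dict whose "preview" value is a URL template in which "{image}" stands for each full-size image URL. A minimal sketch of how such a template would be expanded; the helper name is hypothetical and the real substitution lives in g4f's image handling, not in this diff:

# Hypothetical helper, for illustration only.
def expand_previews(images: list[str], preview_template: str) -> list[str]:
    # "{image}" is replaced with each full-size URL, yielding thumbnail URLs
    return [preview_template.replace("{image}", image) for image in images]

expand_previews(["https://example.invalid/img1.jpg"], "{image}?w=200&h=200")
# -> ["https://example.invalid/img1.jpg?w=200&h=200"]
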
diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py
index a3fcd91b..e1031e61 100644
--- a/g4f/Provider/bing/create_images.py
+++ b/g4f/Provider/bing/create_images.py
@@ -187,11 +187,11 @@ def get_cookies_from_browser(proxy: str = None) -> dict[str, str]:
class CreateImagesBing:
"""A class for creating images using Bing."""
-
+
def __init__(self, cookies: dict[str, str] = {}, proxy: str = None) -> None:
self.cookies = cookies
self.proxy = proxy
-
+
def create_completion(self, prompt: str) -> Generator[ImageResponse, None, None]:
"""
        Generator for creating image completions based on a prompt.
@@ -229,9 +229,7 @@ class CreateImagesBing:
proxy = os.environ.get("G4F_PROXY")
async with create_session(cookies, proxy) as session:
images = await create_images(session, prompt, self.proxy)
- return ImageResponse(images, prompt)
-
-service = CreateImagesBing()
+ return ImageResponse(images, prompt, {"preview": "{image}?w=200&h=200"})
def patch_provider(provider: ProviderType) -> CreateImagesProvider:
"""
@@ -243,6 +241,7 @@ def patch_provider(provider: ProviderType) -> CreateImagesProvider:
Returns:
CreateImagesProvider: The patched provider with image creation capabilities.
"""
+ service = CreateImagesBing()
return CreateImagesProvider(
provider,
service.create_completion,
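
The hunks above drop the module-level `service = CreateImagesBing()` singleton and build the service inside `patch_provider`, so each patched provider gets its own instance rather than one shared object created at import time. A rough usage sketch; import paths follow the file locations in this diff and `Bing` stands for any ProviderType:

# Rough usage sketch, not part of this commit.
from g4f.Provider import Bing
from g4f.Provider.bing.create_images import patch_provider

image_capable_bing = patch_provider(Bing)   # builds a fresh CreateImagesBing internally
another_instance = patch_provider(Bing)     # independent service, no shared import-time state
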
diff --git a/g4f/Provider/bing/upload_image.py b/g4f/Provider/bing/upload_image.py
index f9e11561..6d51aba0 100644
--- a/g4f/Provider/bing/upload_image.py
+++ b/g4f/Provider/bing/upload_image.py
@@ -149,4 +149,4 @@ def parse_image_response(response: dict) -> ImageRequest:
if IMAGE_CONFIG["enableFaceBlurDebug"] else
f"https://www.bing.com/images/blob?bcid={result['bcid']}"
)
- return ImageRequest(result["imageUrl"], "", result) \ No newline at end of file
+ return ImageRequest(result) \ No newline at end of file
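
parse_image_response now hands the whole parsed result dict to ImageRequest instead of positional (url, name, data) arguments, and callers read fields back with .get(), as the OpenaiChat hunks below show. A minimal sketch of the assumed ImageRequest shape (the real definition lives elsewhere in g4f):

# Sketch only: assumed shape of ImageRequest after this change.
class ImageRequest:
    def __init__(self, options: dict = {}):
        self.options = options              # e.g. {"imageUrl": ..., "bcid": ...}

    def get(self, key: str):
        return self.options.get(key)        # used as image_request.get("file_id"), etc.
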
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 60a101d7..253d4f77 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -150,8 +150,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
headers=headers
) as response:
response.raise_for_status()
- download_url = (await response.json())["download_url"]
- return ImageRequest(download_url, image_data["file_name"], image_data)
+ image_data["download_url"] = (await response.json())["download_url"]
+ return ImageRequest(image_data)
@classmethod
async def get_default_model(cls, session: StreamSession, headers: dict):
@@ -175,7 +175,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return cls.default_model
@classmethod
- def create_messages(cls, prompt: str, image_response: ImageRequest = None):
+ def create_messages(cls, prompt: str, image_request: ImageRequest = None):
"""
Create a list of messages for the user input
@@ -187,7 +187,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
A list of messages with the user input and the image, if any
"""
# Check if there is an image response
- if not image_response:
+ if not image_request:
# Create a content object with the text type and the prompt
content = {"content_type": "text", "parts": [prompt]}
else:
@@ -195,10 +195,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
content = {
"content_type": "multimodal_text",
"parts": [{
- "asset_pointer": f"file-service://{image_response.get('file_id')}",
- "height": image_response.get("height"),
- "size_bytes": image_response.get("file_size"),
- "width": image_response.get("width"),
+ "asset_pointer": f"file-service://{image_request.get('file_id')}",
+ "height": image_request.get("height"),
+ "size_bytes": image_request.get("file_size"),
+ "width": image_request.get("width"),
}, prompt]
}
# Create a message object with the user role and the content
@@ -208,16 +208,16 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"content": content,
}]
# Check if there is an image response
- if image_response:
+ if image_request:
# Add the metadata object with the attachments
messages[0]["metadata"] = {
"attachments": [{
- "height": image_response.get("height"),
- "id": image_response.get("file_id"),
- "mimeType": image_response.get("mime_type"),
- "name": image_response.get("file_name"),
- "size": image_response.get("file_size"),
- "width": image_response.get("width"),
+ "height": image_request.get("height"),
+ "id": image_request.get("file_id"),
+ "mimeType": image_request.get("mime_type"),
+ "name": image_request.get("file_name"),
+ "size": image_request.get("file_size"),
+ "width": image_request.get("width"),
}]
}
return messages
@@ -352,7 +352,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image_response = None
if image:
image_response = await cls.upload_image(session, headers, image)
- yield image_response
except Exception as e:
yield e
end_turn = EndTurn()
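
Read together, the OpenaiChat hunks stop yielding the upload result to the caller and only thread it into message construction; the rename from image_response to image_request in create_messages reflects the ImageRequest type rather than a behavior change. A rough flow sketch, where cls, session, headers, image and prompt are assumed to come from the surrounding (unchanged) OpenaiChat code:

# Hypothetical flow sketch, not the provider's verbatim code.
async def build_messages_with_optional_image(cls, session, headers, image, prompt):
    image_response = None
    if image:
        # upload_image now stores "download_url" inside image_data and returns
        # ImageRequest(image_data); the result is no longer yielded to the consumer
        image_response = await cls.upload_image(session, headers, image)
    # with an image attached, the returned messages carry the file-service asset
    # pointer in content "parts" and the metadata "attachments" entry
    return cls.create_messages(prompt, image_response)
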