summaryrefslogtreecommitdiffstats
path: root/g4f/Provider
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--g4f/Provider/Copilot.py9
-rw-r--r--g4f/Provider/needs_auth/OpenaiChat.py63
-rw-r--r--g4f/Provider/openai/har_file.py5
3 files changed, 40 insertions, 37 deletions
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index 6e64c714..c79f028d 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -93,13 +93,11 @@ class Copilot(AbstractProvider):
if return_conversation:
yield Conversation(conversation_id, session.cookies.jar, access_token)
prompt = format_prompt(messages)
- if debug.logging:
- print(f"Copilot: Created conversation: {conversation_id}")
+ debug.log(f"Copilot: Created conversation: {conversation_id}")
else:
conversation_id = conversation.conversation_id
prompt = messages[-1]["content"]
- if debug.logging:
- print(f"Copilot: Use conversation: {conversation_id}")
+ debug.log(f"Copilot: Use conversation: {conversation_id}")
images = []
if image is not None:
@@ -143,8 +141,7 @@ class Copilot(AbstractProvider):
if not has_nodriver:
raise MissingRequirementsError('Install "nodriver" package | pip install -U nodriver')
user_data_dir = user_config_dir("g4f-nodriver") if has_platformdirs else None
- if debug.logging:
- print(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
+ debug.log(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 22264dd9..15a87f38 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -282,8 +282,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Raises:
            RuntimeError: If there's an error in downloading the image, including issues with the HTTP request or response.
"""
- prompt = element["metadata"]["dalle"]["prompt"]
- file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ try:
+ prompt = element["metadata"]["dalle"]["prompt"]
+ file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ except Exception as e:
+ raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
try:
async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
cls._update_request_args(session)
@@ -380,9 +383,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
image_request = None
- if debug.logging:
- print("OpenaiChat: Upload image failed")
- print(f"{e.__class__.__name__}: {e}")
+ debug.log("OpenaiChat: Upload image failed")
+ debug.log(f"{e.__class__.__name__}: {e}")
model = cls.get_model(model)
if conversation is None:
conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
@@ -419,11 +421,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
user_agent=cls._headers["user-agent"],
proof_token=RequestConfig.proof_token
)
- if debug.logging:
- print(
- 'Arkose:', False if not need_arkose else RequestConfig.arkose_token[:12]+"...",
- 'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
- )
+ [debug.log(text) for text in (
+ f"Arkose: {'False' if not need_arkose else RequestConfig.arkose_token[:12]+'...'}",
+ f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
+ )]
ws = None
if need_arkose:
async with session.post(f"{cls.url}/backend-api/register-websocket", headers=cls._headers) as response:
@@ -444,6 +445,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
}
if conversation.conversation_id is not None:
data["conversation_id"] = conversation.conversation_id
+ debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
data["messages"] = cls.create_messages(messages, image_request)
@@ -468,8 +470,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._update_request_args(session)
if response.status == 403 and max_retries > 0:
max_retries -= 1
- if debug.logging:
- print(f"Retry: Error {response.status}: {await response.text()}")
+ debug.log(f"Retry: Error {response.status}: {await response.text()}")
await asyncio.sleep(5)
continue
await raise_for_status(response)
@@ -553,20 +554,27 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
elif isinstance(v, dict):
if fields.conversation_id is None:
fields.conversation_id = v.get("conversation_id")
- fields.message_id = v.get("message", {}).get("id")
- c = v.get("message", {}).get("content", {})
- if c.get("content_type") == "multimodal_text":
- generated_images = []
- for element in c.get("parts"):
- if element.get("content_type") == "image_asset_pointer":
- generated_images.append(
- cls.get_generated_image(session, cls._headers, element)
- )
- elif element.get("content_type") == "text":
- for part in element.get("parts", []):
- yield part
- for image_response in await asyncio.gather(*generated_images):
- yield image_response
+ debug.log(f"OpenaiChat: New conversation: {fields.conversation_id}")
+ m = v.get("message", {})
+ if m.get("author", {}).get("role") == "assistant":
+ fields.message_id = v.get("message", {}).get("id")
+ c = m.get("content", {})
+ if c.get("content_type") == "multimodal_text":
+ generated_images = []
+ for element in c.get("parts"):
+ if isinstance(element, str):
+ debug.log(f"No image or text: {line}")
+ elif element.get("content_type") == "image_asset_pointer":
+ generated_images.append(
+ cls.get_generated_image(session, cls._headers, element)
+ )
+ elif element.get("content_type") == "text":
+ for part in element.get("parts", []):
+ yield part
+ for image_response in await asyncio.gather(*generated_images):
+ yield image_response
+ else:
+ debug.log(f"OpenaiChat: {line}")
return
if "error" in line and line.get("error"):
raise RuntimeError(line.get("error"))
@@ -579,8 +587,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
user_data_dir = user_config_dir("g4f-nodriver")
else:
user_data_dir = None
- if debug.logging:
- print(f"Open nodriver with user_dir: {user_data_dir}")
+ debug.log(f"Open nodriver with user_dir: {user_data_dir}")
browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index c8d85a65..4569e1b7 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -71,8 +71,7 @@ def readHAR():
if "openai-sentinel-turnstile-token" in v_headers:
RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
except Exception as e:
- if debug.logging:
- print(f"Read proof token: {e}")
+ debug.log(f"Read proof token: {e}")
if arkose_url == v['request']['url']:
RequestConfig.arkose_request = parseHAREntry(v)
elif v['request']['url'] == start_url or v['request']['url'].startswith(conversation_url):
@@ -150,7 +149,7 @@ def getN() -> str:
return base64.b64encode(timestamp.encode()).decode()
async def get_request_config(proxy: str) -> RequestConfig:
- if RequestConfig.arkose_request is None or RequestConfig.access_token is None:
+ if RequestConfig.access_token is None:
readHAR()
if RequestConfig.arkose_request is not None:
RequestConfig.arkose_token = await sendRequest(genArkReq(RequestConfig.arkose_request), proxy)