author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-11-19 15:26:03 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-11-19 15:26:03 +0100
commit     8f3fbee0d8a4ca69af72dae989ae8954181d0108 (patch)
tree       0481d391ff397c7d4be93a7224c3717e03ae2b3f /g4f
parent     Fix generate messages on error in gui (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/Copilot.py                  9
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py   63
-rw-r--r--  g4f/Provider/openai/har_file.py          5
-rw-r--r--  g4f/client/service.py                    9
-rw-r--r--  g4f/debug.py                             7
-rw-r--r--  g4f/gui/client/index.html                5
-rw-r--r--  g4f/gui/client/static/css/style.css     11
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js     19
-rw-r--r--  g4f/gui/server/api.py                   12
-rw-r--r--  g4f/requests/raise_for_status.py         2
10 files changed, 96 insertions, 46 deletions
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index 6e64c714..c79f028d 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -93,13 +93,11 @@ class Copilot(AbstractProvider):
if return_conversation:
yield Conversation(conversation_id, session.cookies.jar, access_token)
prompt = format_prompt(messages)
- if debug.logging:
- print(f"Copilot: Created conversation: {conversation_id}")
+ debug.log(f"Copilot: Created conversation: {conversation_id}")
else:
conversation_id = conversation.conversation_id
prompt = messages[-1]["content"]
- if debug.logging:
- print(f"Copilot: Use conversation: {conversation_id}")
+ debug.log(f"Copilot: Use conversation: {conversation_id}")
images = []
if image is not None:
@@ -143,8 +141,7 @@ class Copilot(AbstractProvider):
if not has_nodriver:
raise MissingRequirementsError('Install "nodriver" package | pip install -U nodriver')
user_data_dir = user_config_dir("g4f-nodriver") if has_platformdirs else None
- if debug.logging:
- print(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
+ debug.log(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 22264dd9..15a87f38 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -282,8 +282,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Raises:
RuntimeError: If there's an error in downloading the image, including issues with the HTTP request or response.
"""
- prompt = element["metadata"]["dalle"]["prompt"]
- file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ try:
+ prompt = element["metadata"]["dalle"]["prompt"]
+ file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ except Exception as e:
+ raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
try:
async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
cls._update_request_args(session)
@@ -380,9 +383,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
image_request = None
- if debug.logging:
- print("OpenaiChat: Upload image failed")
- print(f"{e.__class__.__name__}: {e}")
+ debug.log("OpenaiChat: Upload image failed")
+ debug.log(f"{e.__class__.__name__}: {e}")
model = cls.get_model(model)
if conversation is None:
conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
@@ -419,11 +421,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
user_agent=cls._headers["user-agent"],
proof_token=RequestConfig.proof_token
)
- if debug.logging:
- print(
- 'Arkose:', False if not need_arkose else RequestConfig.arkose_token[:12]+"...",
- 'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
- )
+ debug.log(f"Arkose: {'False' if not need_arkose else RequestConfig.arkose_token[:12]+'...'}")
+ debug.log(f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}")
ws = None
if need_arkose:
async with session.post(f"{cls.url}/backend-api/register-websocket", headers=cls._headers) as response:
@@ -444,6 +445,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
}
if conversation.conversation_id is not None:
data["conversation_id"] = conversation.conversation_id
+ debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
data["messages"] = cls.create_messages(messages, image_request)
@@ -468,8 +470,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._update_request_args(session)
if response.status == 403 and max_retries > 0:
max_retries -= 1
- if debug.logging:
- print(f"Retry: Error {response.status}: {await response.text()}")
+ debug.log(f"Retry: Error {response.status}: {await response.text()}")
await asyncio.sleep(5)
continue
await raise_for_status(response)
@@ -553,20 +554,27 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
elif isinstance(v, dict):
if fields.conversation_id is None:
fields.conversation_id = v.get("conversation_id")
- fields.message_id = v.get("message", {}).get("id")
- c = v.get("message", {}).get("content", {})
- if c.get("content_type") == "multimodal_text":
- generated_images = []
- for element in c.get("parts"):
- if element.get("content_type") == "image_asset_pointer":
- generated_images.append(
- cls.get_generated_image(session, cls._headers, element)
- )
- elif element.get("content_type") == "text":
- for part in element.get("parts", []):
- yield part
- for image_response in await asyncio.gather(*generated_images):
- yield image_response
+ debug.log(f"OpenaiChat: New conversation: {fields.conversation_id}")
+ m = v.get("message", {})
+ if m.get("author", {}).get("role") == "assistant":
+ fields.message_id = m.get("id")
+ c = m.get("content", {})
+ if c.get("content_type") == "multimodal_text":
+ generated_images = []
+ for element in c.get("parts"):
+ if isinstance(element, str):
+ debug.log(f"No image or text: {line}")
+ elif element.get("content_type") == "image_asset_pointer":
+ generated_images.append(
+ cls.get_generated_image(session, cls._headers, element)
+ )
+ elif element.get("content_type") == "text":
+ for part in element.get("parts", []):
+ yield part
+ for image_response in await asyncio.gather(*generated_images):
+ yield image_response
+ else:
+ debug.log(f"OpenaiChat: {line}")
return
if "error" in line and line.get("error"):
raise RuntimeError(line.get("error"))
@@ -579,8 +587,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
user_data_dir = user_config_dir("g4f-nodriver")
else:
user_data_dir = None
- if debug.logging:
- print(f"Open nodriver with user_dir: {user_data_dir}")
+ debug.log(f"Open nodriver with user_dir: {user_data_dir}")
browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
index c8d85a65..4569e1b7 100644
--- a/g4f/Provider/openai/har_file.py
+++ b/g4f/Provider/openai/har_file.py
@@ -71,8 +71,7 @@ def readHAR():
if "openai-sentinel-turnstile-token" in v_headers:
RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
except Exception as e:
- if debug.logging:
- print(f"Read proof token: {e}")
+ debug.log(f"Read proof token: {e}")
if arkose_url == v['request']['url']:
RequestConfig.arkose_request = parseHAREntry(v)
elif v['request']['url'] == start_url or v['request']['url'].startswith(conversation_url):
@@ -150,7 +149,7 @@ def getN() -> str:
return base64.b64encode(timestamp.encode()).decode()
async def get_request_config(proxy: str) -> RequestConfig:
- if RequestConfig.arkose_request is None or RequestConfig.access_token is None:
+ if RequestConfig.access_token is None:
readHAR()
if RequestConfig.arkose_request is not None:
RequestConfig.arkose_token = await sendRequest(genArkReq(RequestConfig.arkose_request), proxy)
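The har_file.py change narrows when the HAR capture is re-parsed: a missing Arkose request alone no longer triggers readHAR(); only a missing access token does. A compressed sketch of the resulting control flow, using only the RequestConfig fields and helpers visible in the hunk (the rest of the function is elided here, as it is in the diff):

async def get_request_config(proxy: str) -> RequestConfig:
    # Re-read the recorded browser traffic only when no access token is cached;
    # previously a missing arkose_request forced a re-read as well.
    if RequestConfig.access_token is None:
        readHAR()
    # When a recorded Arkose request exists, refresh the token on every call.
    if RequestConfig.arkose_request is not None:
        RequestConfig.arkose_token = await sendRequest(genArkReq(RequestConfig.arkose_request), proxy)
    ...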
diff --git a/g4f/client/service.py b/g4f/client/service.py
index aa209b22..44533ece 100644
--- a/g4f/client/service.py
+++ b/g4f/client/service.py
@@ -83,11 +83,10 @@ def get_model_and_provider(model : Union[Model, str],
if not ignore_stream and not provider.supports_stream and stream:
raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
- if debug.logging:
- if model:
- print(f'Using {provider.__name__} provider and {model} model')
- else:
- print(f'Using {provider.__name__} provider')
+ if model:
+ debug.log(f'Using {provider.__name__} provider and {model} model')
+ else:
+ debug.log(f'Using {provider.__name__} provider')
debug.last_provider = provider
debug.last_model = model
diff --git a/g4f/debug.py b/g4f/debug.py
index 3f08e9e8..f8085add 100644
--- a/g4f/debug.py
+++ b/g4f/debug.py
@@ -4,4 +4,9 @@ logging: bool = False
version_check: bool = True
last_provider: ProviderType = None
last_model: str = None
-version: str = None
\ No newline at end of file
+version: str = None
+log_handler: callable = print
+
+def log(text):
+ if logging:
+ log_handler(text)
\ No newline at end of file
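The rewritten debug module is the heart of this commit: a module-level log() that honors the existing logging flag and forwards to a swappable log_handler (print by default), so the per-call-site `if debug.logging: print(...)` guards above collapse into one-liners. A minimal usage sketch, assuming only the fields added here; the collect() handler is illustrative:

from g4f import debug

collected = []

def collect(text: str):
    # Capture log lines instead of printing them.
    collected.append(text)

debug.logging = True            # log() is a no-op while logging is False
debug.log_handler = collect     # default handler is print
debug.log("Copilot: Created conversation: abc123")
assert collected == ["Copilot: Created conversation: abc123"]

Because every call site now routes through debug.log(), a consumer such as the GUI can redirect all provider logging by reassigning a single attribute instead of intercepting print.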
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index b256b0be..3a2197de 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -170,6 +170,10 @@
<i class="fa-solid fa-download"></i>
<a href="" onclick="return false;">Export Conversations</a>
</button>
+ <button id="showLog">
+ <i class="fa-solid fa-terminal"></i>
+ <a href="" onclick="return false;">Show log</a>
+ </button>
</div>
</div>
<div class="conversation">
@@ -257,6 +261,7 @@
</div>
</div>
</div>
+ <div class="log hidden"></div>
</div>
<div class="mobile-sidebar">
<i class="fa-solid fa-bars"></i>
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 3ee033ea..8364216a 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -1051,13 +1051,22 @@ a:-webkit-any-link {
padding: var(--inner-gap) var(--inner-gap) var(--inner-gap) 0;
}
-.settings, .images {
+.settings, .log {
width: 100%;
display: flex;
flex-direction: column;
overflow: auto;
}
+.log {
+ white-space: pre-wrap;
+ font-family: Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace;
+}
+
+.log.hidden {
+ display: none;
+}
+
.settings .paper {
flex-direction: column;
min-width: 400px;
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 9f6103fb..51bf8b81 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -19,6 +19,7 @@ const systemPrompt = document.getElementById("systemPrompt");
const settings = document.querySelector(".settings");
const chat = document.querySelector(".conversation");
const album = document.querySelector(".images");
+const log_storage = document.querySelector(".log");
const optionElements = document.querySelectorAll(".settings input, .settings textarea, #model, #model2, #provider")
@@ -400,6 +401,9 @@ async function add_message_chunk(message, message_index) {
error_storage[message_index] = message.error
console.error(message.error);
content_map.inner.innerHTML += `<p><strong>An error occurred:</strong> ${message.error}</p>`;
+ let p = document.createElement("p");
+ p.innerText = message.error;
+ log_storage.appendChild(p);
} else if (message.type == "preview") {
content_map.inner.innerHTML = markdown_render(message.preview);
} else if (message.type == "content") {
@@ -419,6 +423,10 @@ async function add_message_chunk(message, message_index) {
content_map.inner.innerHTML = html;
content_map.count.innerText = count_words_and_tokens(message_storage[message_index], provider_storage[message_index]?.model);
highlight(content_map.inner);
+ } else if (message.type == "log") {
+ let p = document.createElement("p");
+ p.innerText = message.log;
+ log_storage.appendChild(p);
}
window.scrollTo(0, 0);
if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
@@ -517,7 +525,7 @@ const ask_gpt = async (message_index = -1, message_id) => {
}
}
delete controller_storage[message_index];
- if (!error && message_storage[message_index]) {
+ if (!error_storage[message_index] && message_storage[message_index]) {
const message_provider = message_index in provider_storage ? provider_storage[message_index] : null;
await add_message(window.conversation_id, "assistant", message_storage[message_index], message_provider);
await safe_load_conversation(window.conversation_id);
@@ -635,6 +643,7 @@ const set_conversation = async (conversation_id) => {
await load_conversation(conversation_id);
load_conversations();
hide_sidebar();
+ log_storage.classList.add("hidden");
};
const new_conversation = async () => {
@@ -647,6 +656,7 @@ const new_conversation = async () => {
}
load_conversations();
hide_sidebar();
+ log_storage.classList.add("hidden");
say_hello();
};
@@ -945,6 +955,7 @@ function open_settings() {
settings.classList.add("hidden");
chat.classList.remove("hidden");
}
+ log_storage.classList.add("hidden");
}
function open_album() {
@@ -1476,3 +1487,9 @@ if (SpeechRecognition) {
}
});
}
+
+document.getElementById("showLog").addEventListener("click", ()=> {
+ log_storage.classList.remove("hidden");
+ settings.classList.add("hidden");
+ log_storage.scrollTop = log_storage.scrollHeight;
+}); \ No newline at end of file
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index ed8454c3..a6c4bef4 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -18,6 +18,7 @@ from g4f.requests.aiohttp import get_connector
from g4f.Provider import ProviderType, __providers__, __map__
from g4f.providers.base_provider import ProviderModelMixin, FinishReason
from g4f.providers.conversation import BaseConversation
+from g4f import debug
logger = logging.getLogger(__name__)
@@ -140,6 +141,13 @@ class Api:
}
def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str) -> Iterator:
+ logs = []
+ if debug.logging:
+ print_callback = debug.log_handler
+ def log_handler(text: str):
+ logs.append(text)
+ print_callback(text)
+ debug.log_handler = log_handler
try:
result = ChatCompletion.create(**kwargs)
first = True
@@ -168,6 +176,10 @@ class Api:
yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
elif not isinstance(chunk, FinishReason):
yield self._format_json("content", str(chunk))
+ if logs:
+ for log in logs:
+ yield self._format_json("log", str(log))
+ logs = []
except Exception as e:
logger.exception(e)
yield self._format_json('error', get_error_message(e))
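The api.py hunks build on the new hook: for the duration of one response stream, debug.log_handler is swapped for a closure that forwards to the previous handler and also buffers each line, and the buffer is flushed into the stream as "log" chunks for the GUI's new log panel. A self-contained sketch of the pattern, assuming the g4f.debug module above; stream_with_logs and its tuple protocol are illustrative, not part of the g4f API, and unlike the hunk this sketch also restores the handler when the stream ends:

from g4f import debug

def stream_with_logs(chunks):
    logs = []
    previous = debug.log_handler

    def capture(text: str):
        logs.append(text)   # buffer for the client...
        previous(text)      # ...while still forwarding to the old handler

    if debug.logging:
        debug.log_handler = capture
    try:
        for chunk in chunks:
            yield ("content", chunk)
            for entry in logs:   # interleave logs emitted while producing this chunk
                yield ("log", entry)
            logs.clear()
    finally:
        debug.log_handler = previous   # undo the swap; log_handler is global state

Restoring the handler matters because debug.log_handler is module-level state: as written in the hunk, each request wraps the already-wrapped handler left behind by the previous one.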
diff --git a/g4f/requests/raise_for_status.py b/g4f/requests/raise_for_status.py
index 1699d9a4..fe262f34 100644
--- a/g4f/requests/raise_for_status.py
+++ b/g4f/requests/raise_for_status.py
@@ -11,7 +11,7 @@ class CloudflareError(ResponseStatusError):
...
def is_cloudflare(text: str) -> bool:
- if "<title>Attention Required! | Cloudflare</title>" in text:
+ if "<title>Attention Required! | Cloudflare</title>" in text or 'id="cf-cloudflare-status"' in text:
return True
return '<div id="cf-please-wait">' in text or "<title>Just a moment...</title>" in text