From efacc15d92af4b380415b5d830fe829c57fbeec8 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Tue, 28 Jan 2025 22:33:49 +0100
Subject: Support options like max_depth in downloads.json
 Automatically add created buckets to messages
 Don't add images without a title to the scrape result

---
 g4f/gui/client/static/img/site.webmanifest |  4 +++-
 g4f/gui/client/static/js/chat.v1.js        | 30 ++++++++++++++++++++++--------
 g4f/tools/files.py                         | 15 +++++++++------
 g4f/tools/web_search.py                    |  2 +-
 4 files changed, 35 insertions(+), 16 deletions(-)

diff --git a/g4f/gui/client/static/img/site.webmanifest b/g4f/gui/client/static/img/site.webmanifest
index 29c64657..2367fad0 100644
--- a/g4f/gui/client/static/img/site.webmanifest
+++ b/g4f/gui/client/static/img/site.webmanifest
@@ -21,7 +21,9 @@
         "method": "GET",
         "enctype": "application/x-www-form-urlencoded",
         "params": {
-            "title": "prompt"
+            "title": "title",
+            "text": "prompt",
+            "url": "url"
         }
     }
 }
\ No newline at end of file
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index e9726555..15faf1c7 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -792,7 +792,7 @@ function is_stopped() {
 const ask_gpt = async (message_id, message_index = -1, regenerate = false, provider = null, model = null, action = null) => {
     if (!model && !provider) {
         model = get_selected_model()?.value || null;
-        provider = providerSelect.options[providerSelect.selectedIndex].value;
+        provider = providerSelect.options[providerSelect.selectedIndex]?.value;
     }
     let conversation = await get_conversation(window.conversation_id);
     if (!conversation) {
@@ -1815,8 +1815,14 @@ async function on_load() {
     let chat_url = new URL(window.location.href)
     let chat_params = new URLSearchParams(chat_url.search);
     if (chat_params.get("prompt")) {
-        messageInput.value = chat_params.get("prompt");
-        await handle_ask();
+        messageInput.value = (chat_params.get("title") || "")
+            + (chat_params.get("title") ? "\n\n\n" : "")
+            + chat_params.get("prompt")
+            + (chat_params.get("prompt") && chat_params.get("url") ? "\n\n\n" : "")
+            + (chat_params.get("url") || "");
+        messageInput.style.height = messageInput.scrollHeight + "px";
+        messageInput.focus();
+        //await handle_ask();
     } else {
         say_hello()
     }
@@ -1871,7 +1877,6 @@ async function on_api() {
             setTimeout(()=>prompt_lock=false, 3000);
             await handle_ask();
         } else {
-            messageInput.style.removeProperty("height");
             messageInput.style.height = messageInput.scrollHeight + "px";
         }
     });
@@ -1970,7 +1975,11 @@ async function on_api() {
             provider_options[provider.name] = option;
         }
     });
+    }
+    if (appStorage.getItem("provider")) {
         await load_provider_models(appStorage.getItem("provider"))
+    } else {
+        providerSelect.selectedIndex = 0;
     }
     for (let [name, [label, login_url, childs]] of Object.entries(login_urls)) {
         if (!login_url && !is_demo) {
@@ -1990,7 +1999,7 @@ async function on_api() {
     }
 
     register_settings_storage();
-    await load_settings_storage()
+    await load_settings_storage();
     Object.entries(provider_options).forEach(
         ([provider_name, option]) => load_provider_option(option.querySelector("input"), provider_name)
     );
@@ -2156,9 +2165,14 @@ async function upload_files(fileInput) {
             }
             appStorage.setItem(`bucket:${bucket_id}`, data.size);
             inputCount.innerText = "Files are loaded successfully";
-            messageInput.value += (messageInput.value ? "\n" : "") + JSON.stringify({bucket_id: bucket_id}) + "\n";
-            paperclip.classList.remove("blink");
-            fileInput.value = "";
+            if (!messageInput.value) {
+                messageInput.value = JSON.stringify({bucket_id: bucket_id});
+                handle_ask(false);
+            } else {
+                messageInput.value += (messageInput.value ? "\n" : "") + JSON.stringify({bucket_id: bucket_id}) + "\n";
+                paperclip.classList.remove("blink");
+                fileInput.value = "";
+            }
         }
     };
     eventSource.onerror = (event) => {
diff --git a/g4f/tools/files.py b/g4f/tools/files.py
index f23f931b..350bc45e 100644
--- a/g4f/tools/files.py
+++ b/g4f/tools/files.py
@@ -481,18 +481,21 @@ def get_downloads_urls(bucket_dir: Path, delete_files: bool = False) -> Iterator
     if isinstance(data, list):
         for item in data:
             if "url" in item:
-                yield item["url"]
+                yield {"urls": [item.pop("url")], **item}
+            elif "urls" in item:
+                yield item
 
 def read_and_download_urls(bucket_dir: Path, event_stream: bool = False) -> Iterator[str]:
     urls = get_downloads_urls(bucket_dir)
     if urls:
         count = 0
         with open(os.path.join(bucket_dir, FILE_LIST), 'a') as f:
-            for filename in to_sync_generator(download_urls(bucket_dir, urls)):
-                f.write(f"{filename}\n")
-                if event_stream:
-                    count += 1
-                    yield f'data: {json.dumps({"action": "download", "count": count})}\n\n'
+            for url in urls:
+                for filename in to_sync_generator(download_urls(bucket_dir, **url)):
+                    f.write(f"{filename}\n")
+                    if event_stream:
+                        count += 1
+                        yield f'data: {json.dumps({"action": "download", "count": count})}\n\n'
 
 async def async_read_and_download_urls(bucket_dir: Path, event_stream: bool = False) -> AsyncIterator[str]:
     urls = get_downloads_urls(bucket_dir)
diff --git a/g4f/tools/web_search.py b/g4f/tools/web_search.py
index 12a374ea..a96778bb 100644
--- a/g4f/tools/web_search.py
+++ b/g4f/tools/web_search.py
@@ -96,7 +96,7 @@ def scrape_text(html: str, max_words: int = None, add_source=True, count_images:
         if count_images > 0:
             image = paragraph.select_one(image_select)
             if image:
-                title = paragraph.get("title") or paragraph.text
+                title = str(paragraph.get("title", paragraph.text))
                 if title:
                     yield f"!{format_link(image['src'], title)}\n"
                 if max_words is not None:
-- 
cgit v1.2.3