author    H Lohaus <hlohaus@users.noreply.github.com>  2024-04-12 18:01:54 +0200
committer GitHub <noreply@github.com>                  2024-04-12 18:01:54 +0200
commit    0b712c2bde77fe7e3e347d71d5de3c9e62f26738
tree      bd93b8b64705c74be6feff9cc2f7cba7837c8da8 /g4f
parent    Merge pull request #1825 from hlohaus/ws
Diffstat (limited to 'g4f')
-rw-r--r--   g4f/Provider/Vercel.py                 2
-rw-r--r--   g4f/client/async_client.py             7
-rw-r--r--   g4f/gui/client/static/js/chat.v1.js   11
-rw-r--r--   g4f/requests/curl_cffi.py             10
4 files changed, 16 insertions, 14 deletions
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 4d38439f..bd918396 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -11,7 +11,7 @@ except ImportError:
from ..typing import Messages, CreateResult
from .base_provider import AbstractProvider
from ..requests import raise_for_status
-from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
+from ..errors import MissingRequirementsError

class Vercel(AbstractProvider):
    url = 'https://chat.vercel.ai'
diff --git a/g4f/client/async_client.py b/g4f/client/async_client.py
index bab2fde9..51a9cf83 100644
--- a/g4f/client/async_client.py
+++ b/g4f/client/async_client.py
@@ -16,6 +16,13 @@ from ..errors import NoImageResponseError
from ..image import ImageResponse as ImageProviderResponse
from ..providers.base_provider import AsyncGeneratorProvider

+try:
+    anext
+except NameError:
+    async def anext(iter):
+        async for chunk in iter:
+            return chunk
+
async def iter_response(
    response: AsyncIterator[str],
    stream: bool,
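
Note: the block added to async_client.py is a compatibility shim. The anext() builtin only exists on Python 3.10+, so older interpreters get a minimal coroutine that returns the first item of an async iterator (and, unlike the real builtin, returns None instead of raising StopAsyncIteration when the iterator is exhausted). A small self-contained sketch of how such a shim is used; the numbers() generator and main() driver are illustrative only, not part of the commit:

    import asyncio

    # Same pattern as the patch: define anext() only if the builtin is
    # missing (it was added in Python 3.10). This fallback returns the
    # first item, or None once the iterator is exhausted.
    try:
        anext
    except NameError:
        async def anext(aiter):
            async for item in aiter:
                return item

    async def numbers():
        # illustrative async generator, not from the commit
        for n in range(3):
            yield n

    async def main():
        gen = numbers()
        print(await anext(gen))  # 0
        print(await anext(gen))  # 1

    asyncio.run(main())
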
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index a875761c..46f7e808 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -1262,7 +1262,7 @@ if (SpeechRecognition) {

    function may_stop() {
        if (microLabel.classList.contains("recognition")) {
-            //recognition.stop();
+            recognition.stop();
        }
    }

@@ -1272,15 +1272,12 @@ if (SpeechRecognition) {
    recognition.onstart = function() {
        microLabel.classList.add("recognition");
        startValue = messageInput.value;
-        messageInput.placeholder = "";
        lastDebounceTranscript = "";
        timeoutHandle = window.setTimeout(may_stop, 10000);
    };
    recognition.onend = function() {
        microLabel.classList.remove("recognition");
-        messageInput.value = messageInput.placeholder;
-        messageInput.placeholder = "Ask a question";
-        //messageInput.focus();
+        messageInput.focus();
    };
    recognition.onresult = function(event) {
        if (!event.results) {
@@ -1298,9 +1295,9 @@ if (SpeechRecognition) {
            lastDebounceTranscript = transcript;
        }
        if (transcript) {
-            messageInput.placeholder = `${startValue ? startValue+"\n" : ""}${transcript.trim()}`;
+            messageInput.value = `${startValue ? startValue+"\n" : ""}${transcript.trim()}`;
            if (isFinal) {
-                startValue = messageInput.placeholder;
+                startValue = messageInput.value;
            }
            messageInput.style.height = messageInput.scrollHeight + "px";
            messageInput.scrollTop = messageInput.scrollHeight;
diff --git a/g4f/requests/curl_cffi.py b/g4f/requests/curl_cffi.py
index 000448fe..e955d640 100644
--- a/g4f/requests/curl_cffi.py
+++ b/g4f/requests/curl_cffi.py
@@ -34,15 +34,13 @@ class StreamResponse:
"""Asynchronously parse the JSON response content."""
return json.loads(await self.inner.acontent(), **kwargs)
- async def iter_lines(self) -> AsyncGenerator[bytes, None]:
+ def iter_lines(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the lines of the response."""
- async for line in self.inner.aiter_lines():
- yield line
+ return self.inner.aiter_lines()
- async def iter_content(self) -> AsyncGenerator[bytes, None]:
+ def iter_content(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the response content."""
- async for chunk in self.inner.aiter_content():
- yield chunk
+ return self.inner.aiter_content()
async def __aenter__(self):
"""Asynchronously enter the runtime context for the response object."""