author     H Lohaus <hlohaus@users.noreply.github.com>  2024-04-12 18:01:54 +0200
committer  GitHub <noreply@github.com>                  2024-04-12 18:01:54 +0200
commit     0b712c2bde77fe7e3e347d71d5de3c9e62f26738 (patch)
tree       bd93b8b64705c74be6feff9cc2f7cba7837c8da8
parent     Merge pull request #1825 from hlohaus/ws (diff)
-rw-r--r--  etc/unittest/__main__.py              1
-rw-r--r--  etc/unittest/async_client.py         56
-rw-r--r--  etc/unittest/integration.py          10
-rw-r--r--  g4f/Provider/Vercel.py                2
-rw-r--r--  g4f/client/async_client.py            7
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js  11
-rw-r--r--  g4f/requests/curl_cffi.py            10
7 files changed, 82 insertions, 15 deletions
diff --git a/etc/unittest/__main__.py b/etc/unittest/__main__.py
index 3a459dba..351c2bb3 100644
--- a/etc/unittest/__main__.py
+++ b/etc/unittest/__main__.py
@@ -4,6 +4,7 @@ from .backend import *
 from .main import *
 from .model import *
 from .client import *
+from .async_client import *
 from .include import *
 from .integration import *
 
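Note: the star-import above is all it takes to register the new suite; the package's __main__ aggregates every test module, and assuming it finishes with a unittest.main() call (not shown in this hunk), the full suite, async tests included, runs from the repository root with:

    python -m etc.unittest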
diff --git a/etc/unittest/async_client.py b/etc/unittest/async_client.py
new file mode 100644
index 00000000..a49b90ed
--- /dev/null
+++ b/etc/unittest/async_client.py
@@ -0,0 +1,56 @@
+import unittest
+
+from g4f.client import AsyncClient, ChatCompletion, ChatCompletionChunk
+from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock
+
+DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
+
+class AsyncTestPassModel(unittest.IsolatedAsyncioTestCase):
+
+    async def test_response(self):
+        client = AsyncClient(provider=AsyncGeneratorProviderMock)
+        response = await client.chat.completions.create(DEFAULT_MESSAGES, "")
+        self.assertIsInstance(response, ChatCompletion)
+        self.assertEqual("Mock", response.choices[0].message.content)
+
+    async def test_pass_model(self):
+        client = AsyncClient(provider=ModelProviderMock)
+        response = await client.chat.completions.create(DEFAULT_MESSAGES, "Hello")
+        self.assertIsInstance(response, ChatCompletion)
+        self.assertEqual("Hello", response.choices[0].message.content)
+
+    async def test_max_tokens(self):
+        client = AsyncClient(provider=YieldProviderMock)
+        messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
+        response = await client.chat.completions.create(messages, "Hello", max_tokens=1)
+        self.assertIsInstance(response, ChatCompletion)
+        self.assertEqual("How ", response.choices[0].message.content)
+        response = await client.chat.completions.create(messages, "Hello", max_tokens=2)
+        self.assertIsInstance(response, ChatCompletion)
+        self.assertEqual("How are ", response.choices[0].message.content)
+
+    async def test_max_stream(self):
+        client = AsyncClient(provider=YieldProviderMock)
+        messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
+        response = client.chat.completions.create(messages, "Hello", stream=True)
+        async for chunk in response:
+            self.assertIsInstance(chunk, ChatCompletionChunk)
+            if chunk.choices[0].delta.content is not None:
+                self.assertIsInstance(chunk.choices[0].delta.content, str)
+        messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]]
+        response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
+        response = [chunk async for chunk in response]
+        self.assertEqual(len(response), 3)
+        for chunk in response:
+            if chunk.choices[0].delta.content is not None:
+                self.assertEqual(chunk.choices[0].delta.content, "You ")
+
+    async def test_stop(self):
+        client = AsyncClient(provider=YieldProviderMock)
+        messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
+        response = await client.chat.completions.create(messages, "Hello", stop=["and"])
+        self.assertIsInstance(response, ChatCompletion)
+        self.assertEqual("How are you?", response.choices[0].message.content)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
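Note: the new suite doubles as usage documentation for AsyncClient. A minimal standalone sketch of the same calls; the import path etc.unittest.mocks is an assumption inferred from the relative import in the test module:

    import asyncio

    from g4f.client import AsyncClient
    from etc.unittest.mocks import YieldProviderMock  # assumed importable from the repo root

    async def main():
        client = AsyncClient(provider=YieldProviderMock)
        messages = [{"role": "user", "content": word} for word in ["How ", "are ", "you", "?"]]

        # Non-streaming: await the call; the mock echoes the joined message contents.
        response = await client.chat.completions.create(messages, "")
        print(response.choices[0].message.content)  # "How are you?"

        # Streaming: the call returns an async iterator of ChatCompletionChunk objects.
        async for chunk in client.chat.completions.create(messages, "", stream=True):
            if chunk.choices[0].delta.content is not None:
                print(chunk.choices[0].delta.content, end="")

    asyncio.run(main())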
diff --git a/etc/unittest/integration.py b/etc/unittest/integration.py
index f69d4770..d87a7f6b 100644
--- a/etc/unittest/integration.py
+++ b/etc/unittest/integration.py
@@ -8,7 +8,7 @@ except ImportError:
     has_nest_asyncio = False
 
 from g4f.client import Client, ChatCompletion
-from g4f.Provider import Bing, OpenaiChat
+from g4f.Provider import Bing, OpenaiChat, DuckDuckGo
 
 DEFAULT_MESSAGES = [{"role": "system", "content": 'Response in json, Example: {"success: true"}'},
                     {"role": "user", "content": "Say success true in json"}]
@@ -19,11 +19,19 @@ class TestProviderIntegration(unittest.TestCase):
             self.skipTest("nest_asyncio is not installed")
 
     def test_bing(self):
+        self.skipTest("Not stable")
         client = Client(provider=Bing)
         response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})
         self.assertIsInstance(response, ChatCompletion)
         self.assertIn("success", json.loads(response.choices[0].message.content))
 
+    def test_duckduckgo(self):
+        self.skipTest("Not working")
+        client = Client(provider=DuckDuckGo)
+        response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})
+        self.assertIsInstance(response, ChatCompletion)
+        self.assertIn("success", json.loads(response.choices[0].message.content))
+
     def test_openai(self):
         client = Client(provider=OpenaiChat)
         response = client.chat.completions.create(DEFAULT_MESSAGES, "", response_format={"type": "json_object"})
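Note: all three integration tests share one shape: request JSON output, parse it, assert on a key. A hypothetical helper (not part of the patch) that captures the repeated body:

    import json

    from g4f.client import Client, ChatCompletion

    def assert_json_roundtrip(testcase, provider, messages):
        # Hypothetical refactor of the body each integration test repeats verbatim.
        client = Client(provider=provider)
        response = client.chat.completions.create(
            messages, "", response_format={"type": "json_object"})
        testcase.assertIsInstance(response, ChatCompletion)
        testcase.assertIn("success", json.loads(response.choices[0].message.content))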
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 4d38439f..bd918396 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -11,7 +11,7 @@ except ImportError:
 from ..typing import Messages, CreateResult
 from .base_provider import AbstractProvider
 from ..requests import raise_for_status
-from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
+from ..errors import MissingRequirementsError
 
 class Vercel(AbstractProvider):
     url = 'https://chat.vercel.ai'
diff --git a/g4f/client/async_client.py b/g4f/client/async_client.py
index bab2fde9..51a9cf83 100644
--- a/g4f/client/async_client.py
+++ b/g4f/client/async_client.py
@@ -16,6 +16,13 @@ from ..errors import NoImageResponseError
 from ..image import ImageResponse as ImageProviderResponse
 from ..providers.base_provider import AsyncGeneratorProvider
 
+try:
+    anext
+except NameError:
+    async def anext(iter):
+        async for chunk in iter:
+            return chunk
+
 async def iter_response(
     response: AsyncIterator[str],
     stream: bool,
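Note: anext() only became a builtin in Python 3.10, so the NameError probe above backfills it for older interpreters. One difference worth knowing: the fallback returns None from an exhausted iterator, whereas the real builtin raises StopAsyncIteration. A self-contained sketch of the polyfill in action:

    import asyncio

    try:  # builtin on Python >= 3.10
        anext
    except NameError:
        async def anext(iter):
            async for chunk in iter:
                return chunk

    async def chunks():
        yield "first"
        yield "second"

    async def main():
        it = chunks()
        print(await anext(it))  # "first"
        print(await anext(it))  # "second"

    asyncio.run(main())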
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index a875761c..46f7e808 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -1262,7 +1262,7 @@ if (SpeechRecognition) {
 
     function may_stop() {
         if (microLabel.classList.contains("recognition")) {
-            //recognition.stop();
+            recognition.stop();
         }
     }
 
@@ -1272,15 +1272,12 @@ if (SpeechRecognition) {
     recognition.onstart = function() {
         microLabel.classList.add("recognition");
         startValue = messageInput.value;
-        messageInput.placeholder = "";
         lastDebounceTranscript = "";
         timeoutHandle = window.setTimeout(may_stop, 10000);
     };
     recognition.onend = function() {
         microLabel.classList.remove("recognition");
-        messageInput.value = messageInput.placeholder;
-        messageInput.placeholder = "Ask a question";
-        //messageInput.focus();
+        messageInput.focus();
     };
     recognition.onresult = function(event) {
         if (!event.results) {
@@ -1298,9 +1295,9 @@ if (SpeechRecognition) {
             lastDebounceTranscript = transcript;
         }
         if (transcript) {
-            messageInput.placeholder = `${startValue ? startValue+"\n" : ""}${transcript.trim()}`;
+            messageInput.value = `${startValue ? startValue+"\n" : ""}${transcript.trim()}`;
             if (isFinal) {
-                startValue = messageInput.placeholder;
+                startValue = messageInput.value;
             }
             messageInput.style.height = messageInput.scrollHeight + "px";
             messageInput.scrollTop = messageInput.scrollHeight;
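Note on the g4f/requests/curl_cffi.py diff below: it swaps wrapper async generators for plain methods that hand back the inner iterators directly. Callers still write async for line in response.iter_lines(); only the extra per-item hop disappears. A self-contained before/after sketch, where source() is a stand-in for self.inner.aiter_lines():

    import asyncio
    from typing import AsyncGenerator

    async def source() -> AsyncGenerator[bytes, None]:
        for line in (b"one", b"two"):
            yield line  # stand-in for curl_cffi's aiter_lines()

    # Before: an async generator that re-yields every item from the inner iterator.
    async def iter_lines_wrapped() -> AsyncGenerator[bytes, None]:
        async for line in source():
            yield line

    # After: return the underlying iterator; consumers iterate it unchanged.
    def iter_lines_direct() -> AsyncGenerator[bytes, None]:
        return source()

    async def main():
        assert [l async for l in iter_lines_wrapped()] == [l async for l in iter_lines_direct()]
        print("identical output")

    asyncio.run(main())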
diff --git a/g4f/requests/curl_cffi.py b/g4f/requests/curl_cffi.py
index 000448fe..e955d640 100644
--- a/g4f/requests/curl_cffi.py
+++ b/g4f/requests/curl_cffi.py
@@ -34,15 +34,13 @@ class StreamResponse:
         """Asynchronously parse the JSON response content."""
         return json.loads(await self.inner.acontent(), **kwargs)
 
-    async def iter_lines(self) -> AsyncGenerator[bytes, None]:
+    def iter_lines(self) -> AsyncGenerator[bytes, None]:
         """Asynchronously iterate over the lines of the response."""
-        async for line in self.inner.aiter_lines():
-            yield line
+        return self.inner.aiter_lines()
 
-    async def iter_content(self) -> AsyncGenerator[bytes, None]:
+    def iter_content(self) -> AsyncGenerator[bytes, None]:
         """Asynchronously iterate over the response content."""
-        async for chunk in self.inner.aiter_content():
-            yield chunk
+        return self.inner.aiter_content()
 
     async def __aenter__(self):
         """Asynchronously enter the runtime context for the response object."""