From fdd8ef1fc3741ac9472b3f9f6e46cf65827566aa Mon Sep 17 00:00:00 2001
From: abc <98614666+xtekky@users.noreply.github.com>
Date: Fri, 6 Oct 2023 19:51:36 +0100
Subject: ~ | new folder including `./tool` and `./testing`

---
 etc/testing/log_time.py             | 25 ++++++++++
 etc/testing/test_async.py           | 30 ++++++++++++
 etc/testing/test_chat_completion.py | 27 +++++++++++
 etc/testing/test_interference.py    | 27 +++++++++++
 etc/testing/test_needs_auth.py      | 96 +++++++++++++++++++++++++++++++++++++
 etc/testing/test_providers.py       | 66 +++++++++++++++++++++++++
 6 files changed, 271 insertions(+)
 create mode 100644 etc/testing/log_time.py
 create mode 100644 etc/testing/test_async.py
 create mode 100644 etc/testing/test_chat_completion.py
 create mode 100644 etc/testing/test_interference.py
 create mode 100644 etc/testing/test_needs_auth.py
 create mode 100644 etc/testing/test_providers.py
(limited to 'etc/testing')

diff --git a/etc/testing/log_time.py b/etc/testing/log_time.py
new file mode 100644
index 00000000..376ab86d
--- /dev/null
+++ b/etc/testing/log_time.py
@@ -0,0 +1,25 @@
+from time import time
+
+
+async def log_time_async(method: callable, **kwargs):
+    start = time()
+    result = await method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
+
+
+def log_time_yield(method: callable, **kwargs):
+    start = time()
+    result = yield from method(**kwargs)
+    yield f" {round(time() - start, 2)} secs"
+
+
+def log_time(method: callable, **kwargs):
+    start = time()
+    result = method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
\ No newline at end of file
diff --git a/etc/testing/test_async.py b/etc/testing/test_async.py
new file mode 100644
index 00000000..76b109b1
--- /dev/null
+++ b/etc/testing/test_async.py
@@ -0,0 +1,30 @@
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+from testing.test_providers import get_providers
+from testing.log_time import log_time_async
+
+async def create_async(provider):
+    try:
+        response = await log_time_async(
+            provider.create_async,
+            model=g4f.models.default.name,
+            messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}]
+        )
+        print(f"{provider.__name__}:", response)
+    except Exception as e:
+        print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
+
+async def run_async():
+    responses: list = [
+        create_async(provider)
+        for provider in get_providers()
+        if provider.working
+    ]
+    await asyncio.gather(*responses)
+
+print("Total:", asyncio.run(log_time_async(run_async)))
\ No newline at end of file
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
new file mode 100644
index 00000000..7600e46b
--- /dev/null
+++ b/etc/testing/test_chat_completion.py
@@ -0,0 +1,27 @@
+import sys
+from pathlib import Path
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f, asyncio
+
+print("create:", end=" ", flush=True)
+for response in g4f.ChatCompletion.create(
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
+    temperature=0.0,
+    stream=True
+):
+    print(response, end="", flush=True)
+print()
+
+async def run_async():
+    response = await g4f.ChatCompletion.create_async(
+        model=g4f.models.gpt_35_turbo_16k_0613,
+        provider=g4f.Provider.Aivvm,
+        messages=[{"role": "user", "content": "hello!"}],
+    )
+    print("create_async:", response)
+
+# asyncio.run(run_async())
diff --git a/etc/testing/test_interference.py b/etc/testing/test_interference.py
new file mode 100644
index 00000000..d8e85a6c
--- /dev/null
+++ b/etc/testing/test_interference.py
@@ -0,0 +1,27 @@
+# type: ignore
+import openai
+
+openai.api_key = ""
+openai.api_base = "http://localhost:1337"
+
+
+def main():
+    chat_completion = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "write a poem about a tree"}],
+        stream=True,
+    )
+
+    if isinstance(chat_completion, dict):
+        # not stream
+        print(chat_completion.choices[0].message.content)
+    else:
+        # stream
+        for token in chat_completion:
+            content = token["choices"][0]["delta"].get("content")
+            if content != None:
+                print(content, end="", flush=True)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/etc/testing/test_needs_auth.py b/etc/testing/test_needs_auth.py
new file mode 100644
index 00000000..26630e23
--- /dev/null
+++ b/etc/testing/test_needs_auth.py
@@ -0,0 +1,96 @@
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+from testing.log_time import log_time, log_time_async, log_time_yield
+
+
+_providers = [
+    g4f.Provider.H2o,
+    g4f.Provider.You,
+    g4f.Provider.HuggingChat,
+    g4f.Provider.OpenAssistant,
+    g4f.Provider.Bing,
+    g4f.Provider.Bard
+]
+
+_instruct = "Hello, are you GPT 4?."
+
+_example = """
+OpenaiChat: Hello! How can I assist you today? 2.0 secs
+Bard: Hello! How can I help you today? 3.44 secs
+Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
+Async Total: 4.25 secs
+
+OpenaiChat: Hello! How can I assist you today? 1.85 secs
+Bard: Hello! How can I help you today? 3.38 secs
+Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
+Stream Total: 11.37 secs
+
+OpenaiChat: Hello! How can I help you today? 3.28 secs
+Bard: Hello there! How can I help you today? 3.58 secs
+Bing: Hello! How can I help you today? 3.28 secs
+No Stream Total: 10.14 secs
+"""
+
+print("Bing: ", end="")
+for response in log_time_yield(
+    g4f.ChatCompletion.create,
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": _instruct}],
+    provider=g4f.Provider.Bing,
+    #cookies=g4f.get_cookies(".huggingface.co"),
+    stream=True,
+    auth=True
+):
+    print(response, end="", flush=True)
+print()
+print()
+
+
+async def run_async():
+    responses = [
+        log_time_async(
+            provider.create_async,
+            model=None,
+            messages=[{"role": "user", "content": _instruct}],
+        )
+        for provider in _providers
+    ]
+    responses = await asyncio.gather(*responses)
+    for idx, provider in enumerate(_providers):
+        print(f"{provider.__name__}:", responses[idx])
+print("Async Total:", asyncio.run(log_time_async(run_async)))
+print()
+
+
+def run_stream():
+    for provider in _providers:
+        print(f"{provider.__name__}: ", end="")
+        for response in log_time_yield(
+            provider.create_completion,
+            model=None,
+            messages=[{"role": "user", "content": _instruct}],
+        ):
+            print(response, end="", flush=True)
+        print()
+print("Stream Total:", log_time(run_stream))
+print()
+
+
+def create_no_stream():
+    for provider in _providers:
+        print(f"{provider.__name__}:", end=" ")
+        for response in log_time_yield(
+            provider.create_completion,
+            model=None,
+            messages=[{"role": "user", "content": _instruct}],
+            stream=False
+        ):
+            print(response, end="")
+        print()
+print("No Stream Total:", log_time(create_no_stream))
+print()
\ No newline at end of file
diff --git a/etc/testing/test_providers.py b/etc/testing/test_providers.py
new file mode 100644
index 00000000..ec0e0271
--- /dev/null
+++ b/etc/testing/test_providers.py
@@ -0,0 +1,66 @@
+import sys
+from pathlib import Path
+from colorama import Fore, Style
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+from g4f import BaseProvider, models, Provider
+
+logging = False
+
+
+def main():
+    providers = get_providers()
+    failed_providers = []
+
+    for _provider in providers:
+        if _provider.needs_auth:
+            continue
+        print("Provider:", _provider.__name__)
+        result = test(_provider)
+        print("Result:", result)
+        if _provider.working and not result:
+            failed_providers.append(_provider)
+
+    print()
+
+    if failed_providers:
+        print(f"{Fore.RED + Style.BRIGHT}Failed providers:{Style.RESET_ALL}")
+        for _provider in failed_providers:
+            print(f"{Fore.RED}{_provider.__name__}")
+    else:
+        print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
+
+
+def get_providers() -> list[type[BaseProvider]]:
+    providers = dir(Provider)
+    providers = [getattr(Provider, provider) for provider in providers if provider != "RetryProvider"]
+    providers = [provider for provider in providers if isinstance(provider, type)]
+    return [provider for provider in providers if issubclass(provider, BaseProvider)]
+
+
+def create_response(_provider: type[BaseProvider]) -> str:
+    model = models.gpt_35_turbo.name if _provider.supports_gpt_35_turbo else models.default.name
+    response = _provider.create_completion(
+        model=model,
+        messages=[{"role": "user", "content": "Hello, who are you? Answer in detail much as possible."}],
+        stream=False,
+    )
+    return "".join(response)
+
+
+def test(_provider: type[BaseProvider]) -> bool:
+    try:
+        response = create_response(_provider)
+        assert type(response) is str
+        assert len(response) > 0
+        return response
+    except Exception as e:
+        if logging:
+            print(e)
+        return False
+
+
+if __name__ == "__main__":
+    main()
+    
\ No newline at end of file
--
cgit v1.2.3


From 1238d9a63839165567511cc88e49b246c453cc47 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sat, 7 Oct 2023 04:03:36 +0200
Subject: Add GPTalk and GptForLove Provider

---
 etc/testing/test_chat_completion.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'etc/testing')

diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
index 7600e46b..d5fb5b29 100644
--- a/etc/testing/test_chat_completion.py
+++ b/etc/testing/test_chat_completion.py
@@ -1,14 +1,14 @@
 import sys
 from pathlib import Path
 
-sys.path.append(str(Path(__file__).parent.parent))
+sys.path.append(str(Path(__file__).parent.parent.parent))
 
 import g4f, asyncio
 
 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.gpt_4_32k_0613,
-    provider=g4f.Provider.Aivvm,
+    model=g4f.models.default,
+    provider=g4f.Provider.GptForLove,
     messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
     temperature=0.0,
     stream=True
--
cgit v1.2.3


From 4fa6e9c0f597c07d2acf10732fe2aeda270c6ca6 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sat, 7 Oct 2023 09:02:48 +0200
Subject: Add GptGod Provider Remove timeout from aiohttp providers Disable Opchatgpts and ChatgptLogin provider

---
 etc/testing/test_async.py | 1 +
 1 file changed, 1 insertion(+)
(limited to 'etc/testing')

diff --git a/etc/testing/test_async.py b/etc/testing/test_async.py
index 76b109b1..2c15f6b0 100644
--- a/etc/testing/test_async.py
+++ b/etc/testing/test_async.py
@@ -3,6 +3,7 @@ from pathlib import Path
 import asyncio
 
 sys.path.append(str(Path(__file__).parent.parent))
+sys.path.append(str(Path(__file__).parent.parent.parent))
 
 import g4f
 from testing.test_providers import get_providers
--
cgit v1.2.3