summary refs log tree commit diff stats
path: root/etc/testing
diff options
context:
space:
mode:
Diffstat (limited to 'etc/testing')
-rw-r--r--  etc/testing/log_time.py              25
-rw-r--r--  etc/testing/test_async.py            31
-rw-r--r--  etc/testing/test_chat_completion.py  27
-rw-r--r--  etc/testing/test_interference.py     27
-rw-r--r--  etc/testing/test_needs_auth.py       96
-rw-r--r--  etc/testing/test_providers.py        66
6 files changed, 272 insertions, 0 deletions
diff --git a/etc/testing/log_time.py b/etc/testing/log_time.py
new file mode 100644
index 00000000..376ab86d
--- /dev/null
+++ b/etc/testing/log_time.py
@@ -0,0 +1,25 @@
from time import time


async def log_time_async(method: callable, **kwargs):
    """Await *method*(**kwargs) and append its elapsed wall time.

    Returns "<result> <elapsed> secs" when the awaited call returns a
    truthy value, otherwise just the elapsed-time string.
    """
    start = time()
    result = await method(**kwargs)
    secs = f"{round(time() - start, 2)} secs"
    # f-string instead of " ".join([...]) so truthy non-string results
    # (the original would raise TypeError on those) are formatted too.
    return f"{result} {secs}" if result else secs
+
+
def log_time_yield(method: callable, **kwargs):
    """Yield everything *method*(**kwargs) yields, then the elapsed time.

    The timing chunk comes last, formatted as " <elapsed> secs" with a
    leading space so it can be streamed right after the final token.
    """
    start = time()
    # The delegated generator's return value is deliberately discarded;
    # only the streamed chunks and the trailing timing string matter.
    yield from method(**kwargs)
    yield f" {round(time() - start, 2)} secs"
+
+
def log_time(method: callable, **kwargs):
    """Call *method*(**kwargs) and append its elapsed wall time.

    Returns "<result> <elapsed> secs" for truthy results, otherwise only
    the elapsed-time string.
    """
    start = time()
    result = method(**kwargs)
    secs = f"{round(time() - start, 2)} secs"
    # f-string instead of " ".join([...]) so truthy non-string results
    # (the original would raise TypeError on those) are formatted too.
    return f"{result} {secs}" if result else secs
diff --git a/etc/testing/test_async.py b/etc/testing/test_async.py
new file mode 100644
index 00000000..2c15f6b0
--- /dev/null
+++ b/etc/testing/test_async.py
@@ -0,0 +1,31 @@
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent))
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+from testing.test_providers import get_providers
+from testing.log_time import log_time_async
+
async def create_async(provider):
    """Time one provider's async completion and print the result or error."""
    try:
        result = await log_time_async(
            provider.create_async,
            model=g4f.models.default.name,
            messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}]
        )
    except Exception as e:
        print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
    else:
        print(f"{provider.__name__}:", result)
+
async def run_async():
    """Kick off one test coroutine per working provider and await them all."""
    pending = []
    for provider in get_providers():
        if provider.working:
            pending.append(create_async(provider))
    await asyncio.gather(*pending)
+
# Report the wall-clock time for the whole concurrent run.
total = asyncio.run(log_time_async(run_async))
print("Total:", total)
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
new file mode 100644
index 00000000..ee523b86
--- /dev/null
+++ b/etc/testing/test_chat_completion.py
@@ -0,0 +1,27 @@
+import sys
+from pathlib import Path
+
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f, asyncio
+
# Exercise the streaming path and echo each chunk as it arrives.
print("create:", end=" ", flush=True)
stream = g4f.ChatCompletion.create(
    model=g4f.models.default,
    provider=g4f.Provider.GptForLove,
    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
    temperature=0.0,
    stream=True
)
for chunk in stream:
    print(chunk, end="", flush=True)
print()
+
async def run_async():
    """Exercise the non-streaming async completion path for one provider."""
    answer = await g4f.ChatCompletion.create_async(
        model=g4f.models.gpt_35_turbo_16k_0613,
        provider=g4f.Provider.GptGod,
        messages=[{"role": "user", "content": "hello!"}],
    )
    print("create_async:", answer)
+
+# asyncio.run(run_async())
diff --git a/etc/testing/test_interference.py b/etc/testing/test_interference.py
new file mode 100644
index 00000000..d8e85a6c
--- /dev/null
+++ b/etc/testing/test_interference.py
@@ -0,0 +1,27 @@
+# type: ignore
+import openai
+
+openai.api_key = ""
+openai.api_base = "http://localhost:1337"
+
+
def main():
    """Stream a completion through the local interference API and print it."""
    chat_completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "write a poem about a tree"}],
        stream=True,
    )

    if isinstance(chat_completion, dict):
        # Non-streaming servers return the whole response at once.
        # NOTE(review): attribute access on a dict relies on the openai
        # response type being a dict subclass with attribute access — confirm.
        print(chat_completion.choices[0].message.content)
    else:
        # Streaming: print each delta token as it arrives.
        for token in chat_completion:
            content = token["choices"][0]["delta"].get("content")
            # `is not None` instead of `!= None` (identity check for None).
            if content is not None:
                print(content, end="", flush=True)


if __name__ == "__main__":
    main()
diff --git a/etc/testing/test_needs_auth.py b/etc/testing/test_needs_auth.py
new file mode 100644
index 00000000..26630e23
--- /dev/null
+++ b/etc/testing/test_needs_auth.py
@@ -0,0 +1,96 @@
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+from testing.log_time import log_time, log_time_async, log_time_yield
+
+
# Providers exercised by every timing run below. Per the file name these
# presumably require authentication (cookies/tokens) — confirm per provider.
_providers = [
    g4f.Provider.H2o,
    g4f.Provider.You,
    g4f.Provider.HuggingChat,
    g4f.Provider.OpenAssistant,
    g4f.Provider.Bing,
    g4f.Provider.Bard
]
+
# Prompt sent to every provider in each timing run.
_instruct = "Hello, are you GPT 4?."

# Sample output from an earlier run, kept for reference only; nothing in
# this file reads it.
_example = """
OpenaiChat: Hello! How can I assist you today? 2.0 secs
Bard: Hello! How can I help you today? 3.44 secs
Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
Async Total: 4.25 secs

OpenaiChat: Hello! How can I assist you today? 1.85 secs
Bard: Hello! How can I help you today? 3.38 secs
Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
Stream Total: 11.37 secs

OpenaiChat: Hello! How can I help you today? 3.28 secs
Bard: Hello there! How can I help you today? 3.58 secs
Bing: Hello! How can I help you today? 3.28 secs
No Stream Total: 10.14 secs
"""
+
# Stream a single authenticated Bing completion, then its timing.
print("Bing: ", end="")
bing_stream = log_time_yield(
    g4f.ChatCompletion.create,
    model=g4f.models.default,
    messages=[{"role": "user", "content": _instruct}],
    provider=g4f.Provider.Bing,
    #cookies=g4f.get_cookies(".huggingface.co"),
    stream=True,
    auth=True
)
for chunk in bing_stream:
    print(chunk, end="", flush=True)
print()
print()
+
+
async def run_async():
    """Run every provider's async completion concurrently and print timings."""
    tasks = [
        log_time_async(
            provider.create_async,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
        )
        for provider in _providers
    ]
    results = await asyncio.gather(*tasks)
    for provider, result in zip(_providers, results):
        print(f"{provider.__name__}:", result)
print("Async Total:", asyncio.run(log_time_async(run_async)))
print()
+
+
def run_stream():
    """Stream each provider's completion sequentially, timing every one."""
    for provider in _providers:
        print(f"{provider.__name__}: ", end="")
        stream = log_time_yield(
            provider.create_completion,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
        )
        for chunk in stream:
            print(chunk, end="", flush=True)
        print()
print("Stream Total:", log_time(run_stream))
print()
+
+
def create_no_stream():
    """Request full (non-streamed) completions from each provider, timed."""
    for provider in _providers:
        print(f"{provider.__name__}:", end=" ")
        parts = log_time_yield(
            provider.create_completion,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
            stream=False
        )
        for part in parts:
            print(part, end="")
        print()
print("No Stream Total:", log_time(create_no_stream))
print()
diff --git a/etc/testing/test_providers.py b/etc/testing/test_providers.py
new file mode 100644
index 00000000..ec0e0271
--- /dev/null
+++ b/etc/testing/test_providers.py
@@ -0,0 +1,66 @@
+import sys
+from pathlib import Path
+from colorama import Fore, Style
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+from g4f import BaseProvider, models, Provider
+
# When True, test() prints the exception raised by a failing provider.
# NOTE(review): this name shadows the stdlib `logging` module; rename if
# that module is ever needed here.
logging = False
+
+
def main():
    """Run every auth-free provider once and summarize which ones failed."""
    failed_providers = []
    for prov in get_providers():
        if prov.needs_auth:
            continue
        print("Provider:", prov.__name__)
        outcome = test(prov)
        print("Result:", outcome)
        # Only providers flagged as working are expected to succeed.
        if prov.working and not outcome:
            failed_providers.append(prov)
        print()

    if not failed_providers:
        print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
        return
    print(f"{Fore.RED + Style.BRIGHT}Failed providers:{Style.RESET_ALL}")
    for prov in failed_providers:
        print(f"{Fore.RED}{prov.__name__}")
+
+
def get_providers() -> list[type[BaseProvider]]:
    """Collect every provider class exported by the Provider module."""
    candidates = (
        getattr(Provider, attr)
        for attr in dir(Provider)
        if attr != "RetryProvider"  # meta-provider, not a real backend
    )
    return [
        candidate
        for candidate in candidates
        if isinstance(candidate, type) and issubclass(candidate, BaseProvider)
    ]
+
+
def create_response(_provider: type[BaseProvider]) -> str:
    """Ask the provider a fixed question and return the joined response."""
    # Prefer gpt-3.5-turbo whenever the provider supports it.
    if _provider.supports_gpt_35_turbo:
        model = models.gpt_35_turbo.name
    else:
        model = models.default.name
    chunks = _provider.create_completion(
        model=model,
        messages=[{"role": "user", "content": "Hello, who are you? Answer in detail much as possible."}],
        stream=False,
    )
    return "".join(chunks)
+
+
def test(_provider: type[BaseProvider]) -> "str | bool":
    """Try one completion against *_provider*.

    Returns the response text on success (so the caller can both
    truth-test and print it) and False on any failure. The original
    annotation claimed ``bool`` even though a str is returned on success.
    """
    try:
        response = create_response(_provider)
        # Explicit checks instead of `assert`: asserts are stripped
        # under `python -O`, which would defeat the validation.
        if not isinstance(response, str) or not response:
            return False
        return response
    except Exception as e:
        if logging:
            print(e)
        return False
+
+
+if __name__ == "__main__":
+ main()
+ \ No newline at end of file