Diffstat (limited to 'etc/testing')
-rw-r--r-- | etc/testing/test_all.py             | 67
-rw-r--r-- | etc/testing/test_chat_completion.py |  8
2 files changed, 71 insertions, 4 deletions
diff --git a/etc/testing/test_all.py b/etc/testing/test_all.py
new file mode 100644
index 00000000..73134e3f
--- /dev/null
+++ b/etc/testing/test_all.py
@@ -0,0 +1,67 @@
+import asyncio
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+
+
+async def test(model: g4f.Model):
+    try:
+        try:
+            for response in g4f.ChatCompletion.create(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+        except:
+            for response in await g4f.ChatCompletion.create_async(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+
+        return True
+    except Exception as e:
+        print(model.name, "not working:", e)
+        print(e.__traceback__.tb_next)
+        return False
+
+
+async def start_test():
+    models_to_test = [
+        # GPT-3.5 4K Context
+        g4f.models.gpt_35_turbo,
+        g4f.models.gpt_35_turbo_0613,
+
+        # GPT-3.5 16K Context
+        g4f.models.gpt_35_turbo_16k,
+        g4f.models.gpt_35_turbo_16k_0613,
+
+        # GPT-4 8K Context
+        g4f.models.gpt_4,
+        g4f.models.gpt_4_0613,
+
+        # GPT-4 32K Context
+        g4f.models.gpt_4_32k,
+        g4f.models.gpt_4_32k_0613,
+    ]
+
+    models_working = []
+
+    for model in models_to_test:
+        if await test(model):
+            models_working.append(model.name)
+
+    print("working models:", models_working)
+
+
+asyncio.run(start_test())
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
index ee523b86..7058ab4c 100644
--- a/etc/testing/test_chat_completion.py
+++ b/etc/testing/test_chat_completion.py
@@ -7,10 +7,10 @@ import g4f, asyncio
 
 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.default,
-    provider=g4f.Provider.GptForLove,
-    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
-    temperature=0.0,
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "write a poem about a tree"}],
+    temperature=0.1,
     stream=True
 ):
     print(response, end="", flush=True)
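The new test_all.py starts its model sweep at module level via asyncio.run(start_test()), so it can be run directly (e.g. python etc/testing/test_all.py from the repository root). A minimal sketch of checking a single model the same way, using only the synchronous streaming call already shown in the diff; the helper name check, the model chosen, and the assumption that g4f is importable without the sys.path tweak are illustrative, not part of the commit:

    import g4f

    def check(model: g4f.Model) -> bool:
        # Stream a short completion and report whether the model responded.
        try:
            for chunk in g4f.ChatCompletion.create(
                model=model,
                messages=[{"role": "user", "content": "write a poem about a tree"}],
                temperature=0.1,
                stream=True,
            ):
                print(chunk, end="")
            print()
            return True
        except Exception as e:
            print(model.name, "not working:", e)
            return False

    check(g4f.models.gpt_35_turbo)

The committed script extends this pattern by falling back to create_async when the synchronous call fails, then collecting the names of all working models.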