author    | xtekky <98614666+xtekky@users.noreply.github.com> | 2023-07-11 20:28:28 +0200
committer | GitHub <noreply@github.com> | 2023-07-11 20:28:28 +0200
commit    | 99cd4088e5c9e98e39ce2b5d94bcc6bbc3190e92 (patch)
tree      | 62e59c453836ebdc381c6da22ceb030611c2598b /testing/aiservice
parent    | Merge pull request #740 from bagusindrayana/binghuan (diff)
parent    | add readme (diff)
Diffstat (limited to 'testing/aiservice')
-rw-r--r-- | testing/aiservice/AiService.py | 62
-rw-r--r-- | testing/aiservice/README.md    |  2
-rw-r--r-- | testing/aiservice/testing.py   | 30
3 files changed, 94 insertions, 0 deletions
diff --git a/testing/aiservice/AiService.py b/testing/aiservice/AiService.py
new file mode 100644
index 00000000..287a39ef
--- /dev/null
+++ b/testing/aiservice/AiService.py
@@ -0,0 +1,62 @@
+import os,sys
+import requests
+# from ...typing import get_type_hints
+
+url = "https://aiservice.vercel.app/api/chat/answer"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "accept": "*/*",
+        "content-type": "text/plain;charset=UTF-8",
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "Referer": "https://aiservice.vercel.app/chat",
+    }
+    data = {
+        "input": base
+    }
+    response = requests.post(url, headers=headers, json=data)
+    if response.status_code == 200:
+        _json = response.json()
+        yield _json['data']
+    else:
+        print(f"Error Occurred::{response.status_code}")
+        return None
+
+
+
+# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+#     '(%s)' % ', '.join(
+#         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+
+
+# Temporary For ChatCompletion Class
+class ChatCompletion:
+    @staticmethod
+    def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs):
+        kwargs['auth'] = auth
+
+        if provider and needs_auth and not auth:
+            print(
+                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
+            sys.exit(1)
+
+        try:
+            return (_create_completion(model, messages, stream, **kwargs)
+                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
+        except TypeError as e:
+            print(e)
+            arg: str = str(e).split("'")[1]
+            print(
+                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
+            sys.exit(1)
\ No newline at end of file
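Not part of the commit, but for quick reference: the new provider boils down to a single unauthenticated POST. The sketch below reproduces that request outside the module, assuming the endpoint URL and the `{"input": ...}` request / `{"data": ...}` response shapes shown in the diff above still hold.

```python
# Minimal sketch of the request AiService.py makes (assumption: the endpoint
# still accepts {"input": "<prompt>"} and answers with {"data": "<completion>"}).
import requests

url = "https://aiservice.vercel.app/api/chat/answer"

# _create_completion flattens the OpenAI-style message list into one transcript
# string and appends "assistant:" so the service continues the conversation.
messages = [{'role': 'user', 'content': 'who are you?'}]
prompt = ''.join('%s: %s\n' % (m['role'], m['content']) for m in messages) + 'assistant:'

response = requests.post(url, json={"input": prompt}, headers={
    "content-type": "text/plain;charset=UTF-8",
    "Referer": "https://aiservice.vercel.app/chat",
})
response.raise_for_status()
print(response.json()['data'])  # completion text, per the provider code above
```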
diff --git a/testing/aiservice/README.md b/testing/aiservice/README.md
new file mode 100644
index 00000000..83b06481
--- /dev/null
+++ b/testing/aiservice/README.md
@@ -0,0 +1,2 @@
+https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431
+probably gpt-3.5
\ No newline at end of file
diff --git a/testing/aiservice/testing.py b/testing/aiservice/testing.py
new file mode 100644
index 00000000..5cb6c5ef
--- /dev/null
+++ b/testing/aiservice/testing.py
@@ -0,0 +1,30 @@
+from AiService import ChatCompletion
+
+# Test 1
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+                                 provider="AiService",
+                                 stream=False,
+                                 messages=[{'role': 'user', 'content': 'who are you?'}])
+
+print(response)
+
+# Test 2
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+                                 provider="AiService",
+                                 stream=False,
+                                 messages=[{'role': 'user', 'content': 'what you can do?'}])
+
+print(response)
+
+
+# Test 3
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+                                 provider="AiService",
+                                 stream=False,
+                                 messages=[
+                                     {'role': 'user', 'content': 'now your name is Bob'},
+                                     {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
+                                     {'role': 'user', 'content': 'what your name again?'},
+                                 ])
+
+print(response)
\ No newline at end of file
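Also not part of the commit: since ChatCompletion.create joins the generator's output into a plain string when stream=False, each print(response) above prints a single completion. For illustration, the transcript that Test 3 actually sends can be rebuilt with the same flattening logic used in _create_completion:

```python
# Illustrative only: rebuilds the prompt string _create_completion posts for
# the Test 3 message list above (same logic as in AiService.py).
messages = [
    {'role': 'user', 'content': 'now your name is Bob'},
    {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
    {'role': 'user', 'content': 'what your name again?'},
]
base = ''
for message in messages:
    base += '%s: %s\n' % (message['role'], message['content'])
base += 'assistant:'
print(base)
# user: now your name is Bob
# assistant: Hello Im Bob, you asistant
# user: what your name again?
# assistant:
```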