From 25870e7523e4427b084aa82d5e08cfdc41807da1 Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Tue, 11 Jul 2023 22:40:29 +0800 Subject: add provider and helper --- testing/binghuan/BingHuan.py | 49 +++++++++ testing/binghuan/helpers/binghuan.py | 206 +++++++++++++++++++++++++++++++++++ testing/binghuan/testing.py | 0 3 files changed, 255 insertions(+) create mode 100644 testing/binghuan/BingHuan.py create mode 100644 testing/binghuan/helpers/binghuan.py create mode 100644 testing/binghuan/testing.py (limited to 'testing') diff --git a/testing/binghuan/BingHuan.py b/testing/binghuan/BingHuan.py new file mode 100644 index 00000000..c4a65309 --- /dev/null +++ b/testing/binghuan/BingHuan.py @@ -0,0 +1,49 @@ +import os,sys +import json +import subprocess +# from ...typing import sha256, Dict, get_type_hints + +url = 'https://b.ai-huan.xyz' +model = ['gpt-3.5-turbo', 'gpt-4'] +supports_stream = True +needs_auth = False + +def _create_completion(model: str, messages: list, stream: bool, **kwargs): + path = os.path.dirname(os.path.realpath(__file__)) + config = json.dumps({ + 'messages': messages, + 'model': model}, separators=(',', ':')) + + cmd = ['python', f'{path}/helpers/binghuan.py', config] + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + for line in iter(p.stdout.readline, b''): + yield line.decode('cp1252') #[:-1] + + +# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ +# '(%s)' % ', '.join( +# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) + + +# Temporary For ChatCompletion Class +class ChatCompletion: + @staticmethod + def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs): + kwargs['auth'] = auth + + if provider and needs_auth and not auth: + print( + f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." 
param)', file=sys.stderr) + sys.exit(1) + + try: + return (_create_completion(model, messages, stream, **kwargs) + if stream else ''.join(_create_completion(model, messages, stream, **kwargs))) + except TypeError as e: + print(e) + arg: str = str(e).split("'")[1] + print( + f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr) + sys.exit(1) \ No newline at end of file diff --git a/testing/binghuan/helpers/binghuan.py b/testing/binghuan/helpers/binghuan.py new file mode 100644 index 00000000..56762ef3 --- /dev/null +++ b/testing/binghuan/helpers/binghuan.py @@ -0,0 +1,206 @@ +import sys +import ssl +import uuid +import json +import time +import random +import asyncio +import certifi +# import requests +from curl_cffi import requests +import websockets +import browser_cookie3 + +config = json.loads(sys.argv[1]) + +ssl_context = ssl.create_default_context() +ssl_context.load_verify_locations(certifi.where()) + + + +conversationstyles = { + 'gpt-4': [ #'precise' + "nlu_direct_response_filter", + "deepleo", + "disable_emoji_spoken_text", + "responsible_ai_policy_235", + "enablemm", + "h3precise", + "rcsprtsalwlst", + "dv3sugg", + "autosave", + "clgalileo", + "gencontentv3" + ], + 'balanced': [ + "nlu_direct_response_filter", + "deepleo", + "disable_emoji_spoken_text", + "responsible_ai_policy_235", + "enablemm", + "harmonyv3", + "rcsprtsalwlst", + "dv3sugg", + "autosave" + ], + 'gpt-3.5-turbo': [ #'precise' + "nlu_direct_response_filter", + "deepleo", + "disable_emoji_spoken_text", + "responsible_ai_policy_235", + "enablemm", + "h3imaginative", + "rcsprtsalwlst", + "dv3sugg", + "autosave", + "gencontentv3" + ] +} + +def format(msg: dict) -> str: + return json.dumps(msg) + '\x1e' + +def get_token(): + return + + try: + cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')} + return cookies['_U'] + except: + print('Error: could not find bing _U cookie in edge browser.') + exit(1) + +class AsyncCompletion: + async def create( + prompt : str = None, + optionSets : list = None, + token : str = None): # No auth required anymore + + create = None + for _ in range(5): + try: + create = requests.get('https://b.ai-huan.xyz/turing/conversation/create', + headers = { + 'host': 'b.ai-huan.xyz', + 'accept-encoding': 'gzip, deflate, br', + 'connection': 'keep-alive', + 'authority': 'b.ai-huan.xyz', + 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'max-age=0', + 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', + 'sec-ch-ua-arch': '"x86"', + 'sec-ch-ua-bitness': '"64"', + 'sec-ch-ua-full-version': '"110.0.1587.69"', + 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-model': '""', + 'sec-ch-ua-platform': '"Windows"', + 'sec-ch-ua-platform-version': '"15.0.0"', + 'sec-fetch-dest': 'document', + 'sec-fetch-mode': 'navigate', + 'sec-fetch-site': 'none', + 'sec-fetch-user': '?1', + 'upgrade-insecure-requests': '1', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69', + 'x-edge-shopping-flag': '1', + 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}' + } + ) + + conversationId = create.json()['conversationId'] + clientId = 
create.json()['clientId'] + conversationSignature = create.json()['conversationSignature'] + + except Exception as e: + time.sleep(0.5) + continue + + if create == None: raise Exception('Failed to create conversation.') + + wss: websockets.WebSocketClientProtocol or None = None + + wss = await websockets.connect('wss://sydney.vcanbb.chat/sydney/ChatHub', max_size = None, ssl = ssl_context, + extra_headers = { + 'accept': 'application/json', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"', + 'sec-ch-ua-arch': '"x86"', + 'sec-ch-ua-bitness': '"64"', + 'sec-ch-ua-full-version': '"109.0.1518.78"', + 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-model': "", + 'sec-ch-ua-platform': '"Windows"', + 'sec-ch-ua-platform-version': '"15.0.0"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'x-ms-client-request-id': str(uuid.uuid4()), + 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', + 'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx', + 'Referrer-Policy': 'origin-when-cross-origin', + 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}' + } + ) + + await wss.send(format({'protocol': 'json', 'version': 1})) + await wss.recv() + + struct = { + 'arguments': [ + { + 'source': 'cib', + 'optionsSets': optionSets, + 'isStartOfSession': True, + 'message': { + 'author': 'user', + 'inputMethod': 'Keyboard', + 'text': prompt, + 'messageType': 'Chat' + }, + 'conversationSignature': conversationSignature, + 'participant': { + 'id': clientId + }, + 'conversationId': conversationId + } + ], + 'invocationId': '0', + 'target': 'chat', + 'type': 4 + } + + await wss.send(format(struct)) + + base_string = '' + + final = False + while not final: + objects = str(await wss.recv()).split('\x1e') + for obj in objects: + if obj is None or obj == '': + continue + + response = json.loads(obj) + #print(response, flush=True, end='') + if response.get('type') == 1 and response['arguments'][0].get('messages',): + response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text') + + yield (response_text.replace(base_string, '')) + base_string = response_text + + elif response.get('type') == 2: + final = True + + await wss.close() + +async def run(optionSets, messages): + async for value in AsyncCompletion.create(prompt=messages[-1]['content'], + optionSets=optionSets): + + print(value, flush=True, end = '') + +optionSet = conversationstyles[config['model']] +asyncio.run(run(optionSet, config['messages'])) \ No newline at end of file diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py new file mode 100644 index 00000000..e69de29b -- cgit v1.2.3 From 42a5bad4eaf27af4b46811ca11bb8aab0d076a76 Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Tue, 11 Jul 2023 22:40:42 +0800 Subject: add testing --- testing/binghuan/testing.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'testing') diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py index e69de29b..e03c598d 100644 --- a/testing/binghuan/testing.py +++ b/testing/binghuan/testing.py @@ -0,0 +1,30 @@ +from BingHuan import ChatCompletion + +# Test 1 +response = 
ChatCompletion.create(model="gpt-3.5-turbo", + provider="Wewordle", + stream=False, + messages=[{'role': 'user', 'content': 'who are you?'}]) + +print(response) + +# Test 2 +response = ChatCompletion.create(model="gpt-3.5-turbo", + provider="Wewordle", + stream=False, + messages=[{'role': 'user', 'content': 'what you can do?'}]) + +print(response) + + +# Test 3 +response = ChatCompletion.create(model="gpt-3.5-turbo", + provider="Wewordle", + stream=False, + messages=[ + {'role': 'user', 'content': 'now your name is Bob'}, + {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'}, + {'role': 'user', 'content': 'what your name again?'}, + ]) + +print(response) \ No newline at end of file -- cgit v1.2.3 From 9c5ead57b65ad8d236706fe1b1c7071f4440ab17 Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Tue, 11 Jul 2023 22:54:25 +0800 Subject: udate testing --- testing/binghuan/helpers/binghuan.py | 11 ++++++++++- testing/binghuan/testing.py | 8 ++++---- 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'testing') diff --git a/testing/binghuan/helpers/binghuan.py b/testing/binghuan/helpers/binghuan.py index 56762ef3..ad3f7ff7 100644 --- a/testing/binghuan/helpers/binghuan.py +++ b/testing/binghuan/helpers/binghuan.py @@ -196,8 +196,17 @@ class AsyncCompletion: await wss.close() +# i thing bing realy donset understand multi message (based on prompt template) +def convert(messages): + context = "" + for message in messages: + context += "[%s](#message)\n%s\n\n" % (message['role'], + message['content']) + return context + async def run(optionSets, messages): - async for value in AsyncCompletion.create(prompt=messages[-1]['content'], + prompt = convert(messages) + async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets): print(value, flush=True, end = '') diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py index e03c598d..d10b0e96 100644 --- a/testing/binghuan/testing.py +++ b/testing/binghuan/testing.py @@ -2,7 +2,7 @@ from BingHuan import ChatCompletion # Test 1 response = ChatCompletion.create(model="gpt-3.5-turbo", - provider="Wewordle", + provider="BingHuan", stream=False, messages=[{'role': 'user', 'content': 'who are you?'}]) @@ -10,7 +10,7 @@ print(response) # Test 2 response = ChatCompletion.create(model="gpt-3.5-turbo", - provider="Wewordle", + provider="BingHuan", stream=False, messages=[{'role': 'user', 'content': 'what you can do?'}]) @@ -18,8 +18,8 @@ print(response) # Test 3 -response = ChatCompletion.create(model="gpt-3.5-turbo", - provider="Wewordle", +response = ChatCompletion.create(model="gpt-4", + provider="BingHuan", stream=False, messages=[ {'role': 'user', 'content': 'now your name is Bob'}, -- cgit v1.2.3 From e991484e2d6daf9e377adcdc9aa45fb52a1d921a Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Tue, 11 Jul 2023 23:00:51 +0800 Subject: readme and credit --- testing/binghuan/README.md | 5 +++++ testing/binghuan/helpers/binghuan.py | 2 ++ 2 files changed, 7 insertions(+) create mode 100644 testing/binghuan/README.md (limited to 'testing') diff --git a/testing/binghuan/README.md b/testing/binghuan/README.md new file mode 100644 index 00000000..3752d98d --- /dev/null +++ b/testing/binghuan/README.md @@ -0,0 +1,5 @@ +https://github.com/xtekky/gpt4free/issues/40#issuecomment-1630946450 +flow chat process is realy like real Bing (create conversation,listern to websocket and more) +so i just use code Bing Provider from https://gitler.moe/g4f/gpt4free/ version and replace API endpoint and some conversationstyles and 
work fine + +but bing dont realy support multi/continues conversation (using prompt template from original Provider : def convert(messages) : https://github.com/xtekky/gpt4free/blob/e594500c4e7a8443e9b3f4af755c72f42dae83f0/g4f/Provider/Providers/Bing.py#L322) \ No newline at end of file diff --git a/testing/binghuan/helpers/binghuan.py b/testing/binghuan/helpers/binghuan.py index ad3f7ff7..cdeabeb8 100644 --- a/testing/binghuan/helpers/binghuan.py +++ b/testing/binghuan/helpers/binghuan.py @@ -1,3 +1,5 @@ +# Original Code From : https://gitler.moe/g4f/gpt4free +# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py import sys import ssl import uuid -- cgit v1.2.3 From 27ca728af8185fc6b27f3d797bd78a6d61e5b30f Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Tue, 11 Jul 2023 23:05:15 +0800 Subject: check multiple messages --- testing/binghuan/helpers/binghuan.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'testing') diff --git a/testing/binghuan/helpers/binghuan.py b/testing/binghuan/helpers/binghuan.py index cdeabeb8..2aabc4b6 100644 --- a/testing/binghuan/helpers/binghuan.py +++ b/testing/binghuan/helpers/binghuan.py @@ -207,7 +207,9 @@ def convert(messages): return context async def run(optionSets, messages): - prompt = convert(messages) + prompt = messages[-1]['content'] + if(len(messages) > 1): + prompt = convert(messages) async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets): -- cgit v1.2.3 From 659e2f4ff08b46bdcc2420b705eac47da683a3a4 Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Wed, 12 Jul 2023 01:13:24 +0800 Subject: emojin encoding problem idk how to fix it --- testing/binghuan/BingHuan.py | 6 +++--- testing/binghuan/helpers/binghuan.py | 10 ++++++---- testing/binghuan/testing.py | 32 +++++++++++++++++--------------- 3 files changed, 26 insertions(+), 22 deletions(-) (limited to 'testing') diff --git a/testing/binghuan/BingHuan.py b/testing/binghuan/BingHuan.py index c4a65309..ce36b1f2 100644 --- a/testing/binghuan/BingHuan.py +++ b/testing/binghuan/BingHuan.py @@ -13,13 +13,13 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs): config = json.dumps({ 'messages': messages, 'model': model}, separators=(',', ':')) - cmd = ['python', f'{path}/helpers/binghuan.py', config] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - + for line in iter(p.stdout.readline, b''): - yield line.decode('cp1252') #[:-1] + yield line.decode('utf-8') + # params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ diff --git a/testing/binghuan/helpers/binghuan.py b/testing/binghuan/helpers/binghuan.py index 2aabc4b6..203bbe45 100644 --- a/testing/binghuan/helpers/binghuan.py +++ b/testing/binghuan/helpers/binghuan.py @@ -210,10 +210,12 @@ async def run(optionSets, messages): prompt = messages[-1]['content'] if(len(messages) > 1): prompt = convert(messages) - async for value in AsyncCompletion.create(prompt=prompt, - optionSets=optionSets): - - print(value, flush=True, end = '') + async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets): + try: + print(value, flush=True, end='') + except UnicodeEncodeError as e: + # emoji encoding problem + print(value.encode('utf-8'), flush=True, end='') optionSet = conversationstyles[config['model']] asyncio.run(run(optionSet, config['messages'])) \ No newline at end of file diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py index d10b0e96..6c84cfae 100644 --- 
a/testing/binghuan/testing.py +++ b/testing/binghuan/testing.py @@ -1,12 +1,14 @@ from BingHuan import ChatCompletion +# text = "Hello, this is Bing. I can help you find information on the web, generate content such as poems, stories, code, essays, songs, celebrity parodies and more. I can also help you with rewriting, improving, or optimizing your content. Is there anything specific you would like me to help you with? 😊" +# print(text.encode('utf-8')) -# Test 1 -response = ChatCompletion.create(model="gpt-3.5-turbo", - provider="BingHuan", - stream=False, - messages=[{'role': 'user', 'content': 'who are you?'}]) +# # Test 1 +# response = ChatCompletion.create(model="gpt-3.5-turbo", +# provider="BingHuan", +# stream=False, +# messages=[{'role': 'user', 'content': 'who are you?'}]) -print(response) +# print(response) # Test 2 response = ChatCompletion.create(model="gpt-3.5-turbo", @@ -18,13 +20,13 @@ print(response) # Test 3 -response = ChatCompletion.create(model="gpt-4", - provider="BingHuan", - stream=False, - messages=[ - {'role': 'user', 'content': 'now your name is Bob'}, - {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'}, - {'role': 'user', 'content': 'what your name again?'}, - ]) +# response = ChatCompletion.create(model="gpt-4", +# provider="BingHuan", +# stream=False, +# messages=[ +# {'role': 'user', 'content': 'now your name is Bob'}, +# {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'}, +# {'role': 'user', 'content': 'what your name again?'}, +# ]) -print(response) \ No newline at end of file +# print(response) \ No newline at end of file -- cgit v1.2.3 From 86088bef83fb70cbc84fd2475446ca9fb79c460b Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Wed, 12 Jul 2023 01:13:45 +0800 Subject: update testing --- testing/binghuan/testing.py | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) (limited to 'testing') diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py index 6c84cfae..2c510914 100644 --- a/testing/binghuan/testing.py +++ b/testing/binghuan/testing.py @@ -1,14 +1,12 @@ from BingHuan import ChatCompletion -# text = "Hello, this is Bing. I can help you find information on the web, generate content such as poems, stories, code, essays, songs, celebrity parodies and more. I can also help you with rewriting, improving, or optimizing your content. Is there anything specific you would like me to help you with? 
😊" -# print(text.encode('utf-8')) -# # Test 1 -# response = ChatCompletion.create(model="gpt-3.5-turbo", -# provider="BingHuan", -# stream=False, -# messages=[{'role': 'user', 'content': 'who are you?'}]) +# Test 1 +response = ChatCompletion.create(model="gpt-3.5-turbo", + provider="BingHuan", + stream=False, + messages=[{'role': 'user', 'content': 'who are you?'}]) -# print(response) +print(response) # Test 2 response = ChatCompletion.create(model="gpt-3.5-turbo", @@ -19,14 +17,14 @@ response = ChatCompletion.create(model="gpt-3.5-turbo", print(response) -# Test 3 -# response = ChatCompletion.create(model="gpt-4", -# provider="BingHuan", -# stream=False, -# messages=[ -# {'role': 'user', 'content': 'now your name is Bob'}, -# {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'}, -# {'role': 'user', 'content': 'what your name again?'}, -# ]) +Test 3 +response = ChatCompletion.create(model="gpt-4", + provider="BingHuan", + stream=False, + messages=[ + {'role': 'user', 'content': 'now your name is Bob'}, + {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'}, + {'role': 'user', 'content': 'what your name again?'}, + ]) -# print(response) \ No newline at end of file +print(response) \ No newline at end of file -- cgit v1.2.3 From 1569b6c6da56c66d3982b785ba20c29a3a5fe19b Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Wed, 12 Jul 2023 01:14:44 +0800 Subject: update readme --- testing/binghuan/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'testing') diff --git a/testing/binghuan/README.md b/testing/binghuan/README.md index 3752d98d..642f1fee 100644 --- a/testing/binghuan/README.md +++ b/testing/binghuan/README.md @@ -2,4 +2,6 @@ https://github.com/xtekky/gpt4free/issues/40#issuecomment-1630946450 flow chat process is realy like real Bing (create conversation,listern to websocket and more) so i just use code Bing Provider from https://gitler.moe/g4f/gpt4free/ version and replace API endpoint and some conversationstyles and work fine -but bing dont realy support multi/continues conversation (using prompt template from original Provider : def convert(messages) : https://github.com/xtekky/gpt4free/blob/e594500c4e7a8443e9b3f4af755c72f42dae83f0/g4f/Provider/Providers/Bing.py#L322) \ No newline at end of file +but bing dont realy support multi/continues conversation (using prompt template from original Provider : def convert(messages) : https://github.com/xtekky/gpt4free/blob/e594500c4e7a8443e9b3f4af755c72f42dae83f0/g4f/Provider/Providers/Bing.py#L322) + +also i have problem with emoji encoding idk how to fix that \ No newline at end of file -- cgit v1.2.3 From 45787b4f0d0efeb72681c58ae3c350168b867752 Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Wed, 12 Jul 2023 01:16:45 +0800 Subject: update testing --- testing/binghuan/testing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'testing') diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py index 2c510914..2db0b427 100644 --- a/testing/binghuan/testing.py +++ b/testing/binghuan/testing.py @@ -9,6 +9,7 @@ response = ChatCompletion.create(model="gpt-3.5-turbo", print(response) # Test 2 +# this prompt will return emoji in end of response response = ChatCompletion.create(model="gpt-3.5-turbo", provider="BingHuan", stream=False, @@ -17,7 +18,7 @@ response = ChatCompletion.create(model="gpt-3.5-turbo", print(response) -Test 3 +# Test 3 response = ChatCompletion.create(model="gpt-4", provider="BingHuan", stream=False, -- cgit v1.2.3 From 
23364b479af936f156b14550c918bc7d031c127d Mon Sep 17 00:00:00 2001 From: Bagus Indrayana Date: Wed, 12 Jul 2023 01:19:32 +0800 Subject: update encoding --- testing/binghuan/BingHuan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'testing') diff --git a/testing/binghuan/BingHuan.py b/testing/binghuan/BingHuan.py index ce36b1f2..8c859c08 100644 --- a/testing/binghuan/BingHuan.py +++ b/testing/binghuan/BingHuan.py @@ -18,7 +18,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs): p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) for line in iter(p.stdout.readline, b''): - yield line.decode('utf-8') + yield line.decode('cp1252') -- cgit v1.2.3
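A note on the emoji-encoding problem mentioned in the last few commits: the helper prints the response text to a pipe, and BingHuan.py decodes that pipe as cp1252, so any multi-byte character (such as the emoji Bing appends to some replies) either raises UnicodeEncodeError in the child or comes out mangled in the parent. Below is a minimal sketch of one way to keep both sides on UTF-8 (assuming Python 3.7+); it is a suggestion only, not part of the committed patches.

    # BingHuan.py -- run the helper with UTF-8 stdio and decode the pipe the same way
    import os
    import json
    import subprocess

    def _create_completion(model: str, messages: list, stream: bool, **kwargs):
        path = os.path.dirname(os.path.realpath(__file__))
        config = json.dumps({'messages': messages, 'model': model}, separators=(',', ':'))

        # Force the child interpreter to emit UTF-8 on stdout regardless of the OS locale.
        env = dict(os.environ, PYTHONIOENCODING='utf-8')

        p = subprocess.Popen(['python', f'{path}/helpers/binghuan.py', config],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)

        for line in iter(p.stdout.readline, b''):
            # Decode with the same encoding the child writes, instead of cp1252.
            yield line.decode('utf-8', errors='replace')

    # helpers/binghuan.py -- equivalent change on the child side (Python 3.7+):
    # import sys
    # sys.stdout.reconfigure(encoding='utf-8')

With both ends of the pipe agreeing on UTF-8, the try/except UnicodeEncodeError fallback added to run() in helpers/binghuan.py should no longer be needed.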