From 5db58fd87f230fbe5bae599bb4b120ab42cad3be Mon Sep 17 00:00:00 2001
From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com>
Date: Sat, 24 Jun 2023 03:47:00 +0200
Subject: gpt4free v2, first release

---
 g4f/Provider/Provider.py               |  15 ++
 g4f/Provider/Providers/Aichat.py       |  44 +++++
 g4f/Provider/Providers/Ails.py         |  91 +++++++++
 g4f/Provider/Providers/Bard.py         |  74 +++++++
 g4f/Provider/Providers/Bing.py         | 350 +++++++++++++++++++++++++++++++++
 g4f/Provider/Providers/ChatgptAi.py    |  51 +++++
 g4f/Provider/Providers/ChatgptLogin.py |  96 +++++++++
 g4f/Provider/Providers/DeepAi.py       |  46 +++++
 g4f/Provider/Providers/Forefront.py    |  30 +++
 g4f/Provider/Providers/GetGpt.py       |  57 ++++++
 g4f/Provider/Providers/H2o.py          | 106 ++++++++++
 g4f/Provider/Providers/Liaobots.py     |  52 +++++
 g4f/Provider/Providers/Lockchat.py     |  32 +++
 g4f/Provider/Providers/Theb.py         |  28 +++
 g4f/Provider/Providers/Vercel.py       | 162 +++++++++++++++
 g4f/Provider/Providers/You.py          |  24 +++
 g4f/Provider/Providers/Yqcloud.py      |  37 ++++
 g4f/Provider/Providers/helpers/theb.py |  48 +++++
 g4f/Provider/Providers/helpers/you.py  |  79 ++++++++
 g4f/Provider/__init__.py               |  20 ++
 20 files changed, 1442 insertions(+)
 create mode 100644 g4f/Provider/Provider.py
 create mode 100644 g4f/Provider/Providers/Aichat.py
 create mode 100644 g4f/Provider/Providers/Ails.py
 create mode 100644 g4f/Provider/Providers/Bard.py
 create mode 100644 g4f/Provider/Providers/Bing.py
 create mode 100644 g4f/Provider/Providers/ChatgptAi.py
 create mode 100644 g4f/Provider/Providers/ChatgptLogin.py
 create mode 100644 g4f/Provider/Providers/DeepAi.py
 create mode 100644 g4f/Provider/Providers/Forefront.py
 create mode 100644 g4f/Provider/Providers/GetGpt.py
 create mode 100644 g4f/Provider/Providers/H2o.py
 create mode 100644 g4f/Provider/Providers/Liaobots.py
 create mode 100644 g4f/Provider/Providers/Lockchat.py
 create mode 100644 g4f/Provider/Providers/Theb.py
 create mode 100644 g4f/Provider/Providers/Vercel.py
 create mode 100644 g4f/Provider/Providers/You.py
 create mode 100644 g4f/Provider/Providers/Yqcloud.py
 create mode 100644 g4f/Provider/Providers/helpers/theb.py
 create mode 100644 g4f/Provider/Providers/helpers/you.py
 create mode 100644 g4f/Provider/__init__.py

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/Provider.py b/g4f/Provider/Provider.py
new file mode 100644
index 00000000..12c23333
--- /dev/null
+++ b/g4f/Provider/Provider.py
@@ -0,0 +1,15 @@
+import os
+from ..typing import sha256, Dict, get_type_hints
+
+url = None
+model = None
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    return
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/Aichat.py b/g4f/Provider/Providers/Aichat.py
new file mode 100644
index 00000000..e4fde8c3
--- /dev/null
+++ b/g4f/Provider/Providers/Aichat.py
@@ -0,0 +1,44 @@
+import os, requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chat-gpt.org/chat'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+
+    headers = {
+        'authority': 'chat-gpt.org',
+        'accept': '*/*',
+        'cache-control': 'no-cache',
+        'content-type': 'application/json',
+        'origin': 'https://chat-gpt.org',
+        'pragma': 'no-cache',
+        'referer': 'https://chat-gpt.org/chat',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'message':base,
+        'temperature': 1,
+        'presence_penalty': 0,
+        'top_p': 1,
+        'frequency_penalty': 0
+    }
+
+    response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
+    yield response.json()['message']
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Ails.py b/g4f/Provider/Providers/Ails.py
new file mode 100644
index 00000000..1a14b2e9
--- /dev/null
+++ b/g4f/Provider/Providers/Ails.py
@@ -0,0 +1,91 @@
+import os
+import time
+import json
+import uuid
+import random
+import hashlib
+import requests
+
+from ...typing import sha256, Dict, get_type_hints
+from datetime import datetime
+
+url: str = 'https://ai.ls'
+model: str = 'gpt-3.5-turbo'
+supports_stream = True
+needs_auth = False
+
+class Utils:
+    def hash(json_data: Dict[str, str]) -> sha256:
+
+        secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83,
+                                          35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76])
+
+        base_string: str = '%s:%s:%s:%s' % (
+            json_data['t'],
+            json_data['m'],
+            'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf',
+            len(json_data['m'])
+        )
+
+        return hashlib.sha256(base_string.encode()).hexdigest()
+
+    def format_timestamp(timestamp: int) -> str:
+
+        e = timestamp
+        n = e % 10
+        r = n + 1 if n % 2 == 0 else n
+        return str(e - n + r)
+
+
+def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
+
+    headers = {
+        'authority': 'api.caipacity.com',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'authorization': 'Bearer free',
+        'client-id': str(uuid.uuid4()),
+        'client-v': '0.1.217',
+        'content-type': 'application/json',
+        'origin': 'https://ai.ls',
+        'referer': 'https://ai.ls/',
+        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"Windows"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'cross-site',
+        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+    }
+
+    params = {
+        'full': 'false',
+    }
+
+    timestamp = Utils.format_timestamp(int(time.time() * 1000))
+
+    sig = {
+        'd': datetime.now().strftime('%Y-%m-%d'),
+        't': timestamp,
+        's': Utils.hash({
+            't': timestamp,
+            'm': messages[-1]['content']})}
+
+    json_data = json.dumps(separators=(',', ':'), obj={
+        'model': 'gpt-3.5-turbo',
+        'temperature': 0.6,
+        'stream': True,
+        'messages': messages} | sig)
+
+    response = requests.post('https://api.caipacity.com/v1/chat/completions',
+                             headers=headers, data=json_data, stream=True)
+
+    for token in response.iter_lines():
+        if b'content' in token:
+            completion_chunk = json.loads(token.decode().replace('data: ', ''))
+            token = completion_chunk['choices'][0]['delta'].get('content')
+            if token != None:
+                yield token
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Bard.py b/g4f/Provider/Providers/Bard.py
new file mode 100644
index 00000000..4c37c4b7
--- /dev/null
+++ b/g4f/Provider/Providers/Bard.py
@@ -0,0 +1,74 @@
+import os, requests, json, browser_cookie3, re, random
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bard.google.com'
+model = ['Palm2']
+supports_stream = False
+needs_auth = True
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
+        domain_name='.google.com')}['__Secure-1PSID']
+
+    formatted = '\n'.join([
+        '%s: %s' % (message['role'], message['content']) for message in messages
+    ])
+    prompt = f'{formatted}\nAssistant:'
+
+    proxy = kwargs.get('proxy', False)
+    if proxy == False:
+        print('warning!, you did not give a proxy, a lot of countries are banned from Google Bard, so it may not work')
+
+    snlm0e = None
+    conversation_id = None
+    response_id = None
+    choice_id = None
+
+    client = requests.Session()
+    client.proxies = {
+        'http': f'http://{proxy}',
+        'https': f'http://{proxy}'} if proxy else None
+
+    client.headers = {
+        'authority': 'bard.google.com',
+        'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
+        'origin': 'https://bard.google.com',
+        'referer': 'https://bard.google.com/',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        'x-same-domain': '1',
+        'cookie': f'__Secure-1PSID={psid}'
+    }
+
+    snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
+                       client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
+
+    params = {
+        'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
+        '_reqid': random.randint(1111, 9999),
+        'rt': 'c'
+    }
+
+    data = {
+        'at': snlm0e,
+        'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
+
+    intents = '.'.join([
+        'assistant',
+        'lamda',
+        'BardFrontendService'
+    ])
+
+    response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
+                           data=data, params=params)
+
+    chat_data = json.loads(response.content.splitlines()[3])[0][2]
+    if chat_data:
+        json_chat_data = json.loads(chat_data)
+
+        yield json_chat_data[0][0]
+
+    else:
+        yield 'error'
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/Bing.py b/g4f/Provider/Providers/Bing.py
new file mode 100644
index 00000000..1d33cda5
--- /dev/null
+++ b/g4f/Provider/Providers/Bing.py
@@ -0,0 +1,350 @@
+import os
+import json
+import random
+import json
+import os
+import uuid
+import ssl
+import certifi
+import aiohttp
+import asyncio
+
+import requests
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://bing.com/chat'
+model = ['gpt-4']
+supports_stream = True
+needs_auth = False
+
+ssl_context = ssl.create_default_context()
+ssl_context.load_verify_locations(certifi.where())
+
+
+class optionsSets:
+    optionSet: dict = {
+        'tone': str,
+        'optionsSets': list
+    }
+
+    jailbreak: dict = {
+        "optionsSets": [
+            'saharasugg',
+            'enablenewsfc',
+            'clgalileo',
+            'gencontentv3',
+            "nlu_direct_response_filter",
+            "deepleo",
+            "disable_emoji_spoken_text",
+            "responsible_ai_policy_235",
+            "enablemm",
+            "h3precise"
+            # "harmonyv3",
+            "dtappid",
+            "cricinfo",
+            "cricinfov2",
+            "dv3sugg",
+            "nojbfedge"
+        ]
+    }
+
+
+class Defaults:
+    delimiter = '\x1e'
+    ip_address = f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
+
+    allowedMessageTypes = [
+        'Chat',
+        'Disengaged',
+        'AdsQuery',
+        'SemanticSerp',
+        'GenerateContentQuery',
+        'SearchQuery',
+        'ActionRequest',
+        'Context',
+        'Progress',
+        'AdsQuery',
+        'SemanticSerp'
+    ]
+
+    sliceIds = [
+
+        # "222dtappid",
+        # "225cricinfo",
+        # "224locals0"
+
+        'winmuid3tf',
+        'osbsdusgreccf',
+        'ttstmout',
+        'crchatrev',
+        'winlongmsgtf',
+        'ctrlworkpay',
+        'norespwtf',
+        'tempcacheread',
+        'temptacache',
+        '505scss0',
+        '508jbcars0',
+        '515enbotdets0',
+        '5082tsports',
+        '515vaoprvs',
+        '424dagslnv1s0',
+        'kcimgattcf',
+        '427startpms0'
+    ]
+
+    location = {
+        'locale': 'en-US',
+        'market': 'en-US',
+        'region': 'US',
+        'locationHints': [
+            {
+                'country': 'United States',
+                'state': 'California',
+                'city': 'Los Angeles',
+                'timezoneoffset': 8,
+                'countryConfidence': 8,
+                'Center': {
+                    'Latitude': 34.0536909,
+                    'Longitude': -118.242766
+                },
+                'RegionType': 2,
+                'SourceType': 1
+            }
+        ],
+    }
+
+
+def _format(msg: dict) -> str:
+    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
+
+
+async def create_conversation():
+    for _ in range(5):
+        create = requests.get('https://www.bing.com/turing/conversation/create',
+                              headers={
+                                  'authority': 'edgeservices.bing.com',
+                                  'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+                                  'accept-language': 'en-US,en;q=0.9',
+                                  'cache-control': 'max-age=0',
+                                  'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
+                                  'sec-ch-ua-arch': '"x86"',
+                                  'sec-ch-ua-bitness': '"64"',
+                                  'sec-ch-ua-full-version': '"110.0.1587.69"',
+                                  'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+                                  'sec-ch-ua-mobile': '?0',
+                                  'sec-ch-ua-model': '""',
+                                  'sec-ch-ua-platform': '"Windows"',
+                                  'sec-ch-ua-platform-version': '"15.0.0"',
+                                  'sec-fetch-dest': 'document',
+                                  'sec-fetch-mode': 'navigate',
+                                  'sec-fetch-site': 'none',
+                                  'sec-fetch-user': '?1',
+                                  'upgrade-insecure-requests': '1',
+                                  'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
+                                  'x-edge-shopping-flag': '1',
+                                  'x-forwarded-for': Defaults.ip_address
+                              })
+
+        conversationId = create.json().get('conversationId')
+        clientId = create.json().get('clientId')
+        conversationSignature = create.json().get('conversationSignature')
+
+        if not conversationId or not clientId or not conversationSignature and _ == 4:
+            raise Exception('Failed to create conversation.')
+
+        return conversationId, clientId, conversationSignature
+
+
+async def stream_generate(prompt: str, mode: optionsSets.optionSet = optionsSets.jailbreak, context: bool or str = False):
+    timeout = aiohttp.ClientTimeout(total=900)
+    session = aiohttp.ClientSession(timeout=timeout)
+
+    conversationId, clientId, conversationSignature = await create_conversation()
+
+    wss = await session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', ssl=ssl_context, autoping=False,
+                                   headers={
+                                       'accept': 'application/json',
+                                       'accept-language': 'en-US,en;q=0.9',
+                                       'content-type': 'application/json',
+                                       'sec-ch-ua': '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
+                                       'sec-ch-ua-arch': '"x86"',
+                                       'sec-ch-ua-bitness': '"64"',
+                                       'sec-ch-ua-full-version': '"109.0.1518.78"',
+                                       'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
+                                       'sec-ch-ua-mobile': '?0',
+                                       'sec-ch-ua-model': '',
+                                       'sec-ch-ua-platform': '"Windows"',
+                                       'sec-ch-ua-platform-version': '"15.0.0"',
+                                       'sec-fetch-dest': 'empty',
+                                       'sec-fetch-mode': 'cors',
+                                       'sec-fetch-site': 'same-origin',
+                                       'x-ms-client-request-id': str(uuid.uuid4()),
+                                       'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
+                                       'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
+                                       'Referrer-Policy': 'origin-when-cross-origin',
+                                       'x-forwarded-for': Defaults.ip_address
+                                   })
+
+    await wss.send_str(_format({'protocol': 'json', 'version': 1}))
+    await wss.receive(timeout=900)
+
+    struct = {
+        'arguments': [
+            {
+                **mode,
+                'source': 'cib',
+                'allowedMessageTypes': Defaults.allowedMessageTypes,
+                'sliceIds': Defaults.sliceIds,
+                'traceId': os.urandom(16).hex(),
+                'isStartOfSession': True,
+                'message': Defaults.location | {
+                    'author': 'user',
+                    'inputMethod': 'Keyboard',
+                    'text': prompt,
+                    'messageType': 'Chat'
+                },
+                'conversationSignature': conversationSignature,
+                'participant': {
+                    'id': clientId
+                },
+                'conversationId': conversationId
+            }
+        ],
+        'invocationId': '0',
+        'target': 'chat',
+        'type': 4
+    }
+
+    if context:
+        struct['arguments'][0]['previousMessages'] = [
+            {
+                "author": "user",
+                "description": context,
+                "contextType": "WebPage",
+                "messageType": "Context",
+                "messageId": "discover-web--page-ping-mriduna-----"
+            }
+        ]
+
+    await wss.send_str(_format(struct))
+
+    final = False
+    draw = False
+    resp_txt = ''
+    result_text = ''
+    resp_txt_no_link = ''
+    cache_text = ''
+
+    while not final:
+        msg = await wss.receive(timeout=900)
+        objects = msg.data.split(Defaults.delimiter)
+
+        for obj in objects:
+            if obj is None or not obj:
+                continue
+
+            response = json.loads(obj)
+            if response.get('type') == 1 and response['arguments'][0].get('messages',):
+                if not draw:
+                    if (response['arguments'][0]['messages'][0]['contentOrigin'] != 'Apology') and not draw:
+                        resp_txt = result_text + \
+                            response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
+                                'text', '')
+                        resp_txt_no_link = result_text + \
+                            response['arguments'][0]['messages'][0].get(
+                                'text', '')
+
+                        if response['arguments'][0]['messages'][0].get('messageType',):
+                            resp_txt = (
+                                resp_txt
+                                + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
+                                + '\n'
+                            )
+                            result_text = (
+                                result_text
+                                + response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
+                                + '\n'
+                            )
+
+                    if cache_text.endswith('   '):
+                        final = True
+                        if wss and not wss.closed:
+                            await wss.close()
+                        if session and not session.closed:
+                            await session.close()
+
+                    yield (resp_txt.replace(cache_text, ''))
+                    cache_text = resp_txt
+
+            elif response.get('type') == 2:
+                if response['item']['result'].get('error'):
+                    if wss and not wss.closed:
+                        await wss.close()
+                    if session and not session.closed:
+                        await session.close()
+
+                    raise Exception(
+                        f"{response['item']['result']['value']}: {response['item']['result']['message']}")
+
+                if draw:
+                    cache = response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text']
+                    response['item']['messages'][1]['adaptiveCards'][0]['body'][0]['text'] = (
+                        cache + resp_txt)
+
+                if (response['item']['messages'][-1]['contentOrigin'] == 'Apology' and resp_txt):
+                    response['item']['messages'][-1]['text'] = resp_txt_no_link
+                    response['item']['messages'][-1]['adaptiveCards'][0]['body'][0]['text'] = resp_txt
+
+                    # print('Preserved the message from being deleted', file=sys.stderr)
+
+                final = True
+                if wss and not wss.closed:
+                    await wss.close()
+                if session and not session.closed:
+                    await session.close()
+
+
+def run(generator):
+    loop = asyncio.get_event_loop()
+    gen = generator.__aiter__()
+
+    while True:
+        try:
+            next_val = loop.run_until_complete(gen.__anext__())
+            yield next_val
+
+        except StopAsyncIteration:
+            break
+
+    #print('Done')
+
+
+def convert(messages):
+    context = ""
+
+    for message in messages:
+        context += "[%s](#message)\n%s\n\n" % (message['role'],
+                                               message['content'])
+
+    return context
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    if len(messages) < 2:
+        prompt = messages[0]['content']
+        context = False
+
+    else:
+        prompt = messages[-1]['content']
+        context = convert(messages[:-1])
+
+    response = run(stream_generate(prompt, optionsSets.jailbreak, context))
+    for token in response:
+        yield (token)
+
+    #print('Done')
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/g4f/Provider/Providers/ChatgptAi.py b/g4f/Provider/Providers/ChatgptAi.py
new file mode 100644
index 00000000..00d4cf6f
--- /dev/null
+++ b/g4f/Provider/Providers/ChatgptAi.py
@@ -0,0 +1,51 @@
+import os
+import requests, re
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://chatgpt.ai/gpt-4/'
+model = ['gpt-4']
+supports_stream = False
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    chat = ''
+    for message in messages:
+        chat += '%s: %s\n' % (message['role'], message['content'])
+    chat += 'assistant: '
+
+    response = requests.get('https://chatgpt.ai/gpt-4/')
+
+    nonce, post_id, _, bot_id = re.findall(r'data-nonce="(.*)"\n data-post-id="(.*)"\n data-url="(.*)"\n data-bot-id="(.*)"\n data-width', response.text)[0]
+
+    headers = {
+        'authority': 'chatgpt.ai',
+        'accept': '*/*',
+        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+        'cache-control': 'no-cache',
+        'origin': 'https://chatgpt.ai',
+        'pragma': 'no-cache',
+        'referer': 'https://chatgpt.ai/gpt-4/',
+        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"Windows"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
+    }
+    data = {
+        '_wpnonce': nonce,
+        'post_id': post_id,
+        'url': 'https://chatgpt.ai/gpt-4',
+        'action': 'wpaicg_chat_shortcode_message',
+        'message': chat,
+        'bot_id': bot_id
+    }
+
+    response = requests.post('https://chatgpt.ai/wp-admin/admin-ajax.php',
+                             headers=headers, data=data)
+
+    yield (response.json()['data'])
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/g4f/Provider/Providers/ChatgptLogin.py b/g4f/Provider/Providers/ChatgptLogin.py
new file mode 100644
index 00000000..9551d15d
--- /dev/null
+++ b/g4f/Provider/Providers/ChatgptLogin.py
@@ -0,0 +1,96 @@
+import os
+from ...typing import sha256, Dict, get_type_hints
+import requests
+import re
+import base64
+
+url = 'https://chatgptlogin.ac'
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    def get_nonce():
+        res = requests.get('https://chatgptlogin.ac/use-chatgpt-free/', headers={
+            "Referer": "https://chatgptlogin.ac/use-chatgpt-free/",
+            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
+        })
+
+        src = re.search(r'class="mwai-chat mwai-chatgpt">.*Send