Diffstat (limited to 'g4f')
 g4f/Provider/Acytoo.py       |  7 ++++---
 g4f/Provider/Aichat.py       |  6 ++++--
 g4f/Provider/ChatgptLogin.py | 17 +++++++++--------
 g4f/Provider/EasyChat.py     | 39 +++++++++++++++-----
 g4f/Provider/Equing.py       |  3 +++
 g4f/Provider/H2o.py          |  2 ++
 g4f/Provider/Theb.py         | 87 +++++++++++++++++++----
 7 files changed, 124 insertions(+), 37 deletions(-)
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
index 32c67c0c..2edd9efd 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -19,11 +19,12 @@ class Acytoo(BaseProvider):
         **kwargs: Any,
     ) -> CreateResult:
         headers = _create_header()
-        payload = _create_payload(messages)
+        payload = _create_payload(messages, kwargs.get('temperature', 0.5))
 
         url = "https://chat.acytoo.com/api/completions"
         response = requests.post(url=url, headers=headers, json=payload)
 
         response.raise_for_status()
+        response.encoding = "utf-8"
         yield response.text
 
@@ -34,7 +35,7 @@ def _create_header():
     }
 
 
-def _create_payload(messages: list[dict[str, str]]):
+def _create_payload(messages: list[dict[str, str]], temperature):
     payload_messages = [
         message | {"createdAt": int(time.time()) * 1000} for message in messages
     ]
@@ -42,6 +43,6 @@ def _create_payload(messages: list[dict[str, str]]):
         "key": "",
         "model": "gpt-3.5-turbo",
         "messages": payload_messages,
-        "temperature": 1,
+        "temperature": temperature,
         "password": "",
     }
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
index 6992d071..a1d90db7 100644
--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -40,9 +40,9 @@ class Aichat(BaseProvider):
 
         json_data = {
             "message": base,
-            "temperature": 1,
+            "temperature": kwargs.get('temperature', 0.5),
             "presence_penalty": 0,
-            "top_p": 1,
+            "top_p": kwargs.get('top_p', 1),
             "frequency_penalty": 0,
         }
 
@@ -52,4 +52,6 @@ class Aichat(BaseProvider):
             json=json_data,
         )
         response.raise_for_status()
+        if not response.json()['response']:
+            raise Exception("Error Response: " + str(response.json()))
         yield response.json()["message"]
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/ChatgptLogin.py
index 05bd1262..da9fda40 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/ChatgptLogin.py
@@ -9,8 +9,9 @@ from .base_provider import BaseProvider
 
 
 class ChatgptLogin(BaseProvider):
-    url = "https://chatgptlogin.ac"
+    url = "https://opchatgpts.net"
     supports_gpt_35_turbo = True
+    working = True
 
     @staticmethod
     def create_completion(
@@ -24,15 +25,15 @@ class ChatgptLogin(BaseProvider):
             "accept": "*/*",
             "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
             "content-type": "application/json",
-            "origin": "https://chatgptlogin.ac",
-            "referer": "https://chatgptlogin.ac/use-chatgpt-free/",
-            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+            "origin": "https://opchatgpts.net",
+            "referer": "https://opchatgpts.net/chatgpt-free-use/",
+            "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Windows"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
             "x-wp-nonce": _get_nonce(),
         }
 
@@ -59,7 +60,7 @@ class ChatgptLogin(BaseProvider):
         }
 
         response = requests.post(
-            "https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat",
+            "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
             headers=headers,
             json=json_data,
         )
@@ -81,9 +82,9 @@ class ChatgptLogin(BaseProvider):
 
 def _get_nonce() -> str:
     res = requests.get(
-        "https://chatgptlogin.ac/use-chatgpt-free/",
+        "https://opchatgpts.net/chatgpt-free-use/",
         headers={
-            "Referer": "https://chatgptlogin.ac/use-chatgpt-free/",
+            "Referer": "https://opchatgpts.net/chatgpt-free-use/",
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
         },
     )
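With the Acytoo and Aichat changes above, sampling parameters are read from **kwargs with a fallback default instead of being hardcoded. A minimal caller sketch, assuming the provider is exported from g4f.Provider as in this revision of the repo:

    from g4f.Provider import Acytoo

    # temperature is forwarded into the payload via kwargs.get('temperature', 0.5);
    # Acytoo does not stream, so the generator yields the whole body in one chunk.
    for text in Acytoo.create_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
        stream=False,
        temperature=0.7,
    ):
        print(text)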
"https://opchatgpts.net/chatgpt-free-use/", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", }, ) diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py index 59c46ffa..2a61346c 100644 --- a/g4f/Provider/EasyChat.py +++ b/g4f/Provider/EasyChat.py @@ -10,6 +10,7 @@ class EasyChat(BaseProvider): url = "https://free.easychat.work" supports_stream = True supports_gpt_35_turbo = True + working = True @staticmethod def create_completion( @@ -25,6 +26,7 @@ class EasyChat(BaseProvider): "https://chat2.fastgpt.me", "https://chat3.fastgpt.me", "https://chat4.fastgpt.me", + "https://gxos1h1ddt.fastgpt.me" ] server = active_servers[kwargs.get("active_server", 0)] headers = { @@ -34,9 +36,17 @@ class EasyChat(BaseProvider): "content-type": "application/json", "origin": f"{server}", "referer": f"{server}/", - "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", "x-requested-with": "XMLHttpRequest", + 'plugins': '0', + 'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', + 'usesearch': 'false', + 'x-requested-with': 'XMLHttpRequest' } json_data = { @@ -57,14 +67,25 @@ class EasyChat(BaseProvider): f"{server}/api/openai/v1/chat/completions", headers=headers, json=json_data, + stream=stream, ) - - response.raise_for_status() - print(response.text) - for chunk in response.iter_lines(): - if b"content" in chunk: - data = json.loads(chunk.decode().split("data: ")[1]) - yield data["choices"][0]["delta"]["content"] + if response.status_code == 200: + if stream == False: + json_data = response.json() + if "choices" in json_data: + yield json_data["choices"][0]["message"]["content"] + else: + yield Exception("No response from server") + else: + + for chunk in response.iter_lines(): + if b"content" in chunk: + splitData = chunk.decode().split("data: ") + if len(splitData) > 1: + yield json.loads(splitData[1])["choices"][0]["delta"]["content"] + else: + yield Exception(f"Error {response.status_code} from server") + @classmethod @property diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py index bcf6cff9..90c865d9 100644 --- a/g4f/Provider/Equing.py +++ b/g4f/Provider/Equing.py @@ -53,6 +53,9 @@ class Equing(ABC): response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions', headers=headers, json=json_data, stream=stream) + if not stream: + yield response.json()["choices"][0]["message"]["content"] + return for line in response.iter_content(chunk_size=1024): if line: diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py index c2492e59..f9b799bb 100644 --- a/g4f/Provider/H2o.py +++ b/g4f/Provider/H2o.py @@ -75,6 +75,8 @@ class H2o(BaseProvider): headers=headers, json=data, ) + response.raise_for_status() + response.encoding = "utf-8" generated_text = response.text.replace("\n", "").split("data:") generated_text = json.loads(generated_text[-1]) diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py index c6fd6f19..09c94c24 100644 --- a/g4f/Provider/Theb.py +++ b/g4f/Provider/Theb.py @@ -1,16 +1,15 @@ -import json - -from 
diff --git a/g4f/Provider/Theb.py b/g4f/Provider/Theb.py
index c6fd6f19..09c94c24 100644
--- a/g4f/Provider/Theb.py
+++ b/g4f/Provider/Theb.py
@@ -1,16 +1,15 @@
-import json
-
-from curl_cffi import requests
-
+import json, random, requests
+# from curl_cffi import requests
 from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class Theb(BaseProvider):
     url = "https://theb.ai"
-    working = False
+    working = True
     supports_stream = True
     supports_gpt_35_turbo = True
+    needs_auth = True
 
     @staticmethod
     def create_completion(
@@ -19,21 +18,79 @@ class Theb(BaseProvider):
         stream: bool,
         **kwargs: Any,
     ) -> CreateResult:
-        prompt = messages[-1]["content"]
-
+        conversation = ''
+        for message in messages:
+            conversation += '%s: %s\n' % (message['role'], message['content'])
+
+        conversation += 'assistant: '
+        auth = kwargs.get("auth", {
+            "bearer_token": "free",
+            "org_id": "theb",
+        })
+        bearer_token = auth["bearer_token"]
+        org_id = auth["org_id"]
         headers = {
-            "accept": "application/json, text/plain, */*",
-            "content-type": "application/json",
+            'authority': 'beta.theb.ai',
+            'accept': 'text/event-stream',
+            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
+            'authorization': 'Bearer ' + bearer_token,
+            'content-type': 'application/json',
+            'origin': 'https://beta.theb.ai',
+            'referer': 'https://beta.theb.ai/home',
+            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+            'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
         }
+        # random request id for the query string
+        # (sent as req_rand to avoid cached responses)
+        req_rand = random.randint(100000000, 9999999999)
 
-        json_data: dict[str, Any] = {"prompt": prompt, "options": {}}
+        json_data: dict[str, Any] = {
+            "text": conversation,
+            "category": "04f58f64a4aa4191a957b47290fee864",
+            "model": "ee8d4f29cb7047f78cbe84313ed6ace8",
+            "model_params": {
+                "system_prompt": "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
+                "temperature": kwargs.get("temperature", 1),
+                "top_p": kwargs.get("top_p", 1),
+                "frequency_penalty": kwargs.get("frequency_penalty", 0),
+                "presence_penalty": kwargs.get("presence_penalty", 0),
+                "long_term_memory": "auto"
+            }
+        }
 
         response = requests.post(
-            "https://chatbot.theb.ai/api/chat-process",
+            "https://beta.theb.ai/api/conversation?org_id=" + org_id + "&req_rand=" + str(req_rand),
             headers=headers,
             json=json_data,
-            impersonate="chrome110",
+            stream=True,
         )
         response.raise_for_status()
-        line = response.text.splitlines()[-1]
-        text = json.loads(line)["text"]
-        yield text
+        content = ""
+        next_content = ""
+        for chunk in response.iter_lines():
+            if b"content" in chunk:
+                next_content = content
+                data = json.loads(chunk.decode().split("data: ")[1])
+                content = data["content"]
+                yield data["content"].replace(next_content, "")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("auth", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+            ("presence_penalty", "int"),
+            ("frequency_penalty", "int"),
+            ("top_p", "int")
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
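Theb is now marked needs_auth: a bearer token and org id travel in an auth dict through **kwargs, falling back to the in-repo defaults "free"/"theb" when omitted. A usage sketch (the token values shown are just those defaults, and the g4f.Provider import path is assumed):

    from g4f.Provider import Theb

    # Theb streams; each yielded piece is the newly appended content only.
    for token in Theb.create_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
        auth={"bearer_token": "free", "org_id": "theb"},  # same as the defaults
        temperature=1.0,
    ):
        print(token, end="", flush=True)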