From 38dbe4b8e5ca7f9bc0508e1ba1bf878fd6d8c19c Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 22 Jan 2024 03:38:11 +0100
Subject: Improve CreateImagesBing

Sort providers by category
---
 g4f/Provider/selenium/AItianhuSpace.py | 117 +++++++++++++++++++++++++++++++++
 g4f/Provider/selenium/MyShell.py       |  76 +++++++++++++++++++++
 g4f/Provider/selenium/PerplexityAi.py  | 106 +++++++++++++++++++++++++++++
 g4f/Provider/selenium/TalkAi.py        |  87 ++++++++++++++++++++++++
 g4f/Provider/selenium/__init__.py      |   6 +-
 5 files changed, 391 insertions(+), 1 deletion(-)
 create mode 100644 g4f/Provider/selenium/AItianhuSpace.py
 create mode 100644 g4f/Provider/selenium/MyShell.py
 create mode 100644 g4f/Provider/selenium/PerplexityAi.py
 create mode 100644 g4f/Provider/selenium/TalkAi.py

(limited to 'g4f/Provider/selenium')

diff --git a/g4f/Provider/selenium/AItianhuSpace.py b/g4f/Provider/selenium/AItianhuSpace.py
new file mode 100644
index 00000000..9878bd5a
--- /dev/null
+++ b/g4f/Provider/selenium/AItianhuSpace.py
@@ -0,0 +1,117 @@
+from __future__ import annotations
+
+import time
+import random
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt, get_random_string
+from ...webdriver import WebDriver, WebDriverSession
+from ... import debug
+
+class AItianhuSpace(AbstractProvider):
+    url = "https://chat3.aiyunos.top/"
+    working = True
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    _domains = ["aitianhu.com", "aitianhu1.top"]
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        domain: str = None,
+        proxy: str = None,
+        timeout: int = 120,
+        webdriver: WebDriver = None,
+        headless: bool = True,
+        **kwargs
+    ) -> CreateResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        if not domain:
+            rand = get_random_string(6)
+            domain = random.choice(cls._domains)
+            domain = f"{rand}.{domain}"
+        if debug.logging:
+            print(f"AItianhuSpace | using domain: {domain}")
+        url = f"https://{domain}"
+        prompt = format_prompt(messages)
+
+        with WebDriverSession(webdriver, "", headless=headless, proxy=proxy) as driver:
+            from selenium.webdriver.common.by import By
+            from selenium.webdriver.support.ui import WebDriverWait
+            from selenium.webdriver.support import expected_conditions as EC
+
+            wait = WebDriverWait(driver, timeout)
+
+            # Bypass devtools detection
+            driver.get("https://blank.page/")
+            wait.until(EC.visibility_of_element_located((By.ID, "sheet")))
+            driver.execute_script(f"""
+document.getElementById('sheet').addEventListener('click', () => {{
+    window.open('{url}', '_blank');
+}});
+""")
+            driver.find_element(By.ID, "sheet").click()
+            time.sleep(10)
+
+            original_window = driver.current_window_handle
+            for window_handle in driver.window_handles:
+                if window_handle != original_window:
+                    driver.close()
+                    driver.switch_to.window(window_handle)
+                    break
+
+            # Wait for page load
+            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el")))
+
+            # Register hook in XMLHttpRequest
+            script = """
+const _http_request_open = XMLHttpRequest.prototype.open;
+window._last_message = window._message = "";
+window._loadend = false;
+XMLHttpRequest.prototype.open = function(method, url) {
+    if (url == "/api/chat-process") {
+        this.addEventListener("progress", (event) => {
+            const lines = this.responseText.split("\\n");
+            try {
+                window._message = JSON.parse(lines[lines.length-1])["text"];
+            } catch(e) { }
+        });
+        this.addEventListener("loadend", (event) => {
+            window._loadend = true;
+        });
+    }
+    return _http_request_open.call(this, method, url);
+}
+"""
+            driver.execute_script(script)
+
+            # Submit prompt
+            driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el").send_keys(prompt)
+            driver.find_element(By.CSS_SELECTOR, "button.n-button.n-button--primary-type.n-button--medium-type").click()
+
+            # Read response
+            while True:
+                chunk = driver.execute_script("""
+if (window._message && window._message != window._last_message) {
+    try {
+        return window._message.substring(window._last_message.length);
+    } finally {
+        window._last_message = window._message;
+    }
+}
+if (window._loadend) {
+    return null;
+}
+return "";
+""")
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
\ No newline at end of file
diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/selenium/MyShell.py
new file mode 100644
index 00000000..a3f246ff
--- /dev/null
+++ b/g4f/Provider/selenium/MyShell.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time, json
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare
+
+class MyShell(AbstractProvider):
+    url = "https://app.myshell.ai/chat"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        timeout: int = 120,
+        webdriver: WebDriver = None,
+        **kwargs
+    ) -> CreateResult:
+        with WebDriverSession(webdriver, "", proxy=proxy) as driver:
+            bypass_cloudflare(driver, cls.url, timeout)
+
+            # Send request with message
+            data = {
+                "botId": "4738",
+                "conversation_scenario": 3,
+                "message": format_prompt(messages),
+                "messageType": 1
+            }
+            script = """
+response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
+    "headers": {
+        "accept": "application/json",
+        "content-type": "application/json",
+        "myshell-service-name": "organics-api",
+        "visitor-id": localStorage.getItem("mix_visitorId")
+    },
+    "body": '{body}',
+    "method": "POST"
+})
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+"""
+            driver.execute_script(script.replace("{body}", json.dumps(data)))
+            script = """
+chunk = await window._reader.read();
+if (chunk.done) {
+    return null;
+}
+content = '';
+chunk.value.split('\\n').forEach((line, index) => {
+    if (line.startsWith('data: ')) {
+        try {
+            const data = JSON.parse(line.substring('data: '.length));
+            if ('content' in data) {
+                content += data['content'];
+            }
+        } catch(e) {}
+    }
+});
+return content;
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
\ No newline at end of file
diff --git a/g4f/Provider/selenium/PerplexityAi.py b/g4f/Provider/selenium/PerplexityAi.py
new file mode 100644
index 00000000..4796f709
--- /dev/null
+++ b/g4f/Provider/selenium/PerplexityAi.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import time
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession
+
+class PerplexityAi(AbstractProvider):
+    url = "https://www.perplexity.ai"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        timeout: int = 120,
+        webdriver: WebDriver = None,
+        virtual_display: bool = True,
+        copilot: bool = False,
+        **kwargs
+    ) -> CreateResult:
+        with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
+            prompt = format_prompt(messages)
+
+            driver.get(f"{cls.url}/")
+            wait = WebDriverWait(driver, timeout)
+
+            # Is page loaded?
+            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']")))
+
+            # Register WebSocket hook
+            script = """
+window._message = window._last_message = "";
+window._message_finished = false;
+const _socket_send = WebSocket.prototype.send;
+WebSocket.prototype.send = function(...args) {
+    if (!window._socket_onmessage) {
+        window._socket_onmessage = this;
+        this.addEventListener("message", (event) => {
+            if (event.data.startsWith("42")) {
+                let data = JSON.parse(event.data.substring(2));
+                if (data[0] == "query_progress" || data[0] == "query_answered") {
+                    let content = JSON.parse(data[1]["text"]);
+                    if (data[1]["mode"] == "copilot") {
+                        content = content[content.length-1]["content"]["answer"];
+                        content = JSON.parse(content);
+                    }
+                    window._message = content["answer"];
+                    if (!window._message_finished) {
+                        window._message_finished = data[0] == "query_answered";
+                    }
+                }
+            }
+        });
+    }
+    return _socket_send.call(this, ...args);
+};
+"""
+            driver.execute_script(script)
+
+            if copilot:
+                try:
+                    # Check for account
+                    driver.find_element(By.CSS_SELECTOR, "img[alt='User avatar']")
+                    # Enable copilot
+                    driver.find_element(By.CSS_SELECTOR, "button[data-testid='copilot-toggle']").click()
+                except:
+                    raise RuntimeError("You need an account to use Copilot")
+
+            # Submit prompt
+            driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(prompt)
+            driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(Keys.ENTER)
+
+            # Stream response
+            script = """
+if(window._message && window._message != window._last_message) {
+    try {
+        return window._message.substring(window._last_message.length);
+    } finally {
+        window._last_message = window._message;
+    }
+} else if(window._message_finished) {
+    return null;
+} else {
+    return '';
+}
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
\ No newline at end of file
diff --git a/g4f/Provider/selenium/TalkAi.py b/g4f/Provider/selenium/TalkAi.py
new file mode 100644
index 00000000..5aeb48ac
--- /dev/null
+++ b/g4f/Provider/selenium/TalkAi.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import time, json
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ...webdriver import WebDriver, WebDriverSession
+
+class TalkAi(AbstractProvider):
+    url = "https://talkai.info"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        webdriver: WebDriver = None,
+        **kwargs
+    ) -> CreateResult:
+        with WebDriverSession(webdriver, "", virtual_display=True, proxy=proxy) as driver:
+            from selenium.webdriver.common.by import By
+            from selenium.webdriver.support.ui import WebDriverWait
+            from selenium.webdriver.support import expected_conditions as EC
+
+            driver.get(f"{cls.url}/chat/")
+
+            # Wait for page load
+            WebDriverWait(driver, 240).until(
+                EC.presence_of_element_located((By.CSS_SELECTOR, "body.chat-page"))
+            )
+
+            data = {
+                "type": "chat",
+                "message": messages[-1]["content"],
+                "messagesHistory": [{
+                    "from": "you" if message["role"] == "user" else "chatGPT",
+                    "content": message["content"]
+                } for message in messages],
+                "model": model if model else "gpt-3.5-turbo",
+                "max_tokens": 256,
+                "temperature": 1,
+                "top_p": 1,
+                "presence_penalty": 0,
+                "frequency_penalty": 0,
+                **kwargs
+            }
+            script = """
+const response = await fetch("/chat/send2/", {
+    "headers": {
+        "Accept": "application/json",
+        "Content-Type": "application/json",
+    },
+    "body": {body},
+    "method": "POST"
+});
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+"""
+            driver.execute_script(
+                script.replace("{body}", json.dumps(json.dumps(data)))
+            )
+            # Read response
+            while True:
+                chunk = driver.execute_script("""
+chunk = await window._reader.read();
+if (chunk["done"]) {
+    return null;
+}
+content = "";
+lines = chunk["value"].split("\\n");
+lines.forEach((line, index) => {
+    if (line.startsWith('data: ')) {
+        content += line.substring('data: '.length);
+    }
+});
+return content;
+""")
+                if chunk:
+                    yield chunk.replace("\\n", "\n")
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
\ No newline at end of file
diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py
index 80c48d14..a8c18a49 100644
--- a/g4f/Provider/selenium/__init__.py
+++ b/g4f/Provider/selenium/__init__.py
@@ -1 +1,5 @@
-from .Phind import Phind
\ No newline at end of file
+from .AItianhuSpace import AItianhuSpace
+from .MyShell import MyShell
+from .PerplexityAi import PerplexityAi
+from .Phind import Phind
+from .TalkAi import TalkAi
\ No newline at end of file
--
cgit v1.2.3
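
A minimal usage sketch (not part of the patch) for the providers added above, assuming the g4f ChatCompletion.create() API as it existed around this release; import paths and parameters may differ in other versions:

import g4f
# The __init__.py change above exposes the new classes from the selenium subpackage.
from g4f.Provider.selenium import PerplexityAi

# Selenium-based providers yield text chunks as they are scraped from the page.
for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=PerplexityAi,
    stream=True,
):
    print(chunk, end="", flush=True)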