author    H Lohaus <hlohaus@users.noreply.github.com>    2024-01-22 04:33:19 +0100
committer GitHub <noreply@github.com>                    2024-01-22 04:33:19 +0100
commit    efbcc04d8ac45f5acf1e394d9445ad8569a0a127 (patch)
tree      fe9d386a04a280c8aaeec2faaa05819058801d12 /g4f/Provider/selenium/TalkAi.py
parent    ~ | to do list | survey: https://forms.gle/FhtGtUorygyJn7i48 (diff)
parent    Improve CreateImagesBing (diff)
Diffstat (limited to 'g4f/Provider/selenium/TalkAi.py')
-rw-r--r--    g4f/Provider/selenium/TalkAi.py    87
1 file changed, 87 insertions, 0 deletions
diff --git a/g4f/Provider/selenium/TalkAi.py b/g4f/Provider/selenium/TalkAi.py
new file mode 100644
index 00000000..5aeb48ac
--- /dev/null
+++ b/g4f/Provider/selenium/TalkAi.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import json, time
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ...webdriver import WebDriver, WebDriverSession
+
+class TalkAi(AbstractProvider):
+    url = "https://talkai.info"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_stream = True
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        webdriver: WebDriver = None,
+        **kwargs
+    ) -> CreateResult:
+        with WebDriverSession(webdriver, "", virtual_display=True, proxy=proxy) as driver:
+            from selenium.webdriver.common.by import By
+            from selenium.webdriver.support.ui import WebDriverWait
+            from selenium.webdriver.support import expected_conditions as EC
+
+            driver.get(f"{cls.url}/chat/")
+
+            # Wait for page load
+            WebDriverWait(driver, 240).until(
+                EC.presence_of_element_located((By.CSS_SELECTOR, "body.chat-page"))
+            )
+
+            data = {
+                "type": "chat",
+                "message": messages[-1]["content"],
+                "messagesHistory": [{
+                    "from": "you" if message["role"] == "user" else "chatGPT",
+                    "content": message["content"]
+                } for message in messages],
+                "model": model if model else "gpt-3.5-turbo",
+                "max_tokens": 256,
+                "temperature": 1,
+                "top_p": 1,
+                "presence_penalty": 0,
+                "frequency_penalty": 0,
+                **kwargs
+            }
+            script = """
+const response = await fetch("/chat/send2/", {
+    "headers": {
+        "Accept": "application/json",
+        "Content-Type": "application/json",
+    },
+    "body": {body},
+    "method": "POST"
+});
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+"""
+            driver.execute_script(
+                script.replace("{body}", json.dumps(json.dumps(data)))  # dump twice so the payload lands in the script as a JS string literal
+            )
+            # Read response
+            while True:
+                chunk = driver.execute_script("""
+chunk = await window._reader.read();
+if (chunk["done"]) {
+    return null;
+}
+content = "";
+lines = chunk["value"].split("\\n")
+lines.forEach((line, index) => {
+    if (line.startsWith('data: ')) {
+        content += line.substring('data: '.length);
+    }
+});
+return content;
+""")
+                if chunk:
+                    yield chunk.replace("\\n", "\n")
+                elif chunk != "":
+                    break  # reader returned null: stream is finished
+                else:
+                    time.sleep(0.1)
\ No newline at end of file
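For context, a Selenium-based provider like this one is normally driven through g4f's ChatCompletion interface rather than called directly. The sketch below is illustrative only and is not part of the commit; it assumes TalkAi is re-exported under g4f.Provider and that a local Chrome/Chromium webdriver is available for Selenium.

import g4f
from g4f.Provider import TalkAi  # assumption: selenium providers are re-exported here

# Stream a completion through the TalkAi provider (opens a browser session under the hood).
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    provider=TalkAi,
    stream=True,
)
for chunk in response:
    print(chunk, end="", flush=True)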