author     Tekky <98614666+xtekky@users.noreply.github.com>   2023-11-20 19:27:38 +0100
committer  GitHub <noreply@github.com>                        2023-11-20 19:27:38 +0100
commit     e8d88c955f75f539dd71bd4b713e90094751161c (patch)
tree       6d1ad2636abfd7ad0b4f5a59aa4630cf2a29723e /g4f/Provider/needs_auth
parent     Merge pull request #1275 from egcash/patch-1 (diff)
parent     Merge branch 'main' into webdriver (diff)
Diffstat (limited to 'g4f/Provider/needs_auth')
-rw-r--r--   g4f/Provider/needs_auth/Bard.py            11
-rw-r--r--   g4f/Provider/needs_auth/HuggingChat.py     15
-rw-r--r--   g4f/Provider/needs_auth/OpenAssistant.py   12
-rw-r--r--   g4f/Provider/needs_auth/OpenaiChat.py     131
-rw-r--r--   g4f/Provider/needs_auth/Poe.py             11
-rw-r--r--   g4f/Provider/needs_auth/Raycast.py         15
-rw-r--r--   g4f/Provider/needs_auth/Theb.py            29
7 files changed, 80 insertions, 144 deletions
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 77c029b8..2c1f6121 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -4,7 +4,8 @@ import time
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
class Bard(BaseProvider):
url = "https://bard.google.com"
@@ -18,13 +19,13 @@ class Bard(BaseProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
) -> CreateResult:
prompt = format_prompt(messages)
- session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+ session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -36,8 +37,8 @@ class Bard(BaseProvider):
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
except:
# Reopen browser for login
- if not web_driver:
- driver = session.reopen(headless=False)
+ if not webdriver:
+ driver = session.reopen()
driver.get(f"{cls.url}/chat")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
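Across Bard, Poe, and Theb the change is identical: the Selenium helpers move out of ..helper into a dedicated ..webdriver module, the keyword argument is renamed from web_driver to webdriver, and session.reopen() now picks its own headless mode instead of being forced to headless=False. A minimal usage sketch; the module path and session semantics are taken from the import and call sites in this diff, not from separate documentation:

    from g4f.Provider.webdriver import WebDriver, WebDriverSession

    # Passing webdriver=None lets the session start (and later clean up) its
    # own browser, which is what the providers in this diff do by default.
    webdriver: WebDriver = None
    session = WebDriverSession(webdriver, user_data_dir=None, headless=True, proxy=None)
    with session as driver:
        driver.get("https://bard.google.com/chat")
        # On a login wall the providers call session.reopen() and wait again.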
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 68c6713b..59e2da73 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -59,17 +59,4 @@ class HuggingChat(AsyncGeneratorProvider):
break
async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
- response.raise_for_status()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+    response.raise_for_status()
\ No newline at end of file
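This hunk, together with the matching deletions in OpenAssistant.py, OpenaiChat.py, and Raycast.py below, drops a hand-maintained params classmethod-property that each provider had copied with minor variations. A sketch of the centralized equivalent, assuming the base class now derives the listing from the subclass signature (the base-class side of the change is not shown in this diff):

    from inspect import signature

    class ProviderSketch:
        # Stand-in provider: only the annotated signature matters here.
        @classmethod
        async def create_async_generator(cls, model: str, messages: list,
                                         stream: bool = True, proxy: str = None):
            yield ""

        @classmethod
        @property
        def params(cls) -> str:
            # Build "name: type" pairs from the signature instead of keeping
            # a literal copy of the list in every provider.
            listing = ", ".join(
                f"{name}: {param.annotation.__name__}"
                for name, param in signature(cls.create_async_generator).parameters.items()
            )
            return f"g4f.provider.{cls.__name__} supports: ({listing})"

    # ProviderSketch.params ->
    # "g4f.provider.ProviderSketch supports: (model: str, messages: list, stream: bool, proxy: str)"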
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/needs_auth/OpenAssistant.py
index de62636c..e549b517 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/needs_auth/OpenAssistant.py
@@ -87,15 +87,3 @@ class OpenAssistant(AsyncGeneratorProvider):
}
async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
response.raise_for_status()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9fd90812..8c9dd1e0 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -6,7 +6,8 @@ from asyncstdlib.itertools import tee
from async_property import async_cached_property
from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_browser, get_event_loop
+from ..helper import get_event_loop
+from ..webdriver import get_browser
from ...typing import AsyncResult, Messages
from ...requests import StreamSession
@@ -38,7 +39,10 @@ class OpenaiChat(AsyncGeneratorProvider):
**kwargs
) -> Response:
if prompt:
- messages.append({"role": "user", "content": prompt})
+ messages.append({
+ "role": "user",
+ "content": prompt
+ })
generator = cls.create_async_generator(
model,
messages,
@@ -49,12 +53,9 @@ class OpenaiChat(AsyncGeneratorProvider):
response_fields=True,
**kwargs
)
- fields: ResponseFields = await anext(generator)
- if "access_token" not in kwargs:
- kwargs["access_token"] = cls._access_token
return Response(
generator,
- fields,
+ await anext(generator),
action,
messages,
kwargs
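With response_fields=True the generator yields a ResponseFields record before any message text (see the loop further down), so Response can take it straight from await anext(generator); the old side effect of caching cls._access_token into kwargs disappears with it. The consumption order this relies on, sketched:

    # Minimal sketch; `generator` is the object returned by
    # create_async_generator(..., response_fields=True) above.
    async def consume(generator):
        fields = await anext(generator)   # ResponseFields: conversation_id, parent_id, end_turn
        async for delta in generator:     # then incremental message text
            print(delta, end="")
        return fields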
@@ -87,7 +88,6 @@ class OpenaiChat(AsyncGeneratorProvider):
headers = {
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
- "Cookie": 'intercom-device-id-dgkjq2bp=0f047573-a750-46c8-be62-6d54b56e7bf0; ajs_user_id=user-iv3vxisaoNodwWpxmNpMfekH; ajs_anonymous_id=fd91be0b-0251-4222-ac1e-84b1071e9ec1; __Host-next-auth.csrf-token=d2b5f67d56f7dd6a0a42ae4becf2d1a6577b820a5edc88ab2018a59b9b506886%7Ce5c33eecc460988a137cbc72d90ee18f1b4e2f672104f368046df58e364376ac; _cfuvid=gt_mA.q6rue1.7d2.AR0KHpbVBS98i_ppfi.amj2._o-1700353424353-0-604800000; cf_clearance=GkHCfPSFU.NXGcHROoe4FantnqmnNcluhTNHz13Tk.M-1700353425-0-1-dfe77f81.816e9bc2.714615da-0.2.1700353425; __Secure-next-auth.callback-url=https%3A%2F%2Fchat.openai.com; intercom-session-dgkjq2bp=UWdrS1hHazk5VXN1c0V5Q1F0VXdCQmsyTU9pVjJMUkNpWnFnU3dKWmtIdGwxTC9wbjZuMk5hcEc0NWZDOGdndS0tSDNiaDNmMEdIL1RHU1dFWDBwOHFJUT09--f754361b91fddcd23a13b288dcb2bf8c7f509e91; _uasid="Z0FBQUFBQmxXVnV0a3dmVno4czRhcDc2ZVcwaUpSNUdZejlDR25YSk5NYTJQQkpyNmRvOGxjTHMyTlAxWmJhaURrMVhjLXZxQXdZeVpBbU1aczA5WUpHT2dwaS1MOWc4MnhyNWFnbGRzeGdJcGFKT0ZRdnBTMVJHcGV2MGNTSnVQY193c0hqUWIycHhQRVF4dENlZ3phcDdZeHgxdVhoalhrZmtZME9NbWhMQjdVR3Vzc3FRRk0ybjJjNWMwTWtIRjdPb19lUkFtRmV2MDVqd1kwWU11QTYtQkdZenEzVHhLMGplY1hZM3FlYUt1cVZaNWFTRldleEJETzJKQjk1VTJScy1GUnMxUVZWMnVxYklxMjdockVZbkZyd1R4U1RtMnA1ZzlSeXphdmVOVk9xeEdrRkVOSjhwTVd1QzFtQjhBcWdDaE92Q1VlM2pwcjFQTXRuLVJNRVlZSGpIdlZ0aGV3PT0="; _dd_s=rum=0&expire=1700356244884; __Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..3aK6Fbdy2_8f07bf.8eT2xgonrCnz7ySY6qXFsg3kzL6UQfXKAYaw3tyn-6_X9657zy47k9qGvmi9mF0QKozj5jau3_Ca62AQQ7FmeC6Y2F1urtzqrXqwTTsQ2LuzFPIQkx6KKb2DXc8zW2-oyEzJ_EY5yxfLB2RlRkSh3M7bYNZh4_ltEcfkj38s_kIPGMxv34udtPWGWET99MCjkdwQWXylJag4s0fETA0orsBAKnGCyqAUNJbb_D7BYtGSV-MQ925kZMG6Di_QmfO0HQWURDYjmdRNcuy1PT_xJ1DJko8sjL42i4j3RhkNDkhqCIqyYImz2eHFWHW7rYKxTkrBhlCPMS5hRdcCswD7JYPcSBiwnVRYgyOocFGXoFvQgIZ2FX9NiZ3SMEVM1VwIGSE-qH0H2nMa8_iBvsOgOWJgKjVAvzzyzZvRVDUUHzJrikSFPNONVDU3h-04c1kVL4qIu9DfeTPN7n8AvNmYwMbro0L9-IUAeXNo4-pwF0Kt-AtTsamqWvMqnK4O_YOyLnDDlvkmnOvDC2d5uinwlQIxr6APO6qFfGLlHiLZemKoekxEE1Fx70dl-Ouhk1VIzbF3OC6XNNxeBm9BUYUiHdL0wj2H9rHgX4cz6ZmS_3VTgpD6UJh-evu5KJ2gIvjYmVbyzEN0aPNDxfvBaOm-Ezpy4bUJ2bUrOwNn-0knWkDiTvjYmNhCyefPCtCF6rpKNay8PCw_yh79C4SdEP6Q4V7LI0Tvdi5uz7kLCiBC4AT9L0ao1WDX03mkUOpjvzHDvPLmj8chW3lTVm_kA0eYGQY4wT0jzleWlfV0Q8rB2oYECNLWksA3F1zlGfcl4lQjprvTXRePkvAbMpoJEsZD3Ylq7-foLDLk4-M2LYAFZDs282AY04sFjAjQBxTELFCCuDgTIgTXSIskY_XCxpVXDbdLlbCJY7XVK45ybwtfqwlKRp8Mo0B131uQAFc-migHaUaoGujxJJk21bP8F0OmhNYHBo4FQqE1rQm2JH5bNM7txKeh5KXdJgVUVbRSr7OIp_OF5-Bx_v9eRBGAIDkue26E2-O8Rnrp5zQ5TnvecQLDaUzWavCLPwsZ0_gsOLBxNOmauNYZtF8IElCsQSFDdhoiMxXsYUm4ZYKEAy3GWq8HGTAvBhNkh1hvnI7y-d8-DOaZf_D_D98-olZfm-LUkeosLNpPB9rxYMqViCiW3KrXE9Yx0wlFm5ePKaVvR7Ym_EPhSOhJBKFPCvdTdMZSNPUcW0ZJBVByq0A9sxD51lYq3gaFyqh94S4s_ox182AQ3szGzHkdgLcnQmJG9OYvKxAVcd43eg6_gODAYhx02GjbMw-7JTAhyXSeCrlMteHyOXl8hai-3LilC3PmMzi7Vbu49dhF1s4LcVlUowen5ira44rQQaB26mdaOUoQfodgt66M3RTWGPXyK1Nb72AzSXsCKyaQPbzeb6cN0fdGSdG4ktwvR04eFNEkquo_3aKu2GmUKTD0XcRx9dYrfXjgY-X1DDTVs1YND2gRhdx7FFEeBVjtbj2UqmG3Rvd4IcHGe7OnYWw2MHDcol68SsR1KckXWwWREz7YTGUnDB2M1kx_H4W2mjclytnlHOnYU3RflegRPeSTbdzUZJvGKXCCz45luHkQWN_4DExE76D-9YqbFIz-RY5yL4h-Zs-i2xjm2K-4xCMM9nQIOqhLMqixIZQ2ldDAidKoYtbs5ppzbcBLyrZM96bq9DwRBY3aacqWdlRd-TfX0wv5KO4fo0sSh5FsuhuN0zcEV_NNXgqIEM_p14EcPqgbrAvCBQ8os70TRBQLXiF0EniSofGjxwF8kQvUk3C6Wfc8cTTeN-E6GxCVTn91HBwA1iSEZlRLMVb8_BcRJNqwbgnb_07jR6-eo42u88CR3KQdAWwbQRdMxsURFwZ0ujHXVGG0Ll6qCFBcHXWyDO1x1yHdHnw8_8yF26pnA2iPzrFR-8glMgIA-639sLuGAxjO1_ZuvJ9CAB41Az9S_jaZwaWy215Hk4-BRYD-MKmHtonwo3rrxhE67WJgbbu14efsw5nT6ow961pffgwXov5VA1Rg7nv1E8RvQOx7umWW6o8R4W6L8f2COsmPTXfgwIjoJKkjhUqAQ8ceG7cM0ET-38yaC0ObU8EkXfdGGgxI28qTEZWczG66_iM4hw
7QEGCY5Cz2kbO6LETAiw9OsSigtBvDS7f0Ou0bZ41pdK7G3FmvdZAnjWPjObnDF4k4uWfn7mzt0fgj3FyqK20JezRDyGuAbUUhOvtZpc9sJpzxR34eXEZTouuALrHcGuNij4z6rx51FrQsaMtiup8QVrhtZbXtKLMYnWYSbkhuTeN2wY-xV1ZUsQlakIZszzGF7kuIG87KKWMpuPMvbXjz6Pp_gWJiIC6aQuk8xl5g0iBPycf_6Q-MtpuYxzNE2TpI1RyR9mHeXmteoRzrFiWp7yEC-QGNFyAJgxTqxM3CjHh1Jt6IddOsmn89rUo1dZM2Smijv_fbIv3avXLkIPX1KZjILeJCtpU0wAdsihDaRiRgDdx8fG__F8zuP0n7ziHas73cwrfg-Ujr6DhC0gTNxyd9dDA_oho9N7CQcy6EFmfNF2te7zpLony0859jtRv2t1TnpzAa1VvMK4u6mXuJ2XDo04_6GzLO3aPHinMdl1BcIAWnqAqWAu3euGFLTHOhXlfijut9N1OCifd_zWjhVtzlR39uFeCQBU5DyQArzQurdoMx8U1ETsnWgElxGSStRW-YQoPsAJ87eg9trqKspFpTVlAVN3t1GtoEAEhcwhe81SDssLmKGLc.7PqS6jRGTIfgTPlO7Ognvg; __cf_bm=VMWoAKEB45hQSwxXtnYXcurPaGZDJS4dMi6dIMFLwdw-1700355394-0-ATVsbq97iCaTaJbtYr8vtg1Zlbs3nLrJLKVBHYa2Jn7hhkGclqAy8Gbyn5ePEhDRqj93MsQmtayfYLqY5n4WiLY=; __cflb=0H28vVfF4aAyg2hkHFH9CkdHRXPsfCUf6VpYf2kz3RX'
}
async with StreamSession(
proxies={"https": proxy},
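The deleted Cookie header baked a full set of live chat.openai.com session cookies into the source tree; after this hunk the stream authenticates with the bearer token alone:

    # Request headers after this change, as in the hunk above. access_token
    # comes from the browser-based login flow later in this file.
    headers = {
        "Accept": "text/event-stream",
        "Authorization": f"Bearer {access_token}",
    }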
@@ -95,24 +95,22 @@ class OpenaiChat(AsyncGeneratorProvider):
headers=headers,
timeout=timeout
) as session:
- data = {
- "action": action,
- "arkose_token": await get_arkose_token(proxy, timeout),
- "conversation_id": conversation_id,
- "parent_message_id": parent_id,
- "model": models[model],
- "history_and_training_disabled": history_disabled and not auto_continue,
- }
- if action != "continue":
- data["messages"] = [{
- "id": str(uuid.uuid4()),
- "author": {"role": "user"},
- "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
- }]
- first = True
end_turn = EndTurn()
- while first or auto_continue and not end_turn.is_end:
- first = False
+ while not end_turn.is_end:
+ data = {
+ "action": action,
+ "arkose_token": await get_arkose_token(proxy, timeout),
+ "conversation_id": conversation_id,
+ "parent_message_id": parent_id,
+ "model": models[model],
+ "history_and_training_disabled": history_disabled and not auto_continue,
+ }
+ if action != "continue":
+ data["messages"] = [{
+ "id": str(uuid.uuid4()),
+ "author": {"role": "user"},
+ "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+ }]
async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
try:
response.raise_for_status()
@@ -120,43 +118,38 @@ class OpenaiChat(AsyncGeneratorProvider):
raise RuntimeError(f"Error {response.status_code}: {await response.text()}")
last_message = 0
async for line in response.iter_lines():
- if line.startswith(b"data: "):
- line = line[6:]
- if line == b"[DONE]":
- break
- try:
- line = json.loads(line)
- except:
- continue
- if "message" not in line:
- continue
- if "error" in line and line["error"]:
- raise RuntimeError(line["error"])
- if "message_type" not in line["message"]["metadata"]:
- continue
- if line["message"]["author"]["role"] != "assistant":
- continue
- if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
- conversation_id = line["conversation_id"]
- parent_id = line["message"]["id"]
- if response_fields:
- response_fields = False
- yield ResponseFields(conversation_id, parent_id, end_turn)
- new_message = line["message"]["content"]["parts"][0]
- yield new_message[last_message:]
- last_message = len(new_message)
- if "finish_details" in line["message"]["metadata"]:
- if line["message"]["metadata"]["finish_details"]["type"] == "max_tokens":
- end_turn.end()
-
- data = {
- "action": "continue",
- "arkose_token": await get_arkose_token(proxy, timeout),
- "conversation_id": conversation_id,
- "parent_message_id": parent_id,
- "model": models[model],
- "history_and_training_disabled": False,
- }
+ if not line.startswith(b"data: "):
+ continue
+ line = line[6:]
+ if line == b"[DONE]":
+ break
+ try:
+ line = json.loads(line)
+ except:
+ continue
+ if "message" not in line:
+ continue
+ if "error" in line and line["error"]:
+ raise RuntimeError(line["error"])
+ if "message_type" not in line["message"]["metadata"]:
+ continue
+ if line["message"]["author"]["role"] != "assistant":
+ continue
+ if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
+ conversation_id = line["conversation_id"]
+ parent_id = line["message"]["id"]
+ if response_fields:
+ response_fields = False
+ yield ResponseFields(conversation_id, parent_id, end_turn)
+ new_message = line["message"]["content"]["parts"][0]
+ yield new_message[last_message:]
+ last_message = len(new_message)
+ if "finish_details" in line["message"]["metadata"]:
+ if line["message"]["metadata"]["finish_details"]["type"] == "stop":
+ end_turn.end()
+ if not auto_continue:
+ break
+ action = "continue"
await asyncio.sleep(5)
@classmethod
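The rewrite collapses the duplicated request construction into one loop: the payload, including a fresh arkose_token, is rebuilt on every pass; end_turn now fires on a "stop" finish rather than "max_tokens"; and each follow-up pass simply switches action to "continue". The control flow, sketched with hypothetical stand-ins for the session.post block and SSE parsing shown above:

    import asyncio

    # Control-flow sketch only. build_payload and post_and_parse stand in
    # for the request and parsing code in the hunk above; end_turn.is_end
    # flips once the server reports a "stop" finish.
    async def run_conversation(action, auto_continue, end_turn,
                               build_payload, post_and_parse):
        while not end_turn.is_end:
            payload = build_payload(action)          # fresh arkose_token each pass
            async for delta in post_and_parse(payload):
                yield delta                          # incremental message text
            if not auto_continue:
                break
            action = "continue"                      # later passes resume the turn
            await asyncio.sleep(5)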
@@ -167,7 +160,7 @@ class OpenaiChat(AsyncGeneratorProvider):
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
- driver = get_browser("~/openai", proxy=proxy)
+ driver = get_browser(proxy=proxy)
except ImportError:
return
try:
@@ -193,18 +186,6 @@ class OpenaiChat(AsyncGeneratorProvider):
raise RuntimeError("Read access token failed")
return cls._access_token
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("access_token", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
config = {
@@ -293,7 +274,7 @@ class Response():
async def variant(self, **kwargs) -> Response:
if self.action != "next":
- raise RuntimeError("Can't create variant with continue or variant request.")
+ raise RuntimeError("Can't create variant from continue or variant request.")
return await OpenaiChat.create(
**self._options,
messages=self._messages,
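The reworded error matches what the guard checks: only a response produced by a "next" request may branch. A hypothetical usage, assuming the truncated create(...) call above passes action="variant" (which the error message implies):

    # `response` came back from a normal "next" request.
    retry = await response.variant()   # allowed: regenerates the last answer
    await retry.variant()              # RuntimeError: retry stems from a "variant" request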
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 1c8c97d7..99f6945b 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -4,7 +4,8 @@ import time
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
models = {
"meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
@@ -33,7 +34,7 @@ class Poe(BaseProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
@@ -44,7 +45,7 @@ class Poe(BaseProvider):
raise ValueError(f"Model is not supported: {model}")
prompt = format_prompt(messages)
- session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+ session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -80,8 +81,8 @@ class Poe(BaseProvider):
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
except:
# Reopen browser for login
- if not web_driver:
- driver = session.reopen(headless=False)
+ if not webdriver:
+ driver = session.reopen()
driver.get(f"{cls.url}/{models[model]['name']}")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 4570fd9f..d7be98ac 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -60,18 +60,3 @@ class Raycast(BaseProvider):
token = completion_chunk['text']
if token != None:
yield token
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("top_p", "int"),
- ("model", "str"),
- ("auth", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index cf33f0c6..49ee174b 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -4,7 +4,8 @@ import time
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
models = {
"theb-ai": "TheB.AI",
@@ -44,14 +45,14 @@ class Theb(BaseProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
virtual_display: bool = True,
**kwargs
) -> CreateResult:
if model in models:
model = models[model]
prompt = format_prompt(messages)
- web_session = WebDriverSession(web_driver, virtual_display=virtual_display, proxy=proxy)
+ web_session = WebDriverSession(webdriver, virtual_display=virtual_display, proxy=proxy)
with web_session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -61,22 +62,16 @@ class Theb(BaseProvider):
# Register fetch hook
script = """
window._fetch = window.fetch;
-window.fetch = (url, options) => {
+window.fetch = async (url, options) => {
// Call parent fetch method
- const result = window._fetch(url, options);
+ const response = await window._fetch(url, options);
if (!url.startsWith("/api/conversation")) {
-        return result;
+        return response;
}
- // Load response reader
- result.then((response) => {
- if (!response.body.locked) {
- window._reader = response.body.getReader();
- }
- });
- // Return dummy response
- return new Promise((resolve, reject) => {
- resolve(new Response(new ReadableStream()))
- });
+ // Copy response
+    const copy = response.clone();
+ window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ return copy;
}
window._last_message = "";
"""
@@ -97,7 +92,6 @@ window._last_message = "";
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
- time.sleep(200)
try:
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
@@ -134,9 +128,8 @@ if(window._reader) {
if (chunk['done']) {
return null;
}
- text = (new TextDecoder()).decode(chunk['value']);
message = '';
- text.split('\\r\\n').forEach((line, index) => {
+ chunk['value'].split('\\r\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
try {
line = JSON.parse(line.substring('data: '.length));