commit f962993b8dac225d85f9fbb59be188e263af1c0b
tree ae153b027ad5a2679b4f1fde881d61cc0e1276b4 /g4f
author H Lohaus <hlohaus@users.noreply.github.com> 2023-12-06 12:02:41 +0100
committer GitHub <noreply@github.com> 2023-12-06 12:02:41 +0100
parent ~ | g4f `v-0.1.9.1`
parent Improve docker image
Diffstat (limited to 'g4f')
 g4f/Provider/Bing.py                   |  7
 g4f/Provider/PerplexityAi.py           |  9
 g4f/Provider/helper.py                 |  9
 g4f/Provider/needs_auth/Bard.py        | 16
 g4f/Provider/needs_auth/HuggingChat.py |  6
 g4f/Provider/needs_auth/OpenaiChat.py  | 37
 g4f/__init__.py                        | 15
 g4f/gui/client/js/chat.v1.js           | 95
 g4f/gui/server/backend.py              | 47
 g4f/gui/server/provider.py             | 14
 g4f/models.py                          |  5
 g4f/webdriver.py                       | 10
 12 files changed, 127 insertions, 143 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index b790a6d2..9e3e7405 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -156,8 +156,11 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
"optionsSets": ["autosave"]
}
async with session.post(url, json=json, proxy=proxy) as response:
- response = await response.json()
- return response["result"]["value"] == "Success"
+ try:
+ response = await response.json()
+ return response["result"]["value"] == "Success"
+ except:
+ return False
class Defaults:
delimiter = "\x1e"
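
The hunk above stops a malformed delete response from raising out of the provider. A minimal standalone sketch of the same defensive pattern (function and payload names are illustrative, not the provider's real helpers):

    from aiohttp import ClientSession

    async def delete_conversation(session: ClientSession, url: str, payload: dict) -> bool:
        async with session.post(url, json=payload) as response:
            try:
                data = await response.json()
                return data["result"]["value"] == "Success"
            except Exception:
                # A non-JSON body or missing key counts as a failed delete.
                return False
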
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index 941ca6d4..ad629aa8 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -1,6 +1,10 @@
from __future__ import annotations
import time
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
@@ -27,11 +31,6 @@ class PerplexityAi(BaseProvider):
**kwargs
) -> CreateResult:
with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.common.keys import Keys
-
prompt = format_prompt(messages)
driver.get(f"{cls.url}/")
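
Hoisting the selenium imports to module scope lets every helper in the file share them instead of re-importing inside the session block. A sketch of the explicit-wait idiom they support (the selector and helper name are illustrative):

    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC

    def wait_for_prompt_box(driver, timeout: int = 10):
        # Block until the element is visible instead of sleeping a fixed time.
        wait = WebDriverWait(driver, timeout)
        return wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea")))
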
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 2171f0b7..61d9cb62 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -6,6 +6,7 @@ import webbrowser
import random
import string
import secrets
+import os
from os import path
from asyncio import AbstractEventLoop
from platformdirs import user_config_dir
@@ -18,7 +19,7 @@ from browser_cookie3 import (
edge,
vivaldi,
firefox,
- BrowserCookieError
+ _LinuxPasswordManager
)
from ..typing import Dict, Messages
@@ -81,6 +82,10 @@ def init_cookies():
except webbrowser.Error:
continue
+# Check for broken dbus address in docker image
+if os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
+ _LinuxPasswordManager.get_password = lambda a, b: b"secret"
+
# Load cookies for a domain from all supported browsers.
# Cache the results in the "_cookies" variable.
def get_cookies(domain_name=''):
@@ -100,7 +105,7 @@ def get_cookies(domain_name=''):
for cookie in cookie_jar:
if cookie.name not in cookies:
cookies[cookie.name] = cookie.value
- except BrowserCookieError as e:
+ except:
pass
_cookies[domain_name] = cookies
return _cookies[domain_name]
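
The docker workaround above is a plain monkeypatch: with DBUS_SESSION_BUS_ADDRESS pointed at /dev/null there is no keyring to query over D-Bus, so browser_cookie3's Linux password lookup is replaced with its static fallback instead of raising. Condensed:

    import os
    from browser_cookie3 import _LinuxPasswordManager

    if os.environ.get("DBUS_SESSION_BUS_ADDRESS") == "/dev/null":
        # No usable D-Bus session (docker image): skip the keyring entirely.
        _LinuxPasswordManager.get_password = lambda a, b: b"secret"
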
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 877af37e..48e535dd 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -1,6 +1,11 @@
from __future__ import annotations
import time
+import os
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
@@ -27,10 +32,6 @@ class Bard(BaseProvider):
prompt = format_prompt(messages)
session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-
try:
driver.get(f"{cls.url}/chat")
wait = WebDriverWait(driver, 10 if headless else 240)
@@ -40,6 +41,9 @@ class Bard(BaseProvider):
if not webdriver:
driver = session.reopen()
driver.get(f"{cls.url}/chat")
+ login_url = os.environ.get("G4F_LOGIN_URL")
+ if login_url:
+ yield f"Please login: [Google Bard]({login_url})\n\n"
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
else:
@@ -61,8 +65,8 @@ XMLHttpRequest.prototype.open = function(method, url) {
driver.execute_script(script)
# Submit prompt
- driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
- driver.find_element(By.CSS_SELECTOR, "button.send-button").click()
+ driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(prompt)
+ driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(Keys.ENTER)
# Yield response
while True:
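
Two behavioral changes land here: a login hint is yielded when G4F_LOGIN_URL is set, and the prompt is submitted with an ENTER keypress on the editor itself, which also works once the editor is no longer `ql-blank`. A condensed sketch of the new submit path:

    from selenium.webdriver.common.by import By
    from selenium.webdriver.common.keys import Keys

    def submit_prompt(driver, prompt: str) -> None:
        editor = driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea")
        editor.send_keys(prompt)
        editor.send_keys(Keys.ENTER)  # ENTER submits; no send-button lookup needed
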
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 59e2da73..530069c0 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -11,7 +11,6 @@ from ..helper import format_prompt, get_cookies
class HuggingChat(AsyncGeneratorProvider):
url = "https://huggingface.co/chat"
- needs_auth = True
working = True
model = "meta-llama/Llama-2-70b-chat-hf"
@@ -22,12 +21,11 @@ class HuggingChat(AsyncGeneratorProvider):
messages: Messages,
stream: bool = True,
proxy: str = None,
+ web_search: bool = False,
cookies: dict = None,
**kwargs
) -> AsyncResult:
model = model if model else cls.model
- if proxy and "://" not in proxy:
- proxy = f"http://{proxy}"
if not cookies:
cookies = get_cookies(".huggingface.co")
@@ -46,7 +44,7 @@ class HuggingChat(AsyncGeneratorProvider):
"inputs": format_prompt(messages),
"is_retry": False,
"response_id": str(uuid.uuid4()),
- "web_search": False
+ "web_search": web_search
}
async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
async for line in response.content:
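
With `web_search` now forwarded into the conversation payload (and `needs_auth` dropped), callers can opt in per request. A usage sketch, assuming valid huggingface.co cookies are available in a local browser:

    import g4f
    from g4f.Provider import HuggingChat

    response = g4f.ChatCompletion.create(
        model="meta-llama/Llama-2-70b-chat-hf",
        provider=HuggingChat,
        messages=[{"role": "user", "content": "Summarize today's AI news."}],
        web_search=True,  # forwarded as the "web_search" field of the payload
    )
    print(response)
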
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index af62382a..818c163f 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -1,12 +1,15 @@
from __future__ import annotations
-import uuid, json, asyncio
+import uuid, json, asyncio, os
from py_arkose_generator.arkose import get_values_for_request
from asyncstdlib.itertools import tee
from async_property import async_cached_property
-
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+
from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_event_loop
+from ..helper import get_event_loop, format_prompt
from ...webdriver import get_browser
from ...typing import AsyncResult, Messages
from ...requests import StreamSession
@@ -84,7 +87,12 @@ class OpenaiChat(AsyncGeneratorProvider):
if not parent_id:
parent_id = str(uuid.uuid4())
if not access_token:
- access_token = await cls.get_access_token(proxy)
+ access_token = cls._access_token
+ if not access_token:
+ login_url = os.environ.get("G4F_LOGIN_URL")
+ if login_url:
+ yield f"Please login: [ChatGPT]({login_url})\n\n"
+ access_token = cls._access_token = await cls.browse_access_token(proxy)
headers = {
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
@@ -106,10 +114,11 @@ class OpenaiChat(AsyncGeneratorProvider):
"history_and_training_disabled": history_disabled and not auto_continue,
}
if action != "continue":
+ prompt = format_prompt(messages) if not conversation_id else messages[-1]["content"]
data["messages"] = [{
"id": str(uuid.uuid4()),
"author": {"role": "user"},
- "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+ "content": {"content_type": "text", "parts": [prompt]},
}]
async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
try:
@@ -155,14 +164,7 @@ class OpenaiChat(AsyncGeneratorProvider):
@classmethod
async def browse_access_token(cls, proxy: str = None) -> str:
def browse() -> str:
- try:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-
- driver = get_browser(proxy=proxy)
- except ImportError:
- return
+ driver = get_browser(proxy=proxy)
try:
driver.get(f"{cls.url}/")
WebDriverWait(driver, 1200).until(
@@ -177,15 +179,6 @@ class OpenaiChat(AsyncGeneratorProvider):
None,
browse
)
-
- @classmethod
- async def get_access_token(cls, proxy: str = None) -> str:
- if not cls._access_token:
- cls._access_token = await cls.browse_access_token(proxy)
- if not cls._access_token:
- raise RuntimeError("Read access token failed")
- return cls._access_token
-
async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
config = {
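
Besides caching the access token on the class and yielding a login hint, the hunks change what a brand-new conversation sends: the full formatted history rather than only the last message. A sketch of that selection (helper name is illustrative; `format_prompt` lives in g4f.Provider.helper as of this commit):

    from typing import Optional
    from g4f.Provider.helper import format_prompt

    def build_prompt(messages: list, conversation_id: Optional[str]) -> str:
        # New conversation: the server has no context yet, send everything.
        # Existing conversation: send only the newest user message.
        return format_prompt(messages) if not conversation_id else messages[-1]["content"]
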
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 7448bf62..9363455a 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -25,7 +25,8 @@ def get_model_and_provider(model : Union[Model, str],
provider : Union[type[BaseProvider], None],
stream : bool,
ignored : List[str] = None,
- ignore_working: bool = False) -> tuple[Model, type[BaseProvider]]:
+ ignore_working: bool = False,
+ ignore_stream: bool = False) -> tuple[Model, type[BaseProvider]]:
if isinstance(model, str):
if model in ModelUtils.convert:
@@ -45,7 +46,7 @@ def get_model_and_provider(model : Union[Model, str],
if not provider.working and not ignore_working:
raise RuntimeError(f'{provider.__name__} is not working')
- if not provider.supports_stream and stream:
+ if not ignore_stream and not provider.supports_stream and stream:
raise ValueError(f'{provider.__name__} does not support "stream" argument')
if debug.logging:
@@ -61,15 +62,17 @@ class ChatCompletion:
stream : bool = False,
auth : Union[str, None] = None,
ignored : List[str] = None,
- ignore_working: bool = False, **kwargs) -> Union[CreateResult, str]:
+ ignore_working: bool = False,
+ ignore_stream_and_auth: bool = False,
+ **kwargs) -> Union[CreateResult, str]:
- model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working)
+ model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working, ignore_stream_and_auth)
- if provider.needs_auth and not auth:
+ if not ignore_stream_and_auth and provider.needs_auth and not auth:
raise ValueError(
f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
- if provider.needs_auth:
+ if auth:
kwargs['auth'] = auth
result = provider.create_completion(model.name, messages, stream, **kwargs)
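
The new `ignore_stream_and_auth` flag is the escape hatch that lets the GUI backend pass any provider straight through. Used the way backend.py (below) does:

    import g4f

    response = g4f.ChatCompletion.create(
        model=g4f.models.default,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
        ignore_stream_and_auth=True,  # skip the stream-support and needs_auth checks
    )
    for chunk in response:
        print(chunk, end="")
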
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 5b7a0bf0..2844b73e 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -161,7 +161,7 @@ const ask_gpt = async (txtMsgs) => {
text += chunk;
- document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text);
+ document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text).replace("<a href=", '<a target="_blank" href=');
document.querySelectorAll(`code`).forEach((el) => {
hljs.highlightElement(el);
});
@@ -308,7 +308,7 @@ const load_conversation = async (conversation_id) => {
</div>
<div class="content">
${item.role == "assistant"
- ? markdown.render(item.content)
+ ? markdown.render(item.content).replace("<a href=", '<a target="_blank" href=')
: item.content
}
</div>
@@ -529,7 +529,7 @@ window.onload = async () => {
load_settings_localstorage();
setTheme();
- conversations = 0;
+ let conversations = 0;
for (let i = 0; i < localStorage.length; i++) {
if (localStorage.key(i).startsWith("conversation:")) {
conversations += 1;
@@ -548,7 +548,6 @@ window.onload = async () => {
}
}
- // await load_models();
await say_hello()
message_input.addEventListener(`keydown`, async (evt) => {
@@ -593,64 +592,40 @@ const observer = new MutationObserver((mutationsList) => {
observer.observe(message_input, { attributes: true });
-
-const load_models = async () => {
- // models = localStorage.getItem('_models')
-
- // if (models === null) {
- // response = await fetch('/backend-api/v2/models')
- // models = await response.json()
- // localStorage.setItem('_models', JSON.stringify(models))
-
- // } else {
- // models = JSON.parse(models)
- // }
-
- models = [
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-16k-0613",
- "gpt-4",
- "gpt-4-0613",
- "gpt-4-32k",
- "gpt-4-32k-0613",
- "palm2",
- "palm",
- "google",
- "google-bard",
- "google-palm",
- "bard",
- "falcon-40b",
- "falcon-7b",
- "llama-13b",
- "command-nightly",
- "gpt-neox-20b",
- "santacoder",
- "bloom",
- "flan-t5-xxl",
- "code-davinci-002",
- "text-ada-001",
- "text-babbage-001",
- "text-curie-001",
- "text-davinci-002",
- "text-davinci-003",
- "llama70b-v2-chat",
- "llama13b-v2-chat",
- "llama7b-v2-chat",
- "oasst-sft-1-pythia-12b",
- "oasst-sft-4-pythia-12b-epoch-3.5",
- "command-light-nightly"
- ]
-
- let MODELS_SELECT = document.getElementById('model');
+(async () => {
+ response = await fetch('/backend-api/v2/models')
+ models = await response.json()
+ let select = document.getElementById('model');
+ select.textContent = '';
+
+ let auto = document.createElement('option');
+ auto.value = '';
+ auto.text = 'Model: Default';
+ select.appendChild(auto);
for (model of models) {
- let model_info = document.createElement('option');
- model_info.value = model
- model_info.text = model
+ let option = document.createElement('option');
+ option.value = option.text = model;
+ select.appendChild(option);
+ }
+})();
- MODELS_SELECT.appendChild(model_info);
+(async () => {
+ response = await fetch('/backend-api/v2/providers')
+ providers = await response.json()
+
+ let select = document.getElementById('provider');
+ select.textContent = '';
+
+ let auto = document.createElement('option');
+ auto.value = '';
+ auto.text = 'Provider: Auto';
+ select.appendChild(auto);
+
+ for (provider of providers) {
+ let option = document.createElement('option');
+ option.value = option.text = provider;
+ select.appendChild(option);
}
-}
\ No newline at end of file
+})();
\ No newline at end of file
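
The hard-coded model list is gone; both dropdowns are now populated from the backend at load time. The two endpoints are easy to poke outside the browser (the host and port are an assumption about your local GUI setup):

    import requests

    base = "http://localhost:8080"  # adjust to wherever the g4f GUI is served
    print(requests.get(f"{base}/backend-api/v2/models").json())
    print(requests.get(f"{base}/backend-api/v2/providers").json())
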
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 3d7bfedc..e1abb764 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -3,9 +3,8 @@ import g4f
from flask import request
from .internet import search
from .config import special_instructions
-from .provider import get_provider
-g4f.logging = True
+g4f.debug.logging = True
class Backend_Api:
def __init__(self, app) -> None:
@@ -15,6 +14,10 @@ class Backend_Api:
'function': self.models,
'methods' : ['GET']
},
+ '/backend-api/v2/providers': {
+ 'function': self.providers,
+ 'methods' : ['GET']
+ },
'/backend-api/v2/conversation': {
'function': self._conversation,
'methods': ['POST']
@@ -37,6 +40,12 @@ class Backend_Api:
def models(self):
return g4f._all_models
+ def providers(self):
+ return [
+ provider.__name__ for provider in g4f.Provider.__providers__
+ if provider.working and provider is not g4f.Provider.RetryProvider
+ ]
+
def _gen_title(self):
return {
'title': ''
@@ -47,26 +56,26 @@ class Backend_Api:
#jailbreak = request.json['jailbreak']
#internet_access = request.json['meta']['content']['internet_access']
#conversation = request.json['meta']['content']['conversation']
- prompt = request.json['meta']['content']['parts']
- model = request.json['model']
- provider = request.json.get('provider').split('g4f.Provider.')[1]
-
- messages = prompt
- print(messages)
+ messages = request.json['meta']['content']['parts']
+ model = request.json.get('model')
+ model = model if model else g4f.models.default
+ provider = request.json.get('provider', 'Auto').replace('g4f.Provider.', '')
+ provider = provider if provider != "Auto" else None
+ if provider != None:
+ provider = g4f.Provider.ProviderUtils.convert.get(provider)
- def stream():
- yield from g4f.ChatCompletion.create(
- model=model,
- provider=get_provider(provider),
- messages=messages,
- stream=True,
- ) if provider else g4f.ChatCompletion.create(
- model=model, messages=messages, stream=True
- )
+ response = g4f.ChatCompletion.create(
+ model=model,
+ provider=provider,
+ messages=messages,
+ stream=True,
+ ignore_stream_and_auth=True
+ )
- return self.app.response_class(stream(), mimetype='text/event-stream')
+ return self.app.response_class(response, mimetype='text/event-stream')
- except Exception as e:
+ except Exception as e:
+ print(e)
return {
'code' : 'G4F_ERROR',
'_action': '_ask',
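
Provider resolution now happens inline, replacing the deleted provider.py helper. An equivalent standalone version of the new lookup:

    import g4f

    def resolve_provider(name: str):
        name = (name or "Auto").replace("g4f.Provider.", "")
        if name == "Auto":
            return None  # let get_model_and_provider pick one automatically
        return g4f.Provider.ProviderUtils.convert.get(name)
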
diff --git a/g4f/gui/server/provider.py b/g4f/gui/server/provider.py
deleted file mode 100644
index 8c7ac755..00000000
--- a/g4f/gui/server/provider.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from __future__ import annotations
-
-import g4f
-from g4f import BaseProvider
-
-
-def get_provider(provider: str) -> BaseProvider | None:
- if not isinstance(provider, str):
- return None
- print(provider)
- if provider == 'g4f.Provider.Auto':
- return None
-
- return g4f.Provider.ProviderUtils.convert.get(provider)
diff --git a/g4f/models.py b/g4f/models.py
index 2f86891d..cf24ff52 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -7,6 +7,7 @@ from .Provider import (
ChatgptDemoAi,
ChatAnywhere,
ChatgptNext,
+ HuggingChat,
GptForLove,
ChatgptAi,
DeepInfra,
@@ -100,7 +101,7 @@ llama2_13b = Model(
llama2_70b = Model(
name = "meta-llama/Llama-2-70b-chat-hf",
base_provider = "huggingface",
- best_provider = RetryProvider([Llama2, DeepInfra]))
+ best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]))
# Bard
palm = Model(
@@ -274,6 +275,8 @@ class ModelUtils:
'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+ 'gpt-3.5-long': gpt_35_long,
+
# gpt-4
'gpt-4' : gpt_4,
'gpt-4-0613' : gpt_4_0613,
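
Appending HuggingChat gives the 70B model a third fallback; RetryProvider tries each provider in turn until one succeeds. The resulting definition, written standalone (imports assume the providers are re-exported from g4f.Provider, as this commit's import list suggests):

    from g4f.models import Model
    from g4f.Provider import RetryProvider, Llama2, DeepInfra, HuggingChat

    llama2_70b = Model(
        name="meta-llama/Llama-2-70b-chat-hf",
        base_provider="huggingface",
        best_provider=RetryProvider([Llama2, DeepInfra, HuggingChat]),
    )
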
diff --git a/g4f/webdriver.py b/g4f/webdriver.py
index 288eed0e..f0fa1fba 100644
--- a/g4f/webdriver.py
+++ b/g4f/webdriver.py
@@ -4,6 +4,8 @@ import time
from platformdirs import user_config_dir
from selenium.webdriver.remote.webdriver import WebDriver
from undetected_chromedriver import Chrome, ChromeOptions
+import os.path
+from . import debug
try:
from pyvirtualdisplay import Display
@@ -19,12 +21,16 @@ def get_browser(
) -> WebDriver:
if user_data_dir == None:
user_data_dir = user_config_dir("g4f")
+ if debug.logging:
+ print("Open browser with config dir:", user_data_dir)
if not options:
options = ChromeOptions()
- options.add_argument("window-size=1920,1080");
if proxy:
options.add_argument(f'--proxy-server={proxy}')
- return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
+ driver = '/usr/bin/chromedriver'
+ if not os.path.isfile(driver):
+ driver = None
+ return Chrome(options=options, user_data_dir=user_data_dir, driver_executable_path=driver, headless=headless)
class WebDriverSession():
def __init__(
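
The browser factory now prefers a system-installed chromedriver (as shipped in the docker image) and otherwise lets undetected_chromedriver manage its own binary. A condensed sketch of the fallback (function name is illustrative):

    import os.path
    from undetected_chromedriver import Chrome, ChromeOptions

    def make_browser(headless: bool = True) -> Chrome:
        driver = "/usr/bin/chromedriver"
        if not os.path.isfile(driver):
            driver = None  # undetected_chromedriver downloads a matching binary
        return Chrome(options=ChromeOptions(), driver_executable_path=driver, headless=headless)
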