Diffstat (limited to 'g4f'):
 -rw-r--r--  g4f/Provider/Bestim.py              |  43
 -rw-r--r--  g4f/Provider/Bing.py                | 315
 -rw-r--r--  g4f/Provider/FreeChatgpt.py         |  68
 -rw-r--r--  g4f/Provider/__init__.py            |   2
 -rw-r--r--  g4f/Provider/bing/conversation.py   |  43
 -rw-r--r--  g4f/Provider/bing/create_images.py  | 164
 -rw-r--r--  g4f/Provider/bing/upload_image.py   | 162
 -rw-r--r--  g4f/Provider/create_images.py       |  84
 -rw-r--r--  g4f/Provider/needs_auth/Bard.py     |  13
 -rw-r--r--  g4f/__init__.py                     |   8
 -rw-r--r--  g4f/gui/client/html/index.html      |   5
 -rw-r--r--  g4f/gui/client/js/chat.v1.js        |  26
 -rw-r--r--  g4f/gui/server/backend.py           |  26
 -rw-r--r--  g4f/gui/server/internet.py          |   2
 -rw-r--r--  g4f/models.py                       |   7
 -rw-r--r--  g4f/requests.py                     |   5
 -rw-r--r--  g4f/version.py                      |   2
 -rw-r--r--  g4f/webdriver.py                    |   3
 18 files changed, 671 insertions, 307 deletions
diff --git a/g4f/Provider/Bestim.py b/g4f/Provider/Bestim.py
index 312655b8..be95b48a 100644
--- a/g4f/Provider/Bestim.py
+++ b/g4f/Provider/Bestim.py
@@ -1,42 +1,35 @@
from __future__ import annotations
-from ..typing import Messages, List, Dict
+from ..typing import Messages
from .base_provider import BaseProvider, CreateResult
+from ..requests import get_session_from_browser
from uuid import uuid4
import requests
-def format_prompt(messages) -> List[Dict[str, str]]:
-
- return [{"id": str(uuid4()), "content": '\n'.join(f'{m["role"]}: {m["content"]}' for m in messages), "from": "you"}]
-
class Bestim(BaseProvider):
url = "https://chatgpt.bestim.org"
supports_gpt_35_turbo = True
supports_message_history = True
- working = True
+ working = False
supports_stream = True
- @staticmethod
+ @classmethod
def create_completion(
+ cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
**kwargs
) -> CreateResult:
-
+ session = get_session_from_browser(cls.url, proxy=proxy)
headers = {
- 'POST': '/chat/send2/ HTTP/3',
- 'Host': 'chatgpt.bestim.org',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0',
'Accept': 'application/json, text/event-stream',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://chatgpt.bestim.org/chat/',
- 'Content-Type': 'application/json',
- 'Content-Length': '109',
'Origin': 'https://chatgpt.bestim.org',
- 'Cookie': 'NpZAER=qKkRHguMIOraVbJAWpoyzGLFjZwYlm; qKkRHguMIOraVbJAWpoyzGLFjZwYlm=8ebb5ae1561bde05354de5979b52c6e1-1704058188-1704058188; NpZAER_hits=2; _csrf-front=fcf20965823c0a152ae8f9cdf15b23022bb26cdc6bf32a9d4c8bfe78dcc6b807a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22a5wP6azsc7dxV8rmwAXaNsl8XS1yvW5V%22%3B%7D',
'Alt-Used': 'chatgpt.bestim.org',
'Connection': 'keep-alive',
'Sec-Fetch-Dest': 'empty',
@@ -44,27 +37,25 @@ class Bestim(BaseProvider):
'Sec-Fetch-Site': 'same-origin',
'TE': 'trailers'
}
-
data = {
-
- "messagesHistory": format_prompt(messages),
+ "messagesHistory": [{
+ "id": str(uuid4()),
+ "content": m["content"],
+ "from": "you" if m["role"] == "user" else "bot"
+ } for m in messages],
"type": "chat",
}
-
- response = requests.post(
+ response = session.post(
url="https://chatgpt.bestim.org/chat/send2/",
headers=headers,
json=data,
- proxies={"https": proxy}
+ proxies={"https": proxy},
+ stream=True
)
-
response.raise_for_status()
-
- for chunk in response.iter_lines():
-
- if b"event: trylimit" not in chunk:
-
- yield chunk.decode().removeprefix("data: ")
+ for line in response.iter_lines():
+ if not line.startswith(b"event: trylimit"):
+ yield line.decode().removeprefix("data: ")
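A minimal sketch of the get_session_from_browser pattern the provider now relies on (assumes the helper returns a requests-style session carrying the cookies and user agent of a browser that has already passed the Cloudflare check, as in g4f/requests.py further below):

from g4f.requests import get_session_from_browser

# The browser solves the Cloudflare challenge once; its cookies and user agent
# are then reused for plain HTTP requests against the same site.
session = get_session_from_browser("https://chatgpt.bestim.org", proxy=None)
response = session.post(
    "https://chatgpt.bestim.org/chat/send2/",
    json={"messagesHistory": [], "type": "chat"},
)
print(response.status_code)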
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index e03b413f..b2e10e5d 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -1,35 +1,25 @@
from __future__ import annotations
-import string
import random
import json
import os
-import re
-import io
-import base64
-import numpy as np
import uuid
-import urllib.parse
import time
-from PIL import Image
-from aiohttp import ClientSession, ClientTimeout
-from ..typing import AsyncResult, Messages
+from urllib import parse
+from aiohttp import ClientSession, ClientTimeout
+
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
+from ..webdriver import get_browser, get_driver_cookies
+from .bing.upload_image import upload_image
+from .bing.create_images import create_images, format_images_markdown, wait_for_login
+from .bing.conversation import Conversation, create_conversation, delete_conversation
class Tones():
creative = "Creative"
balanced = "Balanced"
precise = "Precise"
-default_cookies = {
- 'SRCHD' : 'AF=NOFORM',
- 'PPLState' : '1',
- 'KievRPSSecAuth': '',
- 'SUID' : '',
- 'SRCHUSR' : '',
- 'SRCHHPGUSR' : f'HV={int(time.time())}',
-}
-
class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat"
working = True
@@ -55,9 +45,9 @@ class Bing(AsyncGeneratorProvider):
context = create_context(messages[:-1])
if not cookies:
- cookies = default_cookies
+ cookies = Defaults.cookies
else:
- for key, value in default_cookies.items():
+ for key, value in Defaults.cookies.items():
if key not in cookies:
cookies[key] = value
@@ -71,106 +61,6 @@ def create_context(messages: Messages):
for message in messages
)
-class Conversation():
- def __init__(self, conversationId: str, clientId: str, conversationSignature: str, imageInfo: dict=None) -> None:
- self.conversationId = conversationId
- self.clientId = clientId
- self.conversationSignature = conversationSignature
- self.imageInfo = imageInfo
-
-async def create_conversation(session: ClientSession, tone: str, image: str = None, proxy: str = None) -> Conversation:
- url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1199.4'
- async with session.get(url, proxy=proxy) as response:
- data = await response.json()
-
- conversationId = data.get('conversationId')
- clientId = data.get('clientId')
- conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
-
- if not conversationId or not clientId or not conversationSignature:
- raise Exception('Failed to create conversation.')
- conversation = Conversation(conversationId, clientId, conversationSignature, None)
- if isinstance(image,str):
- try:
- config = {
- "visualSearch": {
- "maxImagePixels": 360000,
- "imageCompressionRate": 0.7,
- "enableFaceBlurDebug": 0,
- }
- }
- is_data_uri_an_image(image)
- img_binary_data = extract_data_uri(image)
- is_accepted_format(img_binary_data)
- img = Image.open(io.BytesIO(img_binary_data))
- width, height = img.size
- max_image_pixels = config['visualSearch']['maxImagePixels']
- compression_rate = config['visualSearch']['imageCompressionRate']
-
- if max_image_pixels / (width * height) < 1:
- new_width = int(width * np.sqrt(max_image_pixels / (width * height)))
- new_height = int(height * np.sqrt(max_image_pixels / (width * height)))
- else:
- new_width = width
- new_height = height
- try:
- orientation = get_orientation(img)
- except Exception:
- orientation = None
- new_img = process_image(orientation, img, new_width, new_height)
- new_img_binary_data = compress_image_to_base64(new_img, compression_rate)
- data, boundary = build_image_upload_api_payload(new_img_binary_data, conversation, tone)
- headers = session.headers.copy()
- headers["content-type"] = f'multipart/form-data; boundary={boundary}'
- headers["referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
- headers["origin"] = 'https://www.bing.com'
- async with session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as image_upload_response:
- if image_upload_response.status != 200:
- raise Exception("Failed to upload image.")
-
- image_info = await image_upload_response.json()
- if not image_info.get('blobId'):
- raise Exception("Failed to parse image info.")
- result = {'bcid': image_info.get('blobId', "")}
- result['blurredBcid'] = image_info.get('processedBlobId', "")
- if result['blurredBcid'] != "":
- result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
- elif result['bcid'] != "":
- result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
- result['originalImageUrl'] = (
- "https://www.bing.com/images/blob?bcid="
- + result['blurredBcid']
- if config['visualSearch']["enableFaceBlurDebug"]
- else "https://www.bing.com/images/blob?bcid="
- + result['bcid']
- )
- conversation.imageInfo = result
- except Exception as e:
- print(f"An error happened while trying to send image: {str(e)}")
- return conversation
-
-async def list_conversations(session: ClientSession) -> list:
- url = "https://www.bing.com/turing/conversation/chats"
- async with session.get(url) as response:
- response = await response.json()
- return response["chats"]
-
-async def delete_conversation(session: ClientSession, conversation: Conversation, proxy: str = None) -> list:
- url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
- json = {
- "conversationId": conversation.conversationId,
- "conversationSignature": conversation.conversationSignature,
- "participant": {"id": conversation.clientId},
- "source": "cib",
- "optionsSets": ["autosave"]
- }
- async with session.post(url, json=json, proxy=proxy) as response:
- try:
- response = await response.json()
- return response["result"]["value"] == "Success"
- except:
- return False
-
class Defaults:
delimiter = "\x1e"
ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
@@ -264,123 +154,28 @@ class Defaults:
'eredirecturl',
'nojbfedge'
]
+
+ cookies = {
+ 'SRCHD' : 'AF=NOFORM',
+ 'PPLState' : '1',
+ 'KievRPSSecAuth': '',
+ 'SUID' : '',
+ 'SRCHUSR' : '',
+ 'SRCHHPGUSR' : f'HV={int(time.time())}',
+ }
def format_message(msg: dict) -> str:
return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
-def build_image_upload_api_payload(image_bin: str, conversation: Conversation, tone: str):
- payload = {
- 'invokedSkills': ["ImageById"],
- 'subscriptionId': "Bing.Chat.Multimodal",
- 'invokedSkillsRequestData': {
- 'enableFaceBlur': True
- },
- 'convoData': {
- 'convoid': "",
- 'convotone': tone
- }
- }
- knowledge_request = {
- 'imageInfo': {},
- 'knowledgeRequest': payload
- }
- boundary="----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
- data = (
- f'--{boundary}'
- + '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n'
- + json.dumps(knowledge_request, ensure_ascii=False)
- + "\r\n--"
- + boundary
- + '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n'
- + image_bin
- + "\r\n--"
- + boundary
- + "--\r\n"
- )
- return data, boundary
-
-def is_data_uri_an_image(data_uri: str):
- try:
- # Check if the data URI starts with 'data:image' and contains an image format (e.g., jpeg, png, gif)
- if not re.match(r'data:image/(\w+);base64,', data_uri):
- raise ValueError("Invalid data URI image.")
- # Extract the image format from the data URI
- image_format = re.match(r'data:image/(\w+);base64,', data_uri).group(1)
- # Check if the image format is one of the allowed formats (jpg, jpeg, png, gif)
- if image_format.lower() not in ['jpeg', 'jpg', 'png', 'gif']:
- raise ValueError("Invalid image format (from mime file type).")
- except Exception as e:
- raise e
-
-def is_accepted_format(binary_data: bytes) -> bool:
- try:
- check = False
- if binary_data.startswith(b'\xFF\xD8\xFF'):
- check = True # It's a JPEG image
- elif binary_data.startswith(b'\x89PNG\r\n\x1a\n'):
- check = True # It's a PNG image
- elif binary_data.startswith(b'GIF87a') or binary_data.startswith(b'GIF89a'):
- check = True # It's a GIF image
- elif binary_data.startswith(b'\x89JFIF') or binary_data.startswith(b'JFIF\x00'):
- check = True # It's a JPEG image
- elif binary_data.startswith(b'\xFF\xD8'):
- check = True # It's a JPEG image
- elif binary_data.startswith(b'RIFF') and binary_data[8:12] == b'WEBP':
- check = True # It's a WebP image
- # else we raise ValueError
- if not check:
- raise ValueError("Invalid image format (from magic code).")
- except Exception as e:
- raise e
-
-def extract_data_uri(data_uri: str) -> bytes:
- try:
- data = data_uri.split(",")[1]
- data = base64.b64decode(data)
- return data
- except Exception as e:
- raise e
-
-def get_orientation(data: bytes) -> int:
- try:
- if data[:2] != b'\xFF\xD8':
- raise Exception('NotJpeg')
- with Image.open(data) as img:
- exif_data = img._getexif()
- if exif_data is not None:
- orientation = exif_data.get(274) # 274 corresponds to the orientation tag in EXIF
- if orientation is not None:
- return orientation
- except Exception:
- pass
-
-def process_image(orientation: int, img: Image.Image, new_width: int, new_height: int) -> Image.Image:
- try:
- # Initialize the canvas
- new_img = Image.new("RGB", (new_width, new_height), color="#FFFFFF")
- if orientation:
- if orientation > 4:
- img = img.transpose(Image.FLIP_LEFT_RIGHT)
- if orientation in [3, 4]:
- img = img.transpose(Image.ROTATE_180)
- if orientation in [5, 6]:
- img = img.transpose(Image.ROTATE_270)
- if orientation in [7, 8]:
- img = img.transpose(Image.ROTATE_90)
- new_img.paste(img, (0, 0))
- return new_img
- except Exception as e:
- raise e
-
-def compress_image_to_base64(img, compression_rate) -> str:
- try:
- output_buffer = io.BytesIO()
- img.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
- return base64.b64encode(output_buffer.getvalue()).decode('utf-8')
- except Exception as e:
- raise e
-
-def create_message(conversation: Conversation, prompt: str, tone: str, context: str = None, web_search: bool = False, gpt4_turbo: bool = False) -> str:
+def create_message(
+ conversation: Conversation,
+ prompt: str,
+ tone: str,
+ context: str = None,
+ image_info: dict = None,
+ web_search: bool = False,
+ gpt4_turbo: bool = False
+) -> str:
options_sets = Defaults.optionsSets
if tone == Tones.creative:
options_sets.append("h3imaginative")
@@ -429,9 +224,9 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
'target': 'chat',
'type': 4
}
- if conversation.imageInfo != None and "imageUrl" in conversation.imageInfo and "originalImageUrl" in conversation.imageInfo:
- struct['arguments'][0]['message']['originalImageUrl'] = conversation.imageInfo['originalImageUrl']
- struct['arguments'][0]['message']['imageUrl'] = conversation.imageInfo['imageUrl']
+ if image_info and "imageUrl" in image_info and "originalImageUrl" in image_info:
+ struct['arguments'][0]['message']['originalImageUrl'] = image_info['originalImageUrl']
+ struct['arguments'][0]['message']['imageUrl'] = image_info['imageUrl']
struct['arguments'][0]['experienceType'] = None
struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
if context:
@@ -454,28 +249,39 @@ async def stream_generate(
web_search: bool = False,
gpt4_turbo: bool = False
):
+ headers = Defaults.headers
+ if cookies:
+ headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
async with ClientSession(
- timeout=ClientTimeout(total=900),
- headers=Defaults.headers if not cookies else {**Defaults.headers, "Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items())},
- ) as session:
- conversation = await create_conversation(session, tone, image, proxy)
+ timeout=ClientTimeout(total=900),
+ headers=headers
+ ) as session:
+ conversation = await create_conversation(session, proxy)
+ image_info = None
+ if image:
+ image_info = await upload_image(session, image, tone, proxy)
try:
- async with session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', autoping=False, params={'sec_access_token': conversation.conversationSignature}, proxy=proxy) as wss:
-
+ async with session.ws_connect(
+ 'wss://sydney.bing.com/sydney/ChatHub',
+ autoping=False,
+ params={'sec_access_token': conversation.conversationSignature},
+ proxy=proxy
+ ) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
await wss.receive(timeout=900)
- await wss.send_str(create_message(conversation, prompt, tone, context, web_search, gpt4_turbo))
+ await wss.send_str(create_message(conversation, prompt, tone, context, image_info, web_search, gpt4_turbo))
response_txt = ''
returned_text = ''
final = False
while not final:
msg = await wss.receive(timeout=900)
+ if not msg.data:
+ continue
objects = msg.data.split(Defaults.delimiter)
for obj in objects:
if obj is None or not obj:
continue
-
response = json.loads(obj)
if response.get('type') == 1 and response['arguments'][0].get('messages'):
message = response['arguments'][0]['messages'][0]
@@ -488,9 +294,11 @@ async def stream_generate(
inline_txt = card['inlines'][0].get('text')
response_txt += inline_txt + '\n'
elif message.get('contentType') == "IMAGE":
- query = urllib.parse.quote(message.get('text'))
- url = f"\nhttps://www.bing.com/images/create?q={query}"
- response_txt += url
+ prompt = message.get('text')
+ try:
+ response_txt += format_images_markdown(await create_images(session, prompt, proxy), prompt)
+ except:
+ response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
final = True
if response_txt.startswith(returned_text):
new = response_txt[len(returned_text):]
@@ -500,7 +308,18 @@ async def stream_generate(
elif response.get('type') == 2:
result = response['item']['result']
if result.get('error'):
- raise Exception(f"{result['value']}: {result['message']}")
+ if result["value"] == "CaptchaChallenge":
+ driver = get_browser(proxy=proxy)
+ try:
+ for chunk in wait_for_login(driver):
+ yield chunk
+ cookies = get_driver_cookies(driver)
+ finally:
+ driver.quit()
+ async for chunk in stream_generate(prompt, tone, image, context, proxy, cookies, web_search, gpt4_turbo):
+ yield chunk
+ else:
+ raise Exception(f"{result['value']}: {result['message']}")
return
finally:
await delete_conversation(session, conversation, proxy)
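A hedged usage sketch of the refactored Bing provider; it assumes create_async_generator forwards an image keyword (a base64 data URI) down to the new upload_image helper, which the diff only shows inside stream_generate:

import g4f
from g4f.Provider import Bing

response = g4f.ChatCompletion.create(
    model=g4f.models.default,
    provider=Bing,
    messages=[{"role": "user", "content": "What is shown in this picture?"}],
    image="data:image/jpeg;base64,...",  # placeholder data URI
    stream=True,
)
for chunk in response:
    print(chunk, end="")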
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
new file mode 100644
index 00000000..05a0016e
--- /dev/null
+++ b/g4f/Provider/FreeChatgpt.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+
+class FreeChatgpt(AsyncGeneratorProvider):
+ url = "https://free.chatgpt.org.uk"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_message_history = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept": "application/json, text/event-stream",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "application/json",
+ "Referer": "https://free.chatgpt.org.uk/",
+ "x-requested-with": "XMLHttpRequest",
+ "Origin": "https://free.chatgpt.org.uk",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "Alt-Used": "free.chatgpt.org.uk",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": messages,
+ "isAzure": False,
+ "azureApiVersion": "2023-08-01-preview",
+ "stream": True,
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1,
+ "baseUrl": "/api/openai",
+ "maxIterations": 10,
+ "returnIntermediateSteps": True,
+ "useTools": ["web-search", "calculator", "web-browser"],
+ **kwargs
+ }
+ async with session.post(f"{cls.url}/api/langchain/tool/agent/nodejs", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ data = json.loads(line[6:])
+ if data["isSuccess"] and not data["isToolMessage"]:
+ yield data["message"]
\ No newline at end of file
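Usage sketch for the new FreeChatgpt provider through the async generator interface defined above:

import asyncio
from g4f.Provider import FreeChatgpt

async def main():
    async for chunk in FreeChatgpt.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    ):
        print(chunk, end="")

asyncio.run(main())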
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 19212836..7a3dbb76 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -3,6 +3,7 @@ from __future__ import annotations
from ..base_provider import BaseProvider, ProviderType
from .retry_provider import RetryProvider
from .base_provider import AsyncProvider, AsyncGeneratorProvider
+from .create_images import CreateImagesProvider
from .deprecated import *
from .needs_auth import *
from .unfinished import *
@@ -30,6 +31,7 @@ from .ChatgptX import ChatgptX
from .Chatxyz import Chatxyz
from .DeepInfra import DeepInfra
from .FakeGpt import FakeGpt
+from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .GeekGpt import GeekGpt
from .GeminiProChat import GeminiProChat
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
new file mode 100644
index 00000000..ef45cd82
--- /dev/null
+++ b/g4f/Provider/bing/conversation.py
@@ -0,0 +1,43 @@
+from aiohttp import ClientSession
+
+
+class Conversation():
+ def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
+ self.conversationId = conversationId
+ self.clientId = clientId
+ self.conversationSignature = conversationSignature
+
+async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation:
+ url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1199.4'
+ async with session.get(url, proxy=proxy) as response:
+ data = await response.json()
+
+ conversationId = data.get('conversationId')
+ clientId = data.get('clientId')
+ conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
+
+ if not conversationId or not clientId or not conversationSignature:
+ raise Exception('Failed to create conversation.')
+ return Conversation(conversationId, clientId, conversationSignature)
+
+async def list_conversations(session: ClientSession) -> list:
+ url = "https://www.bing.com/turing/conversation/chats"
+ async with session.get(url) as response:
+ response = await response.json()
+ return response["chats"]
+
+async def delete_conversation(session: ClientSession, conversation: Conversation, proxy: str = None) -> list:
+ url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
+ json = {
+ "conversationId": conversation.conversationId,
+ "conversationSignature": conversation.conversationSignature,
+ "participant": {"id": conversation.clientId},
+ "source": "cib",
+ "optionsSets": ["autosave"]
+ }
+ try:
+ async with session.post(url, json=json, proxy=proxy) as response:
+ response = await response.json()
+ return response["result"]["value"] == "Success"
+ except:
+ return False
\ No newline at end of file
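A sketch of the conversation lifecycle this module exposes; in practice the session has to carry the Bing headers and cookies that stream_generate sets up, a bare ClientSession will usually be rejected:

import asyncio
from aiohttp import ClientSession
from g4f.Provider.bing.conversation import create_conversation, delete_conversation

async def main():
    async with ClientSession() as session:  # real calls need Bing cookies/headers
        conversation = await create_conversation(session)
        try:
            print(conversation.conversationId, conversation.clientId)
        finally:
            await delete_conversation(session, conversation)

asyncio.run(main())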
diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py
new file mode 100644
index 00000000..b203a0dc
--- /dev/null
+++ b/g4f/Provider/bing/create_images.py
@@ -0,0 +1,164 @@
+import asyncio
+import time, json, os
+from aiohttp import ClientSession
+from bs4 import BeautifulSoup
+from urllib.parse import quote
+from typing import Generator
+
+from ..create_images import CreateImagesProvider
+from ..helper import get_cookies, get_event_loop
+from ...webdriver import WebDriver, get_driver_cookies, get_browser
+from ...base_provider import ProviderType
+
+BING_URL = "https://www.bing.com"
+
+def wait_for_login(driver: WebDriver, timeout: int = 1200) -> None:
+ driver.get(f"{BING_URL}/")
+ value = driver.get_cookie("_U")
+ if value:
+ return
+ start_time = time.time()
+ while True:
+ if time.time() - start_time > timeout:
+ raise RuntimeError("Timeout error")
+ value = driver.get_cookie("_U")
+ if value:
+ return
+ time.sleep(0.5)
+
+def create_session(cookies: dict) -> ClientSession:
+ headers = {
+ "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+ "accept-encoding": "gzip, deflate, br",
+ "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh-TW;q=0.7,zh;q=0.6",
+ "content-type": "application/x-www-form-urlencoded",
+ "referrer-policy": "origin-when-cross-origin",
+ "referrer": "https://www.bing.com/images/create/",
+ "origin": "https://www.bing.com",
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.54",
+ "sec-ch-ua": "\"Microsoft Edge\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"",
+ "sec-ch-ua-mobile": "?0",
+ "sec-fetch-dest": "document",
+ "sec-fetch-mode": "navigate",
+ "sec-fetch-site": "same-origin",
+ "sec-fetch-user": "?1",
+ "upgrade-insecure-requests": "1",
+ }
+ if cookies:
+ headers["cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
+ return ClientSession(headers=headers)
+
+async def create_images(session: ClientSession, prompt: str, proxy: str = None, timeout: int = 300) -> list:
+ url_encoded_prompt = quote(prompt)
+ payload = f"q={url_encoded_prompt}&rt=4&FORM=GENCRE"
+ url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE"
+ async with session.post(
+ url,
+ allow_redirects=False,
+ data=payload,
+ timeout=timeout,
+ ) as response:
+ response.raise_for_status()
+ errors = [
+ "this prompt is being reviewed",
+ "this prompt has been blocked",
+ "we're working hard to offer image creator in more languages"
+ ]
+ text = (await response.text()).lower()
+ for error in errors:
+ if error in text:
+ raise RuntimeError(f"Create images failed: {error}")
+ if response.status != 302:
+ url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=3&FORM=GENCRE"
+ async with session.post(url, allow_redirects=False, proxy=proxy, timeout=timeout) as response:
+ if response.status != 302:
+ raise RuntimeError(f"Create images failed. Status Code: {response.status}")
+
+ redirect_url = response.headers["Location"].replace("&nfy=1", "")
+ redirect_url = f"{BING_URL}{redirect_url}"
+ request_id = redirect_url.split("id=")[1]
+ async with session.get(redirect_url) as response:
+ response.raise_for_status()
+
+ polling_url = f"{BING_URL}/images/create/async/results/{request_id}?q={url_encoded_prompt}"
+ start_time = time.time()
+ while True:
+ if time.time() - start_time > timeout:
+ raise RuntimeError(f"Timeout error after {timeout} seconds")
+ async with session.get(polling_url) as response:
+ if response.status != 200:
+ raise RuntimeError(f"Polling images faild. Status Code: {response.status}")
+ text = await response.text()
+ if not text:
+ await asyncio.sleep(1)
+ else:
+ break
+ error = None
+ try:
+ error = json.loads(text).get("errorMessage")
+ except:
+ pass
+ if error == "Pending":
+ raise RuntimeError("Prompt is been blocked")
+ elif error:
+ raise RuntimeError(error)
+ return read_images(text)
+
+def read_images(text: str) -> list:
+ html_soup = BeautifulSoup(text, "html.parser")
+ tags = html_soup.find_all("img")
+ image_links = [img["src"] for img in tags if "mimg" in img["class"]]
+ images = [link.split("?w=")[0] for link in image_links]
+ bad_images = [
+ "https://r.bing.com/rp/in-2zU3AJUdkgFe7ZKv19yPBHVs.png",
+ "https://r.bing.com/rp/TX9QuO3WzcCJz1uaaSwQAz39Kb0.jpg",
+ ]
+ if any(im in bad_images for im in images):
+ raise RuntimeError("Bad images found")
+ if not images:
+ raise RuntimeError("No images found")
+ return images
+
+def format_images_markdown(images: list, prompt: str) -> str:
+ images = [f"[![#{idx+1} {prompt}]({image}?w=200&h=200)]({image})" for idx, image in enumerate(images)]
+ images = "\n".join(images)
+ start_flag = "<!-- generated images start -->\n"
+ end_flag = "<!-- generated images end -->\n"
+ return f"\n{start_flag}{images}\n{end_flag}\n"
+
+async def create_images_markdown(cookies: dict, prompt: str, proxy: str = None) -> str:
+ session = create_session(cookies)
+ try:
+ images = await create_images(session, prompt, proxy)
+ return format_images_markdown(images, prompt)
+ finally:
+ await session.close()
+
+def get_cookies_from_browser(proxy: str = None) -> dict:
+ driver = get_browser(proxy=proxy)
+ try:
+ wait_for_login(driver)
+ return get_driver_cookies(driver)
+ finally:
+ driver.quit()
+
+def create_completion(prompt: str, cookies: dict = None, proxy: str = None) -> Generator:
+ loop = get_event_loop()
+ if not cookies:
+ cookies = get_cookies(".bing.com")
+ if "_U" not in cookies:
+ login_url = os.environ.get("G4F_LOGIN_URL")
+ if login_url:
+ yield f"Please login: [Bing]({login_url})\n\n"
+ cookies = get_cookies_from_browser(proxy)
+ yield loop.run_until_complete(create_images_markdown(cookies, prompt, proxy))
+
+async def create_async(prompt: str, cookies: dict = None, proxy: str = None) -> str:
+ if not cookies:
+ cookies = get_cookies(".bing.com")
+ if "_U" not in cookies:
+ cookies = get_cookies_from_browser(proxy)
+ return await create_images_markdown(cookies, prompt, proxy)
+
+def patch_provider(provider: ProviderType) -> CreateImagesProvider:
+ return CreateImagesProvider(provider, create_completion, create_async)
\ No newline at end of file
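Usage sketch for the image generation helpers; it assumes a logged-in Bing account, either a readable "_U" cookie under ".bing.com" or a browser window the user can log in through (wait_for_login above):

import asyncio
from g4f.Provider.bing.create_images import create_async

markdown = asyncio.run(create_async("a watercolor painting of a lighthouse"))
print(markdown)  # image links wrapped in the generated-images comment markers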
diff --git a/g4f/Provider/bing/upload_image.py b/g4f/Provider/bing/upload_image.py
new file mode 100644
index 00000000..329e6df4
--- /dev/null
+++ b/g4f/Provider/bing/upload_image.py
@@ -0,0 +1,162 @@
+from __future__ import annotations
+
+import string
+import random
+import json
+import re
+import io
+import base64
+import numpy as np
+from PIL import Image
+from aiohttp import ClientSession
+
+async def upload_image(
+ session: ClientSession,
+ image: str,
+ tone: str,
+ proxy: str = None
+):
+ try:
+ image_config = {
+ "maxImagePixels": 360000,
+ "imageCompressionRate": 0.7,
+ "enableFaceBlurDebug": 0,
+ }
+ is_data_uri_an_image(image)
+ img_binary_data = extract_data_uri(image)
+ is_accepted_format(img_binary_data)
+ img = Image.open(io.BytesIO(img_binary_data))
+ width, height = img.size
+ max_image_pixels = image_config['maxImagePixels']
+ if max_image_pixels / (width * height) < 1:
+ new_width = int(width * np.sqrt(max_image_pixels / (width * height)))
+ new_height = int(height * np.sqrt(max_image_pixels / (width * height)))
+ else:
+ new_width = width
+ new_height = height
+ try:
+ orientation = get_orientation(img)
+ except Exception:
+ orientation = None
+ new_img = process_image(orientation, img, new_width, new_height)
+ new_img_binary_data = compress_image_to_base64(new_img, image_config['imageCompressionRate'])
+ data, boundary = build_image_upload_api_payload(new_img_binary_data, tone)
+ headers = session.headers.copy()
+ headers["content-type"] = f'multipart/form-data; boundary={boundary}'
+ headers["referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
+ headers["origin"] = 'https://www.bing.com'
+ async with session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as response:
+ if response.status != 200:
+ raise RuntimeError("Failed to upload image.")
+ image_info = await response.json()
+ if not image_info.get('blobId'):
+ raise RuntimeError("Failed to parse image info.")
+ result = {'bcid': image_info.get('blobId', "")}
+ result['blurredBcid'] = image_info.get('processedBlobId', "")
+ if result['blurredBcid'] != "":
+ result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['blurredBcid']
+ elif result['bcid'] != "":
+ result["imageUrl"] = "https://www.bing.com/images/blob?bcid=" + result['bcid']
+ result['originalImageUrl'] = (
+ "https://www.bing.com/images/blob?bcid="
+ + result['blurredBcid']
+ if image_config["enableFaceBlurDebug"]
+ else "https://www.bing.com/images/blob?bcid="
+ + result['bcid']
+ )
+ return result
+ except Exception as e:
+ raise RuntimeError(f"Upload image failed: {e}")
+
+
+def build_image_upload_api_payload(image_bin: str, tone: str):
+ payload = {
+ 'invokedSkills': ["ImageById"],
+ 'subscriptionId': "Bing.Chat.Multimodal",
+ 'invokedSkillsRequestData': {
+ 'enableFaceBlur': True
+ },
+ 'convoData': {
+ 'convoid': "",
+ 'convotone': tone
+ }
+ }
+ knowledge_request = {
+ 'imageInfo': {},
+ 'knowledgeRequest': payload
+ }
+ boundary="----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+ data = (
+ f'--{boundary}'
+ + '\r\nContent-Disposition: form-data; name="knowledgeRequest"\r\n\r\n'
+ + json.dumps(knowledge_request, ensure_ascii=False)
+ + "\r\n--"
+ + boundary
+ + '\r\nContent-Disposition: form-data; name="imageBase64"\r\n\r\n'
+ + image_bin
+ + "\r\n--"
+ + boundary
+ + "--\r\n"
+ )
+ return data, boundary
+
+def is_data_uri_an_image(data_uri: str):
+ # Check if the data URI starts with 'data:image' and contains an image format (e.g., jpeg, png, gif)
+ if not re.match(r'data:image/(\w+);base64,', data_uri):
+ raise ValueError("Invalid data URI image.")
+ # Extract the image format from the data URI
+ image_format = re.match(r'data:image/(\w+);base64,', data_uri).group(1)
+ # Check if the image format is one of the allowed formats (jpg, jpeg, png, gif)
+ if image_format.lower() not in ['jpeg', 'jpg', 'png', 'gif']:
+ raise ValueError("Invalid image format (from mime file type).")
+
+def is_accepted_format(binary_data: bytes) -> bool:
+ if binary_data.startswith(b'\xFF\xD8\xFF'):
+ pass # It's a JPEG image
+ elif binary_data.startswith(b'\x89PNG\r\n\x1a\n'):
+ pass # It's a PNG image
+ elif binary_data.startswith(b'GIF87a') or binary_data.startswith(b'GIF89a'):
+ pass # It's a GIF image
+ elif binary_data.startswith(b'\x89JFIF') or binary_data.startswith(b'JFIF\x00'):
+ pass # It's a JPEG image
+ elif binary_data.startswith(b'\xFF\xD8'):
+ pass # It's a JPEG image
+ elif binary_data.startswith(b'RIFF') and binary_data[8:12] == b'WEBP':
+ pass # It's a WebP image
+ else:
+ raise ValueError("Invalid image format (from magic code).")
+
+def extract_data_uri(data_uri: str) -> bytes:
+ data = data_uri.split(",")[1]
+ data = base64.b64decode(data)
+ return data
+
+def get_orientation(data: bytes) -> int:
+ if data[:2] != b'\xFF\xD8':
+ raise Exception('NotJpeg')
+ with Image.open(data) as img:
+ exif_data = img._getexif()
+ if exif_data is not None:
+ orientation = exif_data.get(274) # 274 corresponds to the orientation tag in EXIF
+ if orientation is not None:
+ return orientation
+
+def process_image(orientation: int, img: Image.Image, new_width: int, new_height: int) -> Image.Image:
+ # Initialize the canvas
+ new_img = Image.new("RGB", (new_width, new_height), color="#FFFFFF")
+ if orientation:
+ if orientation > 4:
+ img = img.transpose(Image.FLIP_LEFT_RIGHT)
+ if orientation in [3, 4]:
+ img = img.transpose(Image.ROTATE_180)
+ if orientation in [5, 6]:
+ img = img.transpose(Image.ROTATE_270)
+ if orientation in [7, 8]:
+ img = img.transpose(Image.ROTATE_90)
+ new_img.paste(img, (0, 0))
+ return new_img
+
+def compress_image_to_base64(image: Image.Image, compression_rate: float) -> str:
+ output_buffer = io.BytesIO()
+ image.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
+ return base64.b64encode(output_buffer.getvalue()).decode('utf-8')
\ No newline at end of file
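A sketch of calling upload_image directly; the image argument must be a base64 data URI in one of the accepted formats, and the session is expected to already carry Bing cookies, as in stream_generate:

import asyncio
from aiohttp import ClientSession
from g4f.Provider.bing.upload_image import upload_image

async def main():
    data_uri = "data:image/png;base64,..."  # placeholder
    async with ClientSession() as session:  # needs Bing cookies in practice
        image_info = await upload_image(session, data_uri, tone="Balanced")
        print(image_info["imageUrl"])

asyncio.run(main())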
diff --git a/g4f/Provider/create_images.py b/g4f/Provider/create_images.py
new file mode 100644
index 00000000..9c76a742
--- /dev/null
+++ b/g4f/Provider/create_images.py
@@ -0,0 +1,84 @@
+from __future__ import annotations
+
+import re
+import asyncio
+from ..typing import CreateResult, Messages
+from ..base_provider import BaseProvider, ProviderType
+
+system_message = """
+You can generate custom images with the DALL-E 3 image generator.
+To generate an image with a prompt, do this:
+<img data-prompt=\"keywords for the image\">
+Don't use images with data uri. It is important to use a prompt instead.
+<img data-prompt=\"image caption\">
+"""
+
+class CreateImagesProvider(BaseProvider):
+ def __init__(
+ self,
+ provider: ProviderType,
+ create_images: callable,
+ create_async: callable,
+ system_message: str = system_message,
+ include_placeholder: bool = True
+ ) -> None:
+ self.provider = provider
+ self.create_images = create_images
+ self.create_images_async = create_async
+ self.system_message = system_message
+ self.__name__ = provider.__name__
+ self.working = provider.working
+ self.supports_stream = provider.supports_stream
+ self.include_placeholder = include_placeholder
+ if hasattr(provider, "url"):
+ self.url = provider.url
+
+ def create_completion(
+ self,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ messages.insert(0, {"role": "system", "content": self.system_message})
+ buffer = ""
+ for chunk in self.provider.create_completion(model, messages, stream, **kwargs):
+ if buffer or "<" in chunk:
+ buffer += chunk
+ if ">" in buffer:
+ match = re.search(r'<img data-prompt="(.*?)">', buffer)
+ if match:
+ placeholder, prompt = match.group(0), match.group(1)
+ start, append = buffer.split(placeholder, 1)
+ if start:
+ yield start
+ if self.include_placeholder:
+ yield placeholder
+ yield from self.create_images(prompt)
+ if append:
+ yield append
+ else:
+ yield buffer
+ buffer = ""
+ else:
+ yield chunk
+
+ async def create_async(
+ self,
+ model: str,
+ messages: Messages,
+ **kwargs
+ ) -> str:
+ messages.insert(0, {"role": "system", "content": self.system_message})
+ response = await self.provider.create_async(model, messages, **kwargs)
+ matches = re.findall(r'(<img data-prompt="(.*?)">)', response)
+ results = []
+ for _, prompt in matches:
+ results.append(self.create_images_async(prompt))
+ results = await asyncio.gather(*results)
+ for idx, result in enumerate(results):
+ placeholder = matches[idx][0]
+ if self.include_placeholder:
+ result = placeholder + result
+ response = response.replace(placeholder, result)
+ return response
\ No newline at end of file
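A self-contained sketch of wiring CreateImagesProvider around an existing text provider; the image callables here are stand-ins for bing.create_images.create_completion / create_async, and the wrapped provider is an illustrative choice:

from g4f.Provider import FreeChatgpt
from g4f.Provider.create_images import CreateImagesProvider

def fake_create_images(prompt: str):
    # stand-in generator: yields markdown instead of calling the Bing API
    yield f"\n![{prompt}](https://example.com/demo.png)\n"

async def fake_create_async(prompt: str) -> str:
    return f"\n![{prompt}](https://example.com/demo.png)\n"

provider = CreateImagesProvider(FreeChatgpt, fake_create_images, fake_create_async)
# provider.create_completion(...) now replaces <img data-prompt="..."> tags in
# the model output with whatever the image callables return.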
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index cf1000c4..aea67874 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -64,11 +64,14 @@ XMLHttpRequest.prototype.open = function(method, url) {
"""
driver.execute_script(script)
- # Submit prompt
- driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(prompt)
- driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(Keys.ENTER)
-
- # Yield response
+ textarea = driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea")
+ lines = prompt.splitlines()
+ for idx, line in enumerate(lines):
+ textarea.send_keys(line)
+ if (len(lines) - 1 != idx):
+ textarea.send_keys(Keys.SHIFT + "\n")
+ textarea.send_keys(Keys.ENTER)
+
while True:
chunk = driver.execute_script("return window._message;")
if chunk:
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 699dc238..dc7808f9 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -68,6 +68,7 @@ class ChatCompletion:
ignored : list[str] = None,
ignore_working: bool = False,
ignore_stream_and_auth: bool = False,
+ patch_provider: callable = None,
**kwargs) -> Union[CreateResult, str]:
model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working, ignore_stream_and_auth)
@@ -83,6 +84,9 @@ class ChatCompletion:
if proxy:
kwargs['proxy'] = proxy
+ if patch_provider:
+ provider = patch_provider(provider)
+
result = provider.create_completion(model, messages, stream, **kwargs)
return result if stream else ''.join(result)
@@ -92,6 +96,7 @@ class ChatCompletion:
provider : Union[ProviderType, str, None] = None,
stream : bool = False,
ignored : list[str] = None,
+ patch_provider: callable = None,
**kwargs) -> Union[AsyncResult, str]:
model, provider = get_model_and_provider(model, provider, False, ignored)
@@ -101,6 +106,9 @@ class ChatCompletion:
return provider.create_async_generator(model, messages, **kwargs)
raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument in "create_async"')
+ if patch_provider:
+ provider = patch_provider(provider)
+
return provider.create_async(model, messages, **kwargs)
class Completion:
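How the new patch_provider hook is meant to be used from client code; this mirrors what the GUI backend does below and assumes the Bing image generator is reachable and logged in:

import g4f
from g4f.Provider.bing.create_images import patch_provider

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Draw me a lighthouse at sunset"}],
    patch_provider=patch_provider,  # wraps the chosen provider in CreateImagesProvider
    stream=True,
)
for chunk in response:
    print(chunk, end="")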
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index da7aeefb..b47f2a37 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -125,6 +125,11 @@
<span class="about">Web Access</span>
</div>
<div class="field">
+ <input type="checkbox" id="patch" />
+ <label for="patch" title="Works only with Bing and some other providers"></label>
+ <span class="about">Image Generator</span>
+ </div>
+ <div class="field">
<select name="model" id="model">
<option value="gpt-3.5-turbo" selected="">gpt-3.5-turbo</option>
<option value="gpt-3.5-turbo-0613">gpt-3.5-turbo-0613</option>
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index a335a3cc..9d49d24e 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -20,7 +20,10 @@ message_input.addEventListener("focus", () => {
});
const markdown_render = (content) => {
- return markdown.render(content)
+ return markdown.render(content
+ .replaceAll(/<!--.+-->/gm, "")
+ .replaceAll(/<img data-prompt="[^>]+">/gm, "")
+ )
.replaceAll("<a href=", '<a target="_blank" href=')
.replaceAll('<code>', '<code class="language-plaintext">')
}
@@ -68,6 +71,15 @@ const ask_gpt = async () => {
regenerate.classList.add(`regenerate-hidden`);
messages = await get_messages(window.conversation_id);
+ // Remove generated images from history
+ for (i in messages) {
+ messages[i]["content"] = messages[i]["content"].replace(
+ /<!-- generated images start -->[\s\S]+<!-- generated images end -->/m,
+ ""
+ )
+ delete messages[i]["provider"];
+ }
+
window.scrollTo(0, 0);
window.controller = new AbortController();
@@ -91,7 +103,8 @@ const ask_gpt = async () => {
</div>
<div class="content" id="gpt_${window.token}">
<div class="provider"></div>
- <div class="content_inner"><div id="cursor"></div></div>
+ <div class="content_inner"></div>
+ <div id="cursor"></div>
</div>
</div>
`;
@@ -115,6 +128,7 @@ const ask_gpt = async () => {
jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
internet_access: document.getElementById(`switch`).checked,
provider: provider.options[provider.selectedIndex].value,
+ patch_provider: document.getElementById('patch').checked,
meta: {
id: window.token,
content: {
@@ -163,8 +177,6 @@ const ask_gpt = async () => {
} catch (e) {
console.log(e);
- let cursorDiv = document.getElementById(`cursor`);
- if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
if (e.name != `AbortError`) {
text = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;
@@ -174,6 +186,8 @@ const ask_gpt = async () => {
text += ` [aborted]`
}
}
+ let cursorDiv = document.getElementById(`cursor`);
+ if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
add_message(window.conversation_id, "assistant", text, provider);
message_box.scrollTop = message_box.scrollHeight;
await remove_cancel_button();
@@ -430,7 +444,7 @@ document.querySelector(".mobile-sidebar").addEventListener("click", (event) => {
});
const register_settings_localstorage = async () => {
- settings_ids = ["switch", "model", "jailbreak"];
+ settings_ids = ["switch", "model", "jailbreak", "patch", "provider"];
settings_elements = settings_ids.map((id) => document.getElementById(id));
settings_elements.map((element) =>
element.addEventListener(`change`, async (event) => {
@@ -449,7 +463,7 @@ const register_settings_localstorage = async () => {
};
const load_settings_localstorage = async () => {
- settings_ids = ["switch", "model", "jailbreak"];
+ settings_ids = ["switch", "model", "jailbreak", "patch", "provider"];
settings_elements = settings_ids.map((id) => document.getElementById(id));
settings_elements.map((element) => {
if (localStorage.getItem(element.id)) {
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 595f5aa1..67f13de4 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,11 +1,11 @@
import logging
-import g4f
-from g4f.Provider import __providers__
-
import json
-from flask import request, Flask
-from .internet import get_search_message
-from g4f import debug, version
+from flask import request, Flask
+from g4f import debug, version, models
+from g4f import _all_models, get_last_provider, ChatCompletion
+from g4f.Provider import __providers__
+from g4f.Provider.bing.create_images import patch_provider
+from .internet import get_search_message
debug.logging = True
@@ -45,7 +45,7 @@ class Backend_Api:
return 'ok', 200
def models(self):
- return g4f._all_models
+ return _all_models
def providers(self):
return [
@@ -69,25 +69,27 @@ class Backend_Api:
if request.json.get('internet_access'):
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = request.json.get('model')
- model = model if model else g4f.models.default
+ model = model if model else models.default
provider = request.json.get('provider', '').replace('g4f.Provider.', '')
provider = provider if provider and provider != "Auto" else None
-
+ patch = patch_provider if request.json.get('patch_provider') else None
+
def try_response():
try:
first = True
- for chunk in g4f.ChatCompletion.create(
+ for chunk in ChatCompletion.create(
model=model,
provider=provider,
messages=messages,
stream=True,
- ignore_stream_and_auth=True
+ ignore_stream_and_auth=True,
+ patch_provider=patch
):
if first:
first = False
yield json.dumps({
'type' : 'provider',
- 'provider': g4f.get_last_provider(True)
+ 'provider': get_last_provider(True)
}) + "\n"
yield json.dumps({
'type' : 'content',
diff --git a/g4f/gui/server/internet.py b/g4f/gui/server/internet.py
index 97f842d7..6c2e3a89 100644
--- a/g4f/gui/server/internet.py
+++ b/g4f/gui/server/internet.py
@@ -145,5 +145,5 @@ User request:
"""
return message
except Exception as e:
- print("Couldn't search DuckDuckGo:", e)
+ print("Couldn't do web search:", e)
return prompt
diff --git a/g4f/models.py b/g4f/models.py
index 6103ca03..21e85c72 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -7,20 +7,18 @@ from .Provider import (
ChatgptNext,
HuggingChat,
ChatgptDemo,
+ FreeChatgpt,
GptForLove,
ChatgptAi,
DeepInfra,
- OnlineGpt,
ChatBase,
Liaobots,
GeekGpt,
FakeGpt,
FreeGpt,
- Berlin,
Llama2,
Vercel,
Phind,
- Koala,
GptGo,
Gpt6,
Bard,
@@ -58,13 +56,12 @@ gpt_35_long = Model(
best_provider = RetryProvider([
FreeGpt, You,
GeekGpt, FakeGpt,
- Berlin, Koala,
Chatgpt4Online,
ChatgptDemoAi,
- OnlineGpt,
ChatgptNext,
ChatgptDemo,
Gpt6,
+ FreeChatgpt,
])
)
diff --git a/g4f/requests.py b/g4f/requests.py
index 00ab9488..467ea371 100644
--- a/g4f/requests.py
+++ b/g4f/requests.py
@@ -6,7 +6,7 @@ from functools import partialmethod
from typing import AsyncGenerator
from urllib.parse import urlparse
from curl_cffi.requests import AsyncSession, Session, Response
-from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare
+from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
class StreamResponse:
def __init__(self, inner: Response) -> None:
@@ -56,8 +56,7 @@ class StreamSession(AsyncSession):
def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120):
with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=True) as driver:
bypass_cloudflare(driver, url, timeout)
-
- cookies = dict([(cookie["name"], cookie["value"]) for cookie in driver.get_cookies()])
+ cookies = get_driver_cookies(driver)
user_agent = driver.execute_script("return navigator.userAgent")
parse = urlparse(url)
diff --git a/g4f/version.py b/g4f/version.py
index 9e572bbb..44d14369 100644
--- a/g4f/version.py
+++ b/g4f/version.py
@@ -40,7 +40,7 @@ class VersionUtils():
def check_pypi_version(self) -> None:
try:
if self.current_version != self.latest_version:
- print(f'New pypi version: {self.latest_version} (current: {self.version}) | pip install -U g4f')
+ print(f'New pypi version: {self.latest_version} (current: {self.current_version}) | pip install -U g4f')
except Exception as e:
print(f'Failed to check g4f pypi version: {e}')
diff --git a/g4f/webdriver.py b/g4f/webdriver.py
index d274c619..da283409 100644
--- a/g4f/webdriver.py
+++ b/g4f/webdriver.py
@@ -39,6 +39,9 @@ def get_browser(
headless=headless
)
+def get_driver_cookies(driver: WebDriver):
+ return dict([(cookie["name"], cookie["value"]) for cookie in driver.get_cookies()])
+
def bypass_cloudflare(driver: WebDriver, url: str, timeout: int) -> None:
# Open website
driver.get(url)
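Sketch of the new get_driver_cookies helper:

from g4f.webdriver import get_browser, get_driver_cookies

driver = get_browser()
try:
    driver.get("https://www.bing.com/")
    cookies = get_driver_cookies(driver)  # {"cookie name": "value", ...}
finally:
    driver.quit()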