author     Commenter123321 <36051603+Commenter123321@users.noreply.github.com>  2023-10-10 14:15:12 +0200
committer  Commenter123321 <36051603+Commenter123321@users.noreply.github.com>  2023-10-10 14:15:12 +0200
commit     0e4297494dd46533738f786e4ac675541586177a (patch)
tree       6c2999e14ed831f7f37cc60991571cd040772918 /g4f
parent     add cool testing for gpt-3.5 and gpt-4 (diff)
parent     Update Aivvm.py (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/AItianhu.py | 8
-rw-r--r--  g4f/Provider/AItianhuSpace.py | 8
-rw-r--r--  g4f/Provider/Acytoo.py | 7
-rw-r--r--  g4f/Provider/AiAsk.py | 9
-rw-r--r--  g4f/Provider/Aibn.py | 15
-rw-r--r--  g4f/Provider/Aichat.py | 3
-rw-r--r--  g4f/Provider/Ails.py | 6
-rw-r--r--  g4f/Provider/Bing.py | 31
-rw-r--r--  g4f/Provider/ChatBase.py | 9
-rw-r--r--  g4f/Provider/ChatForAi.py | 11
-rw-r--r--  g4f/Provider/Chatgpt4Online.py | 9
-rw-r--r--  g4f/Provider/ChatgptAi.py | 3
-rw-r--r--  g4f/Provider/ChatgptDemo.py | 6
-rw-r--r--  g4f/Provider/ChatgptDuo.py | 5
-rw-r--r--  g4f/Provider/ChatgptX.py | 7
-rw-r--r--  g4f/Provider/Cromicle.py | 6
-rw-r--r--  g4f/Provider/DeepAi.py | 8
-rw-r--r--  g4f/Provider/FreeGpt.py | 15
-rw-r--r--  g4f/Provider/GPTalk.py | 13
-rw-r--r--  g4f/Provider/GptForLove.py | 9
-rw-r--r--  g4f/Provider/GptGo.py | 2
-rw-r--r--  g4f/Provider/GptGod.py | 11
-rw-r--r--  g4f/Provider/Liaobots.py | 6
-rw-r--r--  g4f/Provider/Myshell.py | 4
-rw-r--r--  g4f/Provider/Phind.py | 9
-rw-r--r--  g4f/Provider/Vercel.py | 14
-rw-r--r--  g4f/Provider/Vitalentum.py | 2
-rw-r--r--  g4f/Provider/__init__.py | 62
-rw-r--r--  g4f/Provider/needs_auth/Bard.py | 14
-rw-r--r--  g4f/Provider/needs_auth/HuggingChat.py | 9
-rw-r--r--  g4f/Provider/needs_auth/OpenAssistant.py | 11
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py | 17
-rw-r--r--  g4f/Provider/needs_auth/Raycast.py | 15
-rw-r--r--  g4f/Provider/needs_auth/Theb.py | 26
-rw-r--r--  g4f/__init__.py | 31
-rw-r--r--  g4f/gui/__init__.py | 2
-rw-r--r--  g4f/gui/client/html/index.html | 291
-rw-r--r--  g4f/gui/client/js/chat.v2.js | 14
-rw-r--r--  g4f/gui/run.py | 16
-rw-r--r--  g4f/gui/server/backend.py | 19
-rw-r--r--  g4f/gui/server/provider.py | 17
-rw-r--r--  g4f/models.py | 29
42 files changed, 490 insertions(+), 319 deletions(-)
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index c6e4dbad..56d9a9ab 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
@@ -16,12 +16,12 @@ class AItianhu(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
cookies: dict = None,
- timeout: int = 30,
+ timeout: int = 120,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
if not cookies:
cookies = get_cookies("www.aitianhu.com")
data = {
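
The pattern above repeats across most providers in this commit: the inline `list[dict[str, str]]` annotation becomes the `Messages` alias, the return annotation becomes `AsyncResult`, and the default timeout widens from 30 to 120 seconds. A minimal sketch of calling the updated signature, assuming the aliases resolve as in g4f/typing.py (not shown in this diff):

from typing import AsyncGenerator, Dict, List

# Assumed alias definitions, matching g4f/typing.py at this commit
Messages = List[Dict[str, str]]
AsyncResult = AsyncGenerator[str, None]

async def collect(provider) -> str:
    messages: Messages = [{"role": "user", "content": "Hello"}]
    chunks = []
    # the default timeout is now 120 seconds instead of 30
    async for chunk in provider.create_async_generator("gpt-3.5-turbo", messages, timeout=120):
        chunks.append(chunk)
    return "".join(chunks)
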
diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
index 78cdf657..7eb93d48 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import random, json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
@@ -20,13 +20,13 @@ class AItianhuSpace(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
domain: str = None,
cookies: dict = None,
- timeout: int = 30,
+ timeout: int = 120,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
elif not model in domains:
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
index d36ca6da..0ac3425c 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -15,11 +15,10 @@ class Acytoo(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
-
+ ) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
diff --git a/g4f/Provider/AiAsk.py b/g4f/Provider/AiAsk.py
index 27d3bf15..f10be389 100644
--- a/g4f/Provider/AiAsk.py
+++ b/g4f/Provider/AiAsk.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
class AiAsk(AsyncGeneratorProvider):
@@ -13,9 +13,10 @@ class AiAsk(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
+ proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
headers = {
"accept": "application/json, text/plain, */*",
"origin": cls.url,
@@ -33,7 +34,7 @@ class AiAsk(AsyncGeneratorProvider):
}
buffer = ""
rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
- async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
+ async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
buffer += chunk.decode()
diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/Aibn.py
index 3399d613..13f5c71e 100644
--- a/g4f/Provider/Aibn.py
+++ b/g4f/Provider/Aibn.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import time
import hashlib
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
@@ -17,11 +17,16 @@ class Aibn(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
- timeout: int = 30,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
**kwargs
- ) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+ ) -> AsyncResult:
+ async with StreamSession(
+ impersonate="chrome107",
+ proxies={"https": proxy},
+ timeout=timeout
+ ) as session:
timestamp = int(time.time())
data = {
"messages": messages,
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
index 8edd17e2..ddc9eb1d 100644
--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -2,6 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession
+from ..typing import Messages
from .base_provider import AsyncProvider, format_prompt
@@ -13,7 +14,7 @@ class Aichat(AsyncProvider):
@staticmethod
async def create_async(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
) -> str:
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/Ails.py
index d533ae24..c1384faa 100644
--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/Ails.py
@@ -7,7 +7,7 @@ import json
from datetime import datetime
from aiohttp import ClientSession
-from ..typing import SHA256, AsyncGenerator
+from ..typing import SHA256, AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -19,11 +19,11 @@ class Ails(AsyncGeneratorProvider):
@staticmethod
async def create_async_generator(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
headers = {
"authority": "api.caipacity.com",
"accept": "*/*",
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index f4275a5f..f8c6a87a 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -7,7 +7,7 @@ import os
import uuid
import urllib.parse
from aiohttp import ClientSession, ClientTimeout
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
class Tones():
@@ -32,11 +32,12 @@ class Bing(AsyncGeneratorProvider):
@staticmethod
def create_async_generator(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
+ proxy: str = None,
cookies: dict = None,
tone: str = Tones.creative,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
if len(messages) < 2:
prompt = messages[0]["content"]
context = None
@@ -46,9 +47,9 @@ class Bing(AsyncGeneratorProvider):
if not cookies or "SRCHD" not in cookies:
cookies = default_cookies
- return stream_generate(prompt, tone, context, cookies)
+ return stream_generate(prompt, tone, context, proxy, cookies)
-def create_context(messages: list[dict[str, str]]):
+def create_context(messages: Messages):
context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
return context
@@ -59,10 +60,10 @@ class Conversation():
self.clientId = clientId
self.conversationSignature = conversationSignature
-async def create_conversation(session: ClientSession) -> Conversation:
+async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation:
url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'
- async with await session.get(url) as response:
+ async with await session.get(url, proxy=proxy) as response:
data = await response.json()
conversationId = data.get('conversationId')
@@ -80,7 +81,7 @@ async def list_conversations(session: ClientSession) -> list:
response = await response.json()
return response["chats"]
-async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
+async def delete_conversation(session: ClientSession, conversation: Conversation, proxy: str = None) -> list:
url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
json = {
"conversationId": conversation.conversationId,
@@ -89,7 +90,7 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
"source": "cib",
"optionsSets": ["autosave"]
}
- async with session.post(url, json=json) as response:
+ async with session.post(url, json=json, proxy=proxy) as response:
response = await response.json()
return response["result"]["value"] == "Success"
@@ -239,20 +240,22 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
async def stream_generate(
prompt: str,
tone: str,
- context: str=None,
- cookies: dict=None,
+ context: str = None,
+ proxy: str = None,
+ cookies: dict = None
):
async with ClientSession(
timeout=ClientTimeout(total=900),
cookies=cookies,
headers=Defaults.headers,
) as session:
- conversation = await create_conversation(session)
+ conversation = await create_conversation(session, proxy)
try:
async with session.ws_connect(
f'wss://sydney.bing.com/sydney/ChatHub',
autoping=False,
- params={'sec_access_token': conversation.conversationSignature}
+ params={'sec_access_token': conversation.conversationSignature},
+ proxy=proxy
) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
@@ -297,4 +300,4 @@ async def stream_generate(
raise Exception(f"{result['value']}: {result['message']}")
return
finally:
- await delete_conversation(session, conversation)
\ No newline at end of file
+ await delete_conversation(session, conversation, proxy)
\ No newline at end of file
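
The Bing changes thread a single `proxy` argument through conversation creation, the ChatHub websocket, and conversation cleanup, so the entire exchange can be routed through one proxy. A usage sketch (the proxy URL is illustrative):

import asyncio
from g4f.Provider import Bing

async def main():
    messages = [{"role": "user", "content": "Hi"}]
    # one proxy now covers create_conversation, ws_connect and delete_conversation
    async for token in Bing.create_async_generator("gpt-4", messages, proxy="http://127.0.0.1:8080"):
        print(token, end="", flush=True)

asyncio.run(main())
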
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index b98fe565..ce5160d8 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -16,9 +16,10 @@ class ChatBase(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
+ proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
if model == "gpt-4":
chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
elif model == "gpt-3.5-turbo" or not model:
@@ -44,7 +45,7 @@ class ChatBase(AsyncGeneratorProvider):
"chatId": chat_id,
"conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
}
- async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
+ async with session.post("https://www.chatbase.co/api/fe/chat", json=data, proxy=proxy) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
yield stream.decode()
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index 86b29639..f2fe0335 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
@@ -14,11 +14,12 @@ class ChatForAi(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
- timeout: int = 30,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
**kwargs
- ) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+ ) -> AsyncResult:
+ async with StreamSession(impersonate="chrome107", proxies={"https": proxy}, timeout=timeout) as session:
prompt = messages[-1]["content"]
data = {
"conversationId": "temp",
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index b9631429..bfcb1dec 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@@ -16,9 +16,10 @@ class Chatgpt4Online(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
+ proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
async with ClientSession() as session:
data = {
"botId": "default",
@@ -30,7 +31,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
"newMessage": messages[-1]["content"],
"stream": True
}
- async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data) as response:
+ async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index 996f99a5..cf45909c 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -3,6 +3,7 @@ from __future__ import annotations
import re
from aiohttp import ClientSession
+from ..typing import Messages
from .base_provider import AsyncProvider, format_prompt
@@ -18,7 +19,7 @@ class ChatgptAi(AsyncProvider):
async def create_async(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
) -> str:
diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/ChatgptDemo.py
index 95cb9ecf..875751b9 100644
--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/ChatgptDemo.py
@@ -2,8 +2,8 @@ from __future__ import annotations
import time, json, re
from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
@@ -16,10 +16,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
headers = {
"authority": "chat.chatgptdemo.net",
"accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
diff --git a/g4f/Provider/ChatgptDuo.py b/g4f/Provider/ChatgptDuo.py
index 119ff16b..039efc84 100644
--- a/g4f/Provider/ChatgptDuo.py
+++ b/g4f/Provider/ChatgptDuo.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+from ..typing import Messages
from curl_cffi.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt
@@ -13,9 +14,9 @@ class ChatgptDuo(AsyncProvider):
async def create_async(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
- timeout: int = 30,
+ timeout: int = 120,
**kwargs
) -> str:
async with AsyncSession(
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index 2944fb26..8052f65b 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -19,6 +19,7 @@ class ChatgptX(AsyncGeneratorProvider):
cls,
model: str,
messages: Messages,
+ proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
@@ -32,7 +33,7 @@ class ChatgptX(AsyncGeneratorProvider):
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
}
async with ClientSession(headers=headers) as session:
- async with session.get(f"{cls.url}/") as response:
+ async with session.get(f"{cls.url}/", proxy=proxy) as response:
response = await response.text()
result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
if result:
@@ -62,7 +63,7 @@ class ChatgptX(AsyncGeneratorProvider):
'x-csrf-token': csrf_token,
'x-requested-with': 'XMLHttpRequest'
}
- async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
+ async with session.post(cls.url + '/sendchat', data=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
chat = await response.json()
if "response" not in chat or not chat["response"]:
@@ -82,7 +83,7 @@ class ChatgptX(AsyncGeneratorProvider):
"conversions_id": chat["conversions_id"],
"ass_conversions_id": chat["ass_conversions_id"],
}
- async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers) as response:
+ async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
diff --git a/g4f/Provider/Cromicle.py b/g4f/Provider/Cromicle.py
index 5f521b3e..93ef8035 100644
--- a/g4f/Provider/Cromicle.py
+++ b/g4f/Provider/Cromicle.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession
from hashlib import sha256
-from typing import AsyncGenerator, Dict, List
+from ..typing import AsyncResult, Messages, Dict
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
@@ -17,10 +17,10 @@ class Cromicle(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: List[Dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator[str, None]:
+ ) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
diff --git a/g4f/Provider/DeepAi.py b/g4f/Provider/DeepAi.py
index 9a4f922c..abc2644a 100644
--- a/g4f/Provider/DeepAi.py
+++ b/g4f/Provider/DeepAi.py
@@ -6,22 +6,22 @@ import random
import hashlib
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
class DeepAi(AsyncGeneratorProvider):
- url: str = "https://deepai.org"
+ url = "https://deepai.org"
working = True
supports_gpt_35_turbo = True
@staticmethod
async def create_async_generator(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 73b8acea..8a3ca6cc 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import time, hashlib, random
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
@@ -20,11 +20,16 @@ class FreeGpt(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
- timeout: int = 30,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
**kwargs
- ) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+ ) -> AsyncResult:
+ async with StreamSession(
+ impersonate="chrome107",
+ timeout=timeout,
+ proxies={"https": proxy}
+ ) as session:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
diff --git a/g4f/Provider/GPTalk.py b/g4f/Provider/GPTalk.py
index c85399c1..afb6ff68 100644
--- a/g4f/Provider/GPTalk.py
+++ b/g4f/Provider/GPTalk.py
@@ -2,8 +2,8 @@ from __future__ import annotations
import secrets, time, json
from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
@@ -18,9 +18,10 @@ class GPTalk(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
+ proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
timestamp = int(time.time())
@@ -48,7 +49,7 @@ class GPTalk(AsyncGeneratorProvider):
"fingerprint": secrets.token_hex(16).zfill(32),
"platform": "fingerprint"
}
- async with session.post(cls.url + "/api/chatgpt/user/login", json=data) as response:
+ async with session.post(cls.url + "/api/chatgpt/user/login", json=data, proxy=proxy) as response:
response.raise_for_status()
cls._auth = (await response.json())["data"]
data = {
@@ -68,11 +69,11 @@ class GPTalk(AsyncGeneratorProvider):
headers = {
'authorization': f'Bearer {cls._auth["token"]}',
}
- async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers) as response:
+ async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
token = (await response.json())["data"]["token"]
last_message = ""
- async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}) as response:
+ async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
diff --git a/g4f/Provider/GptForLove.py b/g4f/Provider/GptForLove.py
index 53c403e1..01cef443 100644
--- a/g4f/Provider/GptForLove.py
+++ b/g4f/Provider/GptForLove.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from aiohttp import ClientSession
import execjs, os, json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
@@ -16,9 +16,10 @@ class GptForLove(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
+ proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
headers = {
@@ -47,7 +48,7 @@ class GptForLove(AsyncGeneratorProvider):
"secret": get_secret(),
**kwargs
}
- async with session.post("https://api.gptplus.one/chat-process", json=data) as response:
+ async with session.post("https://api.gptplus.one/chat-process", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
try:
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/GptGo.py
index 51764221..5f6cc362 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/GptGo.py
@@ -18,7 +18,6 @@ class GptGo(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
- timeout: int = 30,
**kwargs
) -> AsyncGenerator:
headers = {
@@ -73,6 +72,7 @@ class GptGo(AsyncGeneratorProvider):
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
+ ("proxy", "str"),
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
diff --git a/g4f/Provider/GptGod.py b/g4f/Provider/GptGod.py
index 662884dd..6c975389 100644
--- a/g4f/Provider/GptGod.py
+++ b/g4f/Provider/GptGod.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import secrets, json
from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
@@ -14,9 +14,10 @@ class GptGod(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
+ proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
"Accept": "text/event-stream",
@@ -24,7 +25,7 @@ class GptGod(AsyncGeneratorProvider):
"Accept-Encoding": "gzip, deflate, br",
"Alt-Used": "gptgod.site",
"Connection": "keep-alive",
- "Referer": "https://gptgod.site/",
+ "Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
@@ -37,7 +38,7 @@ class GptGod(AsyncGeneratorProvider):
"content": prompt,
"id": secrets.token_hex(16).zfill(32)
}
- async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
+ async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
response.raise_for_status()
event = None
async for line in response.content:
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 2ab96ce3..740be856 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -4,7 +4,7 @@ import uuid
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
models = {
@@ -39,11 +39,11 @@ class Liaobots(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
auth: str = None,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
model = model if model in models else "gpt-3.5-turbo"
headers = {
"authority": "liaobots.com",
diff --git a/g4f/Provider/Myshell.py b/g4f/Provider/Myshell.py
index da170fa3..6ed4fd7a 100644
--- a/g4f/Provider/Myshell.py
+++ b/g4f/Provider/Myshell.py
@@ -28,6 +28,7 @@ class Myshell(AsyncGeneratorProvider):
cls,
model: str,
messages: list[dict[str, str]],
+ proxy: str = None,
timeout: int = 90,
**kwargs
) -> AsyncGenerator:
@@ -47,7 +48,8 @@ class Myshell(AsyncGeneratorProvider):
async with session.ws_connect(
"wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
autoping=False,
- timeout=timeout
+ timeout=timeout,
+ proxy=proxy
) as wss:
# Send and receive hello message
await wss.receive_str()
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index 0db4e3c2..ae4de686 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -19,6 +19,7 @@ class Phind(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
+ timeout: int = 120,
**kwargs
) -> AsyncGenerator:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
@@ -43,7 +44,12 @@ class Phind(AsyncGeneratorProvider):
"Origin": cls.url,
"Referer": f"{cls.url}/"
}
- async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session:
+ async with StreamSession(
+ headers=headers,
+ timeout=(5, timeout),
+ proxies={"https": proxy},
+ impersonate="chrome107"
+ ) as session:
async with session.post(f"{cls.url}/api/infer/answer", json=data) as response:
response.raise_for_status()
new_lines = 0
@@ -71,6 +77,7 @@ class Phind(AsyncGeneratorProvider):
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
+ ("timeout", "int"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 2d20ca6a..2d856664 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import json, base64, requests, execjs, random, uuid
-from ..typing import Any, TypedDict, CreateResult
+from ..typing import Messages, TypedDict, CreateResult
from .base_provider import BaseProvider
from abc import abstractmethod
@@ -17,8 +17,9 @@ class Vercel(BaseProvider):
@abstractmethod
def create_completion(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool,
+ proxy: str = None,
**kwargs
) -> CreateResult:
if not model:
@@ -52,15 +53,18 @@ class Vercel(BaseProvider):
'model' : model_info[model]['id'],
'messages' : messages,
'playgroundId': str(uuid.uuid4()),
- 'chatIndex' : 0} | model_info[model]['default_params']
+ 'chatIndex' : 0,
+ **model_info[model]['default_params'],
+ **kwargs
+ }
max_retries = kwargs.get('max_retries', 20)
for i in range(max_retries):
response = requests.post('https://sdk.vercel.ai/api/generate',
- headers=headers, json=json_data, stream=True)
+ headers=headers, json=json_data, stream=True, proxies={"https": proxy})
try:
response.raise_for_status()
- except:
+ except Exception:
continue
for token in response.iter_content(chunk_size=None):
yield token.decode()
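
Replacing the `|` dict union with `**` unpacking means caller-supplied kwargs are now merged into the payload and can override a model's `default_params`; previously extra kwargs never reached `json_data` at all. A small sketch of the merge semantics (the parameter values are illustrative):

default_params = {"temperature": 0.7, "maximumLength": 1024}  # illustrative
kwargs = {"temperature": 0.2}

json_data = {
    "model": "openai:gpt-3.5-turbo",
    "chatIndex": 0,
    **default_params,
    **kwargs,  # later keys win, so the caller's temperature (0.2) overrides
}
assert json_data["temperature"] == 0.2
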
diff --git a/g4f/Provider/Vitalentum.py b/g4f/Provider/Vitalentum.py
index ade492d2..37e259b2 100644
--- a/g4f/Provider/Vitalentum.py
+++ b/g4f/Provider/Vitalentum.py
@@ -42,7 +42,7 @@ class Vitalentum(AsyncGeneratorProvider):
async with ClientSession(
headers=headers
) as session:
- async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
+ async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
line = line.decode()
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 5b0ec33b..d256064c 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -38,6 +38,68 @@ from .deprecated import *
from .needs_auth import *
from .unfinished import *
+class ProviderUtils:
+ convert: dict[str, BaseProvider] = {
+ 'AItianhu': AItianhu,
+ 'AItianhuSpace': AItianhuSpace,
+ 'Acytoo': Acytoo,
+ 'AiAsk': AiAsk,
+ 'AiService': AiService,
+ 'Aibn': Aibn,
+ 'Aichat': Aichat,
+ 'Ails': Ails,
+ 'Aivvm': Aivvm,
+ 'AsyncGeneratorProvider': AsyncGeneratorProvider,
+ 'AsyncProvider': AsyncProvider,
+ 'Bard': Bard,
+ 'BaseProvider': BaseProvider,
+ 'Bing': Bing,
+ 'ChatBase': ChatBase,
+ 'ChatForAi': ChatForAi,
+ 'Chatgpt4Online': Chatgpt4Online,
+ 'ChatgptAi': ChatgptAi,
+ 'ChatgptDemo': ChatgptDemo,
+ 'ChatgptDuo': ChatgptDuo,
+ 'ChatgptLogin': ChatgptLogin,
+ 'ChatgptX': ChatgptX,
+ 'CodeLinkAva': CodeLinkAva,
+ 'Cromicle': Cromicle,
+ 'DeepAi': DeepAi,
+ 'DfeHub': DfeHub,
+ 'EasyChat': EasyChat,
+ 'Equing': Equing,
+ 'FastGpt': FastGpt,
+ 'Forefront': Forefront,
+ 'FreeGpt': FreeGpt,
+ 'GPTalk': GPTalk,
+ 'GetGpt': GetGpt,
+ 'GptForLove': GptForLove,
+ 'GptGo': GptGo,
+ 'GptGod': GptGod,
+ 'H2o': H2o,
+ 'HuggingChat': HuggingChat,
+ 'Komo': Komo,
+ 'Liaobots': Liaobots,
+ 'Lockchat': Lockchat,
+ 'MikuChat': MikuChat,
+ 'Myshell': Myshell,
+ 'Opchatgpts': Opchatgpts,
+ 'OpenAssistant': OpenAssistant,
+ 'OpenaiChat': OpenaiChat,
+ 'PerplexityAi': PerplexityAi,
+ 'Phind': Phind,
+ 'Raycast': Raycast,
+ 'Theb': Theb,
+ 'V50': V50,
+ 'Vercel': Vercel,
+ 'Vitalentum': Vitalentum,
+ 'Wewordle': Wewordle,
+ 'Wuguokai': Wuguokai,
+ 'Ylokh': Ylokh,
+ 'You': You,
+ 'Yqcloud': Yqcloud
+ }
+
__all__ = [
'BaseProvider',
'AsyncProvider',
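
The new ProviderUtils.convert table maps provider names to classes, which the reworked GUI backend uses to resolve the dropdown selection (see g4f/gui/server/provider.py below). A lookup sketch:

import g4f

provider = g4f.Provider.ProviderUtils.convert.get("Bing")
if provider is not None:
    # working is a class attribute flagging whether the provider is currently usable
    print(provider.__name__, provider.working)
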
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 7c42b680..fe9777db 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -6,7 +6,9 @@ import re
from aiohttp import ClientSession
-from ..base_provider import AsyncProvider, format_prompt, get_cookies
+from ...typing import Messages
+from ..base_provider import AsyncProvider
+from ..helper import format_prompt, get_cookies
class Bard(AsyncProvider):
@@ -19,25 +21,22 @@ class Bard(AsyncProvider):
async def create_async(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
cookies: dict = None,
**kwargs
) -> str:
prompt = format_prompt(messages)
- if proxy and "://" not in proxy:
- proxy = f"http://{proxy}"
if not cookies:
cookies = get_cookies(".google.com")
headers = {
'authority': 'bard.google.com',
- 'origin': 'https://bard.google.com',
- 'referer': 'https://bard.google.com/',
+ 'origin': cls.url,
+ 'referer': f'{cls.url}/',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'x-same-domain': '1',
}
-
async with ClientSession(
cookies=cookies,
headers=headers
@@ -67,7 +66,6 @@ class Bard(AsyncProvider):
'lamda',
'BardFrontendService'
])
-
async with session.post(
f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
data=data,
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 1d500338..68c6713b 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -4,8 +4,9 @@ import json, uuid
from aiohttp import ClientSession
-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies
class HuggingChat(AsyncGeneratorProvider):
@@ -18,12 +19,12 @@ class HuggingChat(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool = True,
proxy: str = None,
cookies: dict = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
model = model if model else cls.model
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/needs_auth/OpenAssistant.py
index 3b0e0424..de62636c 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/needs_auth/OpenAssistant.py
@@ -4,8 +4,9 @@ import json
from aiohttp import ClientSession
-from ...typing import Any, AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies
class OpenAssistant(AsyncGeneratorProvider):
@@ -18,11 +19,11 @@ class OpenAssistant(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
cookies: dict = None,
- **kwargs: Any
- ) -> AsyncGenerator:
+ **kwargs
+ ) -> AsyncResult:
if not cookies:
cookies = get_cookies("open-assistant.io")
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index c41909e3..b4b4a670 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -4,7 +4,7 @@ import uuid, json, time
from ..base_provider import AsyncGeneratorProvider
from ..helper import get_browser, get_cookies, format_prompt
-from ...typing import AsyncGenerator
+from ...typing import AsyncResult, Messages
from ...requests import StreamSession
class OpenaiChat(AsyncGeneratorProvider):
@@ -18,13 +18,13 @@ class OpenaiChat(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
+ timeout: int = 120,
access_token: str = None,
cookies: dict = None,
- timeout: int = 30,
- **kwargs: dict
- ) -> AsyncGenerator:
+ **kwargs
+ ) -> AsyncResult:
proxies = {"https": proxy}
if not access_token:
access_token = await cls.get_access_token(cookies, proxies)
@@ -32,7 +32,12 @@ class OpenaiChat(AsyncGeneratorProvider):
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
}
- async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
+ async with StreamSession(
+ proxies=proxies,
+ headers=headers,
+ impersonate="chrome107",
+ timeout=timeout
+ ) as session:
messages = [
{
"id": str(uuid.uuid4()),
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 619b217b..4b448985 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -4,7 +4,7 @@ import json
import requests
-from ...typing import Any, CreateResult
+from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
@@ -19,9 +19,10 @@ class Raycast(BaseProvider):
@staticmethod
def create_completion(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool,
- **kwargs: Any,
+ proxy: str = None,
+ **kwargs,
) -> CreateResult:
auth = kwargs.get('auth')
headers = {
@@ -47,7 +48,13 @@ class Raycast(BaseProvider):
"system_instruction": "markdown",
"temperature": 0.5
}
- response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
+ response = requests.post(
+ "https://backend.raycast.com/api/v1/ai/chat_completions",
+ headers=headers,
+ json=data,
+ stream=True,
+ proxies={"https": proxy}
+ )
for token in response.iter_lines():
if b'data: ' not in token:
continue
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index c35ea592..9803db97 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -2,11 +2,11 @@ from __future__ import annotations
import json
import random
-
import requests
-from ...typing import Any, CreateResult
+from ...typing import Any, CreateResult, Messages
from ..base_provider import BaseProvider
+from ..helper import format_prompt
class Theb(BaseProvider):
@@ -19,12 +19,11 @@ class Theb(BaseProvider):
@staticmethod
def create_completion(
model: str,
- messages: list[dict[str, str]],
- stream: bool, **kwargs: Any) -> CreateResult:
-
- conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
- conversation += "\nassistant: "
-
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ **kwargs
+ ) -> CreateResult:
auth = kwargs.get("auth", {
"bearer_token":"free",
"org_id":"theb",
@@ -54,7 +53,7 @@ class Theb(BaseProvider):
req_rand = random.randint(100000000, 9999999999)
json_data: dict[str, Any] = {
- "text" : conversation,
+ "text" : format_prompt(messages),
"category" : "04f58f64a4aa4191a957b47290fee864",
"model" : "ee8d4f29cb7047f78cbe84313ed6ace8",
"model_params": {
@@ -67,8 +66,13 @@ class Theb(BaseProvider):
}
}
- response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
- headers=headers, json=json_data, stream=True)
+ response = requests.post(
+ f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
+ headers=headers,
+ json=json_data,
+ stream=True,
+ proxies={"https": proxy}
+ )
response.raise_for_status()
content = ""
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 4b1e4b80..fa20f6eb 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -2,10 +2,10 @@ from __future__ import annotations
from requests import get
from g4f.models import Model, ModelUtils
from .Provider import BaseProvider
-from .typing import CreateResult, Union
+from .typing import Messages, CreateResult, Union
from .debug import logging
-version = '0.1.5.6'
+version = '0.1.5.7'
version_check = True
def check_pypi_version() -> None:
@@ -27,19 +27,19 @@ def get_model_and_provider(model : Union[Model, str],
if model in ModelUtils.convert:
model = ModelUtils.convert[model]
else:
- raise Exception(f'The model: {model} does not exist')
+ raise ValueError(f'The model: {model} does not exist')
if not provider:
provider = model.best_provider
if not provider:
- raise Exception(f'No provider found for model: {model}')
+ raise RuntimeError(f'No provider found for model: {model}')
if not provider.working:
- raise Exception(f'{provider.__name__} is not working')
+ raise RuntimeError(f'{provider.__name__} is not working')
if not provider.supports_stream and stream:
- raise Exception(f'ValueError: {provider.__name__} does not support "stream" argument')
+ raise ValueError(f'{provider.__name__} does not support "stream" argument')
if logging:
print(f'Using {provider.__name__} provider')
@@ -48,17 +48,20 @@ def get_model_and_provider(model : Union[Model, str],
class ChatCompletion:
@staticmethod
- def create(model: Union[Model, str],
- messages : list[dict[str, str]],
+ def create(
+ model: Union[Model, str],
+ messages : Messages,
provider : Union[type[BaseProvider], None] = None,
stream : bool = False,
- auth : Union[str, None] = None, **kwargs) -> Union[CreateResult, str]:
+ auth : Union[str, None] = None,
+ **kwargs
+ ) -> Union[CreateResult, str]:
model, provider = get_model_and_provider(model, provider, stream)
if provider.needs_auth and not auth:
- raise Exception(
- f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
+ raise ValueError(
+ f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
if provider.needs_auth:
kwargs['auth'] = auth
@@ -69,10 +72,14 @@ class ChatCompletion:
@staticmethod
async def create_async(
model: Union[Model, str],
- messages: list[dict[str, str]],
+ messages: Messages,
provider: Union[type[BaseProvider], None] = None,
+ stream: bool = False,
**kwargs
) -> str:
+ if stream:
+ raise ValueError(f'"create_async" does not support "stream" argument')
+
model, provider = get_model_and_provider(model, provider, False)
return await provider.create_async(model.name, messages, **kwargs)
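
The entry points now raise typed exceptions (ValueError/RuntimeError instead of bare Exception), and create_async explicitly rejects stream=True since it returns a complete string rather than a generator. A usage sketch:

import g4f

try:
    g4f.ChatCompletion.create(model="no-such-model", messages=[])
except ValueError as error:
    print(error)  # "The model: no-such-model does not exist"

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
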
diff --git a/g4f/gui/__init__.py b/g4f/gui/__init__.py
index 90a6c7a6..48b78881 100644
--- a/g4f/gui/__init__.py
+++ b/g4f/gui/__init__.py
@@ -16,7 +16,7 @@ def run_gui(host: str = '0.0.0.0', port: int = 80, debug: bool = False) -> None:
view_func = site.routes[route]['function'],
methods = site.routes[route]['methods'],
)
-
+
backend_api = Backend_Api(app)
for route in backend_api.routes:
app.add_url_rule(
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index 3acde2df..bb472706 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -1,161 +1,178 @@
<!DOCTYPE html>
<html lang="en">
- <head>
- <meta charset="UTF-8">
- <meta http-equiv="X-UA-Compatible" content="IE=edge">
- <meta name="viewport" content="width=device-width, initial-scale=1.0 maximum-scale=1.0">
- <meta name="description" content="A conversational AI system that listens, learns, and challenges">
- <meta property="og:title" content="ChatGPT">
- <meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg">
- <meta property="og:description" content="A conversational AI system that listens, learns, and challenges">
- <meta property="og:url" content="https://g4f.ai">
- <link rel="stylesheet" href="/assets/css/style.css">
- <link rel="apple-touch-icon" sizes="180x180" href="/assets/img/apple-touch-icon.png">
- <link rel="icon" type="image/png" sizes="32x32" href="/assets/img/favicon-32x32.png">
- <link rel="icon" type="image/png" sizes="16x16" href="/assets/img/favicon-16x16.png">
- <link rel="manifest" href="/assets/img/site.webmanifest">
- <script src="/assets/js/icons.js"></script>
- <script src="/assets/js/highlightjs-copy.min.js"></script>
- <script src="/assets/js/chat.v2.js" defer></script>
- <script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
- <link rel="stylesheet" href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">
- <script>
- const user_image = `<img src="/assets/img/user.png" alt="your avatar">`;
- const gpt_image = `<img src="/assets/img/gpt.png" alt="your avatar">`;
- </script>
- <style>
- .hljs {
- color: #e9e9f4;
- background: #28293629;
- border-radius: var(--border-radius-1);
- border: 1px solid var(--blur-border);
- font-size: 15px;
- }
- #message-input {
- margin-right: 30px;
- height: 80px;
- }
+<head>
+ <meta charset="UTF-8">
+ <meta http-equiv="X-UA-Compatible" content="IE=edge">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0 maximum-scale=1.0">
+ <meta name="description" content="A conversational AI system that listens, learns, and challenges">
+ <meta property="og:title" content="ChatGPT">
+ <meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg">
+ <meta property="og:description" content="A conversational AI system that listens, learns, and challenges">
+ <meta property="og:url" content="https://g4f.ai">
+ <link rel="stylesheet" href="/assets/css/style.css">
+ <link rel="apple-touch-icon" sizes="180x180" href="/assets/img/apple-touch-icon.png">
+ <link rel="icon" type="image/png" sizes="32x32" href="/assets/img/favicon-32x32.png">
+ <link rel="icon" type="image/png" sizes="16x16" href="/assets/img/favicon-16x16.png">
+ <link rel="manifest" href="/assets/img/site.webmanifest">
+ <script src="/assets/js/icons.js"></script>
+ <script src="/assets/js/highlightjs-copy.min.js"></script>
+ <script src="/assets/js/chat.v2.js" defer></script>
+ <script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
+ <link rel="stylesheet"
+ href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">
+ <script>
+ const user_image = `<img src="/assets/img/user.png" alt="your avatar">`;
+ const gpt_image = `<img src="/assets/img/gpt.png" alt="your avatar">`;
+ </script>
+ <style>
+ .hljs {
+ color: #e9e9f4;
+ background: #28293629;
+ border-radius: var(--border-radius-1);
+ border: 1px solid var(--blur-border);
+ font-size: 15px;
+ }
+
+ #message-input {
+ margin-right: 30px;
+ height: 80px;
+ }
- #message-input::-webkit-scrollbar {
- width: 5px;
- }
+ #message-input::-webkit-scrollbar {
+ width: 5px;
+ }
- /* Track */
- #message-input::-webkit-scrollbar-track {
- background: #f1f1f1;
- }
-
- /* Handle */
- #message-input::-webkit-scrollbar-thumb {
- background: #c7a2ff;
- }
+ /* Track */
+ #message-input::-webkit-scrollbar-track {
+ background: #f1f1f1;
+ }
- /* Handle on hover */
- #message-input::-webkit-scrollbar-thumb:hover {
- background: #8b3dff;
- }
- </style>
- <script src="/assets/js/highlight.min.js"></script>
- <script>window.conversation_id = `{{chat_id}}`</script>
- <title>g4f - gui</title>
- </head>
- <body>
- <div class="gradient"></div>
- <div class="row">
- <div class="box conversations">
- <div class="top">
- <button class="new_convo" onclick="new_conversation()">
- <i class="fa-regular fa-plus"></i>
- <span>New Conversation</span>
- </button>
+ /* Handle */
+ #message-input::-webkit-scrollbar-thumb {
+ background: #c7a2ff;
+ }
+
+ /* Handle on hover */
+ #message-input::-webkit-scrollbar-thumb:hover {
+ background: #8b3dff;
+ }
+ </style>
+ <script src="/assets/js/highlight.min.js"></script>
+ <script>window.conversation_id = `{{chat_id}}`</script>
+ <title>g4f - gui</title>
+</head>
+
+<body>
+ <div class="gradient"></div>
+ <div class="row">
+ <div class="box conversations">
+ <div class="top">
+ <button class="new_convo" onclick="new_conversation()">
+ <i class="fa-regular fa-plus"></i>
+ <span>New Conversation</span>
+ </button>
+ </div>
+ <div class="bottom_buttons">
+ <button onclick="delete_conversations()">
+ <i class="fa-regular fa-trash"></i>
+ <span>Clear Conversations</span>
+ </button>
+ <div class="info">
+ <i class="fa-brands fa-discord"></i>
+ <span class="convo-title">telegram: <a href="https://t.me/g4f_official">@g4f_official</a><br>
+ </span>
</div>
- <div class="bottom_buttons">
- <button onclick="delete_conversations()">
- <i class="fa-regular fa-trash"></i>
- <span>Clear Conversations</span>
- </button>
- <div class="info">
- <i class="fa-brands fa-discord"></i>
- <span class="convo-title">telegram: <a href="https://t.me/g4f_official">@g4f_official</a><br>
- </span>
- </div>
- <div class="info">
- <i class="fa-brands fa-github"></i>
- <span class="convo-title">github: <a href="https://github.com/xtekky/gpt4free">@gpt4free</a><br>
- leave a star ; )
- </span>
- </div>
+ <div class="info">
+ <i class="fa-brands fa-github"></i>
+ <span class="convo-title">github: <a href="https://github.com/xtekky/gpt4free">@gpt4free</a><br>
+ leave a star ; )
+ </span>
</div>
</div>
- <div class="conversation">
- <div class="stop_generating stop_generating-hidden">
- <button id="cancelButton">
- <span>Stop Generating</span>
- <i class="fa-regular fa-stop"></i>
- </button>
+ </div>
+ <div class="conversation">
+ <div class="stop_generating stop_generating-hidden">
+ <button id="cancelButton">
+ <span>Stop Generating</span>
+ <i class="fa-regular fa-stop"></i>
+ </button>
+ </div>
+ <div class="box" id="messages">
+ </div>
+ <div class="user-input">
+ <div class="box input-box">
+ <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
+ style="white-space: pre-wrap;resize: none;"></textarea>
+ <div id="send-button">
+ <i class="fa-solid fa-paper-plane-top"></i>
+ </div>
</div>
- <div class="box" id="messages">
+ </div>
+ <div class="buttons">
+ <div class="field">
+ <input type="checkbox" id="switch" />
+ <label for="switch"></label>
+ <span class="about">Web Access</span>
</div>
- <div class="user-input">
- <div class="box input-box">
- <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10" style="white-space: pre-wrap;resize: none;"></textarea>
- <div id="send-button">
- <i class="fa-solid fa-paper-plane-top"></i>
- </div>
- </div>
+ <div class="field">
+ <select name="model" id="model">
+ <option value="gpt-3.5-turbo" selected>gpt-3.5</option>
+ <option value="gpt-4">gpt-4</option>
+ <option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option>
+ <option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option>
+ </select>
</div>
- <div class="buttons">
- <div class="field">
- <input type="checkbox" id="switch"/>
- <label for="switch"></label>
- <span class="about">Web Access</span>
- </div>
+ <div class="field">
+ <select name="jailbreak" id="jailbreak">
+ <option value="default" selected>Set Jailbreak</option>
+ <option value="gpt-math-1.0">math 1.0</option>
+ <option value="gpt-dude-1.0">dude 1.0</option>
+ <option value="gpt-dan-1.0">dan 1.0</option>
+ <option value="gpt-dan-2.0">dan 2.0</option>
+ <option value="gpt-dev-2.0">dev 2.0</option>
+ <option value="gpt-evil-1.0">evil 1.0</option>
+ </select>
<div class="field">
- <select name="model" id="model">
- <option value="gpt-3.5-turbo" selected>gpt-3.5</option>
- <option value="gpt-4">gpt-4</option>
- <option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option>
- <option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option>
+ <select name="provider" id="provider">
+ <option value="g4f.Provider.Auto" selected>Set Provider</option>
+ <option value="g4f.Provider.AItianhuSpace">AItianhuSpace</option>
+ <option value="g4f.Provider.ChatgptLogin">ChatgptLogin</option>
+ <option value="g4f.Provider.ChatgptDemo">ChatgptDemo</option>
+ <option value="g4f.Provider.ChatgptDuo">ChatgptDuo</option>
+ <option value="g4f.Provider.Vitalentum">Vitalentum</option>
+ <option value="g4f.Provider.ChatgptAi">ChatgptAi</option>
+ <option value="g4f.Provider.AItianhu">AItianhu</option>
+ <option value="g4f.Provider.ChatBase">ChatBase</option>
+ <option value="g4f.Provider.Liaobots">Liaobots</option>
+ <option value="g4f.Provider.Yqcloud">Yqcloud</option>
+ <option value="g4f.Provider.Myshell">Myshell</option>
+ <option value="g4f.Provider.FreeGpt">FreeGpt</option>
+ <option value="g4f.Provider.Vercel">Vercel</option>
+ <option value="g4f.Provider.DeepAi">DeepAi</option>
+ <option value="g4f.Provider.Aichat">Aichat</option>
+ <option value="g4f.Provider.GPTalk">GPTalk</option>
+ <option value="g4f.Provider.GptGod">GptGod</option>
+ <option value="g4f.Provider.AiAsk">AiAsk</option>
+ <option value="g4f.Provider.GptGo">GptGo</option>
+ <option value="g4f.Provider.Ylokh">Ylokh</option>
+ <option value="g4f.Provider.Bard">Bard</option>
+ <option value="g4f.Provider.Aibn">Aibn</option>
+ <option value="g4f.Provider.Bing">Bing</option>
+ <option value="g4f.Provider.You">You</option>
+ <option value="g4f.Provider.H2o">H2o</option>
+ <option value="g4f.Provider.Aivvm">Aivvm</option>
</select>
</div>
- <div class="field">
- <select name="jailbreak" id="jailbreak">
- <option value="default" selected>default</option>
- <option value="gpt-math-1.0">math 1.0</option>
- <option value="gpt-dude-1.0">dude 1.0</option>
- <option value="gpt-dan-1.0">dan 1.0</option>
- <option value="gpt-dan-2.0">dan 2.0</option>
- <option value="gpt-dev-2.0">dev 2.0</option>
- <option value="gpt-evil-1.0">evil 1.0</option>
- </select>
- <form class="color-picker" action="">
- <fieldset>
- <legend class="visually-hidden">Pick a color scheme</legend>
- <label for="light" class="visually-hidden">Light</label>
- <input type="radio" name="theme" id="light" checked>
-
- <label for="pink" class="visually-hidden">Pink theme</label>
- <input type="radio" id="pink" name="theme">
-
- <label for="blue" class="visually-hidden">Blue theme</label>
- <input type="radio" id="blue" name="theme">
-
- <label for="green" class="visually-hidden">Green theme</label>
- <input type="radio" id="green" name="theme">
-
- <label for="dark" class="visually-hidden">Dark theme</label>
- <input type="radio" id="dark" name="theme">
- </fieldset>
- </form>
- </div>
</div>
</div>
</div>
+ </div>
<div class="mobile-sidebar">
<i class="fa-solid fa-bars"></i>
</div>
<script>
</script>
- </body>
+</body>
+
</html>
\ No newline at end of file
diff --git a/g4f/gui/client/js/chat.v2.js b/g4f/gui/client/js/chat.v2.js
index df827030..e1faa6bb 100644
--- a/g4f/gui/client/js/chat.v2.js
+++ b/g4f/gui/client/js/chat.v2.js
@@ -52,7 +52,7 @@ const remove_cancel_button = async () => {
const ask_gpt = async (message) => {
try {
- message_input.value = ``;
+ message_input.value = ``;
message_input.innerHTML = ``;
message_input.innerText = ``;
@@ -60,10 +60,11 @@ const ask_gpt = async (message) => {
window.scrollTo(0, 0);
window.controller = new AbortController();
- jailbreak = document.getElementById("jailbreak");
- model = document.getElementById("model");
- prompt_lock = true;
- window.text = ``;
+ jailbreak = document.getElementById("jailbreak");
+ provider = document.getElementById("provider");
+ model = document.getElementById("model");
+ prompt_lock = true;
+ window.text = ``;
window.token = message_id();
stop_generating.classList.remove(`stop_generating-hidden`);
@@ -109,12 +110,15 @@ const ask_gpt = async (message) => {
headers: {
"content-type": `application/json`,
accept: `text/event-stream`,
+ // v: `1.0.0`,
+ // ts: Date.now().toString(),
},
body: JSON.stringify({
conversation_id: window.conversation_id,
action: `_ask`,
model: model.options[model.selectedIndex].value,
jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
+ provider: provider.options[provider.selectedIndex].value,
meta: {
id: window.token,
content: {
diff --git a/g4f/gui/run.py b/g4f/gui/run.py
index da672d59..731c7cbf 100644
--- a/g4f/gui/run.py
+++ b/g4f/gui/run.py
@@ -1,4 +1,18 @@
from g4f.gui import run_gui
+from argparse import ArgumentParser
+
if __name__ == '__main__':
- run_gui()
\ No newline at end of file
+
+ parser = ArgumentParser(description='Run the GUI')
+
+ parser.add_argument('-host', type=str, default='0.0.0.0', help='hostname')
+ parser.add_argument('-port', type=int, default=80, help='port')
+ parser.add_argument('-debug', action='store_true', help='debug mode')
+
+ args = parser.parse_args()
+ port = args.port
+ host = args.host
+ debug = args.debug
+
+ run_gui(host, port, debug)
\ No newline at end of file
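
run.py now parses -host, -port and -debug (single-dash flags, as defined above) instead of hard-coding the defaults. Equivalent invocations, assuming the package is importable; the CLI form is shown as a comment:

# python -m g4f.gui.run -host 127.0.0.1 -port 8080 -debug
from g4f.gui import run_gui

run_gui(host="127.0.0.1", port=8080, debug=True)
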
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 9a1dbbf8..8f4b529f 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,9 +1,11 @@
import g4f
from flask import request
-from threading import Thread
from .internet import search
from .config import special_instructions
+from .provider import get_provider
+
+g4f.logging = True
class Backend_Api:
def __init__(self, app) -> None:
@@ -31,22 +33,25 @@ class Backend_Api:
conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts'][0]
model = request.json['model']
+ provider = get_provider(request.json.get('provider'))
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream():
- answer = g4f.ChatCompletion.create(model = model,
- messages = messages, stream=True)
+ if provider:
+ answer = g4f.ChatCompletion.create(model=model,
+ provider=provider, messages=messages, stream=True)
+ else:
+ answer = g4f.ChatCompletion.create(model=model,
+ messages=messages, stream=True)
for token in answer:
yield token
-
+
return self.app.response_class(stream(), mimetype='text/event-stream')
- except Exception as e:
- print(e)
+ except Exception as e:
return {
- '_token': 'anerroroccuredmf',
'_action': '_ask',
'success': False,
"error": f"an error occured {str(e)}"}, 400 \ No newline at end of file
diff --git a/g4f/gui/server/provider.py b/g4f/gui/server/provider.py
new file mode 100644
index 00000000..286f881b
--- /dev/null
+++ b/g4f/gui/server/provider.py
@@ -0,0 +1,17 @@
+import g4f
+
+def get_provider(provider: str) -> g4f.Provider.BaseProvider:
+
+ if isinstance(provider, str):
+ print(provider)
+ if provider == 'g4f.Provider.Auto':
+ return None
+
+ if provider in g4f.Provider.ProviderUtils.convert:
+ return g4f.Provider.ProviderUtils.convert[provider]
+
+ else:
+ return None
+
+ else:
+ return None
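
get_provider turns the GUI's provider string back into a class, treating 'g4f.Provider.Auto' and anything unknown as None so the backend falls back to automatic selection. Note that the convert table is keyed by bare class names ('Bing'), while the HTML dropdown sends qualified values ('g4f.Provider.Bing'); the sketch below uses the bare form that the lookup as written accepts:

import g4f
from g4f.gui.server.provider import get_provider

provider = get_provider("Bing")           # -> the g4f.Provider.Bing class
auto = get_provider("g4f.Provider.Auto")  # -> None, meaning: let g4f pick

extra = {"provider": provider} if provider else {}
answer = g4f.ChatCompletion.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
    **extra,
)
for token in answer:
    print(token, end="")
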
diff --git a/g4f/models.py b/g4f/models.py
index aa2b3bd6..ced8e290 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -9,7 +9,6 @@ from .Provider import (
ChatgptDuo,
Vitalentum,
ChatgptAi,
- ChatForAi,
AItianhu,
ChatBase,
Liaobots,
@@ -23,13 +22,13 @@ from .Provider import (
GptGod,
AiAsk,
GptGo,
- Aivvm,
Ylokh,
Bard,
Aibn,
Bing,
You,
- H2o
+ H2o,
+ Aivvm
)
@dataclass(unsafe_hash=True)
@@ -46,7 +45,7 @@ default = Model(
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
ChatgptDuo, # Include search results
- Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
+ Aibn, Aichat, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
])
)
@@ -55,7 +54,7 @@ gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- AiAsk, Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
+ AiAsk, Aibn, Aichat, ChatgptAi, ChatgptDemo, ChatgptDuo,
FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud,
GPTalk, GptGod
])
@@ -66,16 +65,14 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh
+ DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
- best_provider = RetryProvider([
- Aivvm, Bing
- ])
+ best_provider = Bing
)
# Bard
@@ -168,31 +165,27 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613',
- base_provider = 'openai',
- best_provider = Aivvm)
+ base_provider = 'openai')
gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613',
base_provider = 'openai',
- best_provider = Aivvm
+ best_provider=Aivvm
)
gpt_4_0613 = Model(
name = 'gpt-4-0613',
- base_provider = 'openai',
- best_provider = Aivvm
+ base_provider = 'openai'
)
gpt_4_32k = Model(
name = 'gpt-4-32k',
- base_provider = 'openai',
- best_provider = Aivvm
+ base_provider = 'openai'
)
gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613',
- base_provider = 'openai',
- best_provider = Aivvm
+ base_provider = 'openai'
)
text_ada_001 = Model(