author     Heiner Lohaus <heiner.lohaus@netformic.com>  2023-08-25 06:41:32 +0200
committer  Heiner Lohaus <heiner.lohaus@netformic.com>  2023-08-25 06:41:32 +0200
commit     126496d3cacd06a4fa8cbb4e5bde417ce6bb5b4a (patch)
tree       00f989c070b0c001860c39507450aaf30e4302b1
parent     Add create_async method (diff)
Diffstat:
 -rw-r--r--  g4f/Provider/Bard.py           22
 -rw-r--r--  g4f/Provider/Bing.py           86
 -rw-r--r--  g4f/Provider/Hugchat.py        67
 -rw-r--r--  g4f/Provider/OpenaiChat.py     74
 -rw-r--r--  g4f/Provider/__init__.py        4
 -rw-r--r--  g4f/Provider/base_provider.py  85
 -rw-r--r--  testing/test_needs_auth.py     95
 7 files changed, 354 insertions(+), 79 deletions(-)
diff --git a/g4f/Provider/Bard.py b/g4f/Provider/Bard.py
index cbe728cd..a8c7d13f 100644
--- a/g4f/Provider/Bard.py
+++ b/g4f/Provider/Bard.py
@@ -2,42 +2,26 @@ import json
import random
import re
-import browser_cookie3
from aiohttp import ClientSession
import asyncio
from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from .base_provider import AsyncProvider, get_cookies
-class Bard(BaseProvider):
+class Bard(AsyncProvider):
url = "https://bard.google.com"
needs_auth = True
working = True
@classmethod
- def create_completion(
- cls,
- model: str,
- messages: list[dict[str, str]],
- stream: bool,
- proxy: str = None,
- cookies: dict = {},
- **kwargs: Any,
- ) -> CreateResult:
- yield asyncio.run(cls.create_async(str, messages, proxy, cookies))
-
- @classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
- cookies: dict = {},
+ cookies: dict = get_cookies(".google.com"),
**kwargs: Any,
) -> str:
- if not cookies:
- for cookie in browser_cookie3.load(domain_name='.google.com'):
- cookies[cookie.name] = cookie.value
formatted = "\n".join(
["%s: %s" % (message["role"], message["content"]) for message in messages]
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index b9b9e9a4..2c2e60ad 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -5,48 +5,24 @@ import random
import aiohttp
import asyncio
-import browser_cookie3
from aiohttp import ClientSession
from ..typing import Any, AsyncGenerator, CreateResult, Union
-from .base_provider import BaseProvider
+from .base_provider import AsyncGeneratorProvider, get_cookies
-class Bing(BaseProvider):
+class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat"
+ needs_auth = True
working = True
supports_gpt_4 = True
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: list[dict[str, str]],
- stream: bool,
- **kwargs: Any
- ) -> CreateResult:
- if stream:
- yield from run(cls.create_async_generator(model, messages, **kwargs))
- else:
- yield asyncio.run(cls.create_async(model, messages, **kwargs))
-
- @classmethod
- async def create_async(
- cls,
- model: str,
- messages: list[dict[str, str]],
- **kwargs: Any,
- ) -> str:
- result = []
- async for chunk in cls.create_async_generator(model, messages, **kwargs):
- result.append(chunk)
- if result:
- return "".join(result)
+    supports_stream = True
@staticmethod
def create_async_generator(
model: str,
messages: list[dict[str, str]],
- cookies: dict = {}
+ cookies: dict = get_cookies(".bing.com"),
+ **kwargs
) -> AsyncGenerator:
if len(messages) < 2:
prompt = messages[0]["content"]
@@ -54,15 +30,11 @@ class Bing(BaseProvider):
else:
prompt = messages[-1]["content"]
- context = convert(messages[:-1])
-
- if not cookies:
- for cookie in browser_cookie3.load(domain_name='.bing.com'):
- cookies[cookie.name] = cookie.value
+ context = create_context(messages[:-1])
return stream_generate(prompt, context, cookies)
-def convert(messages: list[dict[str, str]]):
+def create_context(messages: list[dict[str, str]]):
context = ""
for message in messages:
@@ -187,34 +159,32 @@ class Defaults:
'x-forwarded-for': ip_address,
}
- optionsSets = {
- "optionsSets": [
- 'saharasugg',
- 'enablenewsfc',
- 'clgalileo',
- 'gencontentv3',
- "nlu_direct_response_filter",
- "deepleo",
- "disable_emoji_spoken_text",
- "responsible_ai_policy_235",
- "enablemm",
- "h3precise"
- "dtappid",
- "cricinfo",
- "cricinfov2",
- "dv3sugg",
- "nojbfedge"
- ]
- }
+ optionsSets = [
+ 'saharasugg',
+ 'enablenewsfc',
+ 'clgalileo',
+ 'gencontentv3',
+ "nlu_direct_response_filter",
+ "deepleo",
+ "disable_emoji_spoken_text",
+ "responsible_ai_policy_235",
+ "enablemm",
+        "h3precise",
+ "dtappid",
+ "cricinfo",
+ "cricinfov2",
+ "dv3sugg",
+ "nojbfedge"
+ ]
-def format_message(msg: dict) -> str:
- return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
+def format_message(message: dict) -> str:
+ return json.dumps(message, ensure_ascii=False) + Defaults.delimiter
def create_message(conversation: Conversation, prompt: str, context: str=None) -> str:
struct = {
'arguments': [
{
- **Defaults.optionsSets,
+ 'optionsSets': Defaults.optionsSets,
'source': 'cib',
'allowedMessageTypes': Defaults.allowedMessageTypes,
'sliceIds': Defaults.sliceIds,
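
Besides renaming convert to create_context, this hunk flattens optionsSets into a plain list that create_message references explicitly instead of splatting a wrapper dict into the payload. The loop body of create_context falls outside the hunk; a plausible sketch of the flattening it performs, following the "role: content" convention used elsewhere in this commit (the exact line format here is an assumption):

def create_context(messages: list[dict[str, str]]) -> str:
    # Flatten all prior turns into a plain-text transcript that is sent
    # alongside the final prompt as Bing's conversation context.
    context = ""
    for message in messages:
        context += "%s: %s\n" % (message["role"], message["content"])
    return context

# create_context([{"role": "user", "content": "Hi"}]) -> "user: Hi\n"
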
diff --git a/g4f/Provider/Hugchat.py b/g4f/Provider/Hugchat.py
new file mode 100644
index 00000000..cedf8402
--- /dev/null
+++ b/g4f/Provider/Hugchat.py
@@ -0,0 +1,67 @@
+has_module = True
+try:
+ from hugchat.hugchat import ChatBot
+except ImportError:
+ has_module = False
+
+from .base_provider import BaseProvider, get_cookies
+from g4f.typing import CreateResult
+
+class Hugchat(BaseProvider):
+ url = "https://huggingface.co/chat/"
+ needs_auth = True
+ working = has_module
+ llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = False,
+ proxy: str = None,
+        cookies: dict = get_cookies(".huggingface.co"),
+ **kwargs
+ ) -> CreateResult:
+ bot = ChatBot(
+ cookies=cookies
+ )
+
+        if proxy:
+            if "://" not in proxy:
+                proxy = f"http://{proxy}"
+            bot.session.proxies = {"http": proxy, "https": proxy}
+
+ if model:
+ try:
+ if not isinstance(model, int):
+ model = cls.llms.index(model)
+ bot.switch_llm(model)
+            except Exception:
+                raise RuntimeError(f"Model is not supported: {model}")
+
+ if len(messages) > 1:
+ formatted = "\n".join(
+ ["%s: %s" % (message["role"], message["content"]) for message in messages]
+ )
+ prompt = f"{formatted}\nAssistant:"
+ else:
+ prompt = messages.pop()["content"]
+
+ try:
+ yield bot.chat(prompt, **kwargs)
+ finally:
+ bot.delete_conversation(bot.current_conversation)
+ bot.current_conversation = ""
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
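
Hugchat wraps the third-party hugchat package, so it stays a synchronous BaseProvider rather than building on the new async base classes, and its working flag simply reflects whether the import succeeded. A usage sketch (assumes pip install hugchat and a logged-in huggingface.co session in a local browser, so get_cookies can pick up the auth cookies):

import g4f

for response in g4f.Provider.Hugchat.create_completion(
    model="OpenAssistant/oasst-sft-6-llama-30b-xor",  # or an index into Hugchat.llms
    messages=[{"role": "user", "content": "Hello"}],
):
    print(response)
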
diff --git a/g4f/Provider/OpenaiChat.py b/g4f/Provider/OpenaiChat.py
new file mode 100644
index 00000000..cca258b3
--- /dev/null
+++ b/g4f/Provider/OpenaiChat.py
@@ -0,0 +1,74 @@
+has_module = True
+try:
+ from revChatGPT.V1 import AsyncChatbot
+except ImportError:
+ has_module = False
+from .base_provider import AsyncGeneratorProvider, get_cookies
+from ..typing import AsyncGenerator
+
+class OpenaiChat(AsyncGeneratorProvider):
+ url = "https://chat.openai.com"
+ needs_auth = True
+ working = has_module
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ proxy: str = None,
+ access_token: str = None,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncGenerator:
+
+ config = {"access_token": access_token, "model": model}
+ if proxy:
+ if "://" not in proxy:
+ proxy = f"http://{proxy}"
+ config["proxy"] = proxy
+
+ bot = AsyncChatbot(
+ config=config
+ )
+
+ if not access_token:
+ cookies = cookies if cookies else get_cookies("chat.openai.com")
+ response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
+ access_token = response.json()["accessToken"]
+ bot.set_access_token(access_token)
+
+ if len(messages) > 1:
+ formatted = "\n".join(
+ ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+ )
+ prompt = f"{formatted}\nAssistant:"
+ else:
+ prompt = messages.pop()["content"]
+
+ returned = None
+ async for message in bot.ask(prompt):
+ message = message["message"]
+ if returned:
+ if message.startswith(returned):
+ new = message[len(returned):]
+ if new:
+ yield new
+ else:
+ yield message
+ returned = message
+
+ @classmethod
+ @property
+ def params(cls):
+ params = [
+ ("model", "str"),
+ ("messages", "list[dict[str, str]]"),
+ ("stream", "bool"),
+ ("proxy", "str"),
+ ]
+ param = ", ".join([": ".join(p) for p in params])
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
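
revChatGPT's bot.ask yields the full message accumulated so far on every event, so the loop above turns those cumulative snapshots into incremental chunks before yielding them. The same diffing step in isolation (a standalone sketch, not part of the commit):

def to_deltas(snapshots):
    # Turn cumulative snapshots ("He", "Hell", "Hello") into the text
    # appended at each step ("He", "ll", "o"), mirroring the loop above.
    returned = None
    for message in snapshots:
        if returned:
            if message.startswith(returned):
                new = message[len(returned):]
                if new:
                    yield new
        else:
            yield message
        returned = message

assert list(to_deltas(["He", "Hell", "Hello"])) == ["He", "ll", "o"]
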
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index e27dee5d..5ad9f156 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -14,9 +14,11 @@ from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .H2o import H2o
+from .Hugchat import Hugchat
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .Opchatgpts import Opchatgpts
+from .OpenaiChat import OpenaiChat
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
@@ -44,10 +46,12 @@ __all__ = [
"Forefront",
"GetGpt",
"H2o",
+ "Hugchat",
"Liaobots",
"Lockchat",
"Opchatgpts",
"Raycast",
+ "OpenaiChat",
"Theb",
"Vercel",
"Wewordle",
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 98ad3514..56d79ee6 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,7 +1,11 @@
from abc import ABC, abstractmethod
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult, AsyncGenerator, Union
+import browser_cookie3
+import asyncio
class BaseProvider(ABC):
url: str
@@ -30,4 +34,81 @@ class BaseProvider(ABC):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+ return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+_cookies = {}
+
+def get_cookies(cookie_domain: str) -> dict:
+ if cookie_domain not in _cookies:
+ _cookies[cookie_domain] = {}
+ for cookie in browser_cookie3.load(cookie_domain):
+ _cookies[cookie_domain][cookie.name] = cookie.value
+ return _cookies[cookie_domain]
+
+
+class AsyncProvider(BaseProvider):
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = False,
+ **kwargs: Any
+ ) -> CreateResult:
+ yield asyncio.run(cls.create_async(model, messages, **kwargs))
+
+ @staticmethod
+ @abstractmethod
+ async def create_async(
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs: Any,
+ ) -> str:
+ raise NotImplementedError()
+
+
+class AsyncGeneratorProvider(AsyncProvider):
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ stream: bool = True,
+ **kwargs: Any
+ ) -> CreateResult:
+ if stream:
+ yield from run_generator(cls.create_async_generator(model, messages, **kwargs))
+ else:
+            yield from super().create_completion(model=model, messages=messages, **kwargs)
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: list[dict[str, str]],
+ **kwargs: Any,
+ ) -> str:
+ chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)]
+ if chunks:
+ return "".join(chunks)
+
+ @staticmethod
+ @abstractmethod
+ def create_async_generator(
+ model: str,
+ messages: list[dict[str, str]],
+ ) -> AsyncGenerator:
+ raise NotImplementedError()
+
+
+def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
+ loop = asyncio.new_event_loop()
+ gen = generator.__aiter__()
+
+ while True:
+ try:
+ yield loop.run_until_complete(gen.__anext__())
+
+ except StopAsyncIteration:
+ break
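
AsyncProvider gives async-only providers a synchronous create_completion for free, and AsyncGeneratorProvider adds streaming on top via run_generator, which drives an async generator on a private event loop so synchronous callers can iterate it. A minimal sketch of a hypothetical provider built on the new hierarchy (EchoProvider is illustrative only, not part of the commit):

import asyncio
from g4f.Provider.base_provider import AsyncGeneratorProvider
from g4f.typing import AsyncGenerator

class EchoProvider(AsyncGeneratorProvider):
    # Hypothetical provider: streams the last message back word by word.
    working = True
    supports_stream = True

    @staticmethod
    async def create_async_generator(model: str, messages: list[dict[str, str]], **kwargs) -> AsyncGenerator:
        for word in messages[-1]["content"].split():
            await asyncio.sleep(0)  # stand-in for awaiting network chunks
            yield word + " "

# Streaming, synchronous call: chunks arrive through run_generator
for chunk in EchoProvider.create_completion(None, [{"role": "user", "content": "Hello there"}]):
    print(chunk, end="")

# Non-streaming: create_async collects the chunks into one string
print(asyncio.run(EchoProvider.create_async(None, [{"role": "user", "content": "Hi"}])))
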
diff --git a/testing/test_needs_auth.py b/testing/test_needs_auth.py
new file mode 100644
index 00000000..d44ed1df
--- /dev/null
+++ b/testing/test_needs_auth.py
@@ -0,0 +1,95 @@
+import sys
+from pathlib import Path
+import asyncio
+from time import time
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+
+providers = [g4f.Provider.OpenaiChat, g4f.Provider.Bard, g4f.Provider.Bing]
+
+# Async support
+async def log_time_async(method: callable, **kwargs):
+ start = time()
+ result = await method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
+
+def log_time_yield(method: callable, **kwargs):
+ start = time()
+ result = yield from method(**kwargs)
+ yield f" {round(time() - start, 2)} secs"
+
+def log_time(method: callable, **kwargs):
+ start = time()
+ result = method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
+
+async def run_async():
+ responses = []
+ for provider in providers:
+ responses.append(log_time_async(
+ provider.create_async,
+ model=None,
+ messages=[{"role": "user", "content": "Hello"}],
+ ))
+ responses = await asyncio.gather(*responses)
+ for idx, provider in enumerate(providers):
+ print(f"{provider.__name__}:", responses[idx])
+print("Async Total:", asyncio.run(log_time_async(run_async)))
+
+# Streaming support:
+def run_stream():
+ for provider in providers:
+ print(f"{provider.__name__}: ", end="")
+ for response in log_time_yield(
+ provider.create_completion,
+ model=None,
+ messages=[{"role": "user", "content": "Hello"}],
+ ):
+ print(response, end="")
+ print()
+print("Stream Total:", log_time(run_stream))
+
+# No streaming support:
+def run_no_stream():
+ for provider in providers:
+ print(f"{provider.__name__}:", end=" ")
+ for response in log_time_yield(
+            provider.create_completion,
+            model=None,
+            messages=[{"role": "user", "content": "Hello"}],
+            stream=False,
+ ):
+ print(response, end="")
+ print()
+print("No Stream Total:", log_time(create_completion))
+
+for response in g4f.Provider.Hugchat.create_completion(
+ model=None,
+ messages=[{"role": "user", "content": "Hello, tell about you."}],
+):
+ print("Hugchat:", response)
+
+"""
+OpenaiChat: Hello! How can I assist you today? 2.0 secs
+Bard: Hello! How can I help you today? 3.44 secs
+Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
+Async Total: 4.25 secs
+
+OpenaiChat: Hello! How can I assist you today? 1.85 secs
+Bard: Hello! How can I help you today? 3.38 secs
+Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
+Stream Total: 11.37 secs
+
+OpenaiChat: Hello! How can I help you today? 3.28 secs
+Bard: Hello there! How can I help you today? 3.58 secs
+Bing: Hello! How can I help you today? 3.28 secs
+No Stream Total: 10.14 secs
+""" \ No newline at end of file