Diffstat (limited to '')
-rw-r--r--  g4f/Provider/Airforce.py                     24
-rw-r--r--  g4f/Provider/Blackbox.py                     44
-rw-r--r--  g4f/Provider/Copilot.py                       9
-rw-r--r--  g4f/Provider/PollinationsAI.py                7
-rw-r--r--  g4f/Provider/ReplicateHome.py                46
-rw-r--r--  g4f/Provider/RubiksAI.py                      8
-rw-r--r--  g4f/Provider/base_provider.py                 4
-rw-r--r--  g4f/Provider/bing/conversation.py             2
-rw-r--r--  g4f/Provider/needs_auth/BingCreateImages.py   3
-rw-r--r--  g4f/Provider/needs_auth/DeepInfraImage.py     3
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py         2
11 files changed, 75 insertions, 77 deletions
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 54bb543b..f5bcfefa 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -20,7 +20,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_system_message = True
supports_message_history = True
-
+
@classmethod
def fetch_completions_models(cls):
response = requests.get('https://api.airforce/models', verify=False)
@@ -34,19 +34,20 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
response.raise_for_status()
return response.json()
- completions_models = fetch_completions_models.__func__(None)
- imagine_models = fetch_imagine_models.__func__(None)
-
default_model = "gpt-4o-mini"
default_image_model = "flux"
additional_models_imagine = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"]
- text_models = completions_models
- image_models = [*imagine_models, *additional_models_imagine]
- models = [
- *text_models,
- *image_models,
- ]
-
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ cls.image_models = [*cls.fetch_imagine_models(), *cls.additional_models_imagine]
+ cls.models = [
+ *cls.fetch_completions_models(),
+ *cls.image_models
+ ]
+ return cls.models
+
model_aliases = {
### completions ###
# openchat
@@ -100,7 +101,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
if model in cls.image_models:
return cls._generate_image(model, messages, proxy, seed, size)
else:
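The Airforce change above replaces import-time model discovery (the removed fetch_completions_models.__func__(None) calls) with a lazy, cached get_models() classmethod, so the network requests only fire on first use. A minimal sketch of the pattern, with a hypothetical endpoint and response shape standing in for the real api.airforce calls:

    import requests

    class LazyModelProvider:
        # Hypothetical endpoint and response shape; the real provider hits
        # api.airforce via fetch_completions_models/fetch_imagine_models.
        models_url = "https://example.invalid/models"
        additional_models_imagine = ["flux"]
        models: list = []

        @classmethod
        def fetch_models(cls) -> list:
            response = requests.get(cls.models_url)
            response.raise_for_status()
            return response.json()

        @classmethod
        def get_models(cls) -> list:
            # Cache on the class so repeated calls skip the HTTP round-trip
            if not cls.models:
                cls.models = [*cls.fetch_models(), *cls.additional_models_imagine]
            return cls.models

The old class-body calls executed as a side effect of merely importing the module, so every import paid for (and could fail on) two HTTP requests even when the provider was never used.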
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index b259b4aa..41905537 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -20,15 +20,15 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
_last_validated_value = None
-
+
default_model = 'blackboxai'
default_vision_model = default_model
default_image_model = 'Image Generation'
image_models = ['Image Generation', 'repomap']
vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
-
+
userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
-
+
agentMode = {
'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
@@ -77,22 +77,21 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
}
additional_prefixes = {
- 'gpt-4o': '@gpt-4o',
- 'gemini-pro': '@gemini-pro',
- 'claude-sonnet-3.5': '@claude-sonnet'
- }
+ 'gpt-4o': '@gpt-4o',
+ 'gemini-pro': '@gemini-pro',
+ 'claude-sonnet-3.5': '@claude-sonnet'
+ }
model_prefixes = {
- **{mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
- if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]},
- **additional_prefixes
- }
+ **{
+ mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
+ if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]
+ },
+ **additional_prefixes
+ }
-
models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
-
-
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
@@ -131,12 +130,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
return cls._last_validated_value
-
@staticmethod
def generate_id(length=7):
characters = string.ascii_letters + string.digits
return ''.join(random.choice(characters) for _ in range(length))
-
+
@classmethod
def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
prefix = cls.model_prefixes.get(model, "")
@@ -157,6 +155,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
+ prompt: str = None,
proxy: str = None,
web_search: bool = False,
image: ImageType = None,
@@ -191,7 +190,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
-
+
data = {
"messages": messages,
"id": message_id,
@@ -221,26 +220,25 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
response_text = await response.text()
-
+
if model in cls.image_models:
image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
if image_matches:
image_url = image_matches[0]
- image_response = ImageResponse(images=[image_url], alt="Generated Image")
- yield image_response
+ yield ImageResponse(image_url, prompt)
return
response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
-
+
json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
if json_match:
search_results = json.loads(json_match.group(1))
answer = response_text.split('$~~~$')[-1].strip()
-
+
formatted_response = f"{answer}\n\n**Source:**"
for i, result in enumerate(search_results, 1):
formatted_response += f"\n{i}. {result['title']}: {result['link']}"
-
+
yield formatted_response
else:
yield response_text.strip()
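Blackbox's image path now accepts an explicit prompt and yields ImageResponse(image_url, prompt) directly, using the same markdown-image regex as before. A small self-contained sketch of that extraction step:

    import re

    def first_image_url(response_text):
        # Matches markdown images such as ![alt](https://host/image.png)
        # and returns the first URL, or None when the reply contains no image.
        matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
        return matches[0] if matches else None

    # first_image_url("![img](https://cdn.example/img.png)")
    # -> "https://cdn.example/img.png"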
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index e8eea0a5..e10a55e8 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -57,6 +57,7 @@ class Copilot(AbstractProvider):
image: ImageType = None,
conversation: Conversation = None,
return_conversation: bool = False,
+ web_search: bool = True,
**kwargs
) -> CreateResult:
if not has_curl_cffi:
@@ -124,12 +125,14 @@ class Copilot(AbstractProvider):
is_started = False
msg = None
image_prompt: str = None
+ last_msg = None
while True:
try:
msg = wss.recv()[0]
msg = json.loads(msg)
except:
break
+ last_msg = msg
if msg.get("event") == "appendText":
is_started = True
yield msg.get("text")
@@ -139,8 +142,12 @@ class Copilot(AbstractProvider):
yield ImageResponse(msg.get("url"), image_prompt, {"preview": msg.get("thumbnailUrl")})
elif msg.get("event") == "done":
break
+ elif msg.get("event") == "error":
+ raise RuntimeError(f"Error: {msg}")
+ elif msg.get("event") not in ["received", "startMessage", "citation", "partCompleted"]:
+ debug.log(f"Copilot Message: {msg}")
if not is_started:
- raise RuntimeError(f"Last message: {msg}")
+ raise RuntimeError(f"Invalid response: {last_msg}")
@classmethod
async def get_access_token_and_cookies(cls, proxy: str = None):
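The Copilot loop now remembers the last parsed message, raises on explicit "error" events, and logs any event type it does not recognize, so a failed run reports the server's actual payload instead of None. A condensed, synchronous sketch of that dispatch pattern (the real code reads from a curl_cffi WebSocket and logs through g4f's debug module):

    import json

    KNOWN_EVENTS = {"received", "startMessage", "citation", "partCompleted"}

    def dispatch(raw_messages):
        # Keep the last parsed message so a run that never starts streaming
        # can report what the server actually said.
        last_msg = None
        is_started = False
        for raw in raw_messages:
            msg = json.loads(raw)
            last_msg = msg
            event = msg.get("event")
            if event == "appendText":
                is_started = True
                yield msg.get("text")
            elif event == "done":
                return
            elif event == "error":
                raise RuntimeError(f"Error: {msg}")
            elif event not in KNOWN_EVENTS:
                print(f"Copilot Message: {msg}")  # stands in for debug.log
        if not is_started:
            raise RuntimeError(f"Invalid response: {last_msg}")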
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 57597bf1..a30f896d 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -3,7 +3,6 @@ from __future__ import annotations
from urllib.parse import quote
import random
import requests
-from sys import maxsize
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
@@ -40,6 +39,7 @@ class PollinationsAI(OpenaiAPI):
cls,
model: str,
messages: Messages,
+ prompt: str = None,
api_base: str = "https://text.pollinations.ai/openai",
api_key: str = None,
proxy: str = None,
@@ -49,9 +49,10 @@ class PollinationsAI(OpenaiAPI):
if model:
model = cls.get_model(model)
if model in cls.image_models:
- prompt = messages[-1]["content"]
+ if prompt is None:
+ prompt = messages[-1]["content"]
if seed is None:
- seed = random.randint(0, maxsize)
+ seed = random.randint(0, 100000)
image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width=1024&height=1024&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
yield ImageResponse(image, prompt)
return
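PollinationsAI now honors a caller-supplied prompt and draws its default seed from a bounded range rather than sys.maxsize, which is platform-dependent and produced needlessly long URLs. The URL construction, isolated as a sketch:

    import random
    from urllib.parse import quote

    def build_image_url(prompt: str, model: str, seed: int = None) -> str:
        if seed is None:
            # Bounded seed: short, and not platform-dependent the way
            # sys.maxsize is (2**31 - 1 on 32-bit vs 2**63 - 1 on 64-bit).
            seed = random.randint(0, 100000)
        return (
            "https://image.pollinations.ai/prompt/"
            f"{quote(prompt)}?width=1024&height=1024&seed={int(seed)}"
            f"&nofeed=true&nologo=true&model={quote(model)}"
        )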
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index a7fc9b54..00de09e0 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -6,6 +6,8 @@ from aiohttp import ClientSession, ContentTypeError
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..requests.aiohttp import get_connector
+from ..requests.raise_for_status import raise_for_status
from .helper import format_prompt
from ..image import ImageResponse
@@ -32,10 +34,8 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
'yorickvp/llava-13b',
]
-
-
models = text_models + image_models
-
+
model_aliases = {
# image_models
"sd-3": "stability-ai/stable-diffusion-3",
@@ -56,23 +56,14 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
# text_models
"google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
"yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
-
}
@classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
+ prompt: str = None,
proxy: str = None,
**kwargs
) -> AsyncResult:
@@ -96,29 +87,30 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
}
- async with ClientSession(headers=headers) as session:
- if model in cls.image_models:
- prompt = messages[-1]['content'] if messages else ""
- else:
- prompt = format_prompt(messages)
-
+ async with ClientSession(headers=headers, connector=get_connector(proxy=proxy)) as session:
+ if prompt is None:
+ if model in cls.image_models:
+ prompt = messages[-1]['content']
+ else:
+ prompt = format_prompt(messages)
+
data = {
"model": model,
"version": cls.model_versions[model],
"input": {"prompt": prompt},
}
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
+
+ async with session.post(cls.api_endpoint, json=data) as response:
+ await raise_for_status(response)
result = await response.json()
prediction_id = result['id']
-
+
poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}"
max_attempts = 30
delay = 5
for _ in range(max_attempts):
- async with session.get(poll_url, proxy=proxy) as response:
- response.raise_for_status()
+ async with session.get(poll_url) as response:
+ await raise_for_status(response)
try:
result = await response.json()
except ContentTypeError:
@@ -131,7 +123,7 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
if result['status'] == 'succeeded':
if model in cls.image_models:
image_url = result['output'][0]
- yield ImageResponse(image_url, "Generated image")
+ yield ImageResponse(image_url, prompt)
return
else:
for chunk in result['output']:
@@ -140,6 +132,6 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
elif result['status'] == 'failed':
raise Exception(f"Prediction failed: {result.get('error')}")
await asyncio.sleep(delay)
-
+
if result['status'] != 'succeeded':
raise Exception("Prediction timed out")
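ReplicateHome moves the proxy onto the session via get_connector, swaps in the project's raise_for_status helper, and echoes the prompt back in the ImageResponse instead of the fixed "Generated image" alt text. A condensed, standalone sketch of the polling flow, using aiohttp's built-in status check in place of the helper:

    import asyncio
    from aiohttp import ClientSession, ContentTypeError

    async def poll_prediction(session: ClientSession, prediction_id: str) -> dict:
        poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}"
        max_attempts, delay = 30, 5
        for _ in range(max_attempts):
            async with session.get(poll_url) as response:
                response.raise_for_status()
                try:
                    result = await response.json()
                except ContentTypeError:
                    # The poll endpoint sometimes answers with a non-JSON
                    # body; wait and try again.
                    result = None
            if result is not None and result["status"] in ("succeeded", "failed"):
                return result
            await asyncio.sleep(delay)
        raise Exception("Prediction timed out")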
diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py
index c06e6c3d..816ea60c 100644
--- a/g4f/Provider/RubiksAI.py
+++ b/g4f/Provider/RubiksAI.py
@@ -9,7 +9,7 @@ from urllib.parse import urlencode
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, Sources
from ..requests.raise_for_status import raise_for_status
class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
@@ -23,7 +23,6 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'gpt-4o-mini'
models = [default_model, 'gpt-4o', 'o1-mini', 'claude-3.5-sonnet', 'grok-beta', 'gemini-1.5-pro', 'nova-pro']
-
model_aliases = {
"llama-3.1-70b": "llama-3.1-70b-versatile",
}
@@ -118,7 +117,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
if 'url' in json_data and 'title' in json_data:
if web_search:
- sources.append({'title': json_data['title'], 'url': json_data['url']})
+ sources.append(json_data)
elif 'choices' in json_data:
for choice in json_data['choices']:
@@ -128,5 +127,4 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
yield content
if web_search and sources:
- sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
- yield f"\n\n**Source:**\n{sources_text}"
\ No newline at end of file
+ yield Sources(sources)
\ No newline at end of file
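RubiksAI now collects the raw source dicts and yields a single Sources response object (newly re-exported from base_provider), leaving the rendering to the response layer instead of formatting markdown inline. A sketch of what such a response type does, modeled on the formatting the provider used to do itself; the real class lives in g4f/providers/response.py and its fields may differ:

    class Sources:
        # Wraps web-search results so the frontend can render them as it likes.
        def __init__(self, sources: list):
            self.sources = sources

        def __str__(self) -> str:
            lines = [f"{i}. [{s['title']}]: {s['url']}"
                     for i, s in enumerate(self.sources, 1)]
            return "\n\n**Source:**\n" + "\n".join(lines)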
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 667f6964..c0d8edf0 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,4 +1,4 @@
from ..providers.base_provider import *
-from ..providers.types import FinishReason, Streaming
-from ..providers.conversation import BaseConversation
+from ..providers.types import Streaming
+from ..providers.response import BaseConversation, Sources, FinishReason
from .helper import get_cookies, format_prompt
\ No newline at end of file
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
index b5c237f9..43bcbb4d 100644
--- a/g4f/Provider/bing/conversation.py
+++ b/g4f/Provider/bing/conversation.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from ...requests import StreamSession, raise_for_status
from ...errors import RateLimitError
-from ...providers.conversation import BaseConversation
+from ...providers.response import BaseConversation
class Conversation(BaseConversation):
"""
diff --git a/g4f/Provider/needs_auth/BingCreateImages.py b/g4f/Provider/needs_auth/BingCreateImages.py
index 80984d40..b95a78c3 100644
--- a/g4f/Provider/needs_auth/BingCreateImages.py
+++ b/g4f/Provider/needs_auth/BingCreateImages.py
@@ -28,13 +28,14 @@ class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
+ prompt: str = None,
api_key: str = None,
cookies: Cookies = None,
proxy: str = None,
**kwargs
) -> AsyncResult:
session = BingCreateImages(cookies, proxy, api_key)
- yield await session.generate(messages[-1]["content"])
+ yield await session.generate(messages[-1]["content"] if prompt is None else prompt)
async def generate(self, prompt: str) -> ImageResponse:
"""
diff --git a/g4f/Provider/needs_auth/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py
index 24df04e3..44790561 100644
--- a/g4f/Provider/needs_auth/DeepInfraImage.py
+++ b/g4f/Provider/needs_auth/DeepInfraImage.py
@@ -29,9 +29,10 @@ class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
+ prompt: str = None,
**kwargs
) -> AsyncResult:
- yield await cls.create_async(messages[-1]["content"], model, **kwargs)
+ yield await cls.create_async(messages[-1]["content"] if prompt is None else prompt, model, **kwargs)
@classmethod
async def create_async(
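BingCreateImages, DeepInfraImage, and PollinationsAI all gain the same optional prompt parameter with the same fallback: use the caller's prompt when given, otherwise the content of the last message. Expressed as a hypothetical shared helper:

    def resolve_prompt(messages: list, prompt: str = None) -> str:
        # Explicit prompt wins; otherwise fall back to the latest message,
        # mirroring messages[-1]["content"] if prompt is None else prompt.
        return messages[-1]["content"] if prompt is None else prompt

Keeping the fallback identical across image providers means callers can pass prompt= once and get consistent behavior everywhere.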
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 97515ec4..587c0a23 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -28,7 +28,7 @@ from ...requests.raise_for_status import raise_for_status
from ...requests.aiohttp import StreamSession
from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
from ...errors import MissingAuthError, ResponseError
-from ...providers.conversation import BaseConversation
+from ...providers.response import BaseConversation
from ..helper import format_cookies
from ..openai.har_file import get_request_config, NoValidHarFileError
from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url