Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Airforce.py                |  96
-rw-r--r--  g4f/Provider/Blackbox.py                | 107
-rwxr-xr-x  g4f/Provider/RobocodersAPI.py           |  90
-rw-r--r--  g4f/Provider/__init__.py                |   3
-rw-r--r--  g4f/Provider/airforce/AirforceChat.py   | 179
-rw-r--r--  g4f/Provider/airforce/AirforceImage.py  |  81
-rw-r--r--  g4f/Provider/airforce/__init__.py       |   2
7 files changed, 223 insertions(+), 335 deletions(-)
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 6254e160..54bb543b 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -4,46 +4,88 @@ import random
import json
import re
+import requests
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
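+# The model-list requests below use verify=False, so silence the InsecureRequestWarning
+# that urllib3 would otherwise emit on every call.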
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
from ..requests import StreamSession, raise_for_status
-from .airforce.AirforceChat import AirforceChat
-from .airforce.AirforceImage import AirforceImage
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://api.airforce"
- api_endpoint_completions = AirforceChat.api_endpoint
- api_endpoint_imagine = AirforceImage.api_endpoint
+ url = "https://llmplayground.net"
+ api_endpoint_completions = "https://api.airforce/chat/completions"
+ api_endpoint_imagine = "https://api.airforce/imagine2"
working = True
- default_model = "gpt-4o-mini"
supports_system_message = True
supports_message_history = True
- text_models = [
- 'gpt-4-turbo',
- default_model,
- 'llama-3.1-70b-turbo',
- 'llama-3.1-8b-turbo',
- ]
- image_models = [
- 'flux',
- 'flux-realism',
- 'flux-anime',
- 'flux-3d',
- 'flux-disney',
- 'flux-pixel',
- 'flux-4o',
- 'any-dark',
- ]
+
+ @classmethod
+ def fetch_completions_models(cls):
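+ """Return the chat model ids advertised by the api.airforce /models endpoint."""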
+ response = requests.get('https://api.airforce/models', verify=False)
+ response.raise_for_status()
+ data = response.json()
+ return [model['id'] for model in data['data']]
+
+ @classmethod
+ def fetch_imagine_models(cls):
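+ """Return the image model ids advertised by the api.airforce /imagine/models endpoint."""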
+ response = requests.get('https://api.airforce/imagine/models', verify=False)
+ response.raise_for_status()
+ return response.json()
+
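+ # Resolve both model lists once at class-definition time; __func__ unwraps the classmethod
+ # so it can be called before the class object exists (the cls parameter is unused, so None is passed).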
+ completions_models = fetch_completions_models.__func__(None)
+ imagine_models = fetch_imagine_models.__func__(None)
+
+ default_model = "gpt-4o-mini"
+ default_image_model = "flux"
+ additional_models_imagine = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"]
+ text_models = completions_models
+ image_models = [*imagine_models, *additional_models_imagine]
models = [
*text_models,
*image_models,
]
- model_aliases = {
- "gpt-4o": "chatgpt-4o-latest",
+
+ model_aliases = {
+ ### completions ###
+ # openchat
+ "openchat-3.5": "openchat-3.5-0106",
+
+ # deepseek-ai
+ "deepseek-coder": "deepseek-coder-6.7b-instruct",
+
+ # NousResearch
+ "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+ "hermes-2-pro": "hermes-2-pro-mistral-7b",
+
+ # teknium
+ "openhermes-2.5": "openhermes-2.5-mistral-7b",
+
+ # liquid
+ "lfm-40b": "lfm-40b-moe",
+
+ # DiscoResearch
+ "german-7b": "discolm-german-7b-v1",
+
+ # meta-llama
+ "llama-2-7b": "llama-2-7b-chat-int8",
+ "llama-2-7b": "llama-2-7b-chat-fp16",
+ "llama-3.1-70b": "llama-3.1-70b-chat",
+ "llama-3.1-8b": "llama-3.1-8b-chat",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",
- "gpt-4": "gpt-4-turbo",
+
+ # inferless
+ "neural-7b": "neural-chat-7b-v3-1",
+
+ # HuggingFaceH4
+ "zephyr-7b": "zephyr-7b-beta",
+
+ ### imagine ###
+ "sdxl": "stable-diffusion-xl-base",
+ "sdxl": "stable-diffusion-xl-lightning",
+ "flux-pro": "Flux-1.1-Pro",
}
@classmethod
@@ -53,7 +95,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
messages: Messages,
proxy: str = None,
seed: int = None,
- size: str = "1:1",
+ size: str = "1:1", # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1"
stream: bool = False,
**kwargs
) -> AsyncResult:
@@ -168,4 +210,4 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
'',
part_response
)
- return part_response
\ No newline at end of file
+ return part_response
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index ba58a511..b259b4aa 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -10,7 +10,6 @@ import aiohttp
from ..typing import AsyncResult, Messages, ImageType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, to_data_uri
-from .helper import get_random_string
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
label = "Blackbox AI"
@@ -21,19 +20,19 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
_last_validated_value = None
-
+
default_model = 'blackboxai'
default_vision_model = default_model
- default_image_model = 'generate_image'
- image_models = [default_image_model, 'repomap']
- text_models = [default_model, 'gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
- vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'blackboxai-pro']
- model_aliases = {
- "claude-3.5-sonnet": "claude-sonnet-3.5",
- }
+ default_image_model = 'Image Generation'
+ image_models = ['Image Generation', 'repomap']
+ vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+
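+ # Models the user can pick explicitly; forwarded verbatim in the request's "userSelectedModel" field below.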
+ userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
+
agentMode = {
- default_image_model: {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+ 'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
+
trendingAgentMode = {
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
@@ -53,6 +52,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'React Agent': {'mode': True, 'id': "React Agent"},
'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
+ #
'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
#
'repomap': {'mode': True, 'id': "repomap"},
@@ -75,8 +75,24 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
'builder Agent': {'mode': True, 'id': "builder Agent"},
}
- model_prefixes = {mode: f"@{value['id']}" for mode, value in trendingAgentMode.items() if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]}
- models = [*text_models, default_image_model, *list(trendingAgentMode.keys())]
+
+ additional_prefixes = {
+ 'gpt-4o': '@gpt-4o',
+ 'gemini-pro': '@gemini-pro',
+ 'claude-sonnet-3.5': '@claude-sonnet'
+ }
+
+ model_prefixes = {
+ **{mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
+ if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]},
+ **additional_prefixes
+ }
+
+
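+ # dict.fromkeys de-duplicates the combined model list while preserving insertion order.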
+ models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
+
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
@@ -85,11 +101,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
@classmethod
async def fetch_validated(cls):
- # If the key is already stored in memory, return it
if cls._last_validated_value:
return cls._last_validated_value
- # If the key is not found, perform a search
async with aiohttp.ClientSession() as session:
try:
async with session.get(cls.url) as response:
@@ -110,13 +124,19 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
match = key_pattern.search(js_content)
if match:
validated_value = match.group(1)
- cls._last_validated_value = validated_value # Keep in mind
+ cls._last_validated_value = validated_value
return validated_value
except Exception as e:
print(f"Error fetching validated value: {e}")
return cls._last_validated_value
+
+ @staticmethod
+ def generate_id(length=7):
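+ """Return a random alphanumeric string (default 7 chars) used as the chat message id."""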
+ characters = string.ascii_letters + string.digits
+ return ''.join(random.choice(characters) for _ in range(length))
+
@classmethod
def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
prefix = cls.model_prefixes.get(model, "")
@@ -143,8 +163,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
image_name: str = None,
**kwargs
) -> AsyncResult:
- model = cls.get_model(model)
- message_id = get_random_string(7)
+ message_id = cls.generate_id()
messages = cls.add_prefix_to_messages(messages, model)
validated_value = await cls.fetch_validated()
@@ -172,7 +191,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
-
+
data = {
"messages": messages,
"id": message_id,
@@ -193,7 +212,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"clickedForceWebSearch": False,
"visitFromDelta": False,
"mobileClient": False,
- "userSelectedModel": model if model in cls.text_models else None,
+ "userSelectedModel": model if model in cls.userSelectedModel else None,
"webSearchMode": web_search,
"validated": validated_value,
}
@@ -201,29 +220,27 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async with ClientSession(headers=headers) as session:
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- is_first = False
- async for chunk in response.content.iter_any():
- text_chunk = chunk.decode(errors="ignore")
- if model in cls.image_models:
- image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', text_chunk)
- if image_matches:
- image_url = image_matches[0]
- image_response = ImageResponse(images=[image_url])
- yield image_response
- continue
-
- text_chunk = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', text_chunk, flags=re.DOTALL)
- json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', text_chunk, re.DOTALL)
- if json_match:
- search_results = json.loads(json_match.group(1))
- answer = text_chunk.split('$~~~$')[-1].strip()
- formatted_response = f"{answer}\n\n**Source:**"
- for i, result in enumerate(search_results, 1):
- formatted_response += f"\n{i}. {result['title']}: {result['link']}"
- yield formatted_response
- elif text_chunk:
- if is_first:
- is_first = False
- yield text_chunk.lstrip()
- else:
- yield text_chunk
\ No newline at end of file
+ response_text = await response.text()
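+
+ # The body is now read in full rather than streamed chunk by chunk; the parsing below
+ # handles image markdown first, then the $~~~$-delimited web-search payload.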
+
+ if model in cls.image_models:
+ image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
+ if image_matches:
+ image_url = image_matches[0]
+ image_response = ImageResponse(images=[image_url], alt="Generated Image")
+ yield image_response
+ return
+
+ response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
+
+ json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
+ if json_match:
+ search_results = json.loads(json_match.group(1))
+ answer = response_text.split('$~~~$')[-1].strip()
+
+ formatted_response = f"{answer}\n\n**Source:**"
+ for i, result in enumerate(search_results, 1):
+ formatted_response += f"\n{i}. {result['title']}: {result['link']}"
+
+ yield formatted_response
+ else:
+ yield response_text.strip()
diff --git a/g4f/Provider/RobocodersAPI.py b/g4f/Provider/RobocodersAPI.py
new file mode 100755
index 00000000..aa82902b
--- /dev/null
+++ b/g4f/Provider/RobocodersAPI.py
@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+import json
+import aiohttp
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "API Robocoders AI"
+ url = "https://api.robocoders.ai/docs"
+ api_endpoint = "https://api.robocoders.ai/chat"
+ working = True
+ supports_message_history = True
+ default_model = 'GeneralCodingAgent'
+ agent = [default_model, "RepoAgent", "FrontEndAgent"]
+ models = [*agent]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ async with aiohttp.ClientSession() as session:
+ access_token = await cls._get_access_token(session)
+ if not access_token:
+ raise Exception("Failed to get access token")
+
+ session_id = await cls._create_session(session, access_token)
+ if not session_id:
+ raise Exception("Failed to create session")
+
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {access_token}"
+ }
+
+ prompt = format_prompt(messages)
+
+ data = {
+ "sid": session_id,
+ "prompt": prompt,
+ "agent": model
+ }
+
+ async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
+ if response.status != 200:
+ raise Exception(f"Error: {response.status}")
+
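+ # The endpoint streams newline-delimited JSON; lines that fail to parse are skipped.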
+ async for line in response.content:
+ if line:
+ try:
+ response_data = json.loads(line)
+ message = response_data.get('message', '')
+ if message:
+ yield message
+ except json.JSONDecodeError:
+ pass
+
+ @staticmethod
+ async def _get_access_token(session: aiohttp.ClientSession) -> str:
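+ """Fetch the auth page and extract the bearer token from its <pre id="token"> element."""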
+ url_auth = 'https://api.robocoders.ai/auth'
+ headers_auth = {
+ 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+ }
+
+ async with session.get(url_auth, headers=headers_auth) as response:
+ if response.status == 200:
+ text = await response.text()
+ return text.split('id="token">')[1].split('</pre>')[0].strip()
+ return None
+
+ @staticmethod
+ async def _create_session(session: aiohttp.ClientSession, access_token: str) -> str:
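+ """Create a chat session authorized with the bearer token and return its session id (sid)."""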
+ url_create_session = 'https://api.robocoders.ai/create-session'
+ headers_create_session = {
+ 'Authorization': f'Bearer {access_token}'
+ }
+
+ async with session.get(url_create_session, headers=headers_create_session) as response:
+ if response.status == 200:
+ data = await response.json()
+ return data.get('sid')
+ return None
+
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index faf9979e..2083c0ff 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -35,11 +35,12 @@ from .Pizzagpt import Pizzagpt
from .Prodia import Prodia
from .Reka import Reka
from .ReplicateHome import ReplicateHome
+from .RobocodersAPI import RobocodersAPI
from .RubiksAI import RubiksAI
from .TeachAnything import TeachAnything
from .Upstage import Upstage
from .You import You
-from .Mhystical import Mhystical
+from .Mhystical import Mhystical
import sys
diff --git a/g4f/Provider/airforce/AirforceChat.py b/g4f/Provider/airforce/AirforceChat.py
deleted file mode 100644
index 1efe0026..00000000
--- a/g4f/Provider/airforce/AirforceChat.py
+++ /dev/null
@@ -1,179 +0,0 @@
-from __future__ import annotations
-import re
-import json
-import requests
-from aiohttp import ClientSession
-from typing import List
-import logging
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-# Helper function to clean the response
-def clean_response(text: str) -> str:
- """Clean response from unwanted patterns."""
- patterns = [
- r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
- r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
- r"Rate limit \(\d+\/hour\) exceeded\. Join our discord for more: https:\/\/discord\.com\/invite\/\S+",
- r"</s>", # zephyr-7b-beta
- r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", # Matches [ERROR] 'UUID'
- ]
- for pattern in patterns:
- text = re.sub(pattern, '', text)
-
- # Remove the <|im_end|> token if present
- text = text.replace("<|im_end|>", "").strip()
-
- return text
-
-def split_message(message: str, max_length: int = 1000) -> List[str]:
- """Splits the message into chunks of a given length (max_length)"""
- # Split the message into smaller chunks to avoid exceeding the limit
- chunks = []
- while len(message) > max_length:
- # Find the last space or punctuation before max_length to avoid cutting words
- split_point = message.rfind(' ', 0, max_length)
- if split_point == -1: # No space found, split at max_length
- split_point = max_length
- chunks.append(message[:split_point])
- message = message[split_point:].strip()
- if message:
- chunks.append(message) # Append the remaining part of the message
- return chunks
-
-class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin):
- label = "AirForce Chat"
- api_endpoint = "https://api.airforce/chat/completions"
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = 'llama-3.1-70b-chat'
-
- @classmethod
- def get_models(cls) -> list:
- if not cls.models:
- try:
- response = requests.get('https://api.airforce/models', verify=False)
- data = response.json()
- cls.models = [model['id'] for model in data['data']]
- except Exception as e:
- logging.exception(e)
- cls.models = [cls.default_model]
-
- model_aliases = {
- # openchat
- "openchat-3.5": "openchat-3.5-0106",
-
- # deepseek-ai
- "deepseek-coder": "deepseek-coder-6.7b-instruct",
-
- # NousResearch
- "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
- "hermes-2-pro": "hermes-2-pro-mistral-7b",
-
- # teknium
- "openhermes-2.5": "openhermes-2.5-mistral-7b",
-
- # liquid
- "lfm-40b": "lfm-40b-moe",
-
- # DiscoResearch
- "german-7b": "discolm-german-7b-v1",
-
- # meta-llama
- "llama-2-7b": "llama-2-7b-chat-int8",
- "llama-2-7b": "llama-2-7b-chat-fp16",
- "llama-3.1-70b": "llama-3.1-70b-chat",
- "llama-3.1-8b": "llama-3.1-8b-chat",
- "llama-3.1-70b": "llama-3.1-70b-turbo",
- "llama-3.1-8b": "llama-3.1-8b-turbo",
-
- # inferless
- "neural-7b": "neural-chat-7b-v3-1",
-
- # HuggingFaceH4
- "zephyr-7b": "zephyr-7b-beta",
-
- # llmplayground.net
- #"any-uncensored": "any-uncensored",
- }
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- proxy: str = None,
- max_tokens: str = 4096,
- temperature: str = 1,
- top_p: str = 1,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en-US,en;q=0.9',
- 'authorization': 'Bearer missing api key',
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': 'https://llmplayground.net',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'referer': 'https://llmplayground.net/',
- 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Linux"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
- }
-
- # Format the messages for the API
- formatted_messages = format_prompt(messages)
- message_chunks = split_message(formatted_messages)
-
- full_response = ""
- for chunk in message_chunks:
- data = {
- "messages": [{"role": "user", "content": chunk}],
- "model": model,
- "max_tokens": max_tokens,
- "temperature": temperature,
- "top_p": top_p,
- "stream": stream
- }
-
- async with ClientSession(headers=headers) as session:
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
-
- text = ""
- if stream:
- async for line in response.content:
- line = line.decode('utf-8').strip()
- if line.startswith('data: '):
- json_str = line[6:]
- try:
- if json_str and json_str != "[DONE]":
- chunk = json.loads(json_str)
- if 'choices' in chunk and chunk['choices']:
- content = chunk['choices'][0].get('delta', {}).get('content', '')
- text += content
- except json.JSONDecodeError as e:
- print(f"Error decoding JSON: {json_str}, Error: {e}")
- elif line == "[DONE]":
- break
- full_response += clean_response(text)
- else:
- response_json = await response.json()
- text = response_json["choices"][0]["message"]["content"]
- full_response += clean_response(text)
-
- # Return the complete response after all chunks
- yield full_response
diff --git a/g4f/Provider/airforce/AirforceImage.py b/g4f/Provider/airforce/AirforceImage.py
deleted file mode 100644
index a5bd113f..00000000
--- a/g4f/Provider/airforce/AirforceImage.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-from urllib.parse import urlencode
-import random
-import requests
-import logging
-
-from ...typing import AsyncResult, Messages
-from ...image import ImageResponse
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Airforce Image"
- url = "https://api.airforce"
- api_endpoint = "https://api.airforce/imagine2"
- working = False
-
- default_model = 'flux'
- additional_models = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"]
- model_aliases = {
- "sdxl": "stable-diffusion-xl-base",
- "sdxl": "stable-diffusion-xl-lightning",
- "flux-pro": "Flux-1.1-Pro",
- }
-
- @classmethod
- def get_models(cls) -> list:
- if not cls.models:
- try:
- response = requests.get('https://api.airforce/imagine/models', verify=False)
- response.raise_for_status()
- cls.models = [*response.json(), *cls.additional_models]
- except Exception as e:
- logging.exception(e)
- cls.models = [cls.default_model]
- return cls.models
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- size: str = '1:1', # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1"
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
- 'accept-language': 'en-US,en;q=0.9',
- 'cache-control': 'no-cache',
- 'dnt': '1',
- 'pragma': 'no-cache',
- 'priority': 'u=1, i',
- 'referer': 'https://llmplayground.net/',
- 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Linux"',
- 'sec-fetch-dest': 'image',
- 'sec-fetch-mode': 'no-cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
- }
-
- async with ClientSession(headers=headers) as session:
- seed = random.randint(0, 58463)
- params = {
- 'model': model,
- 'prompt': messages[-1]["content"],
- 'size': size,
- 'seed': seed
- }
- full_url = f"{cls.api_endpoint}?{urlencode(params)}"
-
- async with session.get(full_url, headers=headers, proxy=proxy) as response:
- if response.status == 200 and response.headers.get('content-type', '').startswith('image'):
- yield ImageResponse(images=[full_url], alt="Generated Image")
- else:
- raise Exception(f"Error: status {response.status}, content type {response.headers.get('content-type')}")
diff --git a/g4f/Provider/airforce/__init__.py b/g4f/Provider/airforce/__init__.py
deleted file mode 100644
index 5ffa6d31..00000000
--- a/g4f/Provider/airforce/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .AirforceChat import AirforceChat
-from .AirforceImage import AirforceImage