Diffstat (limited to 'g4f/Provider')
 g4f/Provider/Airforce.py     | 255
 g4f/Provider/FluxAirforce.py |  82
 g4f/Provider/Rocks.py        |  70
 g4f/Provider/__init__.py     |   3
 4 files changed, 256 insertions(+), 154 deletions(-)
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
new file mode 100644
index 00000000..88896096
--- /dev/null
+++ b/g4f/Provider/Airforce.py
@@ -0,0 +1,255 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientResponseError
+from urllib.parse import urlencode
+import json
+import io
+import asyncio
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse, is_accepted_format
+from .helper import format_prompt
+
+class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://api.airforce"
+    text_api_endpoint = "https://api.airforce/chat/completions"
+    image_api_endpoint = "https://api.airforce/v1/imagine2"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+    default_model = 'llama-3-70b-chat'
+    text_models = [
+        # Open source models
+        'llama-2-13b-chat',
+
+        'llama-3-70b-chat',
+        'llama-3-70b-chat-turbo',
+        'llama-3-70b-chat-lite',
+
+        'llama-3-8b-chat',
+        'llama-3-8b-chat-turbo',
+        'llama-3-8b-chat-lite',
+
+        'llama-3.1-405b-turbo',
+        'llama-3.1-70b-turbo',
+        'llama-3.1-8b-turbo',
+
+        'LlamaGuard-2-8b',
+        'Llama-Guard-7b',
+        'Meta-Llama-Guard-3-8B',
+
+        'Mixtral-8x7B-Instruct-v0.1',
+        'Mixtral-8x22B-Instruct-v0.1',
+        'Mistral-7B-Instruct-v0.1',
+        'Mistral-7B-Instruct-v0.2',
+        'Mistral-7B-Instruct-v0.3',
+
+        'Qwen1.5-72B-Chat',
+        'Qwen1.5-110B-Chat',
+        'Qwen2-72B-Instruct',
+
+        'gemma-2b-it',
+        'gemma-2-9b-it',
+        'gemma-2-27b-it',
+
+        'dbrx-instruct',
+
+        'deepseek-llm-67b-chat',
+
+        'Nous-Hermes-2-Mixtral-8x7B-DPO',
+        'Nous-Hermes-2-Yi-34B',
+
+        'WizardLM-2-8x22B',
+
+        'SOLAR-10.7B-Instruct-v1.0',
+
+        'StripedHyena-Nous-7B',
+
+        'sparkdesk',
+
+
+        # Other models
+        'chatgpt-4o-latest',
+        'gpt-4',
+        'gpt-4-turbo',
+        'gpt-4o-mini-2024-07-18',
+        'gpt-4o-mini',
+        'gpt-4o',
+        'gpt-3.5-turbo',
+        'gpt-3.5-turbo-0125',
+        'gpt-3.5-turbo-1106',
+        'gpt-3.5-turbo-16k',
+        'gpt-3.5-turbo-0613',
+        'gpt-3.5-turbo-16k-0613',
+
+        'gemini-1.5-flash',
+        'gemini-1.5-pro',
+    ]
+    image_models = [
+        'flux',
+        'flux-realism',
+        'flux-anime',
+        'flux-3d',
+        'flux-disney',
+        'flux-pixel',
+        'any-dark',
+    ]
+
+    models = [
+        *text_models,
+        *image_models
+    ]
+    model_aliases = {
+        # Open source models
+        "llama-2-13b": "llama-2-13b-chat",
+
+        "llama-3-70b": "llama-3-70b-chat",
+        "llama-3-70b": "llama-3-70b-chat-turbo",
+        "llama-3-70b": "llama-3-70b-chat-lite",
+
+        "llama-3-8b": "llama-3-8b-chat",
+        "llama-3-8b": "llama-3-8b-chat-turbo",
+        "llama-3-8b": "llama-3-8b-chat-lite",
+
+        "llama-3.1-405b": "llama-3.1-405b-turbo",
+        "llama-3.1-70b": "llama-3.1-70b-turbo",
+        "llama-3.1-8b": "llama-3.1-8b-turbo",
+
+        "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
+        "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
+        "mistral-7b": "Mistral-7B-Instruct-v0.1",
+        "mistral-7b": "Mistral-7B-Instruct-v0.2",
+        "mistral-7b": "Mistral-7B-Instruct-v0.3",
+
+        "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+
+        "qwen-1-5-72b": "Qwen1.5-72B-Chat",
+        "qwen-1_5-110b": "Qwen1.5-110B-Chat",
+        "qwen-2-72b": "Qwen2-72B-Instruct",
+
+        "gemma-2b": "gemma-2b-it",
+        "gemma-2b-9b": "gemma-2-9b-it",
+        "gemma-2b-27b": "gemma-2-27b-it",
+
+        "deepseek": "deepseek-llm-67b-chat",
+
+        "yi-34b": "Nous-Hermes-2-Yi-34B",
+
+        "wizardlm-2-8x22b": "WizardLM-2-8x22B",
+
+        "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
+
+        "sh-n-7b": "StripedHyena-Nous-7B",
+
+        "sparkdesk-v1.1": "sparkdesk",
+
+
+        # Other models
+        "gpt-4o": "chatgpt-4o-latest",
+        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
+
+
+        "gemini-flash": "gemini-1.5-flash",
+        "gemini-pro": "gemini-1.5-pro",
+    }
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://api.airforce",
+            "sec-ch-ua": '"Chromium";v="128", "Not(A:Brand";v="24"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
+        }
+
+
+        if model in cls.image_models:
+            async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
+                yield item
+        else:
+            async for item in cls.generate_text(model, messages, headers, proxy, **kwargs):
+                yield item
+
+    @classmethod
+    async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [{"role": "user", "content": format_prompt(messages)}],
+                "model": model,
+                "temperature": kwargs.get('temperature', 1),
+                "top_p": kwargs.get('top_p', 1),
+                "stream": True
+            }
+
+            async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line:
+                        line = line.decode('utf-8').strip()
+                        if line.startswith("data: "):
+                            try:
+                                data = json.loads(line[6:])
+                                if 'choices' in data and len(data['choices']) > 0:
+                                    delta = data['choices'][0].get('delta', {})
+                                    if 'content' in delta:
+                                        yield delta['content']
+                            except json.JSONDecodeError:
+                                continue
+                        elif line == "data: [DONE]":
+                            break
+
+    @classmethod
+    async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
+        prompt = messages[-1]['content'] if messages else ""
+        params = {
+            "prompt": prompt,
+            "size": kwargs.get("size", "1:1"),
+            "seed": kwargs.get("seed"),
+            "model": model
+        }
+        params = {k: v for k, v in params.items() if v is not None}
+
+        try:
+            async with ClientSession(headers=headers) as session:
+                async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response:
+                    response.raise_for_status()
+                    content = await response.read()
+
+                    if response.content_type.startswith('image/'):
+                        image_url = str(response.url)
+                        yield ImageResponse(image_url, prompt)
+                    else:
+                        try:
+                            text = content.decode('utf-8', errors='ignore')
+                            yield f"Error: {text}"
+                        except Exception as decode_error:
+                            yield f"Error: Unable to decode response - {str(decode_error)}"
+        except ClientResponseError as e:
+            yield f"Error: HTTP {e.status}: {e.message}"
+        except Exception as e:
+            yield f"Unexpected error: {str(e)}"
diff --git a/g4f/Provider/FluxAirforce.py b/g4f/Provider/FluxAirforce.py
deleted file mode 100644
index fe003a61..00000000
--- a/g4f/Provider/FluxAirforce.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
-from urllib.parse import urlencode
-import io
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse, is_accepted_format
-
-class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://flux.api.airforce/"
-    api_endpoint = "https://api.airforce/v1/imagine2"
-    working = True
-    default_model = 'flux-realism'
-    models = [
-        'flux',
-        'flux-realism',
-        'flux-anime',
-        'flux-3d',
-        'flux-disney'
-    ]
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "origin": "https://flux.api.airforce",
-            "priority": "u=1, i",
-            "referer": "https://flux.api.airforce/",
-            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-        }
-
-        prompt = messages[-1]['content'] if messages else ""
-
-        params = {
-            "prompt": prompt,
-            "size": kwargs.get("size", "1:1"),
-            "seed": kwargs.get("seed"),
-            "model": model
-        }
-
-        params = {k: v for k, v in params.items() if v is not None}
-
-        try:
-            async with ClientSession(headers=headers) as session:
-                async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
-                    response.raise_for_status()
-
-                    content = await response.read()
-
-                    if response.content_type.startswith('image/'):
-                        image_url = str(response.url)
-                        yield ImageResponse(image_url, prompt)
-                    else:
-                        try:
-                            text = content.decode('utf-8', errors='ignore')
-                            yield f"Error: {text}"
-                        except Exception as decode_error:
-                            yield f"Error: Unable to decode response - {str(decode_error)}"
-
-        except ClientResponseError as e:
-            yield f"Error: HTTP {e.status}: {e.message}"
-        except Exception as e:
-            yield f"Unexpected error: {str(e)}"
-
-        finally:
-            if not session.closed:
-                await session.close()
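
The image generation that FluxAirforce handled is absorbed by Airforce.generate_image above. A rough equivalent call (model name, prompt, and keyword values are illustrative), noting that size and seed are forwarded as query parameters and that a None seed is dropped from the request:

import asyncio

from g4f.Provider import Airforce

async def main() -> None:
    # Image models route to /v1/imagine2 and yield an ImageResponse, or an error string on failure.
    async for item in Airforce.create_async_generator(
        model="flux-realism",
        messages=[{"role": "user", "content": "a watercolor fox in the snow"}],
        size="1:1",   # aspect ratio; same default the provider uses
        seed=42,      # optional; omitted from the request when None
    ):
        print(item)

asyncio.run(main())
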
diff --git a/g4f/Provider/Rocks.py b/g4f/Provider/Rocks.py
deleted file mode 100644
index f44e0060..00000000
--- a/g4f/Provider/Rocks.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import asyncio
-import json
-from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from .base_provider import AsyncGeneratorProvider
-
-class Rocks(AsyncGeneratorProvider):
-    url = "https://api.airforce"
-    api_endpoint = "/chat/completions"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-    supports_stream = True
-    supports_system_message = True
-    working = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        payload = {"messages":messages,"model":model,"max_tokens":4096,"temperature":1,"top_p":1,"stream":True}
-
-        headers = {
-            "Accept": "application/json",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": "Bearer missing api key",
-            "Origin": "https://llmplayground.net",
-            "Referer": "https://llmplayground.net/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-        }
-
-        async with ClientSession() as session:
-            async with session.post(
-                f"{cls.url}{cls.api_endpoint}",
-                json=payload,
-                proxy=proxy,
-                headers=headers
-            ) as response:
-                response.raise_for_status()
-                last_chunk_time = asyncio.get_event_loop().time()
-
-                async for line in response.content:
-                    current_time = asyncio.get_event_loop().time()
-                    if current_time - last_chunk_time > 5:
-                        return
-
-                    if line.startswith(b"\n"):
-                        pass
-                    elif "discord.com/invite/" in line.decode() or "discord.gg/" in line.decode():
-                        pass # trolled
-                    elif line.startswith(b"data: "):
-                        try:
-                            line = json.loads(line[6:])
-                        except json.JSONDecodeError:
-                            continue
-                        chunk = line["choices"][0]["delta"].get("content")
-                        if chunk:
-                            yield chunk
-                        last_chunk_time = current_time
-                    else:
-                        raise Exception(f"Unexpected line: {line}")
-                return
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index de211c7e..e4c85d6a 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -13,6 +13,7 @@ from .AI365VIP import AI365VIP
 from .Allyfy import Allyfy
 from .AiChatOnline import AiChatOnline
 from .AiChats import AiChats
+from .Airforce import Airforce
 from .Aura import Aura
 from .Bing import Bing
 from .BingCreateImages import BingCreateImages
@@ -28,7 +29,6 @@ from .DDG import DDG
 from .DeepInfra import DeepInfra
 from .DeepInfraImage import DeepInfraImage
 from .FlowGpt import FlowGpt
-from .FluxAirforce import FluxAirforce
 from .Free2GPT import Free2GPT
 from .FreeChatgpt import FreeChatgpt
 from .FreeGpt import FreeGpt
@@ -55,7 +55,6 @@ from .Reka import Reka
 from .Snova import Snova
 from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
-from .Rocks import Rocks
 from .TeachAnything import TeachAnything
 from .TwitterBio import TwitterBio
 from .Upstage import Upstage
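
With the registry updated, the provider can also be selected through g4f's high-level entry point. A sketch under the assumption that g4f.ChatCompletion.create is the caller's entry point; the model and prompt are illustrative:

import g4f
from g4f.Provider import Airforce

response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",   # resolved via Airforce.get_model / model_aliases
    provider=Airforce,     # FluxAirforce and Rocks are no longer importable here
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response)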