path: root/g4f/Provider
author     kqlio67 <kqlio67@users.noreply.github.com>  2024-08-31 08:47:39 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-08-31 08:47:39 +0200
commit     f1683c8db83cee5805a6e103fca2c2551aae5086 (patch)
tree       44712b453191566e7ce0877d43a3ed2f807d78b6 /g4f/Provider
parent     Merge pull request #2206 from Parthsadaria/patch-1 (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Blackbox.py       20
-rw-r--r--  g4f/Provider/ChatgptFree.py    11
-rw-r--r--  g4f/Provider/DDG.py             2
-rw-r--r--  g4f/Provider/FluxAirforce.py   69
-rw-r--r--  g4f/Provider/FreeChatgpt.py    16
-rw-r--r--  g4f/Provider/HuggingChat.py    16
-rw-r--r--  g4f/Provider/HuggingFace.py    12
-rw-r--r--  g4f/Provider/Koala.py           9
-rw-r--r--  g4f/Provider/Liaobots.py      270
-rw-r--r--  g4f/Provider/LiteIcoding.py     9
-rw-r--r--  g4f/Provider/Pizzagpt.py        2
-rw-r--r--  g4f/Provider/Upstage.py        74
-rw-r--r--  g4f/Provider/You.py             1
-rw-r--r--  g4f/Provider/__init__.py        7
14 files changed, 405 insertions(+), 113 deletions(-)
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index fd84875e..3c9e394e 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -21,6 +21,20 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'llama-3.1-70b',
'llama-3.1-405b',
]
+
+ model_aliases = {
+ "gemini-flash": "gemini-1.5-flash",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
@classmethod
async def create_async_generator(
cls,
@@ -55,6 +69,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async with ClientSession(headers=headers) as session:
random_id = secrets.token_hex(16)
random_user_id = str(uuid.uuid4())
+
+ model = cls.get_model(model) # Resolve the model alias
+
model_id_map = {
"blackbox": {},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
@@ -62,6 +79,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
}
+
data = {
"messages": messages,
"id": random_id,
@@ -75,7 +93,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"webSearchMode": False,
"userSystemPrompt": "",
"githubToken": None,
- "trendingAgentModel": model_id_map[model], # if you actually test this on the site, just ask each model "yo", weird behavior imo
+ "trendingAgentModel": model_id_map.get(model, {}), # Default to empty dict if model not found
"maxTokens": None
}
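The Blackbox hunks introduce the alias-resolution pattern this commit repeats across several providers: get_model() accepts either a canonical model name or a short alias and quietly falls back to default_model for anything unknown. A minimal standalone sketch of that behavior (default_model = 'blackbox' is an assumption; the attribute sits outside the hunk's context lines):

    class Example:
        default_model = 'blackbox'  # assumed; not visible in the hunk above
        models = ['blackbox', 'gemini-1.5-flash', 'llama-3.1-70b', 'llama-3.1-405b']
        model_aliases = {"gemini-flash": "gemini-1.5-flash"}

        @classmethod
        def get_model(cls, model: str) -> str:
            if model in cls.models:
                return model
            # Unknown names fall back silently instead of raising
            return cls.model_aliases.get(model, cls.default_model)

    assert Example.get_model("gemini-flash") == "gemini-1.5-flash"
    assert Example.get_model("no-such-model") == "blackbox"

Together with model_id_map.get(model, {}) in the request body, this removes the KeyError that an unrecognized model name could previously trigger.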
diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/ChatgptFree.py
index 3cf363a5..95efa865 100644
--- a/g4f/Provider/ChatgptFree.py
+++ b/g4f/Provider/ChatgptFree.py
@@ -43,6 +43,7 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
+
async with StreamSession(
headers=headers,
cookies=cookies,
@@ -55,6 +56,12 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
async with session.get(f"{cls.url}/") as response:
await raise_for_status(response)
response = await response.text()
+
+ result = re.search(r'data-post-id="([0-9]+)"', response)
+ if not result:
+ raise RuntimeError("No post id found")
+ cls._post_id = result.group(1)
+
result = re.search(r'data-nonce="(.*?)"', response)
if result:
cls._nonce = result.group(1)
@@ -70,7 +77,7 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
"message": prompt,
"bot_id": "0"
}
-
+
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
await raise_for_status(response)
buffer = ""
@@ -96,4 +103,4 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
if 'data' in json_response:
yield json_response['data']
except json.JSONDecodeError:
- print(f"Failed to decode final JSON. Buffer content: {buffer}") \ No newline at end of file
+ print(f"Failed to decode final JSON. Buffer content: {buffer}")
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index b8e1732f..c8c36fc9 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -21,8 +21,6 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "gpt-4o-mini"
models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
model_aliases = {
- "gpt-4": "gpt-4o-mini",
- "gpt-4o": "gpt-4o-mini",
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
diff --git a/g4f/Provider/FluxAirforce.py b/g4f/Provider/FluxAirforce.py
new file mode 100644
index 00000000..139e6c5e
--- /dev/null
+++ b/g4f/Provider/FluxAirforce.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from urllib.parse import urlencode
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+
+class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://flux.api.airforce/"
+ api_endpoint = "https://api.airforce/v1/imagine2"
+ working = True
+ default_model = 'flux-realism'
+ models = [
+ 'flux',
+ 'flux-realism',
+ 'flux-anime',
+ 'flux-3d',
+ 'flux-disney'
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "origin": "https://flux.api.airforce",
+ "priority": "u=1, i",
+ "referer": "https://flux.api.airforce/",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ }
+
+ # Assume the last message in the messages list is the prompt
+ prompt = messages[-1]['content'] if messages else ""
+
+ params = {
+ "prompt": prompt,
+ "size": kwargs.get("size", "1:1"),
+ "seed": kwargs.get("seed"),
+ "model": model
+ }
+
+ # Remove None values from params
+ params = {k: v for k, v in params.items() if v is not None}
+
+ async with ClientSession(headers=headers) as session:
+ async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
+ response.raise_for_status()
+
+ # Get the URL of the generated image
+ image_url = str(response.url)
+
+ # Create an ImageResponse object
+ image_response = ImageResponse(image_url, prompt)
+
+ yield image_response
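FluxAirforce is a new image provider in this commit. Below is a hedged usage sketch, not an official example: the create_async_generator signature and the size/seed kwargs are taken from the file above, and the import path assumes the __init__.py change further below.

    import asyncio
    from g4f.Provider import FluxAirforce

    async def main():
        messages = [{"role": "user", "content": "a red fox in the snow"}]
        async for item in FluxAirforce.create_async_generator(
            model="flux-realism", messages=messages, size="1:1", seed=42
        ):
            print(item)  # an ImageResponse carrying the generated image URL

    asyncio.run(main())

Note the provider only reads the last message's content as the prompt; earlier messages are ignored.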
diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py
index 6485ced2..20487594 100644
--- a/g4f/Provider/FreeChatgpt.py
+++ b/g4f/Provider/FreeChatgpt.py
@@ -10,19 +10,16 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.chatgpt.org.uk"
api_endpoint = "/api/openai/v1/chat/completions"
working = True
- supports_gpt_35_turbo = True
- default_model = 'gpt-3.5-turbo'
+ default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
models = [
- 'gpt-3.5-turbo',
- 'SparkDesk-v1.1',
- 'deepseek-coder',
'@cf/qwen/qwen1.5-14b-chat-awq',
- 'deepseek-chat',
+ 'SparkDesk-v1.1',
'Qwen2-7B-Instruct',
'glm4-9B-chat',
'chatglm3-6B',
'Yi-1.5-9B-Chat',
]
+
model_aliases = {
"qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
"sparkdesk-v1.1": "SparkDesk-v1.1",
@@ -40,7 +37,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
return cls.model_aliases[model.lower()]
else:
return cls.default_model
-
+
@classmethod
async def create_async_generator(
cls,
@@ -49,6 +46,8 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
@@ -64,7 +63,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
- model = cls.get_model(model)
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
@@ -93,6 +91,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
chunk = json.loads(line_str[6:])
delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
accumulated_text += delta_content
- yield delta_content
+ yield delta_content # Yield each chunk of content
except json.JSONDecodeError:
pass
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index f1010c1c..76c76a35 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -22,7 +22,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'mistralai/Mistral-7B-Instruct-v0.3',
'microsoft/Phi-3-mini-4k-instruct',
]
-
+
model_aliases = {
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
@@ -42,7 +42,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
return cls.model_aliases[model]
else:
return cls.default_model
-
+
@classmethod
def create_completion(
cls,
@@ -52,7 +52,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
**kwargs
) -> CreateResult:
model = cls.get_model(model)
-
+
if model in cls.models:
session = cf_reqs.Session()
session.headers = {
@@ -71,12 +71,17 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
}
+
+ print(model)
json_data = {
'model': model,
}
+
response = session.post('https://huggingface.co/chat/conversation', json=json_data)
conversationId = response.json()['conversationId']
+
response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
+
data: list = (response.json())["nodes"][1]["data"]
keys: list[int] = data[data[0]["messages"]]
message_keys: dict = data[keys[0]]
@@ -117,6 +122,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
headers=headers,
files=files,
)
+
first_token = True
for line in response.iter_lines():
line = json.loads(line)
@@ -133,6 +139,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
token = token.replace('\u0000', '')
yield token
-
+
elif line["type"] == "finalAnswer":
- break
\ No newline at end of file
+ break
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index a3741196..74957862 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -1,11 +1,14 @@
from __future__ import annotations
+
import json
from aiohttp import ClientSession, BaseConnector
+
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..errors import RateLimitError, ModelNotFoundError
from ..requests.raise_for_status import raise_for_status
+
class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat"
working = True
@@ -22,7 +25,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
'mistralai/Mistral-7B-Instruct-v0.3',
'microsoft/Phi-3-mini-4k-instruct',
]
-
+
model_aliases = {
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
@@ -76,7 +79,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
-
+
params = {
"return_full_text": False,
"max_new_tokens": max_new_tokens,
@@ -84,7 +87,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
}
payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
-
+
async with ClientSession(
headers=headers,
connector=get_connector(connector, proxy)
@@ -106,6 +109,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
yield chunk
else:
yield (await response.json())[0]["generated_text"].strip()
+
def format_prompt(messages: Messages) -> str:
system_messages = [message["content"] for message in messages if message["role"] == "system"]
question = " ".join([messages[-1]["content"], *system_messages])
@@ -114,4 +118,4 @@ def format_prompt(messages: Messages) -> str:
for idx, message in enumerate(messages)
if message["role"] == "assistant"
])
- return f"{history}<s>[INST] {question} [/INST]" \ No newline at end of file
+ return f"{history}<s>[INST] {question} [/INST]"
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
index 6b2d02c3..0e810083 100644
--- a/g4f/Provider/Koala.py
+++ b/g4f/Provider/Koala.py
@@ -4,16 +4,17 @@ import json
from typing import AsyncGenerator, Optional, List, Dict, Union, Any
from aiohttp import ClientSession, BaseConnector, ClientResponse
-from ..typing import Messages
-from .base_provider import AsyncGeneratorProvider
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_string, get_connector
from ..requests import raise_for_status
-class Koala(AsyncGeneratorProvider):
+class Koala(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://koala.sh"
working = True
- supports_gpt_35_turbo = True
supports_message_history = True
+ supports_gpt_4 = True
+ default_model = 'gpt-4o-mini'
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 2034c34a..8a9f46b1 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -1,7 +1,6 @@
from __future__ import annotations
import uuid
-import requests
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
@@ -9,22 +8,170 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..requests import raise_for_status
+models = {
+ "gpt-4o-mini-free": {
+ "id": "gpt-4o-mini-free",
+ "name": "GPT-4o-Mini-Free",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 31200,
+ "tokenLimit": 7800,
+ "context": "8K",
+ },
+ "gpt-4o-mini": {
+ "id": "gpt-4o-mini",
+ "name": "GPT-4o-Mini",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
+ },
+ "gpt-4o-free": {
+ "id": "gpt-4o-free",
+ "name": "GPT-4o-free",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 31200,
+ "tokenLimit": 7800,
+ "context": "8K",
+ },
+ "gpt-4-turbo-2024-04-09": {
+ "id": "gpt-4-turbo-2024-04-09",
+ "name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
+ },
+ "gpt-4o-2024-08-06": {
+ "id": "gpt-4o-2024-08-06",
+ "name": "GPT-4o",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
+ },
+ "gpt-4-0613": {
+ "id": "gpt-4-0613",
+ "name": "GPT-4-0613",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 32000,
+ "tokenLimit": 7600,
+ "context": "8K",
+ },
+ "claude-3-opus-20240229": {
+ "id": "claude-3-opus-20240229",
+ "name": "Claude-3-Opus",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-opus-20240229-aws": {
+ "id": "claude-3-opus-20240229-aws",
+ "name": "Claude-3-Opus-Aws",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-opus-20240229-gcp": {
+ "id": "claude-3-opus-20240229-gcp",
+ "name": "Claude-3-Opus-Gcp",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-sonnet-20240229": {
+ "id": "claude-3-sonnet-20240229",
+ "name": "Claude-3-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-5-sonnet-20240620": {
+ "id": "claude-3-5-sonnet-20240620",
+ "name": "Claude-3.5-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-haiku-20240307": {
+ "id": "claude-3-haiku-20240307",
+ "name": "Claude-3-Haiku",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-2.1": {
+ "id": "claude-2.1",
+ "name": "Claude-2.1-200k",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "gemini-1.0-pro-latest": {
+ "id": "gemini-1.0-pro-latest",
+ "name": "Gemini-Pro",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 120000,
+ "tokenLimit": 30000,
+ "context": "32K",
+ },
+ "gemini-1.5-flash-latest": {
+ "id": "gemini-1.5-flash-latest",
+ "name": "Gemini-1.5-Flash-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
+ },
+ "gemini-1.5-pro-latest": {
+ "id": "gemini-1.5-pro-latest",
+ "name": "Gemini-1.5-Pro-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
+ },
+}
+
+
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site"
working = True
supports_message_history = True
supports_system_message = True
- supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = "gpt-4o"
- models = None
+ models = list(models.keys())
+
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-free",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4o": "gpt-4o-2024-08-06",
"gpt-4": "gpt-4-0613",
-
+
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-opus": "claude-3-opus-20240229-aws",
"claude-3-opus": "claude-3-opus-20240229-gcp",
@@ -32,50 +179,32 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-2.1": "claude-2.1",
-
+
"gemini-pro": "gemini-1.0-pro-latest",
"gemini-flash": "gemini-1.5-flash-latest",
"gemini-pro": "gemini-1.5-pro-latest",
}
+
_auth_code = ""
_cookie_jar = None
@classmethod
- def get_models(cls):
- if cls.models is None:
- url = 'https://liaobots.work/api/models'
- headers = {
- 'accept': '/',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'cookie': 'gkp2=ehnhUPJtkCgMmod8Sbxn',
- 'origin': 'https://liaobots.work',
- 'priority': 'u=1, i',
- 'referer': 'https://liaobots.work/',
- 'sec-ch-ua': '"Chromium";v="127", "Not)A;Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Linux"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
- }
- data = {'key': ''}
-
- response = requests.post(url, headers=headers, json=data)
-
- if response.status_code == 200:
- try:
- models_data = response.json()
- cls.models = {model['id']: model for model in models_data}
- except (ValueError, KeyError) as e:
- print(f"Error processing JSON response: {e}")
- cls.models = {}
- else:
- print(f"Request failed with status code: {response.status_code}")
- cls.models = {}
+ def get_model(cls, model: str) -> str:
+ """
+ Retrieve the internal model identifier based on the provided model name or alias.
+ """
+ if model in cls.model_aliases:
+ model = cls.model_aliases[model]
+ if model not in models:
+ raise ValueError(f"Model '{model}' is not supported.")
+ return model
- return cls.models
+ @classmethod
+ def is_supported(cls, model: str) -> bool:
+ """
+ Check if the given model is supported.
+ """
+ return model in models or model in cls.model_aliases
@classmethod
async def create_async_generator(
@@ -87,6 +216,8 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
"authority": "liaobots.com",
"content-type": "application/json",
@@ -99,10 +230,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
cookie_jar=cls._cookie_jar,
connector=get_connector(connector, proxy, True)
) as session:
- models = cls.get_models()
data = {
"conversationId": str(uuid.uuid4()),
- "model": models[cls.get_model(model)],
+ "model": models[model],
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -115,11 +245,20 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
) as response:
await raise_for_status(response)
try:
- await cls.ensure_auth_code(session)
+ async with session.post(
+ "https://liaobots.work/api/user",
+ json={"authcode": cls._auth_code},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
+ cls._cookie_jar = session.cookie_jar
async with session.post(
"https://liaobots.work/api/chat",
json=data,
- headers={"x-auth-code": cls._auth_code},
+ headers={"x-auth-code": cls._auth_code},
verify_ssl=False
) as response:
await raise_for_status(response)
@@ -129,7 +268,16 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
if chunk:
yield chunk.decode(errors="ignore")
except:
- await cls.initialize_auth_code(session)
+ async with session.post(
+ "https://liaobots.work/api/user",
+ json={"authcode": "pTIQr4FTnVRfr"},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
+ cls._cookie_jar = session.cookie_jar
async with session.post(
"https://liaobots.work/api/chat",
json=data,
@@ -142,24 +290,6 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
- @classmethod
- def get_model(cls, model: str) -> str:
- """
- Retrieve the internal model identifier based on the provided model name or alias.
- """
- if model in cls.model_aliases:
- model = cls.model_aliases[model]
- models = cls.get_models()
- if model not in models:
- raise ValueError(f"Model '{model}' is not supported.")
- return model
- @classmethod
- def is_supported(cls, model: str) -> bool:
- """
- Check if the given model is supported.
- """
- models = cls.get_models()
- return model in models or model in cls.model_aliases
@classmethod
async def initialize_auth_code(cls, session: ClientSession) -> None:
@@ -176,6 +306,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
if not cls._auth_code:
raise RuntimeError("Empty auth code")
cls._cookie_jar = session.cookie_jar
+
@classmethod
async def ensure_auth_code(cls, session: ClientSession) -> None:
"""
@@ -183,18 +314,3 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"""
if not cls._auth_code:
await cls.initialize_auth_code(session)
-
- @classmethod
- async def refresh_auth_code(cls, session: ClientSession) -> None:
- """
- Refresh the auth code by making a new request.
- """
- await cls.initialize_auth_code(session)
-
- @classmethod
- async def get_auth_code(cls, session: ClientSession) -> str:
- """
- Get the current auth code, initializing it if necessary.
- """
- await cls.ensure_auth_code(session)
- return cls._auth_code
\ No newline at end of file
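One behavioral note on the rewritten Liaobots model_aliases above: "gpt-4o", "claude-3-opus", and "gemini-pro" are each assigned more than once, and a Python dict literal keeps only the last binding for a repeated key:

    aliases = {
        "claude-3-opus": "claude-3-opus-20240229",
        "claude-3-opus": "claude-3-opus-20240229-aws",
        "claude-3-opus": "claude-3-opus-20240229-gcp",
    }
    print(aliases)  # {'claude-3-opus': 'claude-3-opus-20240229-gcp'}

So after this commit, "gpt-4o" resolves to "gpt-4o-2024-08-06", "claude-3-opus" to the -gcp variant, and "gemini-pro" to "gemini-1.5-pro-latest"; the earlier duplicate entries are dead code.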
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
index ad04aceb..69294a57 100644
--- a/g4f/Provider/LiteIcoding.py
+++ b/g4f/Provider/LiteIcoding.py
@@ -1,6 +1,7 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientResponseError
+import re
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
@@ -31,7 +32,7 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
headers = {
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
- "Authorization": "Bearer b3b2712cf83640a5acfdc01e78369930",
+ "Authorization": "Bearer aa3020ee873e40cb8b3f515a0708ebc4",
"Connection": "keep-alive",
"Content-Type": "application/json;charset=utf-8",
"DNT": "1",
@@ -97,7 +98,11 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
.replace('\\"', '"')
.strip()
)
- yield full_response.strip()
+ # Add filter to remove unwanted text
+ filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
+ # Remove extra quotes at the beginning and end
+ cleaned_response = filtered_response.strip().strip('"')
+ yield cleaned_response
except ClientResponseError as e:
raise RuntimeError(
diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 860aef80..47cb135c 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -47,4 +47,4 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
response.raise_for_status()
response_json = await response.json()
content = response_json.get("answer", {}).get("content", "")
- yield content
\ No newline at end of file
+ yield content
diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py
new file mode 100644
index 00000000..e61a5af2
--- /dev/null
+++ b/g4f/Provider/Upstage.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://console.upstage.ai/playground/chat"
+ api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
+ working = True
+ default_model = 'upstage/solar-1-mini-chat'
+ models = [
+ 'upstage/solar-1-mini-chat',
+ 'upstage/solar-1-mini-chat-ja',
+ ]
+ model_aliases = {
+ "solar-1-mini": "upstage/solar-1-mini-chat",
+ "solar-1-mini": "upstage/solar-1-mini-chat-ja",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "origin": "https://console.upstage.ai",
+ "priority": "u=1, i",
+ "referer": "https://console.upstage.ai/",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "cross-site",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "stream": True,
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "model": model
+ }
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for line in response.content:
+ if line:
+ line = line.decode('utf-8').strip()
+ if line.startswith("data: ") and line != "data: [DONE]":
+ data = json.loads(line[6:])
+ content = data['choices'][0]['delta'].get('content', '')
+ if content:
+ yield content
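Upstage is the second new provider. Its streaming loop is plain SSE parsing; the sketch below replays the same line handling against a captured stream fragment (the fragment itself is made up for illustration):

    import json

    captured = [
        b'data: {"choices":[{"delta":{"content":"Hel"}}]}\n',
        b'data: {"choices":[{"delta":{"content":"lo"}}]}\n',
        b'data: [DONE]\n',
    ]
    for raw in captured:
        line = raw.decode('utf-8').strip()
        if line.startswith("data: ") and line != "data: [DONE]":
            chunk = json.loads(line[6:])
            content = chunk['choices'][0]['delta'].get('content', '')
            if content:
                print(content, end='')  # prints "Hello"

The duplicate-key caveat from Liaobots applies here as well: model_aliases binds "solar-1-mini" twice, so only the "upstage/solar-1-mini-chat-ja" mapping survives.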
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index b85854b3..af8aab0e 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -24,7 +24,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
- "gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 34b4ef2d..dc8acfbe 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -6,7 +6,6 @@ from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
from .deprecated import *
-from .not_working import *
from .selenium import *
from .needs_auth import *
@@ -21,16 +20,15 @@ from .ChatGot import ChatGot
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
-from .Cohere import Cohere
from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .FlowGpt import FlowGpt
+from .FluxAirforce import FluxAirforce
from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .GeminiPro import GeminiPro
-from .GeminiProChat import GeminiProChat
from .GigaChat import GigaChat
from .GptTalkRu import GptTalkRu
from .HuggingChat import HuggingChat
@@ -43,18 +41,17 @@ from .Llama import Llama
from .Local import Local
from .MagickPenAsk import MagickPenAsk
from .MagickPenChat import MagickPenChat
-from .Marsyoo import Marsyoo
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .Ollama import Ollama
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
+from .Upstage import Upstage
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
from .Rocks import Rocks
-from .TeachAnything import TeachAnything
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You