summaryrefslogtreecommitdiffstats
path: root/g4f
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--g4f/Provider/Berlin.py78
-rw-r--r--g4f/Provider/Koala.py71
-rw-r--r--g4f/Provider/__init__.py6
-rw-r--r--g4f/models.py10
4 files changed, 160 insertions, 5 deletions
diff --git a/g4f/Provider/Berlin.py b/g4f/Provider/Berlin.py
new file mode 100644
index 00000000..bfdb1ef0
--- /dev/null
+++ b/g4f/Provider/Berlin.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import secrets
+import uuid
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class Berlin(AsyncGeneratorProvider):
+ url = "https://ai.berlin4h.top"
+ working = True
+ supports_gpt_35_turbo = True
+ _token = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "ai.berlin4h.top",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ if not cls._token:
+ data = {
+ "account": '免费使用GPT3.5模型@163.com',
+ "password": '659e945c2d004686bad1a75b708c962f'
+ }
+ async with session.post(f"{cls.url}/api/login", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ cls._token = (await response.json())["data"]["token"]
+ headers = {
+ "token": cls._token
+ }
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "parentMessageId": str(uuid.uuid4()),
+ "options": {
+ "model": model,
+ "temperature": 0,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "max_tokens": 1888,
+ **kwargs
+ },
+ }
+ async with session.post(f"{cls.url}/api/chat/completions", json=data, proxy=proxy, headers=headers) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk.strip():
+ try:
+ yield json.loads(chunk)["content"]
+                        except (json.JSONDecodeError, KeyError) as e:
+                            raise RuntimeError(f"Response: {chunk.decode()}") from e
diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py
new file mode 100644
index 00000000..70a40882
--- /dev/null
+++ b/g4f/Provider/Koala.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+import random
+import string
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+class Koala(AsyncGeneratorProvider):
+ url = "https://koala.sh"
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+ working = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept": "text/event-stream",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/chat",
+ "Content-Type": "application/json",
+ "Flag-Real-Time-Data": "false",
+ "Visitor-ID": random_string(),
+ "Origin": cls.url,
+ "Alt-Used": "koala.sh",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "input": messages[-1]["content"],
+ "inputHistory": [
+ message["content"]
+ for message in messages
+ if message["role"] == "user"
+ ],
+ "outputHistory": [
+ message["content"]
+ for message in messages
+ if message["role"] == "assistant"
+ ],
+ "model": model,
+ }
+ async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ yield json.loads(chunk[6:])
+
+
+def random_string(length: int = 20):
+ return ''.join(random.choice(
+ string.ascii_letters + string.digits
+ ) for _ in range(length)) \ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 1dd603b1..a72db45c 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -6,6 +6,7 @@ from .Aichat import Aichat
from .Ails import Ails
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
+from .Berlin import Berlin
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
@@ -26,6 +27,7 @@ from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
from .Hashnode import Hashnode
+from .Koala import Koala
from .Liaobots import Liaobots
from .Llama2 import Llama2
from .MyShell import MyShell
@@ -59,6 +61,7 @@ class ProviderUtils:
'AsyncProvider': AsyncProvider,
'Bard': Bard,
'BaseProvider': BaseProvider,
+ 'Berlin': Berlin,
'Bing': Bing,
'ChatBase': ChatBase,
'ChatForAi': ChatForAi,
@@ -89,6 +92,7 @@ class ProviderUtils:
'H2o': H2o,
'HuggingChat': HuggingChat,
'Komo': Komo,
+ 'Koala': Koala,
'Liaobots': Liaobots,
'Llama2': Llama2,
'Lockchat': Lockchat,
@@ -135,6 +139,7 @@ __all__ = [
'AItianhuSpace',
'Aivvm',
'Bard',
+ 'Berlin',
'Bing',
'ChatBase',
'ChatForAi',
@@ -162,6 +167,7 @@ __all__ = [
'Hashnode',
'H2o',
'HuggingChat',
+ 'Koala',
'Liaobots',
'Llama2',
'Lockchat',
diff --git a/g4f/models.py b/g4f/models.py
index 7e9351b8..0ce7b886 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,7 +5,6 @@ from .Provider import BaseProvider, RetryProvider
from .Provider import (
GptForLove,
ChatgptAi,
- GptChatly,
DeepInfra,
ChatgptX,
ChatBase,
@@ -13,10 +12,12 @@ from .Provider import (
FakeGpt,
FreeGpt,
NoowAi,
+ Berlin,
Llama2,
Vercel,
Aichat,
GPTalk,
+ Koala,
AiAsk,
GptGo,
Phind,
@@ -51,10 +52,9 @@ gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- AiAsk, Aichat, FreeGpt, You,
- GptChatly, GptForLove,
- NoowAi, GeekGpt, Phind,
- FakeGpt
+ FreeGpt, You,
+ GeekGpt, FakeGpt,
+ Berlin, Koala
])
)