summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/deprecated/Lockchat.py
diff options
context:
space:
mode:
authorkqlio67 <kqlio67@users.noreply.github.com>2024-09-25 10:44:23 +0200
committerkqlio67 <kqlio67@users.noreply.github.com>2024-09-25 10:44:23 +0200
commitec4e25073b5357a1213bfe00a93a21b5b6652bea (patch)
treec7cfa751ae9604d0bc05e42661492f934a2d333d /g4f/Provider/deprecated/Lockchat.py
parentfeat(g4f/Provider/HuggingChat.): add Qwen2.5-72B model and alias (diff)
downloadgpt4free-ec4e25073b5357a1213bfe00a93a21b5b6652bea.tar
gpt4free-ec4e25073b5357a1213bfe00a93a21b5b6652bea.tar.gz
gpt4free-ec4e25073b5357a1213bfe00a93a21b5b6652bea.tar.bz2
gpt4free-ec4e25073b5357a1213bfe00a93a21b5b6652bea.tar.lz
gpt4free-ec4e25073b5357a1213bfe00a93a21b5b6652bea.tar.xz
gpt4free-ec4e25073b5357a1213bfe00a93a21b5b6652bea.tar.zst
gpt4free-ec4e25073b5357a1213bfe00a93a21b5b6652bea.zip
Diffstat (limited to 'g4f/Provider/deprecated/Lockchat.py')
-rw-r--r--g4f/Provider/deprecated/Lockchat.py54
1 files changed, 54 insertions, 0 deletions
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
new file mode 100644
index 00000000..edab0bd4
--- /dev/null
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import Any, CreateResult
+from ..base_provider import AbstractProvider
+
+
class Lockchat(AbstractProvider):
    """Deprecated provider for the Lockchat API (OpenAI-compatible SSE stream)."""
    url: str = "http://supertest.lockchat.app"
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:
        """Stream completion tokens from the Lockchat chat endpoint.

        Args:
            model: Model identifier forwarded to the API.
            messages: Chat history as a list of role/content dicts.
            stream: Accepted for interface compatibility; the upstream
                request is always made with ``stream=True``.
            **kwargs: Extra options; ``temperature`` (default 0.7) is consumed.

        Yields:
            Content deltas (str) as they arrive from the server.

        Raises:
            requests.HTTPError: If the HTTP request fails.
        """
        # pop() (not get()) so the retry below does not pass `temperature`
        # both explicitly and again via **kwargs (TypeError otherwise).
        temperature = float(kwargs.pop("temperature", 0.7))
        payload = {
            "temperature": temperature,
            "messages"   : messages,
            "model"      : model,
            "stream"     : True,
        }

        headers = {
            "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
        }
        # Build the endpoint from the class attribute instead of
        # duplicating the hard-coded host string.
        response = requests.post(f"{Lockchat.url}/v1/chat/completions",
            json=payload, headers=headers, stream=True)

        response.raise_for_status()
        for token in response.iter_lines():
            if b"The model: `gpt-4` does not exist" in token:
                print("error, retrying...")
                # Delegate to a fresh request and forward its tokens.
                # The original code called create_completion() without
                # iterating the returned generator, so the retry never ran.
                yield from Lockchat.create_completion(
                    model = model,
                    messages = messages,
                    stream = stream,
                    temperature = temperature,
                    **kwargs)
                # The current response stream is dead after the error;
                # stop iterating it once the retry has been exhausted.
                return

            if b"content" in token:
                token = json.loads(token.decode("utf-8").split("data: ")[1])
                token = token["choices"][0]["delta"].get("content")

                if token:
                    yield (token)