path: root/g4f
author     kqlio67 <kqlio67@users.noreply.github.com>   2024-10-02 23:47:36 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>   2024-10-02 23:47:36 +0200
commit     9b5f992162f5174930d96e7e9bcf5e60fb2f9630 (patch)
tree       757f2e4b0894952acfdb9cf26173eeb7c6e97aa8 /g4f
parent     feat(g4f/Provider/Liaobots.py): add support for GPT-3.5-Turbo model (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/LiteIcoding.py  21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
index 1b568e80..bf8f9ba8 100644
--- a/g4f/Provider/LiteIcoding.py
+++ b/g4f/Provider/LiteIcoding.py
@@ -1,12 +1,11 @@
 from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
+import base64
 import re
+from aiohttp import ClientSession, ClientResponseError
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
-
 class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://lite.icoding.ink"
     api_endpoint = "/api/v1/gpt/message"
@@ -27,18 +26,20 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
     }
     bearer_tokens = [
-        "aa3020ee873e40cb8b3f515a0708ebc4",
-        "5d69cd271b144226ac1199b3c849a566",
-        "62977f48a95844f8853a953679401850",
-        "d815b091959e42dd8b7871dfaf879485"
+        "NWQ2OWNkMjcxYjE0NDIyNmFjMTE5OWIzYzg0OWE1NjY=",
+        "ZDgxNWIwOTU5NTk0ZTRkZDhiNzg3MWRmYWY4Nzk0ODU="
     ]
     current_token_index = 0
     @classmethod
+    def decode_token(cls, encoded_token: str) -> str:
+        return base64.b64decode(encoded_token).decode('utf-8')
+
+    @classmethod
     def get_next_bearer_token(cls):
-        token = cls.bearer_tokens[cls.current_token_index]
+        encoded_token = cls.bearer_tokens[cls.current_token_index]
         cls.current_token_index = (cls.current_token_index + 1) % len(cls.bearer_tokens)
-        return token
+        return cls.decode_token(encoded_token)
     @classmethod
     async def create_async_generator(
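
The hunk above stops keeping the bearer tokens as plain strings: they are now stored base64-encoded and decoded one at a time, while get_next_bearer_token cycles through the list round-robin. A minimal standalone sketch of that rotation, using made-up placeholder tokens rather than the values from the diff:

import base64

# Hypothetical placeholder tokens, stored base64-encoded as in the provider.
bearer_tokens = [
    base64.b64encode(b"example-token-one").decode(),
    base64.b64encode(b"example-token-two").decode(),
]
current_token_index = 0

def get_next_bearer_token() -> str:
    global current_token_index
    encoded = bearer_tokens[current_token_index]
    # Advance the index modulo the list length so repeated calls cycle through every token.
    current_token_index = (current_token_index + 1) % len(bearer_tokens)
    return base64.b64decode(encoded).decode("utf-8")

print(get_next_bearer_token())  # example-token-one
print(get_next_bearer_token())  # example-token-two
print(get_next_bearer_token())  # example-token-one (wraps around)
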
@@ -95,9 +96,11 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
                     response.raise_for_status()
                     buffer = ""
                     full_response = ""
+
                     def decode_content(data):
                         bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
                         return bytes_array.decode('utf-8')
+
                     async for chunk in response.content.iter_any():
                         if chunk:
                             buffer += chunk.decode()
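
The decode_content helper in the last hunk treats the payload as whitespace-separated hex byte values, XORs each byte with 255, and decodes the result as UTF-8. A small round-trip sketch of that scheme on a made-up sample string; encode_content is only an illustrative inverse and not part of the provider, whose real input comes from the response stream:

def encode_content(text: str) -> str:
    # Illustrative inverse of decode_content: XOR each UTF-8 byte with 255, emit hex values.
    return " ".join(format(b ^ 255, "x") for b in text.encode("utf-8"))

def decode_content(data: str) -> str:
    # Same logic as the provider: parse hex bytes, undo the XOR, decode as UTF-8.
    bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
    return bytes_array.decode("utf-8")

encoded = encode_content("Hello")
print(encoded)                   # b7 9a 93 93 90
print(decode_content(encoded))   # Hello
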