author     kqlio67 <kqlio67@users.noreply.github.com>  2024-09-24 12:23:53 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-09-24 12:23:53 +0200
commit     f8e403a745c5caff31d7edb854dcba40eba3166d (patch)
tree       a75c6030a8054c56201fa2d41306a51b9052545c /g4f/Provider/unfinished
parent     Added gpt-4o provider (diff)
Diffstat (limited to 'g4f/Provider/unfinished')
-rw-r--r--  g4f/Provider/unfinished/AiChatting.py   66
-rw-r--r--  g4f/Provider/unfinished/ChatAiGpt.py    68
-rw-r--r--  g4f/Provider/unfinished/Komo.py         44
-rw-r--r--  g4f/Provider/unfinished/MikuChat.py     97
-rw-r--r--  g4f/Provider/unfinished/__init__.py      4
5 files changed, 0 insertions(+), 279 deletions(-)
diff --git a/g4f/Provider/unfinished/AiChatting.py b/g4f/Provider/unfinished/AiChatting.py
deleted file mode 100644
index f062fa98..00000000
--- a/g4f/Provider/unfinished/AiChatting.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-from urllib.parse import unquote
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AbstractProvider
-from ...webdriver import WebDriver
-from ...requests import Session, get_session_from_browser
-
-class AiChatting(AbstractProvider):
- url = "https://www.aichatting.net"
- supports_gpt_35_turbo = True
- _session: Session = None
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- timeout: int = 120,
- webdriver: WebDriver = None,
- **kwargs
- ) -> AsyncResult:
- if not cls._session:
- cls._session = get_session_from_browser(cls.url, webdriver, proxy, timeout)
- visitorId = unquote(cls._session.cookies.get("aichatting.website.visitorId"))
-
- headers = {
- "accept": "application/json, text/plain, */*",
- "lang": "en",
- "source": "web"
- }
- data = {
- "roleId": 0,
- }
- try:
- response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/record/conversation/create", json=data, headers=headers)
- response.raise_for_status()
- conversation_id = response.json()["data"]["conversationId"]
- except Exception as e:
- cls.reset()
- raise e
- headers = {
- "authority": "aga-api.aichatting.net",
- "accept": "text/event-stream,application/json, text/event-stream",
- "lang": "en",
- "source": "web",
- "vtoken": visitorId,
- }
- data = {
- "spaceHandle": True,
- "roleId": 0,
- "messages": messages,
- "conversationId": conversation_id,
- }
- response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/v2/stream", json=data, headers=headers, stream=True)
- response.raise_for_status()
- for chunk in response.iter_lines():
- if chunk.startswith(b"data:"):
- yield chunk[5:].decode().replace("-=- --", " ").replace("-=-n--", "\n").replace("--@DONE@--", "")
-
- @classmethod
- def reset(cls):
- cls._session = None \ No newline at end of file
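
For reference, the streaming loop in the deleted AiChatting provider undoes the upstream API's custom delimiters before yielding text. A minimal standalone sketch of that decoding step; the helper name and the sample chunk are illustrative only, not part of the repository:

from typing import Optional

def decode_aichatting_chunk(chunk: bytes) -> Optional[str]:
    # Mirror of the deleted provider's chunk handling: only "data:" lines
    # carry text; custom markers encode spaces, newlines and end-of-stream.
    if not chunk.startswith(b"data:"):
        return None
    return (
        chunk[5:].decode()
        .replace("-=- --", " ")      # custom space marker
        .replace("-=-n--", "\n")     # custom newline marker
        .replace("--@DONE@--", "")   # end-of-stream marker
    )

print(decode_aichatting_chunk(b"data:Hello-=- --world--@DONE@--"))  # Hello world
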
diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py
deleted file mode 100644
index bc962623..00000000
--- a/g4f/Provider/unfinished/ChatAiGpt.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-import re
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-
-
-class ChatAiGpt(AsyncGeneratorProvider):
- url = "https://chataigpt.org"
- supports_gpt_35_turbo = True
- _nonce = None
- _post_id = None
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
- "Accept": "*/*",
- "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
- "Accept-Encoding": "gzip, deflate, br",
- "Origin": cls.url,
- "Alt-Used": cls.url,
- "Connection": "keep-alive",
- "Referer": cls.url,
- "Pragma": "no-cache",
- "Cache-Control": "no-cache",
- "TE": "trailers",
- "Sec-Fetch-Dest": "empty",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Site": "same-origin",
- }
- async with ClientSession(headers=headers) as session:
- if not cls._nonce:
- async with session.get(f"{cls.url}/", proxy=proxy) as response:
- response.raise_for_status()
- response = await response.text()
-
- result = re.search(
- r'data-nonce=(.*?) data-post-id=([0-9]+)', response
- )
-
- if result:
- cls._nonce, cls._post_id = result.group(1), result.group(2)
- else:
- raise RuntimeError("No nonce found")
- prompt = format_prompt(messages)
- data = {
- "_wpnonce": cls._nonce,
- "post_id": cls._post_id,
- "url": cls.url,
- "action": "wpaicg_chat_shortcode_message",
- "message": prompt,
- "bot_id": 0
- }
- async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
- response.raise_for_status()
- async for chunk in response.content:
- if chunk:
- yield chunk.decode() \ No newline at end of file
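
For reference, the deleted ChatAiGpt provider scraped a WordPress AJAX nonce and post id from the page HTML before posting to admin-ajax.php. A minimal sketch of that extraction step; the sample HTML snippet is made up for illustration:

import re

html = '<div data-nonce=abc123 data-post-id=42>'  # hypothetical page snippet

result = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', html)
if result:
    nonce, post_id = result.group(1), result.group(2)
    print(nonce, post_id)  # abc123 42
else:
    raise RuntimeError("No nonce found")
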
diff --git a/g4f/Provider/unfinished/Komo.py b/g4f/Provider/unfinished/Komo.py
deleted file mode 100644
index 84d8d634..00000000
--- a/g4f/Provider/unfinished/Komo.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import annotations
-
-import json
-
-from ...requests import StreamSession
-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt
-
-class Komo(AsyncGeneratorProvider):
- url = "https://komo.ai/api/ask"
- supports_gpt_35_turbo = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: list[dict[str, str]],
- **kwargs
- ) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107") as session:
- prompt = format_prompt(messages)
- data = {
- "query": prompt,
- "FLAG_URLEXTRACT": "false",
- "token": "",
- "FLAG_MODELA": "1",
- }
- headers = {
- 'authority': 'komo.ai',
- 'accept': 'text/event-stream',
- 'cache-control': 'no-cache',
- 'referer': 'https://komo.ai/',
- }
-
- async with session.get(cls.url, params=data, headers=headers) as response:
- response.raise_for_status()
- next = False
- async for line in response.iter_lines():
- if line == b"event: line":
- next = True
- elif next and line.startswith(b"data: "):
- yield json.loads(line[6:])
- next = False
-
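
For reference, the deleted Komo provider paired an "event: line" marker with the following "data: " payload when reading its SSE stream. A minimal sketch of that pairing logic over a made-up list of lines; in the provider the lines came from response.iter_lines():

import json

# Hypothetical SSE lines, for illustration only.
lines = [b"event: line", b'data: "Hello"', b"event: other", b'data: "ignored"']

expect_payload = False
for line in lines:
    if line == b"event: line":
        expect_payload = True            # the next "data:" line carries text
    elif expect_payload and line.startswith(b"data: "):
        print(json.loads(line[6:]))      # prints: Hello
        expect_payload = False
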
diff --git a/g4f/Provider/unfinished/MikuChat.py b/g4f/Provider/unfinished/MikuChat.py
deleted file mode 100644
index bf19631f..00000000
--- a/g4f/Provider/unfinished/MikuChat.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from __future__ import annotations
-
-import random, json
-from datetime import datetime
-from ...requests import StreamSession
-
-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider
-
-
-class MikuChat(AsyncGeneratorProvider):
- url = "https://ai.okmiku.com"
- supports_gpt_35_turbo = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: list[dict[str, str]],
- **kwargs
- ) -> AsyncGenerator:
- if not model:
- model = "gpt-3.5-turbo"
- headers = {
- "authority": "api.catgpt.cc",
- "accept": "application/json",
- "origin": cls.url,
- "referer": f"{cls.url}/chat/",
- 'x-app-version': 'undefined',
- 'x-date': get_datetime(),
- 'x-fingerprint': get_fingerprint(),
- 'x-platform': 'web'
- }
- async with StreamSession(headers=headers, impersonate="chrome107") as session:
- data = {
- "model": model,
- "top_p": 0.8,
- "temperature": 0.5,
- "presence_penalty": 1,
- "frequency_penalty": 0,
- "max_tokens": 2000,
- "stream": True,
- "messages": messages,
- }
- async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
- print(await response.text())
- response.raise_for_status()
- async for line in response.iter_lines():
- if line.startswith(b"data: "):
- line = json.loads(line[6:])
- chunk = line["choices"][0]["delta"].get("content")
- if chunk:
- yield chunk
-
-def k(e: str, t: int):
- a = len(e) & 3
- s = len(e) - a
- i = t
- c = 3432918353
- o = 461845907
- n = 0
- r = 0
- while n < s:
- r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
- n += 4
- r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
- r = (r << 15) | (r >> 17)
- r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
- i ^= r
- i = (i << 13) | (i >> 19)
- l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295
- i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16)
-
- if a == 3:
- r ^= (ord(e[n + 2]) & 255) << 16
- elif a == 2:
- r ^= (ord(e[n + 1]) & 255) << 8
- elif a == 1:
- r ^= ord(e[n]) & 255
- r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
- r = (r << 15) | (r >> 17)
- r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
- i ^= r
-
- i ^= len(e)
- i ^= i >> 16
- i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295
- i ^= i >> 13
- i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295
- i ^= i >> 16
- return i & 0xFFFFFFFF
-
-def get_fingerprint() -> str:
- return str(k(str(int(random.random() * 100000)), 256))
-
-def get_datetime() -> str:
- return datetime.now().strftime("%Y-%m-%d %H:%M:%S") \ No newline at end of file
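
The hash helper k deleted above packs the input string four characters at a time into little-endian 32-bit words before mixing, and get_fingerprint feeds it a random integer rendered as a string with seed 256. A self-contained illustration of just that packing step, using a made-up input:

# Illustration of the 4-byte little-endian packing done inside k() above.
e = "abcd"
n = 0
word = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
print(hex(word))  # 0x64636261 -- 'd' lands in the highest byte, 'a' in the lowest
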
diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py
deleted file mode 100644
index eb5e8825..00000000
--- a/g4f/Provider/unfinished/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .MikuChat import MikuChat
-from .Komo import Komo
-from .ChatAiGpt import ChatAiGpt
-from .AiChatting import AiChatting
\ No newline at end of file