From 88d2cbff099df00944ed6dfb6c73b1b5e8dfc7f9 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Thu, 5 Oct 2023 05:13:37 +0200
Subject: Add AiAsk, Chatgpt4Online, ChatgptDemo and ChatgptX Provider
 Fix Bing, Liaobots and ChatgptAi Provider
 Add "gpt_35_long" model and custom timeout

---
 g4f/Provider/unfinished/Komo.py | 44 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 g4f/Provider/unfinished/Komo.py

(limited to 'g4f/Provider/unfinished/Komo.py')

diff --git a/g4f/Provider/unfinished/Komo.py b/g4f/Provider/unfinished/Komo.py
new file mode 100644
index 00000000..84d8d634
--- /dev/null
+++ b/g4f/Provider/unfinished/Komo.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+import json
+
+from ...requests import StreamSession
+from ...typing import AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, format_prompt
+
+class Komo(AsyncGeneratorProvider):
+    url = "https://komo.ai/api/ask"
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        async with StreamSession(impersonate="chrome107") as session:
+            prompt = format_prompt(messages)
+            data = {
+                "query": prompt,
+                "FLAG_URLEXTRACT": "false",
+                "token": "",
+                "FLAG_MODELA": "1",
+            }
+            headers = {
+                'authority': 'komo.ai',
+                'accept': 'text/event-stream',
+                'cache-control': 'no-cache',
+                'referer': 'https://komo.ai/',
+            }
+
+            async with session.get(cls.url, params=data, headers=headers) as response:
+                response.raise_for_status()
+                next = False
+                async for line in response.iter_lines():
+                    if line == b"event: line":
+                        next = True
+                    elif next and line.startswith(b"data: "):
+                        yield json.loads(line[6:])
+                        next = False
+
--
cgit v1.2.3
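
Note on the hunk above: the provider reads the Komo response as a server-sent-event stream, where a line reading "event: line" signals that the following "data: ..." line carries a JSON-encoded text chunk to yield. The snippet below is a minimal, self-contained sketch of that parsing loop, runnable without any network access; the sample_stream contents are illustrative assumptions, not the verified Komo wire format.

import json

# Illustrative stand-in for response.iter_lines(); the payloads here are assumed.
sample_stream = [
    b"event: line",
    b'data: "Hello"',
    b"event: status",           # events other than "line" are skipped by the loop
    b'data: {"done": false}',
    b"event: line",
    b'data: " world"',
]

def parse_chunks(lines):
    expect_data = False                      # set once an "event: line" marker was seen
    for line in lines:
        if line == b"event: line":
            expect_data = True
        elif expect_data and line.startswith(b"data: "):
            yield json.loads(line[6:])       # strip the "data: " prefix, decode the JSON chunk
            expect_data = False

print("".join(parse_chunks(sample_stream)))  # -> Hello world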