path: root/g4f/client/__init__.py
from __future__ import annotations

import os
import time
import random
import string
import asyncio
import base64
from typing import Union, AsyncIterator, Iterator, Awaitable, Optional

from ..providers.base_provider import AsyncGeneratorProvider
from ..image import ImageResponse, copy_images, images_dir
from ..typing import Messages, ImageType
from ..providers.types import ProviderType
from ..providers.response import ResponseType, FinishReason, BaseConversation, SynthesizeData
from ..errors import NoImageResponseError, ModelNotFoundError
from ..providers.retry_provider import IterListProvider
from ..providers.asyncio import get_running_loop, to_sync_generator, async_generator_to_list
from ..Provider.needs_auth.BingCreateImages import BingCreateImages
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .image_models import ImageModels
from .types import IterResponse, ImageProvider, Client as BaseClient
from .service import get_model_and_provider, get_last_provider, convert_to_provider
from .helper import find_stop, filter_json, filter_none, safe_aclose, to_async_iterator

ChatCompletionResponseType = Iterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
AsyncChatCompletionResponseType = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]

try:
    anext # builtin since Python 3.10
except NameError:
    async def anext(aiter):
        # Fallback matching the builtin: let StopAsyncIteration propagate.
        return await aiter.__anext__()

# Synchronous iter_response function
def iter_response(
    response: Union[Iterator[Union[str, ResponseType]]],
    stream: bool,
    response_format: Optional[dict] = None,
    max_tokens: Optional[int] = None,
    stop: Optional[list[str]] = None
) -> ChatCompletionResponseType:
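    """
    Convert a provider's raw chunk iterator into completion objects.

    Accumulates string chunks, honors ``max_tokens`` and ``stop`` sequences,
    and yields ``ChatCompletionChunk`` objects while streaming or a single
    ``ChatCompletion`` once the response is complete.
    """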
    content = ""
    finish_reason = None
    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
    idx = 0

    if hasattr(response, '__aiter__'):
        response = to_sync_generator(response)

    for chunk in response:
        if isinstance(chunk, FinishReason):
            finish_reason = chunk.reason
            break
        elif isinstance(chunk, BaseConversation):
            yield chunk
            continue
        elif isinstance(chunk, SynthesizeData):
            continue

        chunk = str(chunk)
        content += chunk

        if max_tokens is not None and idx + 1 >= max_tokens:
            finish_reason = "length"

        first, content, chunk = find_stop(stop, content, chunk if stream else None)

        if first != -1:
            finish_reason = "stop"

        if stream:
            yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))

        if finish_reason is not None:
            break

        idx += 1

    finish_reason = "stop" if finish_reason is None else finish_reason

    if stream:
        yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
    else:
        if response_format is not None and "type" in response_format:
            if response_format["type"] == "json_object":
                content = filter_json(content)
        yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))

# Synchronous iter_append_model_and_provider function
def iter_append_model_and_provider(response: ChatCompletionResponseType) -> ChatCompletionResponseType:
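    """Attach the last used model and provider name to each completion chunk."""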
    last_provider = None

    for chunk in response:
        if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
            last_provider = get_last_provider(True) if last_provider is None else last_provider
            chunk.model = last_provider.get("model")
            chunk.provider = last_provider.get("name")
        yield chunk

async def async_iter_response(
    response: AsyncIterator[Union[str, ResponseType]],
    stream: bool,
    response_format: Optional[dict] = None,
    max_tokens: Optional[int] = None,
    stop: Optional[list[str]] = None
) -> AsyncChatCompletionResponseType:
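    """
    Async counterpart of ``iter_response``: consumes an async chunk iterator
    and yields ``ChatCompletionChunk`` objects (streaming) or a single
    ``ChatCompletion`` (non-streaming), closing the source iterator when done.
    """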
    content = ""
    finish_reason = None
    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
    idx = 0

    try:
        async for chunk in response:
            if isinstance(chunk, FinishReason):
                finish_reason = chunk.reason
                break
            elif isinstance(chunk, BaseConversation):
                yield chunk
                continue
            elif isinstance(chunk, SynthesizeData):
                continue

            chunk = str(chunk)
            content += chunk
            idx += 1

            if max_tokens is not None and idx >= max_tokens:
                finish_reason = "length"

            first, content, chunk = find_stop(stop, content, chunk if stream else None)

            if first != -1:
                finish_reason = "stop"

            if stream:
                yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))

            if finish_reason is not None:
                break

        finish_reason = "stop" if finish_reason is None else finish_reason

        if stream:
            yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
        else:
            if response_format is not None and "type" in response_format:
                if response_format["type"] == "json_object":
                    content = filter_json(content)
            yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
    finally:
        if hasattr(response, 'aclose'):
            await safe_aclose(response)

async def async_iter_append_model_and_provider(
        response: AsyncChatCompletionResponseType
    ) -> AsyncChatCompletionResponseType:
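    """Async variant: attach model and provider info to each completion chunk."""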
    last_provider = None
    try:
        async for chunk in response:
            if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
                last_provider = get_last_provider(True) if last_provider is None else last_provider
                chunk.model = last_provider.get("model")
                chunk.provider = last_provider.get("name")
            yield chunk
    finally:
        if hasattr(response, 'aclose'):
            await safe_aclose(response)

class Client(BaseClient):
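    """
    Synchronous client exposing ``chat.completions`` and ``images``.

    A minimal usage sketch (the model name is illustrative and depends on the
    providers available in your installation; the returned object is a
    ``ChatCompletion`` from ``.stubs``)::

        client = Client()
        completion = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(completion)
    """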
    def __init__(
        self,
        provider: Optional[ProviderType] = None,
        image_provider: Optional[ImageProvider] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.chat: Chat = Chat(self, provider)
        self.images: Images = Images(self, image_provider)

class Completions:
    def __init__(self, client: Client, provider: Optional[ProviderType] = None):
        self.client: Client = client
        self.provider: ProviderType = provider

    def create(
        self,
        messages: Messages,
        model: str,
        provider: Optional[ProviderType] = None,
        stream: Optional[bool] = False,
        proxy: Optional[str] = None,
        response_format: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        stop: Optional[Union[list[str], str]] = None,
        api_key: Optional[str] = None,
        ignored: Optional[list[str]] = None,
        ignore_working: Optional[bool] = False,
        ignore_stream: Optional[bool] = False,
        **kwargs
    ) -> IterResponse:
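        """
        Create a chat completion for ``messages`` with the given ``model``.

        Returns a ``ChatCompletion`` when ``stream`` is False, otherwise an
        iterator of ``ChatCompletionChunk`` objects. A minimal streaming
        sketch (model name is illustrative)::

            chunks = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "Hi"}],
                stream=True,
            )
            for chunk in chunks:
                print(chunk)  # ChatCompletionChunk
        """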
        model, provider = get_model_and_provider(
            model,
            self.provider if provider is None else provider,
            stream,
            ignored,
            ignore_working,
            ignore_stream,
        )
        stop = [stop] if isinstance(stop, str) else stop

        response = provider.create_completion(
            model,
            messages,
            stream=stream,
            **filter_none(
                proxy=self.client.proxy if proxy is None else proxy,
                max_tokens=max_tokens,
                stop=stop,
                api_key=self.client.api_key if api_key is None else api_key
            ),
            **kwargs
        )
        if asyncio.iscoroutinefunction(provider.create_completion):
            # Run the asynchronous function in an event loop
            response = asyncio.run(response)
        if stream and hasattr(response, '__aiter__'):
            # It's an async generator, wrap it into a sync iterator
            response = to_sync_generator(response)
        elif hasattr(response, '__aiter__'):
            # If response is an async generator, collect it into a list
            response = asyncio.run(async_generator_to_list(response))
        response = iter_response(response, stream, response_format, max_tokens, stop)
        response = iter_append_model_and_provider(response)
        if stream:
            return response
        else:
            return next(response)

class Chat:
    completions: Completions

    def __init__(self, client: Client, provider: Optional[ProviderType] = None):
        self.completions = Completions(client, provider)

class Images:
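    """Synchronous image generation and variation API backed by image providers."""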
    def __init__(self, client: Client, provider: Optional[ProviderType] = None):
        self.client: Client = client
        self.provider: Optional[ProviderType] = provider
        self.models: ImageModels = ImageModels(client)

    def generate(
        self,
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: str = "url",
        proxy: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        """
        Synchronous generate method that runs the async_generate method in an event loop.
        """
        return asyncio.run(self.async_generate(prompt, model, provider, response_format, proxy, **kwargs))

    async def async_generate(
        self,
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = "url",
        proxy: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
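        """
        Resolve an image provider for ``model`` and generate images for ``prompt``.

        Supports async generator providers, providers with a ``create`` method,
        and completion-style providers; returns an ``ImagesResponse`` with URLs
        or base64 data depending on ``response_format``.
        """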
        if provider is None:
            provider_handler = self.models.get(model, provider or self.provider or BingCreateImages)
        elif isinstance(provider, str):
            provider_handler = convert_to_provider(provider)
        else:
            provider_handler = provider
        if provider_handler is None:
            raise ModelNotFoundError(f"Unknown model: {model}")
        if isinstance(provider_handler, IterListProvider):
            if provider_handler.providers:
                provider_handler = provider_handler.providers[0]
            else:
                raise ModelNotFoundError(f"IterListProvider for model {model} has no providers")
        if proxy is None:
            proxy = self.client.proxy

        response = None
        if isinstance(provider_handler, type) and issubclass(provider_handler, AsyncGeneratorProvider):
            messages = [{"role": "user", "content": f"Generate an image: {prompt}"}]
            async for item in provider_handler.create_async_generator(model, messages, prompt=prompt, **kwargs):
                if isinstance(item, ImageResponse):
                    response = item
                    break
        elif hasattr(provider_handler, 'create'):
            if asyncio.iscoroutinefunction(provider_handler.create):
                response = await provider_handler.create(prompt)
            else:
                response = provider_handler.create(prompt)
            if isinstance(response, str):
                response = ImageResponse([response], prompt)
        elif hasattr(provider_handler, "create_completion"):
            get_running_loop(check_nested=True)
            messages = [{"role": "user", "content": f"Generate a image: {prompt}"}]
            for item in provider_handler.create_completion(model, messages, prompt=prompt, **kwargs):
                if isinstance(item, ImageResponse):
                    response = item
                    break
        else:
            raise ValueError(f"Provider {provider} does not support image generation")
        if isinstance(response, ImageResponse):
            return await self._process_image_response(
                response,
                response_format,
                proxy,
                model,
                getattr(provider_handler, "__name__", None)
            )
        raise NoImageResponseError(f"Unexpected response type: {type(response)}")

    def create_variation(
        self,
        image: Union[str, bytes],
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: str = "url",
        **kwargs
    ) -> ImagesResponse:
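        """Synchronous wrapper around ``async_create_variation`` using ``asyncio.run``."""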
        return asyncio.run(self.async_create_variation(
           image, model, provider, response_format, **kwargs
        ))

    async def async_create_variation(
        self,
        image: ImageType,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: str = "url",
        proxy: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
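        """
        Create a variation of ``image`` using the resolved provider.

        Accepts async generator providers or providers exposing a
        ``create_variation`` method; returns an ``ImagesResponse``.
        """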
        if provider is None:
            provider = self.models.get(model, provider or self.provider or BingCreateImages)
            if provider is None:
                raise ModelNotFoundError(f"Unknown model: {model}")
        if isinstance(provider, str):
            provider = convert_to_provider(provider)
        if proxy is None:
            proxy = self.client.proxy

        response = None
        if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
            messages = [{"role": "user", "content": "create a variation of this image"}]
            generator = None
            try:
                generator = provider.create_async_generator(model, messages, image=image, response_format=response_format, proxy=proxy, **kwargs)
                async for chunk in generator:
                    if isinstance(chunk, ImageResponse):
                        response = chunk
                        break
            finally:
                if generator and hasattr(generator, 'aclose'):
                    await safe_aclose(generator)
        elif hasattr(provider, 'create_variation'):
            if asyncio.iscoroutinefunction(provider.create_variation):
                response = await provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
            else:
                response = provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
        else:
            raise NoImageResponseError(f"Provider {provider} does not support image variation")
    
        if isinstance(response, str):
            response = ImageResponse([response])
        if isinstance(response, ImageResponse):
            return await self._process_image_response(response, response_format, proxy, model, getattr(provider, "__name__", None))
        raise NoImageResponseError(f"Unexpected response type: {type(response)}")

    async def _process_image_response(
        self,
        response: ImageResponse,
        response_format: str,
        proxy: str = None,
        model: Optional[str] = None,
        provider: Optional[str] = None
    ) -> ImagesResponse:
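        """
        Convert an ``ImageResponse`` into an ``ImagesResponse``.

        For the ``"url"`` and ``"b64_json"`` formats the images are first
        copied into the local ``images_dir``; ``"b64_json"`` additionally
        inlines each file's contents as base64.
        """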
        if response_format in ("url", "b64_json"):
            images = await copy_images(response.get_list(), response.options.get("cookies"), proxy)
            async def process_image_item(image_file: str) -> Image:
                if response_format == "b64_json":
                    with open(os.path.join(images_dir, os.path.basename(image_file)), "rb") as file:
                        image_data = base64.b64encode(file.read()).decode()
                        return Image(url=image_file, b64_json=image_data, revised_prompt=response.alt)
                return Image(url=image_file, revised_prompt=response.alt)
            images = await asyncio.gather(*[process_image_item(image) for image in images])
        else:
            images = [Image(url=image, revised_prompt=response.alt) for image in response.get_list()]
        last_provider = get_last_provider(True)
        return ImagesResponse(
            images,
            model=last_provider.get("model") if model is None else model,
            provider=last_provider.get("name") if provider is None else provider
        )

class AsyncClient(BaseClient):
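    """
    Asynchronous client mirroring ``Client``.

    A minimal usage sketch (model name is illustrative)::

        import asyncio

        async def main():
            client = AsyncClient()
            completion = await client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "Hello"}],
            )
            print(completion)

        asyncio.run(main())
    """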
    def __init__(
        self,
        provider: Optional[ProviderType] = None,
        image_provider: Optional[ImageProvider] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.chat: AsyncChat = AsyncChat(self, provider)
        self.images: AsyncImages = AsyncImages(self, image_provider)

class AsyncChat:
    completions: AsyncCompletions

    def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
        self.completions = AsyncCompletions(client, provider)

class AsyncCompletions:
    def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
        self.client: AsyncClient = client
        self.provider: ProviderType = provider

    def create(
        self,
        messages: Messages,
        model: str,
        provider: Optional[ProviderType] = None,
        stream: Optional[bool] = False,
        proxy: Optional[str] = None,
        response_format: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        stop: Optional[Union[list[str], str]] = None,
        api_key: Optional[str] = None,
        ignored: Optional[list[str]] = None,
        ignore_working: Optional[bool] = False,
        ignore_stream: Optional[bool] = False,
        **kwargs
    ) -> Union[Awaitable[ChatCompletion], AsyncIterator[Union[ChatCompletionChunk, BaseConversation]]]:
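        """
        Create a chat completion asynchronously.

        Returns an awaitable resolving to a ``ChatCompletion`` when ``stream``
        is False, otherwise an async iterator of ``ChatCompletionChunk`` objects.
        """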
        model, provider = get_model_and_provider(
            model,
            self.provider if provider is None else provider,
            stream,
            ignored,
            ignore_working,
            ignore_stream,
        )
        stop = [stop] if isinstance(stop, str) else stop

        response = provider.create_completion(
            model,
            messages,
            stream=stream,
            **filter_none(
                proxy=self.client.proxy if proxy is None else proxy,
                max_tokens=max_tokens,
                stop=stop,
                api_key=self.client.api_key if api_key is None else api_key
            ),
            **kwargs
        )

        if not isinstance(response, AsyncIterator):
            response = to_async_iterator(response)
        response = async_iter_response(response, stream, response_format, max_tokens, stop)
        response = async_iter_append_model_and_provider(response)
        return response if stream else anext(response)

class AsyncImages(Images):
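    """Asynchronous image API: ``generate`` and ``create_variation`` await the async implementations directly."""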
    def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
        self.client: AsyncClient = client
        self.provider: Optional[ProviderType] = provider
        self.models: ImageModels = ImageModels(client)

    async def generate(
        self,
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: str = "url",
        **kwargs
    ) -> ImagesResponse:
        return await self.async_generate(prompt, model, provider, response_format, **kwargs)

    async def create_variation(
        self,
        image: ImageType,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: str = "url",
        **kwargs
    ) -> ImagesResponse:
        return await self.async_create_variation(
           image, model, provider, response_format, **kwargs
        )