From 75fe95cbedd06d86fb64245f154480b8b731aef8 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Mon, 30 Dec 2024 02:51:36 +0100
Subject: Add continue messages support, Remove old text_to_speech service from
 gui Update gui and client readmes, Add HuggingSpaces group provider; Add
 providers parameters config forms to gui

---
 g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py | 19 ++++++--
 .../hf_space/BlackForestLabsFlux1Schnell.py      | 48 +++++++++---------
 g4f/Provider/hf_space/VoodoohopFlux1Schnell.py   | 50 +++++++++----------
 g4f/Provider/hf_space/__init__.py                | 57 ++++++++++++++++++++++
 4 files changed, 116 insertions(+), 58 deletions(-)

(limited to 'g4f/Provider/hf_space')

diff --git a/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
index 7987cc1b..74c9502d 100644
--- a/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
+++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Dev.py
@@ -5,6 +5,7 @@ from aiohttp import ClientSession
 
 from ...typing import AsyncResult, Messages
 from ...image import ImageResponse, ImagePreview
+from ...errors import ResponseError
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin):
@@ -12,14 +13,24 @@ class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin):
     api_endpoint = "/gradio_api/call/infer"
 
     working = True
-    
+
     default_model = 'flux-dev'
     models = [default_model]
     image_models = [default_model]
 
     @classmethod
     async def create_async_generator(
-        cls, model: str, messages: Messages, prompt: str = None, api_key: str = None, proxy: str = None, **kwargs
+        cls, model: str, messages: Messages,
+        prompt: str = None,
+        api_key: str = None,
+        proxy: str = None,
+        width: int = 1024,
+        height: int = 1024,
+        guidance_scale: float = 3.5,
+        num_inference_steps: int = 28,
+        seed: int = 0,
+        randomize_seed: bool = True,
+        **kwargs
     ) -> AsyncResult:
         headers = {
             "Content-Type": "application/json",
@@ -30,7 +41,7 @@ class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers) as session:
             prompt = messages[-1]["content"] if prompt is None else prompt
             data = {
-                "data": [prompt, 0, True, 1024, 1024, 3.5, 28]
+                "data": [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps]
             }
             async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
@@ -43,7 +54,7 @@ class BlackForestLabsFlux1Dev(AsyncGeneratorProvider, ProviderModelMixin):
                         event = chunk[7:].decode(errors="replace").strip()
                     if chunk.startswith(b"data: "):
                         if event == "error":
-                            raise RuntimeError(f"GPU token limit exceeded: {chunk.decode(errors='replace')}")
+                            raise ResponseError(f"GPU token limit exceeded: {chunk.decode(errors='replace')}")
                         if event in ("complete", "generating"):
                             try:
                                 data = json.loads(chunk[6:])
diff --git a/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
index 7b29b7af..2dd129d2 100644
--- a/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
+++ b/g4f/Provider/hf_space/BlackForestLabsFlux1Schnell.py
@@ -2,11 +2,11 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 import json
-import random
-from typing import Optional
 
 from ...typing import AsyncResult, Messages
 from ...image import ImageResponse
+from ...errors import ResponseError
+from ...requests.raise_for_status import raise_for_status
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
@@ -19,6 +19,7 @@ class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
     default_image_model = default_model
     image_models = [default_image_model]
     models = [*image_models]
+    model_aliases = {"flux-schnell-black-forest-labs": default_model}
 
     @classmethod
     async def create_async_generator(
@@ -26,21 +27,21 @@ class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
+        prompt: str = None,
         width: int = 768,
         height: int = 768,
         num_inference_steps: int = 2,
-        seed: Optional[int] = None,
-        randomize_seed: bool = False,
+        seed: int = 0,
+        randomize_seed: bool = True,
         **kwargs
     ) -> AsyncResult:
-        if seed is None:
-            seed = random.randint(0, 10000)
-        
+
         width = max(32, width - (width % 8))
        height = max(32, height - (height % 8))
-        
-        prompt = messages[-1]["content"]
-        
+
+        if prompt is None:
+            prompt = messages[-1]["content"]
+
         payload = {
             "data": [
                 prompt,
@@ -51,31 +52,26 @@ class BlackForestLabsFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
                 num_inference_steps
             ]
         }
-        
         async with ClientSession() as session:
             async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 response_data = await response.json()
                 event_id = response_data['event_id']
-                
                 while True:
                     async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
-                        status_response.raise_for_status()
-                        events = (await status_response.text()).split('\n\n')
-                        
-                        for event in events:
-                            if event.startswith('event:'):
-                                event_parts = event.split('\ndata: ')
+                        await raise_for_status(status_response)
+                        while not status_response.content.at_eof():
+                            event = await status_response.content.readuntil(b'\n\n')
+                            if event.startswith(b'event:'):
+                                event_parts = event.split(b'\ndata: ')
                                 if len(event_parts) < 2:
                                     continue
-                                
-                                event_type = event_parts[0].split(': ')[1]
+                                event_type = event_parts[0].split(b': ')[1]
                                 data = event_parts[1]
-                                
-                                if event_type == 'error':
-                                    raise Exception(f"Error generating image: {data}")
-                                elif event_type == 'complete':
+                                if event_type == b'error':
+                                    raise ResponseError(f"Error generating image: {data}")
+                                elif event_type == b'complete':
                                     json_data = json.loads(data)
                                     image_url = json_data[0]['url']
                                     yield ImageResponse(images=[image_url], alt=prompt)
-                                    return
+                                    return
\ No newline at end of file
diff --git a/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
index bd55b20b..c1778d94 100644
--- a/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
+++ b/g4f/Provider/hf_space/VoodoohopFlux1Schnell.py
@@ -2,23 +2,23 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 import json
-import random
-from typing import Optional
 
 from ...typing import AsyncResult, Messages
 from ...image import ImageResponse
+from ...errors import ResponseError
+from ...requests.raise_for_status import raise_for_status
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class VoodoohopFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://voodoohop-flux-1-schnell.hf.space"
     api_endpoint = "https://voodoohop-flux-1-schnell.hf.space/call/infer"
-    
     working = True
-    
+
     default_model = "flux-schnell"
     default_image_model = default_model
     image_models = [default_image_model]
     models = [*image_models]
+    model_aliases = {"flux-schnell-voodoohop": default_model}
 
     @classmethod
     async def create_async_generator(
@@ -26,21 +26,20 @@ class VoodoohopFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
+        prompt: str = None,
         width: int = 768,
         height: int = 768,
         num_inference_steps: int = 2,
-        seed: Optional[int] = None,
-        randomize_seed: bool = False,
+        seed: int = 0,
+        randomize_seed: bool = True,
         **kwargs
     ) -> AsyncResult:
-        if seed is None:
-            seed = random.randint(0, 10000)
-        
         width = max(32, width - (width % 8))
         height = max(32, height - (height % 8))
-        
-        prompt = messages[-1]["content"]
-        
+
+        if prompt is None:
+            prompt = messages[-1]["content"]
+
         payload = {
             "data": [
                 prompt,
@@ -51,31 +50,26 @@ class VoodoohopFlux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
                 num_inference_steps
             ]
         }
-        
         async with ClientSession() as session:
             async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 response_data = await response.json()
                 event_id = response_data['event_id']
-                
                 while True:
                     async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
-                        status_response.raise_for_status()
-                        events = (await status_response.text()).split('\n\n')
-                        
-                        for event in events:
-                            if event.startswith('event:'):
-                                event_parts = event.split('\ndata: ')
+                        await raise_for_status(status_response)
+                        while not status_response.content.at_eof():
+                            event = await status_response.content.readuntil(b'\n\n')
+                            if event.startswith(b'event:'):
+                                event_parts = event.split(b'\ndata: ')
                                 if len(event_parts) < 2:
                                     continue
-                                
-                                event_type = event_parts[0].split(': ')[1]
+                                event_type = event_parts[0].split(b': ')[1]
                                 data = event_parts[1]
-                                
-                                if event_type == 'error':
-                                    raise Exception(f"Error generating image: {data}")
-                                elif event_type == 'complete':
+                                if event_type == b'error':
+                                    raise ResponseError(f"Error generating image: {data}")
+                                elif event_type == b'complete':
                                     json_data = json.loads(data)
                                     image_url = json_data[0]['url']
                                     yield ImageResponse(images=[image_url], alt=prompt)
-                                    return
+                                    return
\ No newline at end of file
diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py
index 94524e35..87dfb32b 100644
--- a/g4f/Provider/hf_space/__init__.py
+++ b/g4f/Provider/hf_space/__init__.py
@@ -1,3 +1,60 @@
+from __future__ import annotations
+
+from ...typing import AsyncResult, Messages
+from ...errors import ResponseError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
 from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
 from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
 from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
+
+class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://huggingface.co/spaces"
+    working = True
+    default_model = BlackForestLabsFlux1Dev.default_model
+    providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell]
+
+    @classmethod
+    def get_parameters(cls, **kwargs) -> dict:
+        parameters = {}
+        for provider in cls.providers:
+            parameters = {**parameters, **provider.get_parameters(**kwargs)}
+        return parameters
+
+    @classmethod
+    def get_models(cls, **kwargs) -> list[str]:
+        if not cls.models:
+            for provider in cls.providers:
+                cls.models.extend(provider.get_models(**kwargs))
+                cls.models.extend(provider.model_aliases.keys())
+            cls.models = list(set(cls.models))
+            cls.models.sort()
+        return cls.models
+
+    @classmethod
+    async def create_async_generator(
+        cls, model: str, messages: Messages, **kwargs
+    ) -> AsyncResult:
+        is_started = False
+        for provider in cls.providers:
+            if model in provider.model_aliases:
+                async for chunk in provider.create_async_generator(provider.model_aliases[model], messages, **kwargs):
+                    is_started = True
+                    yield chunk
+                if is_started:
+                    return
+        error = None
+        for provider in cls.providers:
+            if model in provider.get_models():
+                try:
+                    async for chunk in provider.create_async_generator(model, messages, **kwargs):
+                        is_started = True
+                        yield chunk
+                    if is_started:
+                        break
+                except ResponseError as e:
+                    if is_started:
+                        raise e
+                    error = e
+        if not is_started and error is not None:
+            raise error
\ No newline at end of file
--
cgit v1.2.3
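
Usage note (editor's addition, not part of the patch): the sketch below shows one way to drive the new HuggingSpace group provider directly. It assumes only what the diff shows — the import paths g4f.Provider.hf_space and g4f.image, the create_async_generator signatures above, and that ImageResponse exposes the images list it is constructed with; the prompt text is illustrative.

    import asyncio

    from g4f.Provider.hf_space import HuggingSpace
    from g4f.image import ImageResponse

    async def main() -> None:
        # HuggingSpace first tries providers that alias the requested model,
        # then falls back to any provider whose get_models() lists it.
        messages = [{"role": "user", "content": "a watercolor fox in the snow"}]
        async for chunk in HuggingSpace.create_async_generator(
            "flux-dev",              # default model, served by BlackForestLabsFlux1Dev
            messages,
            width=1024,              # forwarded via **kwargs to the matching provider
            height=1024,
            guidance_scale=3.5,
            num_inference_steps=28,
        ):
            if isinstance(chunk, ImageResponse):
                print(chunk.images)  # assumed: the URL list passed to ImageResponse(images=...)

    asyncio.run(main())

Aliases such as "flux-schnell-voodoohop" pin a request to one specific Space, while a plain model name falls through the providers list until one succeeds; a ResponseError is re-raised immediately only once streaming has started, otherwise the next provider is tried. Independently of the dispatch logic, both Schnell providers now parse the Gradio SSE status stream incrementally via readuntil(b'\n\n') instead of buffering the whole body with text(), so results surface as soon as the 'complete' event arrives.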