path: root/g4f/gui/server/api.py
author     Tekky <98614666+xtekky@users.noreply.github.com>  2024-10-22 23:32:27 +0200
committer  GitHub <noreply@github.com>  2024-10-22 23:32:27 +0200
commit     a63c18de796bd4f3e818ff170b6ff595304f95e0 (patch)
tree       844dbb9a8d3526a8b60564b78f7a19a4e0f605d9 /g4f/gui/server/api.py
parent     Merge pull request #2282 from Karasiq/patch-1 (diff)
parent     Updated docs/providers-and-models.md g4f/models.py g4f/Provider/Upstage.py (diff)
Diffstat (limited to 'g4f/gui/server/api.py')
-rw-r--r--  g4f/gui/server/api.py  127
1 file changed, 62 insertions(+), 65 deletions(-)
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 3da0fe17..57f3eaa1 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -23,8 +23,8 @@ from g4f.providers.conversation import BaseConversation
conversations: dict[dict[str, BaseConversation]] = {}
images_dir = "./generated_images"
-class Api():
+class Api:
@staticmethod
def get_models() -> list[str]:
"""
@@ -42,14 +42,11 @@ class Api():
if provider in __map__:
provider: ProviderType = __map__[provider]
if issubclass(provider, ProviderModelMixin):
- return [{"model": model, "default": model == provider.default_model} for model in provider.get_models()]
- elif provider.supports_gpt_35_turbo or provider.supports_gpt_4:
return [
- *([{"model": "gpt-4", "default": not provider.supports_gpt_4}] if provider.supports_gpt_4 else []),
- *([{"model": "gpt-3.5-turbo", "default": not provider.supports_gpt_4}] if provider.supports_gpt_35_turbo else [])
+ {"model": model, "default": model == provider.default_model}
+ for model in provider.get_models()
]
- else:
- return [];
+ return []
@staticmethod
def get_image_models() -> list[dict]:
@@ -71,7 +68,7 @@ class Api():
"image_model": model,
"vision_model": parent.default_vision_model if hasattr(parent, "default_vision_model") else None
})
- index.append(parent.__name__)
+ index.append(parent.__name__)
elif hasattr(provider, "default_vision_model") and provider.__name__ not in index:
image_models.append({
"provider": provider.__name__,
@@ -89,15 +86,13 @@ class Api():
Return a list of all working providers.
"""
return {
- provider.__name__: (provider.label
- if hasattr(provider, "label")
- else provider.__name__) +
- (" (WebDriver)"
- if "webdriver" in provider.get_parameters()
- else "") +
- (" (Auth)"
- if provider.needs_auth
- else "")
+ provider.__name__: (
+ provider.label if hasattr(provider, "label") else provider.__name__
+ ) + (
+ " (WebDriver)" if "webdriver" in provider.get_parameters() else ""
+ ) + (
+ " (Auth)" if provider.needs_auth else ""
+ )
for provider in __providers__
if provider.working
}
@@ -131,7 +126,7 @@ class Api():
Returns:
dict: Arguments prepared for chat completion.
- """
+ """
model = json_data.get('model') or models.default
provider = json_data.get('provider')
messages = json_data['messages']
@@ -160,61 +155,62 @@ class Api():
}
def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str) -> Iterator:
- """
- Creates and returns a streaming response for the conversation.
-
- Args:
- kwargs (dict): Arguments for creating the chat completion.
-
- Yields:
- str: JSON formatted response chunks for the stream.
-
- Raises:
- Exception: If an error occurs during the streaming process.
- """
try:
+ result = ChatCompletion.create(**kwargs)
first = True
- for chunk in ChatCompletion.create(**kwargs):
+ if isinstance(result, ImageResponse):
+ # If the result is an ImageResponse, handle it as a single item
if first:
first = False
yield self._format_json("provider", get_last_provider(True))
- if isinstance(chunk, BaseConversation):
- if provider not in conversations:
- conversations[provider] = {}
- conversations[provider][conversation_id] = chunk
- yield self._format_json("conversation", conversation_id)
- elif isinstance(chunk, Exception):
- logging.exception(chunk)
- yield self._format_json("message", get_error_message(chunk))
- elif isinstance(chunk, ImagePreview):
- yield self._format_json("preview", chunk.to_string())
- elif isinstance(chunk, ImageResponse):
- async def copy_images(images: list[str], cookies: Optional[Cookies] = None):
- async with ClientSession(
- connector=get_connector(None, os.environ.get("G4F_PROXY")),
- cookies=cookies
- ) as session:
- async def copy_image(image):
- async with session.get(image) as response:
- target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
- with open(target, "wb") as f:
- async for chunk in response.content.iter_any():
- f.write(chunk)
- with open(target, "rb") as f:
- extension = is_accepted_format(f.read(12)).split("/")[-1]
- extension = "jpg" if extension == "jpeg" else extension
- new_target = f"{target}.{extension}"
- os.rename(target, new_target)
- return f"/images/{os.path.basename(new_target)}"
- return await asyncio.gather(*[copy_image(image) for image in images])
- images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies")))
- yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
- elif not isinstance(chunk, FinishReason):
- yield self._format_json("content", str(chunk))
+ yield self._format_json("content", str(result))
+ else:
+ # If the result is iterable, process it as before
+ for chunk in result:
+ if first:
+ first = False
+ yield self._format_json("provider", get_last_provider(True))
+ if isinstance(chunk, BaseConversation):
+ if provider not in conversations:
+ conversations[provider] = {}
+ conversations[provider][conversation_id] = chunk
+ yield self._format_json("conversation", conversation_id)
+ elif isinstance(chunk, Exception):
+ logging.exception(chunk)
+ yield self._format_json("message", get_error_message(chunk))
+ elif isinstance(chunk, ImagePreview):
+ yield self._format_json("preview", chunk.to_string())
+ elif isinstance(chunk, ImageResponse):
+ # Handle the ImageResponse
+ images = asyncio.run(self._copy_images(chunk.get_list(), chunk.options.get("cookies")))
+ yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
+ elif not isinstance(chunk, FinishReason):
+ yield self._format_json("content", str(chunk))
except Exception as e:
logging.exception(e)
yield self._format_json('error', get_error_message(e))
+ # Add this method to the Api class
+ async def _copy_images(self, images: list[str], cookies: Optional[Cookies] = None):
+ async with ClientSession(
+ connector=get_connector(None, os.environ.get("G4F_PROXY")),
+ cookies=cookies
+ ) as session:
+ async def copy_image(image):
+ async with session.get(image) as response:
+ target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+ with open(target, "wb") as f:
+ async for chunk in response.content.iter_any():
+ f.write(chunk)
+ with open(target, "rb") as f:
+ extension = is_accepted_format(f.read(12)).split("/")[-1]
+ extension = "jpg" if extension == "jpeg" else extension
+ new_target = f"{target}.{extension}"
+ os.rename(target, new_target)
+ return f"/images/{os.path.basename(new_target)}"
+
+ return await asyncio.gather(*[copy_image(image) for image in images])
+
def _format_json(self, response_type: str, content):
"""
Formats and returns a JSON response.
@@ -231,6 +227,7 @@ class Api():
response_type: content
}
+
def get_error_message(exception: Exception) -> str:
"""
Generates a formatted error message from an exception.
@@ -245,4 +242,4 @@ def get_error_message(exception: Exception) -> str:
provider = get_last_provider()
if provider is None:
return message
- return f"{provider.__name__}: {message}" \ No newline at end of file
+ return f"{provider.__name__}: {message}"
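Usage note (not part of the commit): a minimal sketch of how the Api._copy_images helper added above could be exercised on its own, assuming g4f is importable from this checkout, the ./generated_images directory already exists, and the example URL (an illustrative assumption, not from the commit) points to a reachable image.

import asyncio
from g4f.gui.server.api import Api

# Copy a remote image into ./generated_images and get back the rewritten
# local "/images/<name>.<ext>" path, mirroring what the streaming code above
# does when it encounters an ImageResponse chunk.
local_paths = asyncio.run(
    Api()._copy_images(
        ["https://example.com/picture.png"],  # illustrative URL, assumption
        cookies=None,
    )
)
print(local_paths)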