-rw-r--r--   README.md              | 27
-rw-r--r--   docker-compose.yml     |  4
-rw-r--r--   g4f/Provider/Ollama.py | 13
3 files changed, 40 insertions, 4 deletions
@@ -747,6 +747,33 @@ set G4F_PROXY=http://host:port
         </a>
       </td>
     </tr>
+    <tr>
+      <td>
+        <a href="https://github.com/yjg30737/pyqt-openai">
+          <b>VividNode (pyqt-openai)</b>
+        </a>
+      </td>
+      <td>
+        <a href="https://github.com/yjg30737/pyqt-openai/stargazers">
+          <img alt="Stars" src="https://img.shields.io/github/stars/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+        </a>
+      </td>
+      <td>
+        <a href="https://github.com/yjg30737/pyqt-openai/network/members">
+          <img alt="Forks" src="https://img.shields.io/github/forks/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+        </a>
+      </td>
+      <td>
+        <a href="https://github.com/yjg30737/pyqt-openai/issues">
+          <img alt="Issues" src="https://img.shields.io/github/issues/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+        </a>
+      </td>
+      <td>
+        <a href="https://github.com/yjg30737/pyqt-openai/pulls">
+          <img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" />
+        </a>
+      </td>
+    </tr>
   </tbody>
 </table>
diff --git a/docker-compose.yml b/docker-compose.yml
index 1b99ba97..3f8bc4ea 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -12,4 +12,6 @@ services:
     ports:
       - '8080:8080'
       - '1337:1337'
-      - '7900:7900'
\ No newline at end of file
+      - '7900:7900'
+    environment:
+      - OLLAMA_HOST=host.docker.internal
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/Ollama.py
index a44aaacd..f9116541 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/Ollama.py
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import requests
+import os

 from .needs_auth.Openai import Openai
 from ..typing import AsyncResult, Messages
@@ -14,9 +15,11 @@ class Ollama(Openai):
     @classmethod
     def get_models(cls):
         if not cls.models:
-            url = 'http://127.0.0.1:11434/api/tags'
+            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            url = f"http://{host}:{port}/api/tags"
             models = requests.get(url).json()["models"]
-            cls.models = [model['name'] for model in models]
+            cls.models = [model["name"] for model in models]
             cls.default_model = cls.models[0]
         return cls.models
@@ -25,9 +28,13 @@
         cls,
         model: str,
         messages: Messages,
-        api_base: str = "http://localhost:11434/v1",
+        api_base: str = None,
         **kwargs
     ) -> AsyncResult:
+        if not api_base:
+            host = os.getenv("OLLAMA_HOST", "localhost")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            api_base: str = f"http://{host}:{port}/v1"
         return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
         )
\ No newline at end of file
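
Taken together, these changes let the Ollama provider target a non-local Ollama instance through the OLLAMA_HOST and OLLAMA_PORT environment variables; the docker-compose service sets OLLAMA_HOST=host.docker.internal so the container reaches an Ollama server running on the Docker host. A minimal usage sketch follows, assuming the provider is importable as g4f.Provider.Ollama and an Ollama server is reachable at the configured address; the host value shown is only an example taken from the compose file above.

    import os

    # Optional overrides; with the variables unset the provider falls back to the
    # previous hard-coded defaults (127.0.0.1 / localhost, port 11434).
    os.environ["OLLAMA_HOST"] = "host.docker.internal"  # example value from the compose file
    os.environ["OLLAMA_PORT"] = "11434"

    from g4f.Provider import Ollama  # assumed export path for g4f/Provider/Ollama.py

    # get_models() now queries http://{OLLAMA_HOST}:{OLLAMA_PORT}/api/tags
    # and caches the model names it finds.
    print(Ollama.get_models())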