# Docker Compose stack: Open WebUI frontend + Ollama model server.
services:
  webui:
    container_name: ollama-webui
    image: ghcr.io/open-webui/open-webui:0.8
    volumes:
      - /volume1/docker/ollama/webui:/app/backend/data:rw
    environment:
      OLLAMA_BASE_URL: http://ollama:11434
      # NOTE(review): secret committed to the file — prefer an env_file or a
      # secret store so this value stays out of version control.
      WEBUI_SECRET_KEY: "RsxbMGHLRMfjVRtiQJkroSwXyEGPgaMWcEycVQYQkVhvgZFaUDfXFbPuRLrQhQpc"
    healthcheck:
      # TCP probe against the app port using bash's /dev/tcp redirection.
      test: timeout 10s bash -c ':> /dev/tcp/127.0.0.1/8080' || exit 1
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 90s
    mem_limit: 1g
    mem_reservation: 1g
    ports:
      # Quoted to avoid YAML's colon-scalar (sexagesimal) trap on port maps.
      - "8271:8080"
    depends_on:
      # Wait until ollama's healthcheck passes before starting the UI.
      ollama:
        condition: service_healthy
    restart: on-failure

  ollama:
    container_name: ollama
    # For a NAS with an AMD CPU, use ollama/ollama:rocm instead of ollama/ollama:latest
    image: ollama/ollama:latest
    entrypoint: ["/usr/bin/bash", "/entrypoint.sh"]
    volumes:
      - /volume1/docker/ollama/data:/root/.ollama:rw
      - /volume1/docker/ollama/entrypoint/entrypoint.sh:/entrypoint.sh
    environment:
      # Check all the models at https://ollama.com/library — separate
      # multiple models with commas, e.g. llama3.2,gemma2,mistral
      MODELS: llama3.2
      # Check all the models at https://ollama.com/library — separate
      # multiple models with commas, e.g. llama3.2,gemma2,mistral
      OLLAMA_INSTALL_MODELS: llama3.2
      OLLAMA_HOSTNAME: ollama.mschwab.net
    ports:
      - "11434:11434"
    healthcheck:
      test: ["CMD", "ollama", "--version"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
    mem_limit: 2g
    mem_reservation: 2g
    # Quoted: plain scalar contains a colon (on-failure with max 5 retries).
    restart: "on-failure:5"