version: '3.6'

services:
  ollama:
    # Uncomment below for GPU support
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities:
    #             - gpu
    volumes:
      - ollama:/root/.ollama
    ports:
      - 11434:11434
    container_name: ollama
    pull_policy: always
    tty: true
    restart: unless-stopped
    image: ollama/ollama:latest

  ollama-webui:
    build:
      context: .
      args:
        OLLAMA_API_BASE_URL: '/ollama/api'
      dockerfile: Dockerfile
    image: ollama-webui:latest
    container_name: ollama-webui
    ports:
      - 3000:8080
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped

volumes:
  ollama: {}
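
# Usage sketch (assumes Docker with the Compose plugin is installed; adjust for
# your environment):
#
#   docker compose up -d --build
#
# With the port mappings above, the web UI is served on http://localhost:3000
# and the Ollama API on http://localhost:11434. To enable GPU acceleration,
# uncomment the deploy block in the ollama service; this additionally assumes
# the NVIDIA Container Toolkit is installed on the host.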