rosswickman
1/6/2026 - 5:04 PM

Full-Stack AI & Automation Suite

This template deploys a comprehensive ecosystem for building and hosting private AI agents and workflows:

  • Workflow Engine: n8n backed by a PostgreSQL 16 database.
  • Local LLM Host: Ollama configured with NVIDIA GPU passthrough for high-performance local inference.
  • Web Interface: Open WebUI for interacting with Ollama models and managing chats.
  • Security: Cloudflare Tunnel (cloudflared) to expose services safely without opening firewall ports.
  • Maintenance: Watchtower to keep all images updated automatically.

Prerequisites: Requires a `.env` file for credentials (Postgres, n8n encryption, Cloudflare token) and the NVIDIA Container Toolkit for GPU support.

services:
  # PostgreSQL 16 database backing n8n. Data persists on the host in ./postgres_data.
  postgres:
    image: postgres:16-alpine
    container_name: n8n-postgres  # fixed typo (was "n8n-postgress"); service DNS name "postgres" is unaffected
    restart: unless-stopped
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
    # Lets `docker compose ps` report readiness; harmless on its own and enables
    # condition-based depends_on if adopted later.
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
      interval: 10s
      timeout: 5s
      retries: 5
    volumes:
      # Bind mount keeps database files across container rebuilds.
      - ./postgres_data:/var/lib/postgresql/data

  # n8n workflow engine; state lives in Postgres plus ./n8n_data.
  n8n:
    image: n8nio/n8n:latest
    container_name: n8n
    restart: unless-stopped
    environment:
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_DATABASE=${POSTGRES_DB}
      # "postgres" resolves to the database service over the compose network.
      - DB_POSTGRESDB_HOST=postgres
      - DB_POSTGRESDB_USER=${POSTGRES_USER}
      - DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
      # Must stay stable across restarts or previously stored credentials
      # become undecryptable.
      - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
      - N8N_HOST=${N8N_HOST}
      - WEBHOOK_URL=${WEBHOOK_URL}
      - N8N_PROTOCOL=https
      # NOTE(review): "false" lets workflow nodes read container env vars
      # (including the DB password) — confirm this is intended.
      - N8N_BLOCK_ENV_ACCESS_IN_NODE=false
      - N8N_RELOAD_CREDENTIALS_ON_CHANGE=true
      - N8N_ONBOARDING_SKIP=true
      # TLS terminates at the Cloudflare tunnel, so the local port is plain
      # HTTP; a Secure cookie would never be sent back.
      - N8N_SECURE_COOKIE=false
    ports:
      # Quoted: unquoted "a:b" values risk YAML 1.1 sexagesimal parsing.
      - "5678:5678"
    depends_on:
      - postgres
    volumes:
      - ./n8n_data:/home/node/.n8n

  # Local LLM runtime with NVIDIA GPU passthrough; models on an external mount.
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    environment:
      - OLLAMA_MODELS=/root/.ollama/models
      # Listen on all interfaces so other containers (open-webui, n8n) can reach it.
      - OLLAMA_HOST=0.0.0.0
      - OLLAMA_ORIGINS=*
      # -1 = keep models loaded in VRAM indefinitely (no idle unload).
      - OLLAMA_KEEP_ALIVE=-1
      - OLLAMA_LOAD_TIMEOUT=5m
    ports:
      # Quoted: unquoted "a:b" values risk YAML 1.1 sexagesimal parsing.
      - "11434:11434"
    volumes:
      # Host path holding downloaded model weights (large files).
      - /mnt/m/AI/ollama/models:/root/.ollama/models
    deploy:
      resources:
        reservations:
          devices:
            # Requires the NVIDIA Container Toolkit on the host.
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  # Chat front-end for Ollama; reaches it by service name over the compose network.
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    restart: unless-stopped # Ensures restart on reboot
    ports:
      - "3000:8080"
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
      # IMPORTANT: Set a secret key to stay logged in after updates
      # SECURITY(review): the hard-coded fallback "supersecretkey123" signs
      # session tokens if WEBUI_SECRET_KEY is unset — always set a real value
      # in .env; consider ${WEBUI_SECRET_KEY:?must be set} to fail fast.
      - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-supersecretkey123}
    volumes:
      # CRITICAL: Added this volume to persist chats and admin accounts
      - ./open-webui_data:/app/backend/data

  # Cloudflare Tunnel: exposes internal services via Cloudflare without
  # opening inbound firewall ports. Routes are configured in the Cloudflare
  # dashboard (token-based tunnel).
  tunnel:
    image: cloudflare/cloudflared:latest
    container_name: cloudflared
    restart: unless-stopped # Changed to unless-stopped for better control
    # --no-autoupdate: Cloudflare's recommendation for containers — in-place
    # self-update would kill PID 1; image updates are Watchtower's job here.
    command: tunnel --no-autoupdate run
    environment:
      - TUNNEL_TOKEN=${TUNNEL_TOKEN}

  # Watchtower checks once a day (86400 s) for newer images, restarts the
  # affected containers, and prunes the old images (--cleanup).
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    restart: unless-stopped # Added restart policy for persistence
    environment:
      - DOCKER_API_VERSION=1.44  # Forces the correct API version
    volumes:
      # SECURITY: the Docker socket grants full control over every container
      # on this host — only run trusted images with this mount.
      - /var/run/docker.sock:/var/run/docker.sock
    command: --interval 86400 --cleanup
# ---------------------------------------------------------------------------
# Example `.env` file — save the lines below as a SEPARATE file named `.env`
# next to docker-compose.yaml. They are NOT valid compose YAML and must not
# remain in this file.
# ---------------------------------------------------------------------------
# Security
N8N_ENCRYPTION_KEY='32 char key'
POSTGRES_USER='admin-user'
POSTGRES_PASSWORD='admin-pass'
POSTGRES_DB='n8n'

# Network & Cloudflare
N8N_HOST='n8n.example.com'
TUNNEL_TOKEN='cloudflare-tunnel-token'

# Internal
WEBHOOK_URL='https://n8n.example.com/'