# llm-lab/docker-compose.yml
# Exported 2025-12-25 22:24:51 -04:00 (721 lines, 22 KiB, YAML)
# Made by: Tecno Consultores 2023
# Reusable Ollama service definition; merged into the ollama-* services below.
x-ollama-base: &ollama-base
  image: ollama/ollama:latest
  hostname: ${ohostname}
  container_name: ${ohostname}
  privileged: true
  pull_policy: ${pull_policy}
  restart: ${restart}
  tty: true
  ports:
    # Quoted to avoid YAML's sexagesimal-number port-mapping trap.
    - "${OPORT}:11434"
  ulimits:
    memlock: -1
  volumes:
    - ./ollama:/root/.ollama
  healthcheck:
    test: ['CMD-SHELL', 'ollama --version && ollama ps || exit 1']
    start_period: 5s
    interval: 5s
    timeout: 5s
    retries: 10
# Reusable Open WebUI definition; backed by Postgres, Redis and Ollama/OpenAI APIs.
x-openwebui-base: &openwebui-base
  image: ghcr.io/open-webui/open-webui:main
  hostname: ${openhostname}
  container_name: ${openhostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  environment:
    - ENV=prod
    - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY}
    - WEBUI_URL=${WEBUI_URL}
    - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}:${POSTGRESDB_PORT}/oui
    - CUSTOM_NAME=${CUSTOM_NAME}
    - WEBUI_NAME=${CUSTOM_NAME}
    - ENABLE_SIGNUP=true
    - ENABLE_LOGIN_FORM=true
    - ENABLE_REALTIME_CHAT_SAVE=false
    - ENABLE_ADMIN_EXPORT=false
    - ENABLE_ADMIN_CHAT_ACCESS=true
    - MODELS_CACHE_TTL=300
    - ENABLE_CHANNELS=false
    - SHOW_ADMIN_DETAILS=true
    - DEFAULT_USER_ROLE=pending
    - DEFAULT_LOCALE=es
    - AIOHTTP_CLIENT_TIMEOUT=1000
    - ENABLE_OLLAMA_API=true
    - OLLAMA_BASE_URL=${UOLLAMA}
    - ENABLE_OPENAI_API=${ENABLE_OPENAI_API}
    - OPENAI_API_BASE_URL=${OPENAI_API_BASE_URL}
    - ENABLE_AUTOCOMPLETE_GENERATION=true
    - AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH=-1
    - ENABLE_EVALUATION_ARENA_MODELS=false
    - ENABLE_MESSAGE_RATING=true
    - ENABLE_COMMUNITY_SHARING=false
    - ENABLE_TAGS_GENERATION=true
    - ENABLE_FORWARD_USER_INFO_HEADERS=true # set to true for openai-bridge
    - WEBUI_SESSION_COOKIE_SAME_SITE=strict
    - WEBUI_SESSION_COOKIE_SECURE=true
    - WEBUI_AUTH_COOKIE_SAME_SITE=strict
    - WEBUI_AUTH_COOKIE_SECURE=true
    - WEBUI_AUTH=true
    - OFFLINE_MODE=true
    - HF_HUB_OFFLINE=1
    - RAG_EMBEDDING_MODEL_AUTO_UPDATE=false
    - RAG_RERANKING_MODEL_AUTO_UPDATE=false
    - WHISPER_MODEL_AUTO_UPDATE=false
    - RESET_CONFIG_ON_START=false
    - SAFE_MODE=false
    - CORS_ALLOW_ORIGIN=*
    - ENABLE_GOOGLE_DRIVE_INTEGRATION=false
    - GOOGLE_DRIVE_CLIENT_ID=
    - GOOGLE_DRIVE_API_KEY=
    - ENABLE_IMAGE_GENERATION=false
    - IMAGE_GENERATION_ENGINE=comfyui
    - IMAGE_GENERATION_MODEL=
    - IMAGE_SIZE=512x512
    - IMAGE_STEPS=50
    - ENABLE_WEBSOCKET_SUPPORT=true
    - WEBSOCKET_MANAGER=redis
    - WEBSOCKET_REDIS_URL=redis://${rhostname}:${REDIS_PORT}/2
    - REDIS_URL=redis://${rhostname}:${REDIS_PORT}/2
    - REDIS_KEY_PREFIX=${openhostname}
    - ENABLE_SEARCH_QUERY_GENERATION=false
    - ENABLE_CODE_EXECUTION=false
    - ENABLE_CODE_INTERPRETER=false
    - ENABLE_WEB_SEARCH=false
    #- http_proxy=${http_proxy}
    #- https_proxy=${https_proxy}
  ports:
    - "${PORT}:8080"
  extra_hosts:
    # Quoted: plain scalars containing ':' are fragile in some parsers.
    - "host.docker.internal:host-gateway"
  volumes:
    - ./openwebui:/app/backend/data
  deploy:
    mode: global
    restart_policy:
      condition: on-failure
      delay: 5s
      max_attempts: 3
      window: 120s
# Reusable n8n-to-OpenAI bridge definition; exposes n8n workflows as an OpenAI-compatible API.
x-openai-bridge: &openai-bridge
  image: ghcr.io/sveneisenschmidt/n8n-openai-bridge:latest
  pull_policy: ${pull_policy}
  restart: ${restart}
  hostname: ${obridge}
  container_name: ${obridge}
  ports:
    - "${obridgep}:3333"
  environment:
    - BEARER_TOKEN=${obearer} # Required Auth token for API requests TO this bridge
    - N8N_WEBHOOK_BEARER_TOKEN=${wtoken}
    - LOG_REQUESTS=false
    - SESSION_ID_HEADERS=X-Session-Id,X-Chat-Id,X-OpenWebUI-Chat-Id
    - USER_ID_HEADERS=X-User-Id,X-OpenWebUI-User-Id
    - USER_EMAIL_HEADERS=X-User-Email,X-OpenWebUI-User-Email
    - USER_NAME_HEADERS=X-User-Name,X-OpenWebUI-User-Name
    - USER_ROLE_HEADERS=X-User-Role,X-OpenWebUI-User-Role
    - REQUEST_BODY_LIMIT=50mb
    - MODELS_POLL_INTERVAL=3 # File polling interval in seconds (default 1)
    - N8N_BASE_URL=http://n8n:5678/
    - N8N_API_BEARER_TOKEN=${napi}
    - MODEL_LOADER_TYPE=n8n-api
    - AUTO_DISCOVERY_TAG=n8n-openai-bridge
    - AUTO_DISCOVERY_POLL_INTERVAL=300
    - ENABLE_TASK_DETECTION=false # Set to 'true' to enable (default false)
    - N8N_TIMEOUT=300000 # n8n webhook request timeout (default 5 minutes)
    - SERVER_TIMEOUT=300000 # HTTP server request timeout (default 5 minutes)
    - SERVER_KEEP_ALIVE_TIMEOUT=120000 # Keep-alive connection timeout (default 2 minutes)
    - SERVER_HEADERS_TIMEOUT=121000 # Headers timeout (default 121 seconds)
    - FILE_UPLOAD_MODE=passthrough # Options passthrough, extract-json, extract-multipart, disabled
    - RATE_LIMIT_WINDOW_MS=60000 # Time window in milliseconds (default 1 minute)
    - RATE_LIMIT_MAX_REQUESTS=100 # Max requests per window for general endpoints (default 100)
    - RATE_LIMIT_CHAT_COMPLETIONS=30 # Max chat completion requests per window (default 30)
    - DISABLE_RATE_LIMIT=false # Set to 'true' to disable rate limiting entirely
  volumes:
    - ./models.json:/app/models.json:ro
# Reusable n8n definition (queue mode: Postgres DB + Redis/Bull queue);
# used by both the main instance and the worker.
x-n8n-base: &n8n-base
  image: n8nio/n8n:latest
  pull_policy: ${pull_policy}
  restart: ${restart}
  environment:
    - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
    - N8N_USER_MANAGEMENT_JWT_SECRET=${N8N_USER_MANAGEMENT_JWT_SECRET}
    - N8N_RUNNERS_AUTH_TOKEN=${N8N_RUNNERS_AUTH_TOKEN}
    - N8N_RUNNERS_GRANT_TOKEN=${N8N_RUNNERS_AUTH_TOKEN}
    - NODE_ENV=production
    - N8N_DIAGNOSTICS_ENABLED=false
    - N8N_PERSONALIZATION_ENABLED=true
    - N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS=true
    - EXECUTIONS_DATA_PRUNE=true
    - EXECUTIONS_DATA_MAX_AGE=24
    - EXECUTIONS_DATA_PRUNE_MAX_COUNT=100
    - N8N_SECURE_COOKIE=false
    - DB_TYPE=${DB_TYPE}
    - DB_POSTGRESDB_HOST=${DATABASE_HOST}
    - DB_POSTGRESDB_PORT=${POSTGRESDB_PORT}
    - DB_POSTGRESDB_DATABASE=${POSTGRES_DB}
    - DB_POSTGRESDB_USER=${POSTGRES_USER}
    - DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
    - DB_POSTGRESDB_POOL_SIZE=2
    - DB_POSTGRESDB_SSL_ENABLED=false
    - N8N_TEMPLATES_ENABLED=true
    - NODE_FUNCTION_ALLOW_EXTERNAL=*
    - NODE_FUNCTION_ALLOW_BUILTIN=*
    - N8N_HIRING_BANNER_ENABLED=false
    - N8N_GRACEFUL_SHUTDOWN_TIMEOUT=30
    - N8N_REINSTALL_MISSING_PACKAGES=true
    - N8N_METRICS=false
    - EXECUTIONS_DATA_SAVE_ON_ERROR=all
    - EXECUTIONS_DATA_SAVE_ON_SUCCESS=all
    - EXECUTIONS_DATA_SAVE_ON_PROGRESS=false
    - EXECUTIONS_DATA_SAVE_MANUAL_EXECUTIONS=true
    - N8N_COMMUNITY_PACKAGES_ENABLED=true
    - WEBHOOK_URL=${WEBHOOK_URL}
    - N8N_EDITOR_BASE_URL=${WEBHOOK_URL}
    - N8N_PAYLOAD_SIZE_MAX=${N8N_PAYLOAD_SIZE_MAX}
    - N8N_FORMDATA_FILE_SIZE_MAX=${N8N_FORMDATA_FILE_SIZE_MAX}
    - N8N_DEFAULT_BINARY_DATA_MODE=filesystem
    - N8N_PUBLIC_API_SWAGGERUI_DISABLED=true
    - N8N_RUNNERS_ENABLED=true
    - N8N_RUNNERS_MODE=external
    - N8N_RUNNERS_BROKER_LISTEN_ADDRESS=0.0.0.0
    - N8N_HIDE_USAGE_PAGE=false
    - N8N_EMAIL_MODE=smtp
    - N8N_SMTP_HOST=${N8N_SMTP_HOST}
    - N8N_SMTP_PORT=${N8N_SMTP_PORT}
    - N8N_SMTP_USER=${N8N_SMTP_USER}
    - N8N_SMTP_PASS=${N8N_SMTP_PASS}
    - N8N_SMTP_SENDER=${N8N_SMTP_USER}
    - N8N_SMTP_SSL=false
    - N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
    - GENERIC_TIMEZONE=${TIMEZONE}
    - TZ=${TIMEZONE}
    - EXECUTIONS_MODE=queue
    - QUEUE_BULL_PREFIX=${nhostname}
    - QUEUE_BULL_REDIS_DB=1
    - QUEUE_BULL_REDIS_HOST=${rhostname}
    - QUEUE_BULL_REDIS_PORT=${REDIS_PORT}
    - QUEUE_BULL_REDIS_TIMEOUT_THRESHOLD=10000
    - QUEUE_BULL_REDIS_TLS=false
    - QUEUE_BULL_REDIS_DUALSTACK=false
    - QUEUE_HEALTH_CHECK_ACTIVE=true
    - QUEUE_BULL_REDIS_CONCURRENCY=5
    # NOTE: in list-form `environment`, surrounding double quotes become part of
    # the variable's value, so they must not be written around the value itself.
    - LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
    - LANGCHAIN_TRACING_V2=true
    - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY}
    - LANGSMITH_PROJECT=${nhostname}
    - N8N_VERSION_NOTIFICATIONS_ENABLED=false
    - N8N_MFA_ENABLED=true
    - N8N_BLOCK_ENV_ACCESS_IN_NODE=false
    - OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=true
    - N8N_WORKER_SERVICE_NAME=${workername}
    - N8N_TASK_BROKER_URL=http://${nhostname}:${N8N_PORT2}
    - N8N_COMMAND_RESPONSE_URL=http://${nhostname}:${N8N_PORT2}
    - N8N_TASK_BROKER_PORT=${N8N_PORT2}
    - N8N_RUNNERS_BROKER_PORT=${N8N_PORT2}
    - N8N_GIT_NODE_DISABLE_BARE_REPOS=true
    - N8N_PROXY_HOPS=1
    - NODES_EXCLUDE=[]
    - N8N_SKIP_AUTH_ON_OAUTH_CALLBACK=false
    - N8N_DEFAULT_LOCALE=es
    - N8N_RESTRICT_FILE_ACCESS_TO=/tmp/
    #- N8N_MULTI_MAIN_SETUP_ENABLED=true # enterprise only
    #- N8N_MULTI_MAIN_SETUP_KEY_TTL=10 # enterprise only
    #- N8N_MULTI_MAIN_SETUP_CHECK_INTERVAL=3 # enterprise only
  volumes:
    - ./n8n/storage:/home/node/.n8n
    - ./n8n/backup:/backup
    - ./n8n/shared:/data/shared
  deploy:
    mode: global
    restart_policy:
      condition: on-failure
      delay: 5s
      max_attempts: 3
      window: 120s
# Reusable n8n task-runner definition; connects to the worker's task broker.
x-n8n-runner: &n8n-runner
  image: n8nio/runners:latest
  pull_policy: ${pull_policy}
  restart: ${restart}
  environment:
    - N8N_ENCRYPTION_KEY=${N8N_ENCRYPTION_KEY}
    - N8N_USER_MANAGEMENT_JWT_SECRET=${N8N_USER_MANAGEMENT_JWT_SECRET}
    - N8N_RUNNERS_AUTH_TOKEN=${N8N_RUNNERS_AUTH_TOKEN}
    - N8N_RUNNERS_GRANT_TOKEN=${N8N_RUNNERS_AUTH_TOKEN}
    - NODE_ENV=production
    - N8N_RUNNERS_TASK_BROKER_URI=http://${workername}:${N8N_PORT2}
    - N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT=15
    - N8N_RUNNERS_LAUNCHER_HEALTH_CHECK_PORT=5680
    - GENERIC_TIMEZONE=${TIMEZONE}
    - TZ=${TIMEZONE}
    #- N8N_NATIVE_PYTHON_RUNNER=true
  # volumes:
  #   - ./n8n/storage:/home/node/.n8n
  #   - ./n8n/backup:/backup
  #   - ./n8n/shared:/tmp/shared
  deploy:
    mode: global
    restart_policy:
      condition: on-failure
      delay: 5s
      max_attempts: 3
      window: 120s
# Reusable PostgreSQL definition; shared by n8n, Open WebUI and Evolution API.
x-postgres-base: &postgres-base
  image: postgres:17.6
  hostname: ${DATABASE_HOST}
  container_name: ${DATABASE_HOST}
  pull_policy: ${pull_policy}
  restart: ${restart}
  environment:
    - POSTGRES_USER=${POSTGRES_USER}
    - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    - POSTGRES_DB=${POSTGRES_DB}
    - TZ=America/Caracas
  ports:
    - "${POSTGRESDB_PORT}:5432"
  volumes:
    - ./init:/docker-entrypoint-initdb.d:ro
    - ./postgres:/var/lib/postgresql/data
  healthcheck:
    test: ['CMD-SHELL', 'pg_isready -h localhost -U ${POSTGRES_USER} -d ${POSTGRES_DB}']
    start_period: 5s
    interval: 5s
    timeout: 5s
    retries: 10
# Reusable Redis Stack definition; serves as n8n Bull queue, Open WebUI
# websocket manager and Evolution API cache. Port 8001 is RedisInsight UI.
x-redis-base: &redis-base
  image: redis/redis-stack:latest
  hostname: ${rhostname}
  container_name: ${rhostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  environment:
    - REDIS_ARGS=--save 20 3
    - TZ=America/Caracas
  ports:
    - "${REDIS_PORT}:6379"
    - "${INSIGHT_PORT}:8001"
  volumes:
    - ./redis:/data
  healthcheck:
    test: ["CMD", "redis-cli", "ping"]
    start_period: 5s
    interval: 5s
    timeout: 3s
    retries: 5
# Reusable Qdrant vector-database definition (HTTP 6333, gRPC 6334 internal).
x-qdrant-base: &qdrant-base
  image: qdrant/qdrant
  hostname: ${qhostname}
  container_name: ${qhostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  environment:
    - QDRANT__SERVICE__GRPC_PORT=6334
    - QDRANT__LOG_LEVEL=INFO
    - TZ=America/Caracas
  ports:
    - "${qdrant}:6333"
  volumes:
    - ./qdrant:/qdrant/storage
# Reusable single-node Kafka (KRaft) definition: one broker that is also the controller.
x-kafka-base: &kafka-base
  image: apache/kafka:latest
  hostname: ${khostname}
  container_name: ${khostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  ports:
    - "${kafka1}:${kafka1}"
    - "${kafka2}:${kafka2}"
  environment:
    - KAFKA_NODE_ID=1
    - KAFKA_PROCESS_ROLES=broker,controller
    # NOTE(review): listeners are bound/advertised on localhost — reachable
    # from the host via the published ports, but not from sibling containers
    # by service name. Confirm this matches the intended clients.
    - KAFKA_LISTENERS=PLAINTEXT://localhost:${kafka1},CONTROLLER://localhost:${kafka2}
    - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:${kafka1}
    - KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER
    - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
    - KAFKA_CONTROLLER_QUORUM_VOTERS=1@localhost:${kafka2}
    - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
    - KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1
    - KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1
    - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS=0
    - KAFKA_NUM_PARTITIONS=3
  volumes:
    - ./broker/data:/var/lib/kafka/data
# Reusable Whisper ASR definition. Intentionally has no `image:` key — the
# CPU/GPU image variant is supplied by each whisper-* service below.
x-whisper-base: &whisper-base
  hostname: ${whostname}
  container_name: ${whostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  ports:
    - "${wport}:${wport}"
  volumes:
    - ./whisper:/app/app
    - ./cache-whisper:/root/.cache
# Reusable Crawl4AI definition. No `image:` key here — the arch-specific
# image is supplied by the crawl4ai-* services below.
x-crawl4ai-base: &crawl4ai-base
  hostname: ${crawhostname}
  container_name: ${crawhostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  ports:
    - "${CRAW1}:11235"
    - "${CRAW2}:8000"
    - "${CRAW3}:9222"
    - "${CRAW4}:8080"
  volumes:
    # Shared memory for headless Chromium.
    - /dev/shm:/dev/shm
  environment:
    - CRAWL4AI_API_TOKEN=${CRAWL4AI_API_TOKEN}
    #- OPENAI_API_KEY=${OPENAI_API_KEY}
    #- CLAUDE_API_KEY=${CLAUDE_API_KEY}
  deploy:
    resources:
      limits:
        memory: 4G
      reservations:
        memory: 1G
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:11235/health"]
    interval: 30s
    timeout: 10s
    retries: 3
    start_period: 40s
# GPU mixin: merged alongside a service base (e.g. `<<: [*ollama-base, *nvidia-base]`)
# to grant all NVIDIA GPUs via the nvidia container runtime.
x-nvidia-base: &nvidia-base
  runtime: nvidia
  deploy:
    resources:
      reservations:
        devices:
          - driver: nvidia
            count: all
            capabilities: [gpu]
# Reusable Nginx Proxy Manager definition (80/443 public, 81 admin UI).
x-proxy-base: &proxy-base
  image: 'jc21/nginx-proxy-manager:latest'
  hostname: ${proxyhostname}
  container_name: ${proxyhostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  environment:
    - DISABLE_IPV6=${DISABLE_IPV6}
    - INITIAL_ADMIN_EMAIL=${INITIAL_ADMIN_EMAIL}
    - INITIAL_ADMIN_PASSWORD=${INITIAL_ADMIN_PASSWORD}
  ports:
    # Quoted: unquoted NN:NN port mappings can hit YAML's sexagesimal-int trap.
    - "80:80"
    - "443:443"
    - "${pproxy}:81"
  volumes:
    - ./proxy/data:/data
    - ./proxy/letsencrypt:/etc/letsencrypt
  healthcheck:
    test: ["CMD", "/usr/bin/check-health"]
    interval: 10s
    timeout: 3s
# Reusable SearXNG metasearch definition; runs with minimal capabilities.
x-searxng-base: &searxng-base
  image: docker.io/searxng/searxng:latest
  hostname: ${searxnghostname}
  container_name: ${searxnghostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  ports:
    - "${psearch}:8080"
  volumes:
    - ./searxng:/etc/searxng:rw
  environment:
    - SEARXNG_BASE_URL=https://${SEARXNG_HOSTNAME:-localhost}/
    - UWSGI_WORKERS=${SEARXNG_UWSGI_WORKERS:-4}
    - UWSGI_THREADS=${SEARXNG_UWSGI_THREADS:-4}
  cap_drop:
    - ALL
  cap_add:
    - CHOWN
    - SETGID
    - SETUID
  logging:
    driver: "json-file"
    options:
      max-size: "1m"
      max-file: "1"
# Reusable MongoDB definition (no published ports; reachable on the compose network).
x-mongodb-base: &mongodb-base
  image: mongo:latest
  hostname: ${mhostname}
  container_name: ${mhostname}
  pull_policy: ${pull_policy}
  restart: ${restart}
  # ports:
  #   - ${mongoport}:8081
  environment:
    - MONGO_INITDB_ROOT_USERNAME=${MONGO_INITDB_ROOT_USERNAME}
    - MONGO_INITDB_ROOT_PASSWORD=${MONGO_INITDB_ROOT_PASSWORD}
    - TZ=America/Caracas
  volumes:
    - ./mongodb_data:/data/db
  healthcheck:
    # The legacy `mongo` shell was removed from the official image in MongoDB 6+;
    # `mongosh` is the shell shipped with mongo:latest.
    test: [ "CMD", "mongosh", "--eval", "db.adminCommand('ping')" ]
    interval: 10s
    timeout: 5s
    retries: 5
# Reusable Evolution API (WhatsApp gateway) definition; persists to Postgres
# and caches in Redis DB 3.
x-evolution-base: &evolution-base
  image: atendai/evolution-api:latest
  hostname: ${evohostname}
  container_name: ${evohostname}
  restart: ${restart}
  pull_policy: ${pull_policy}
  environment:
    - SERVER_TYPE=http
    - SERVER_URL=${EVOLUTION_SERVER_URL}
    - TELEMETRY=false
    - CORS_ORIGIN=*
    - DEL_INSTANCE=false
    - DEL_TEMP_INSTANCES=false
    - WEBSOCKET_ENABLED=false
    - WEBSOCKET_GLOBAL_EVENTS=false
    - LANGUAGE=en
    - AUTHENTICATION_API_KEY=${evoapikey}
    - DATABASE_ENABLED=true
    - DATABASE_PROVIDER=postgresql
    - DATABASE_CONNECTION_URI=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}:${POSTGRESDB_PORT}/${evodb}?schema=public
    - DATABASE_CONNECTION_CLIENT_NAME=evolution_exchange
    - DATABASE_SAVE_DATA_INSTANCE=true
    - DATABASE_SAVE_DATA_NEW_MESSAGE=true
    - DATABASE_SAVE_MESSAGE_UPDATE=true
    - DATABASE_SAVE_DATA_CONTACTS=true
    - DATABASE_SAVE_DATA_CHATS=true
    - DATABASE_SAVE_DATA_LABELS=true
    - DATABASE_SAVE_DATA_HISTORIC=true
    - CACHE_REDIS_ENABLED=true
    - CACHE_REDIS_URI=redis://${rhostname}:${REDIS_PORT}/3
    - CACHE_REDIS_PREFIX_KEY=${evohostname}
    - CACHE_REDIS_TTL=604800
    - CACHE_REDIS_SAVE_INSTANCES=false
    - CACHE_LOCAL_ENABLED=false
    - TZ=America/Caracas
    # - CONFIG_SESSION_PHONE_VERSION=2.3000.1023204200
  ports:
    - "${evoport}:8080"
  volumes:
    - ./evolution_instances:/evolution/instances
  deploy:
    mode: global
    restart_policy:
      condition: on-failure
      delay: 5s
      max_attempts: 3
      window: 120s
################################################
# Services: each one merges an x-*-base anchor and is gated by a compose
# profile, so stacks are started selectively (e.g. `--profile n8n`).
services:
  # Main n8n instance (editor + webhook endpoints + task broker).
  n8n-all:
    <<: *n8n-base
    hostname: ${nhostname}
    container_name: ${nhostname}
    profiles: ["n8n"]
    ports:
      - "${N8N_PORT}:5678"
      - "${N8N_PORT2}:5679"
    healthcheck:
      test: ["CMD", "sh", "-c", "(wget -q -T 5 -O - http://127.0.0.1:5678/healthz 2>/dev/null | grep -qF '{\"status\":\"ok\"}') || exit 1"]
      start_period: 1m00s
      interval: 5s
      timeout: 10s
      retries: 10
    links:
      - redis-all
    depends_on:
      postgres-all:
        condition: service_healthy
        restart: true
      redis-all:
        condition: service_healthy
        restart: true

  # Queue-mode worker; consumes jobs from the Bull queue in Redis.
  n8n-worker:
    <<: *n8n-base
    hostname: ${workername}
    container_name: ${workername}
    profiles: ["n8n-worker"]
    command: worker
    healthcheck:
      test: ["CMD", "node", "-v"]
      start_period: 1m00s
      interval: 5s
      timeout: 10s
      retries: 10
    links:
      - redis-all
    depends_on:
      postgres-all:
        condition: service_healthy
        restart: true
      redis-all:
        condition: service_healthy
        restart: true
      n8n-all:
        condition: service_healthy
        restart: true

  # External task runner; attaches to the worker's task broker.
  n8n-runner:
    <<: *n8n-runner
    hostname: ${runnername}
    container_name: ${runnername}
    profiles: ["n8n-runner"]
    healthcheck:
      test: ["CMD", "node", "-v"]
      start_period: 1m00s
      interval: 5s
      timeout: 10s
      retries: 10
    links:
      - redis-all
    depends_on:
      postgres-all:
        condition: service_healthy
        restart: true
      redis-all:
        condition: service_healthy
        restart: true
      n8n-worker:
        condition: service_healthy
        restart: true

  postgres-all:
    <<: *postgres-base
    profiles: ["n8n", "evolutionapi", "n8n-worker", "postgres-all"]

  redis-all:
    <<: *redis-base
    profiles: ["n8n", "openwebui", "evolutionapi", "n8n-worker", "redis-all"]

  qdrant-all:
    <<: *qdrant-base
    profiles: ["qdrant"]

  openwebui-all:
    <<: *openwebui-base
    profiles: ["openwebui"]
    links:
      - redis-all
    depends_on:
      redis-all:
        condition: service_healthy
        restart: true
      postgres-all:
        condition: service_healthy
        restart: true

  n8n-openai-bridge-all:
    <<: *openai-bridge
    profiles: ["openaibridge"]
    depends_on:
      openwebui-all:
        condition: service_healthy
        restart: true
      n8n-all:
        condition: service_healthy
        restart: true

  kafka-all:
    <<: *kafka-base
    profiles: ["kafka"]

  # CPU-only Ollama.
  ollama-others:
    <<: *ollama-base
    profiles: ["ollama-cpu"]
    environment:
      - OLLAMA_DEBUG=${OLLAMA_DEBUG}
      - OLLAMA_KEEP_ALIVE=${OLLAMA_KEEP_ALIVE}
      - OLLAMA_MAX_LOADED_MODELS=${OLLAMA_MAX_LOADED_MODELS}
      - OLLAMA_MAX_QUEUE=${OLLAMA_MAX_QUEUE}
      - OLLAMA_NUM_PARALLEL=${OLLAMA_NUM_PARALLEL}
      - OLLAMA_NOPRUNE=${OLLAMA_NOPRUNE}
      - OLLAMA_SCHED_SPREAD=${OLLAMA_SCHED_SPREAD}
      - OLLAMA_FLASH_ATTENTION=${OLLAMA_FLASH_ATTENTION}
      - OLLAMA_KV_CACHE_TYPE=${OLLAMA_KV_CACHE_TYPE}
      - OLLAMA_LOAD_TIMEOUT=${OLLAMA_LOAD_TIMEOUT}

  # GPU Ollama: merges the base with the nvidia mixin (earlier anchor wins on conflicts).
  ollama-nvidia:
    <<: [*ollama-base, *nvidia-base]
    profiles: ["ollama-gpu"]
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility,video
      - OLLAMA_DEBUG=${OLLAMA_DEBUG}
      - OLLAMA_KEEP_ALIVE=${OLLAMA_KEEP_ALIVE}
      - OLLAMA_MAX_LOADED_MODELS=${OLLAMA_MAX_LOADED_MODELS}
      - OLLAMA_MAX_QUEUE=${OLLAMA_MAX_QUEUE}
      - OLLAMA_NUM_PARALLEL=${OLLAMA_NUM_PARALLEL}
      - OLLAMA_NOPRUNE=${OLLAMA_NOPRUNE}
      - OLLAMA_SCHED_SPREAD=${OLLAMA_SCHED_SPREAD}
      - OLLAMA_FLASH_ATTENTION=${OLLAMA_FLASH_ATTENTION}
      - OLLAMA_KV_CACHE_TYPE=${OLLAMA_KV_CACHE_TYPE}
      - OLLAMA_LOAD_TIMEOUT=${OLLAMA_LOAD_TIMEOUT}

  whisper-nvidia:
    <<: [*whisper-base, *nvidia-base]
    image: onerahmet/openai-whisper-asr-webservice:latest-gpu
    profiles: ["whisper-gpu"]
    environment:
      - ASR_DEVICE=cuda
      - ASR_MODEL=${ASR_MODEL}
      - ASR_ENGINE=${ASR_ENGINE}
      - MODEL_IDLE_TIMEOUT=300

  whisper-cpu:
    <<: *whisper-base
    image: onerahmet/openai-whisper-asr-webservice:latest
    profiles: ["whisper"]
    environment:
      - ASR_DEVICE=cpu
      - ASR_MODEL=${ASR_MODEL}
      - ASR_ENGINE=${ASR_ENGINE}
      - MODEL_IDLE_TIMEOUT=300

  crawl4ai-amd64:
    <<: *crawl4ai-base
    image: unclecode/crawl4ai:all-amd64
    profiles: ["crawl4ai-amd64"]

  crawl4ai-arm:
    <<: *crawl4ai-base
    image: unclecode/crawl4ai:all-arm64
    profiles: ["crawl4ai-arm64"]

  proxy:
    <<: *proxy-base
    profiles: ["proxy"]

  searxng:
    <<: *searxng-base
    profiles: ["searxng"]

  mongodb:
    <<: *mongodb-base
    profiles: ["mongodb"]

  evolutionapi:
    <<: *evolution-base
    profiles: ["evolutionapi"]
    links:
      - redis-all
    depends_on:
      postgres-all:
        condition: service_healthy
        restart: true
      redis-all:
        condition: service_healthy
        restart: true