# mirror of https://github.com/lobehub/lobehub
# synced 2026-04-21 09:37:28 +00:00
---
# Docker Compose project for the LobeHub server-database deployment.
name: lobehub

services:
network-service:
|
||
image: alpine
|
||
container_name: lobe-network
|
||
restart: always
|
||
ports:
|
||
- '${RUSTFS_PORT}:9000' # RustFS API
|
||
- '9001:9001' # RustFS Console
|
||
- '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor
|
||
- '${LOBE_PORT}:3210' # LobeChat
|
||
- '3000:3000' # Grafana
|
||
- '4318:4318' # otel-collector HTTP
|
||
- '4317:4317' # otel-collector gRPC
|
||
command: tail -f /dev/null
|
||
networks:
|
||
- lobe-network
|
||
|
||
postgresql:
|
||
image: pgvector/pgvector:pg17
|
||
container_name: lobe-postgres
|
||
ports:
|
||
- '5432:5432'
|
||
volumes:
|
||
- './data:/var/lib/postgresql/data'
|
||
environment:
|
||
- 'POSTGRES_DB=${LOBE_DB_NAME}'
|
||
- 'POSTGRES_PASSWORD=${POSTGRES_PASSWORD}'
|
||
healthcheck:
|
||
test: ['CMD-SHELL', 'pg_isready -U postgres']
|
||
interval: 5s
|
||
timeout: 5s
|
||
retries: 5
|
||
restart: always
|
||
networks:
|
||
- lobe-network
|
||
|
||
redis:
|
||
image: redis:7-alpine
|
||
container_name: lobe-redis
|
||
ports:
|
||
- '6379:6379'
|
||
command: redis-server --save 60 1000 --appendonly yes
|
||
volumes:
|
||
- 'redis_data:/data'
|
||
healthcheck:
|
||
test: ['CMD', 'redis-cli', 'ping']
|
||
interval: 5s
|
||
timeout: 3s
|
||
retries: 5
|
||
restart: always
|
||
networks:
|
||
- lobe-network
|
||
|
||
|
||
rustfs:
|
||
image: rustfs/rustfs:latest
|
||
container_name: lobe-rustfs
|
||
network_mode: 'service:network-service'
|
||
environment:
|
||
- RUSTFS_CONSOLE_ENABLE=true
|
||
- RUSTFS_ACCESS_KEY=${RUSTFS_ACCESS_KEY}
|
||
- RUSTFS_SECRET_KEY=${RUSTFS_SECRET_KEY}
|
||
volumes:
|
||
- rustfs-data:/data
|
||
healthcheck:
|
||
test: ["CMD-SHELL", "wget -qO- http://localhost:9000/health >/dev/null 2>&1 || exit 1"]
|
||
interval: 5s
|
||
timeout: 3s
|
||
retries: 30
|
||
command: ["--access-key","${RUSTFS_ACCESS_KEY}","--secret-key","${RUSTFS_SECRET_KEY}","/data"]
|
||
|
||
rustfs-init:
|
||
image: minio/mc:latest
|
||
container_name: lobe-rustfs-init
|
||
depends_on:
|
||
rustfs:
|
||
condition: service_healthy
|
||
volumes:
|
||
- ./bucket.config.json:/bucket.config.json:ro
|
||
entrypoint: /bin/sh
|
||
command: -c '
|
||
set -eux;
|
||
echo "S3_ACCESS_KEY=${RUSTFS_ACCESS_KEY}, S3_SECRET_KEY=${RUSTFS_SECRET_KEY}";
|
||
mc --version;
|
||
mc alias set rustfs "http://network-service:9000" "${RUSTFS_ACCESS_KEY}" "${RUSTFS_SECRET_KEY}";
|
||
mc ls rustfs || true;
|
||
mc mb "rustfs/lobe" --ignore-existing;
|
||
mc admin info rustfs || true;
|
||
mc anonymous set-json "/bucket.config.json" "rustfs/lobe";
|
||
'
|
||
restart: "no"
|
||
networks:
|
||
- lobe-network
|
||
|
||
# version lock ref: https://github.com/lobehub/lobe-chat/pull/7331
|
||
casdoor:
|
||
image: casbin/casdoor:v2.13.0
|
||
container_name: lobe-casdoor
|
||
entrypoint: /bin/sh -c './server --createDatabase=true'
|
||
network_mode: 'service:network-service'
|
||
depends_on:
|
||
postgresql:
|
||
condition: service_healthy
|
||
environment:
|
||
httpport: ${CASDOOR_PORT}
|
||
RUNNING_IN_DOCKER: 'true'
|
||
driverName: 'postgres'
|
||
dataSourceName: 'user=postgres password=${POSTGRES_PASSWORD} host=postgresql port=5432 sslmode=disable dbname=casdoor'
|
||
runmode: 'dev'
|
||
volumes:
|
||
- ./init_data.json:/init_data.json
|
||
env_file:
|
||
- .env
|
||
|
||
searxng:
|
||
image: searxng/searxng
|
||
container_name: lobe-searxng
|
||
volumes:
|
||
- './searxng-settings.yml:/etc/searxng/settings.yml'
|
||
environment:
|
||
- 'SEARXNG_SETTINGS_FILE=/etc/searxng/settings.yml'
|
||
restart: always
|
||
networks:
|
||
- lobe-network
|
||
env_file:
|
||
- .env
|
||
|
||
lobe:
|
||
image: lobehub/lobehub
|
||
container_name: lobehub
|
||
network_mode: 'service:network-service'
|
||
depends_on:
|
||
postgresql:
|
||
condition: service_healthy
|
||
network-service:
|
||
condition: service_started
|
||
rustfs:
|
||
condition: service_healthy
|
||
rustfs-init:
|
||
condition: service_completed_successfully
|
||
casdoor:
|
||
condition: service_started
|
||
redis:
|
||
condition: service_healthy
|
||
|
||
environment:
|
||
- 'AUTH_SSO_PROVIDERS=casdoor'
|
||
- 'KEY_VAULTS_SECRET=Kix2wcUONd4CX51E/ZPAd36BqM4wzJgKjPtz2sGztqQ='
|
||
- 'AUTH_SECRET=NX2kaPE923dt6BL2U8e9oSre5RfoT7hg'
|
||
- 'DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgresql:5432/${LOBE_DB_NAME}'
|
||
- 'S3_BUCKET=${RUSTFS_LOBE_BUCKET}'
|
||
- 'S3_ENABLE_PATH_STYLE=1'
|
||
- 'S3_ACCESS_KEY=${RUSTFS_ACCESS_KEY}'
|
||
- 'S3_ACCESS_KEY_ID=${RUSTFS_ACCESS_KEY}'
|
||
- 'S3_SECRET_ACCESS_KEY=${RUSTFS_SECRET_KEY}'
|
||
- 'LLM_VISION_IMAGE_USE_BASE64=1'
|
||
- 'S3_SET_ACL=0'
|
||
- 'SEARXNG_URL=http://searxng:8080'
|
||
- 'REDIS_URL=redis://redis:6379'
|
||
- 'REDIS_PREFIX=lobechat'
|
||
- 'REDIS_TLS=0'
|
||
env_file:
|
||
- .env
|
||
restart: always
|
||
entrypoint: >
|
||
/bin/sh -c "
|
||
/bin/node /app/startServer.js &
|
||
LOBE_PID=\$!
|
||
sleep 3
|
||
if [ $(wget --timeout=5 --spider --server-response ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -c 'HTTP/1.1 200 OK') -eq 0 ]; then
|
||
echo '⚠️Warning: Unable to fetch OIDC configuration from Casdoor'
|
||
echo 'Request URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
|
||
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
|
||
echo ''
|
||
echo '⚠️注意:无法从 Casdoor 获取 OIDC 配置'
|
||
echo '请求 URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
|
||
echo '了解更多:https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
|
||
echo ''
|
||
else
|
||
if ! wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep 'issuer' | grep ${AUTH_CASDOOR_ISSUER}; then
|
||
printf '❌Error: The Auth issuer is conflict, Issuer in OIDC configuration is: %s' \$(wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -E 'issuer.*' | awk -F '\"' '{print \$4}')
|
||
echo ' , but the issuer in .env file is: ${AUTH_CASDOOR_ISSUER} '
|
||
echo 'Request URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
|
||
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
|
||
echo ''
|
||
printf '❌错误:Auth 的 issuer 冲突,OIDC 配置中的 issuer 是:%s' \$(wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -E 'issuer.*' | awk -F '\"' '{print \$4}')
|
||
echo ' , 但 .env 文件中的 issuer 是:${AUTH_CASDOOR_ISSUER} '
|
||
echo '请求 URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
|
||
echo '了解更多:https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
|
||
echo ''
|
||
fi
|
||
fi
|
||
if [ $(wget --timeout=5 --spider --server-response ${S3_ENDPOINT}/health 2>&1 | grep -c 'HTTP/1.1 200 OK') -eq 0 ]; then
|
||
echo '⚠️Warning: Unable to fetch RustFS health status'
|
||
echo 'Request URL: ${S3_ENDPOINT}/health'
|
||
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
|
||
echo ''
|
||
echo '⚠️注意:无法获取 RustFS 健康状态'
|
||
echo '请求 URL: ${S3_ENDPOINT}/health'
|
||
echo '了解更多:https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
|
||
echo ''
|
||
fi
|
||
wait \$LOBE_PID
|
||
"
|
||
|
||
grafana:
|
||
profiles:
|
||
- otel
|
||
image: grafana/grafana:12.2.0-17419259409
|
||
container_name: lobe-grafana
|
||
network_mode: 'service:network-service'
|
||
restart: always
|
||
environment:
|
||
- GF_AUTH_ANONYMOUS_ENABLED=true
|
||
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
|
||
- GF_AUTH_DISABLE_LOGIN_FORM=true
|
||
- GF_FEATURE_TOGGLES_ENABLE=traceqlEditor
|
||
volumes:
|
||
- grafana_data:/var/lib/grafana
|
||
- ./grafana/dashboards:/etc/grafana/provisioning/dashboards
|
||
- ./grafana/datasources:/etc/grafana/provisioning/datasources
|
||
depends_on:
|
||
- tempo
|
||
- prometheus
|
||
|
||
tempo:
|
||
profiles:
|
||
- otel
|
||
image: grafana/tempo:latest
|
||
container_name: lobe-tempo
|
||
network_mode: 'service:network-service'
|
||
restart: always
|
||
volumes:
|
||
- ./tempo/tempo.yaml:/etc/tempo.yaml
|
||
- tempo_data:/var/tempo
|
||
command: ['-config.file=/etc/tempo.yaml']
|
||
|
||
prometheus:
|
||
profiles:
|
||
- otel
|
||
image: prom/prometheus
|
||
container_name: lobe-prometheus
|
||
network_mode: 'service:network-service'
|
||
restart: always
|
||
volumes:
|
||
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
|
||
- prometheus_data:/prometheus
|
||
command:
|
||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||
- '--web.enable-otlp-receiver'
|
||
- '--web.enable-remote-write-receiver'
|
||
- '--enable-feature=exemplar-storage'
|
||
|
||
otel-collector:
|
||
profiles:
|
||
- otel
|
||
image: otel/opentelemetry-collector
|
||
container_name: lobe-otel-collector
|
||
network_mode: 'service:network-service'
|
||
restart: always
|
||
volumes:
|
||
- ./otel-collector/collector-config.yaml:/etc/otelcol/config.yaml
|
||
command: ['--config', '/etc/otelcol/config.yaml']
|
||
depends_on:
|
||
- tempo
|
||
- prometheus
|
||
|
||
otel-tracing-test:
|
||
profiles:
|
||
- otel-test
|
||
image: ghcr.io/grafana/xk6-client-tracing:v0.0.9
|
||
container_name: lobe-otel-tracing-test
|
||
network_mode: 'service:network-service'
|
||
restart: always
|
||
environment:
|
||
- ENDPOINT=127.0.0.1:4317
|
||
|
||
volumes:
  # NOTE(review): 'data' and 's3_data' are not referenced by any service
  # above (postgresql uses a './data' bind mount; rustfs uses
  # 'rustfs-data') — confirm before removing.
  data:
    driver: local
  s3_data:
    driver: local
  grafana_data:
    driver: local
  tempo_data:
    driver: local
  prometheus_data:
    driver: local
  redis_data:
    driver: local
  rustfs-data:
    driver: local

networks:
  lobe-network:
    driver: bridge