* openai-bridge

This commit is contained in:
Jesus Palencia 2025-12-25 22:24:51 -04:00
parent 19b44f2189
commit 53c4c63b41
3 changed files with 60 additions and 1 deletion

View file

@@ -54,7 +54,7 @@ x-openwebui-base: &openwebui-base
- ENABLE_MESSAGE_RATING=true
- ENABLE_COMMUNITY_SHARING=false
- ENABLE_TAGS_GENERATION=true
- ENABLE_FORWARD_USER_INFO_HEADERS=false
- ENABLE_FORWARD_USER_INFO_HEADERS=true #set to true for openai-bridge
- WEBUI_SESSION_COOKIE_SAME_SITE=strict
- WEBUI_SESSION_COOKIE_SECURE=true
- WEBUI_AUTH_COOKIE_SAME_SITE=strict
@@ -101,6 +101,43 @@ x-openwebui-base: &openwebui-base
max_attempts: 3
window: 120s
# Reusable definition for the n8n OpenAI bridge container; services below
# pull it in through the &openai-bridge anchor (e.g. via <<: *openai-bridge).
x-openai-bridge: &openai-bridge
  image: ghcr.io/sveneisenschmidt/n8n-openai-bridge:latest
  pull_policy: ${pull_policy}
  restart: ${restart}
  hostname: ${obridge}
  container_name: ${obridge}
  ports:
    # Quoted so YAML never number-parses the host:container pair (sexagesimal trap).
    - "${obridgep}:3333"
  environment:
    - BEARER_TOKEN=${obearer}  # Required auth token for API requests TO this bridge
    - N8N_WEBHOOK_BEARER_TOKEN=${wtoken}
    - LOG_REQUESTS=false
    # Header names the bridge inspects to propagate identity from Open WebUI
    # (requires ENABLE_FORWARD_USER_INFO_HEADERS=true on the Open WebUI side).
    - SESSION_ID_HEADERS=X-Session-Id,X-Chat-Id,X-OpenWebUI-Chat-Id
    - USER_ID_HEADERS=X-User-Id,X-OpenWebUI-User-Id
    - USER_EMAIL_HEADERS=X-User-Email,X-OpenWebUI-User-Email
    - USER_NAME_HEADERS=X-User-Name,X-OpenWebUI-User-Name
    - USER_ROLE_HEADERS=X-User-Role,X-OpenWebUI-User-Role
    - REQUEST_BODY_LIMIT=50mb
    - MODELS_POLL_INTERVAL=3  # File polling interval in seconds (default 1)
    # n8n connection: service-internal URL plus API token for model auto-discovery.
    - N8N_BASE_URL=http://n8n:5678/
    - N8N_API_BEARER_TOKEN=${napi}
    - MODEL_LOADER_TYPE=n8n-api
    - AUTO_DISCOVERY_TAG=n8n-openai-bridge
    - AUTO_DISCOVERY_POLL_INTERVAL=300
    - ENABLE_TASK_DETECTION=false  # Set to 'true' to enable (default false)
    # Timeouts (milliseconds).
    - N8N_TIMEOUT=300000  # n8n webhook request timeout (default 5 minutes)
    - SERVER_TIMEOUT=300000  # HTTP server request timeout (default 5 minutes)
    - SERVER_KEEP_ALIVE_TIMEOUT=120000  # Keep-alive connection timeout (default 2 minutes)
    - SERVER_HEADERS_TIMEOUT=121000  # Headers timeout (default 121 seconds)
    - FILE_UPLOAD_MODE=passthrough  # Options: passthrough, extract-json, extract-multipart, disabled
    # Rate limiting.
    - RATE_LIMIT_WINDOW_MS=60000  # Time window in milliseconds (default 1 minute)
    - RATE_LIMIT_MAX_REQUESTS=100  # Max requests per window for general endpoints (default 100)
    - RATE_LIMIT_CHAT_COMPLETIONS=30  # Max chat completion requests per window (default 30)
    - DISABLE_RATE_LIMIT=false  # Set to 'true' to disable rate limiting entirely
  volumes:
    # Static model map, mounted read-only.
    # NOTE(review): with MODEL_LOADER_TYPE=n8n-api this file may be ignored
    # in favor of API auto-discovery — confirm against the bridge's docs.
    - ./models.json:/app/models.json:ro
x-n8n-base: &n8n-base
image: n8nio/n8n:latest
pull_policy: ${pull_policy}
@@ -581,6 +618,17 @@ services:
condition: service_healthy
restart: true
# Bridge service for the "all" stack: inherits everything from the
# x-openai-bridge anchor and only starts once its upstreams are healthy.
# Restored the 2-space nesting that the diff rendering flattened — as a
# flat mapping this fragment would not parse as a service definition.
n8n-openai-bridge-all:
  <<: *openai-bridge
  profiles: ["openaibridge"]
  depends_on:
    # Long-form depends_on: wait for health and restart the bridge if a
    # dependency is restarted.
    openwebui-all:
      condition: service_healthy
      restart: true
    n8n-all:
      condition: service_healthy
      restart: true
kafka-all:
<<: *kafka-base
profiles: ["kafka"]

View file

@@ -107,6 +107,13 @@ evohostname=evolutionapi
evoport=9191
evodb=evolution
# openai-bridge
obridge=n8n-openai-bridge
obridgep=3333
obearer=TH76Ki41mS2LUlvj64nHieTTsl45K1hM #change me
wtoken=your-secret-token-here #change me
napi=n8n-secret-token-here #change me
# global
restart=always
pull_policy=always

4
models.json Normal file
View file

@@ -0,0 +1,4 @@
{
"chat-trigger-agent": "https://n8n.example.com/webhook/abc123/chat",
"webhook-agent": "https://n8n.example.com/webhook/xyz789"
}