Nerve v1.0 — Web UI for OpenClaw

This commit is contained in:
daggerhashimoto 2026-02-19 22:25:55 +01:00
commit 99ced5c080
253 changed files with 50357 additions and 0 deletions

15
.editorconfig Normal file
View file

@ -0,0 +1,15 @@
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.md]
trim_trailing_whitespace = false
[Makefile]
indent_style = tab

51
.env.example Normal file
View file

@ -0,0 +1,51 @@
# Nerve Configuration
# Tip: Run `npm run setup` for guided configuration instead of editing this manually.
# Server ports
# In development: Vite runs on 3080 and proxies /api + /ws to the backend on 3081
# In production: Backend serves everything on PORT (default 3080)
PORT=3081
SSL_PORT=3443
# VITE_PORT=3080 # Vite dev server port (default: 3080)
# VITE_HOST=127.0.0.1 # Vite dev server bind address (default: 127.0.0.1)
# Bind address — defaults to 127.0.0.1 (localhost only)
# Set to 0.0.0.0 for network/remote access
# HOST=127.0.0.1
# Agent identity
AGENT_NAME=Agent
# OpenClaw Gateway
GATEWAY_URL=http://127.0.0.1:18789
GATEWAY_TOKEN=
# OPENCLAW_GATEWAY_TOKEN= # Alternative name for GATEWAY_TOKEN (checked as fallback)
# Paths (defaults to ~/.openclaw structure if unset)
# MEMORY_PATH=
# MEMORY_DIR=
# SESSIONS_DIR=
# USAGE_FILE=
# API keys (optional — Edge TTS is always available as a free fallback)
OPENAI_API_KEY=
REPLICATE_API_TOKEN=
# API base URLs (optional — override for proxies or self-hosted)
# OPENAI_BASE_URL=https://api.openai.com/v1
# REPLICATE_BASE_URL=https://api.replicate.com/v1
# TTS cache settings (optional)
# TTS_CACHE_TTL_MS=3600000
# TTS_CACHE_MAX=200
# CORS — additional allowed origins (comma-separated)
# Auto-configured by `npm run setup` when choosing Tailscale/Network/Custom access
# ALLOWED_ORIGINS=https://your-server.example.com:3443
# CSP — additional connect-src entries (space-separated)
# Typically your CORS origins plus their WebSocket (wss://) equivalents
# CSP_CONNECT_EXTRA=wss://your-server.example.com:3443 https://your-server.example.com:3443
# WebSocket proxy — additional allowed hostnames (comma-separated)
# WS_ALLOWED_HOSTS=gateway.local,10.0.0.5

24
.gitattributes vendored Normal file
View file

@ -0,0 +1,24 @@
# Normalize line endings for cross-platform consistency
* text=auto
# Force LF for source files
*.ts text eol=lf
*.tsx text eol=lf
*.js text eol=lf
*.mjs text eol=lf
*.cjs text eol=lf
*.json text eol=lf
*.css text eol=lf
*.html text eol=lf
*.md text eol=lf
*.yaml text eol=lf
*.yml text eol=lf
# Binary files
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.ico binary
*.woff binary
*.woff2 binary

48
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View file

@ -0,0 +1,48 @@
---
name: Bug Report
about: Something broken? Let us know.
title: "[Bug] "
labels: bug
assignees: ""
---
## Description
<!-- A clear, concise description of the bug. -->
## Steps to Reproduce
1. Go to '...'
2. Click on '...'
3. See error
## Expected Behavior
<!-- What should have happened? -->
## Actual Behavior
<!-- What happened instead? -->
## Screenshots / Logs
<!-- Paste screenshots, browser console errors, or terminal output. Use <details> for long logs: -->
<!--
<details>
<summary>Error log</summary>
```
paste log here
```
</details>
-->
## Environment
- **OS:** (e.g. Ubuntu 24.04, macOS 15)
- **Node:** (e.g. 22.x — run `node --version`)
- **Nerve version:** (e.g. 1.3.0 — check status bar or `package.json`)
- **OpenClaw version:** (e.g. 2026.2.14)
- **Browser:** (e.g. Chrome 132, Firefox 134)

View file

@ -0,0 +1,23 @@
---
name: Feature Request
about: Got an idea? We're listening.
title: "[Feature] "
labels: enhancement
assignees: ""
---
## Problem
<!-- What use case or pain point does this address? -->
## Proposed Solution
<!-- How should it work? Be as specific as you like — UI mockups, API shapes, and user flows all help. -->
## Alternatives Considered
<!-- Any other approaches you've thought about, and why you prefer the proposed solution. -->
## Additional Context
<!-- Screenshots, links, related issues, or anything else that's relevant. -->

31
.github/pull_request_template.md vendored Normal file
View file

@ -0,0 +1,31 @@
## What
<!-- Brief description of the change. What does this PR do? -->
## Why
<!-- What problem does this solve? Link to related issue(s): Closes #__ -->
## How
<!-- Key implementation details, if non-obvious. Mention new files, architectural decisions, trade-offs. -->
## Type of Change
- [ ] 🐛 Bug fix (non-breaking change that fixes an issue)
- [ ] ✨ New feature (non-breaking change that adds functionality)
- [ ] 💥 Breaking change (fix or feature that would cause existing functionality to change)
- [ ] 📝 Documentation update
- [ ] 🔧 Refactor / chore (no functional change)
## Checklist
- [ ] `npm run lint` passes
- [ ] `npm run build && npm run build:server` succeeds
- [ ] `npm test -- --run` passes
- [ ] New features include tests
- [ ] UI changes include a screenshot or screen recording
## Screenshots
<!-- If applicable, add screenshots of the change. Delete this section if not relevant. -->

52
.gitignore vendored Normal file
View file

@ -0,0 +1,52 @@
node_modules/
dist/
# OS
.DS_Store
Thumbs.db
# Editors
*.swp
*.swo
*~
.idea/
.vscode/settings.json
.vscode/launch.json
# npm
npm-debug.log*
.npmrc
# SSL certs
certs/
cert.pem
key.pem
# Logs
server.log
agent-log.json
*.log
# Environment
.env
.env.tmp
.env.backup
.env.backup.*
tsconfig.server.tsbuildinfo
server-dist/
# Brand assets (local only)
brand/
docs/brand/
# Internal docs (local only)
docs/AUDIT.md
docs/CODEBASE_ISSUES_VERIFIED.md
docs/CODEBASE_REVIEW_AND_IDEAS.md
docs/IMPROVEMENTS.md
docs/OPENSOURCE_PLAN.md
coverage/
tts-config.json
scripts-dist/
.nerve/
start.sh

1
.nvmrc Normal file
View file

@ -0,0 +1 @@
22

198
CONTRIBUTING.md Normal file
View file

@ -0,0 +1,198 @@
# Contributing to Nerve
Thanks for wanting to help! This guide covers everything you need to start contributing.
## Table of Contents
- [Development Setup](#development-setup)
- [Project Structure](#project-structure)
- [Adding a Feature](#adding-a-feature)
- [Testing](#testing)
- [Linting](#linting)
- [Commit Conventions](#commit-conventions)
- [Pull Request Process](#pull-request-process)
- [License](#license)
## Development Setup
### Prerequisites
- **Node.js ≥ 22** — check with `node --version`
- **npm** (bundled with Node)
- A running [OpenClaw](https://github.com/openclaw/openclaw) gateway
### Steps
1. **Fork and clone** the repository:
```bash
git clone https://github.com/<your-username>/openclaw-nerve.git
cd openclaw-nerve
```
2. **Install dependencies:**
```bash
npm install
```
3. **Configure environment:**
```bash
npm run setup
```
The interactive wizard auto-detects your gateway token and writes `.env`. Alternatively, copy `.env.example` to `.env` and fill in values manually.
4. **Start development servers** (two terminals):
```bash
# Terminal 1 — Vite frontend with HMR
npm run dev
# Terminal 2 — Backend with file watching
npm run dev:server
```
5. Open **http://localhost:3080**. The frontend proxies API requests to the backend on `:3081`.
## Project Structure
```
nerve/
├── src/ # Frontend (React + TypeScript)
│ ├── features/ # Feature modules (co-located)
│ │ ├── chat/ # Chat panel, messages, input, search
│ │ ├── voice/ # Push-to-talk, wake word, audio feedback
│ │ ├── tts/ # Text-to-speech playback
│ │ ├── sessions/ # Session list, tree, spawn dialog
│ │ ├── workspace/ # Tabbed panel: memory, crons, skills, config
│ │ ├── settings/ # Settings drawer (appearance, audio, connection)
│ │ ├── command-palette/ # ⌘K command palette
│ │ ├── markdown/ # Markdown renderer, code block actions
│ │ ├── charts/ # Inline chart extraction and rendering
│ │ ├── memory/ # Memory editor, add/delete dialogs
│ │ ├── activity/ # Agent log, event log
│ │ ├── dashboard/ # Token usage, memory list, limits
│ │ └── connect/ # Connect dialog (gateway setup)
│ ├── components/ # Shared UI components
│ │ ├── ui/ # Primitives (button, input, dialog, etc.)
│ │ └── skeletons/ # Loading skeletons
│ ├── contexts/ # React contexts (Chat, Session, Gateway, Settings)
│ ├── hooks/ # Shared hooks (WebSocket, SSE, keyboard, etc.)
│ ├── lib/ # Utilities (formatting, themes, sanitize, etc.)
│ ├── types.ts # Shared type definitions
│ └── test/ # Test setup
├── server/ # Backend (Hono + TypeScript)
│ ├── routes/ # API route handlers
│ ├── services/ # TTS engines, Whisper, usage tracking
│ ├── lib/ # Utilities (config, WS proxy, file watcher, etc.)
│ ├── middleware/ # Rate limiting
│ └── app.ts # Hono app assembly
├── config/ # TypeScript configs for server build
├── scripts/ # Setup wizard and utilities
├── docs/ # Documentation
├── vitest.config.ts # Test configuration
├── eslint.config.js # Lint configuration
└── vite.config.ts # Vite build configuration
```
### Key conventions
- **Feature modules** live in `src/features/<name>/`. Each feature owns its components, hooks, types, and tests.
- **`@/` import alias** maps to `src/` — use it for cross-feature imports.
- **Tests are co-located** with source files: `foo.ts` → `foo.test.ts`.
- **Server routes** are thin handlers that delegate to `services/` and `lib/`.
## Adding a Feature
### Frontend
1. Create a directory in `src/features/<your-feature>/`.
2. Add your components, hooks, and types inside.
3. Export the public API from an `index.ts` barrel file.
4. Wire it into the app (usually via `App.tsx` or an existing panel component).
5. Write tests alongside your source files.
### Backend
1. Create a route file in `server/routes/<your-feature>.ts`.
2. If you need business logic, add a service in `server/services/`.
3. Register the route in `server/app.ts`.
4. Add tests (co-located, e.g. `server/routes/<your-feature>.test.ts`).
### Both
- Update types in `src/types.ts` if you're adding new WebSocket or API message shapes.
- If your feature needs new environment variables, add them to `.env.example` and document them in `docs/CONFIGURATION.md`.
## Testing
Tests use [Vitest](https://vitest.dev) with jsdom for React component testing and [Testing Library](https://testing-library.com/docs/react-testing-library/intro) for assertions.
```bash
npm test # Watch mode (re-runs on save)
npm test -- --run # Single run (CI-friendly)
npm run test:coverage # With V8 coverage report (text + HTML + lcov)
```
### Guidelines
- Co-locate tests with source: `useVoiceInput.ts` → `useVoiceInput.test.ts`.
- Use `@testing-library/react` for component tests, plain Vitest for logic.
- Test setup lives in `src/test/setup.ts` (imports `@testing-library/jest-dom`).
- Coverage excludes config files, type declarations, and test files themselves.
## Linting
ESLint 9 with flat config. TypeScript-ESLint + React Hooks + React Refresh rules.
```bash
npm run lint
```
Key rules:
- **`react-hooks/exhaustive-deps: warn`** — keep dependency arrays honest.
- **TypeScript strict mode** throughout.
- Ignores `dist/` and `server-dist/`.
Fix issues before committing. Your PR will fail CI if lint doesn't pass.
## Commit Conventions
Use [Conventional Commits](https://www.conventionalcommits.org/):
```
<type>(<scope>): <short description>
[optional body]
```
**Types:** `feat`, `fix`, `docs`, `style`, `refactor`, `test`, `chore`, `perf`, `ci`
**Scope** (optional): the feature or area — `chat`, `tts`, `voice`, `server`, `sessions`, `workspace`, etc.
**Examples:**
```
feat(chat): add image lightbox for inline images
fix(tts): handle empty audio response from Edge TTS
docs: update configuration guide with new env vars
refactor(server): extract TTS cache into service module
test(voice): add wake-word persistence tests
```
## Pull Request Process
1. **Open an issue first** for non-trivial changes. Discuss the approach before writing code.
2. **Branch from `master`**: `git checkout -b feat/my-feature`.
3. **Keep PRs focused** — one feature or fix per PR.
4. **Ensure all checks pass** before requesting review:
```bash
npm run lint
npm run build
npm run build:server
npm test -- --run
```
5. **Fill out the PR template** — describe what, why, and how.
6. **Include tests** for new features. Bug fixes should include a regression test when feasible.
7. **Screenshots welcome** for UI changes.
8. A maintainer will review, possibly request changes, and merge.
## License
By contributing, you agree that your contributions will be licensed under the [MIT License](LICENSE).

21
LICENSE Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025-2026 Nerve contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

119
README.md Normal file
View file

@ -0,0 +1,119 @@
<div align="center">
<img src="docs/nerve-logo-animated.svg" alt="Nerve" width="200" />
# Nerve
**The cockpit for your [OpenClaw](https://github.com/openclaw/openclaw) agents.**
[![MIT License](https://img.shields.io/badge/license-MIT-blue)](LICENSE)
![Node 22+](https://img.shields.io/badge/node-%3E%3D22-brightgreen)
</div>
```bash
curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh | bash
```
## What is Nerve?
You can already chat with your OpenClaw agent through webchat, Telegram, WhatsApp, Discord. Nerve is what you open when chatting isn't enough.
Nerve is a self-hosted cockpit for [OpenClaw](https://github.com/openclaw/openclaw) agents. Voice conversations, live workspace editing, inline charts, cron scheduling, and full token-level visibility. One install script. Running in 30 seconds.
## Why Nerve?
Messaging channels are great for chatting. But you can't watch charts render in real-time, edit your agent's workspace mid-conversation, browse its files, or monitor token spend from a Telegram window. Nerve gives you the full picture.
<div align="center">
![Screenshot](docs/screenshot.png)
</div>
## What makes it different
### Voice that actually works
Talk to your agent. It talks back. Wake-word activation, local Whisper transcription (no API key needed), multi-provider TTS with Edge, OpenAI, and Replicate. Not a gimmick, a daily driver.
### Live charts from a chat message
Your agent can drop interactive TradingView charts, candlestick plots, and data visualizations directly into the conversation. Say "show me gold this year" and get a real chart, not a code block.
### Full workspace visibility
Your agent's memory, personality, tools, daily logs. All visible, all editable, all live. Change SOUL.md while it's mid-conversation. No restarts, no file hunting, no guessing what it remembers.
### Cron and scheduling from the UI
Create recurring jobs and one-shot reminders. Every scheduled run shows up as its own session in the sidebar. You can watch it execute live, read the full transcript, and see exactly what it did.
## Everything else
| | |
|---|---|
| **Streaming chat** | Markdown, syntax highlighting, diff views, image paste, file previews. All rendering as it streams |
| **File browser** | Browse your workspace, open files in tabs, edit with syntax highlighting. Real-time sync when your agent edits files |
| **Multi-session** | Session tree with sub-agents, per-session model overrides, unread indicators |
| **Sub-agents** | Spawn background workers with custom models and reasoning levels |
| **Monitoring** | Token usage, context window meter, cost tracking, activity logs |
| **Command palette** | Cmd+K to search, switch sessions, change models. Keyboard-first |
| **Search** | Full-text search across all messages in the current session |
| **14 themes** | Dark, light, and everything in between. Resizable panels, custom fonts |
## Get Started
### One command
```bash
curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh | bash
```
The installer handles dependencies, cloning, building, and launching. It runs a setup wizard that auto-detects your gateway token and walks you through configuration.
### Manual install
```bash
git clone https://github.com/daggerhashimoto/openclaw-nerve.git
cd openclaw-nerve
npm install
npm run setup # interactive wizard — configures .env
npm run prod # builds and starts the server
```
### Development
```bash
npm run dev # frontend — Vite HMR on :3080
npm run dev:server # backend — watch mode on :3081
```
**Requires:** Node.js 22+ and an [OpenClaw](https://github.com/openclaw/openclaw) gateway.
## How it works
```
Browser ─── Nerve (:3080) ─── OpenClaw Gateway (:18789)
│ │
├─ WS ──────┤ proxied to gateway
├─ SSE ─────┤ file watchers, real-time sync
└─ REST ────┘ files, memories, TTS, models
```
Nerve proxies WebSocket traffic to your gateway and adds its own REST layer for voice, memory, and monitoring.
**Frontend:** React 19 · Tailwind CSS 4 · shadcn/ui · Vite 7
**Backend:** Hono 4 on Node.js
## Docs
| | |
|---|---|
| **[Architecture](docs/ARCHITECTURE.md)** | How the codebase is organized |
| **[Configuration](docs/CONFIGURATION.md)** | Every `.env` variable explained |
| **[Agent Markers](docs/AGENT-MARKERS.md)** | TTS markers, inline charts, and how agents render rich UI |
| **[Security](docs/SECURITY.md)** | What's locked down and how |
| **[API](docs/API.md)** | REST and WebSocket endpoints |
| **[Contributing](CONTRIBUTING.md)** | Dev setup, code style, PRs |
| **[Troubleshooting](docs/TROUBLESHOOTING.md)** | Common issues and fixes |
## License
[MIT](LICENSE)

23
components.json Normal file
View file

@ -0,0 +1,23 @@
{
"$schema": "https://ui.shadcn.com/schema.json",
"style": "new-york",
"rsc": false,
"tsx": true,
"tailwind": {
"config": "",
"css": "src/index.css",
"baseColor": "neutral",
"cssVariables": true,
"prefix": ""
},
"iconLibrary": "lucide",
"rtl": false,
"aliases": {
"components": "@/components",
"utils": "@/lib/utils",
"ui": "@/components/ui",
"lib": "@/lib",
"hooks": "@/hooks"
},
"registries": {}
}

33
config/tsconfig.app.json Normal file
View file

@ -0,0 +1,33 @@
{
"compilerOptions": {
"tsBuildInfoFile": "../node_modules/.tmp/tsconfig.app.tsbuildinfo",
"target": "ES2022",
"useDefineForClassFields": true,
"lib": ["ES2022", "DOM", "DOM.Iterable"],
"module": "ESNext",
"types": ["vite/client"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"noEmit": true,
"jsx": "react-jsx",
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true,
"baseUrl": "..",
"paths": {
"@/*": ["./src/*"]
}
},
"include": ["../src"],
"exclude": ["../src/**/*.test.ts", "../src/**/*.test.tsx", "../src/**/*.spec.ts"]
}

26
config/tsconfig.node.json Normal file
View file

@ -0,0 +1,26 @@
{
"compilerOptions": {
"tsBuildInfoFile": "../node_modules/.tmp/tsconfig.node.tsbuildinfo",
"target": "ES2023",
"lib": ["ES2023"],
"module": "ESNext",
"types": ["node"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"noEmit": true,
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true
},
"include": ["../vite.config.ts"]
}

View file

@ -0,0 +1,23 @@
{
"compilerOptions": {
"tsBuildInfoFile": "../node_modules/.tmp/tsconfig.scripts.tsbuildinfo",
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "bundler",
"lib": ["ES2022"],
"types": ["node"],
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"esModuleInterop": true,
"resolveJsonModule": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"erasableSyntaxOnly": true,
"noUnusedLocals": false,
"noUnusedParameters": false,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true
},
"include": ["../scripts"]
}

View file

@ -0,0 +1,22 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "bundler",
"lib": ["ES2022"],
"types": ["node"],
"skipLibCheck": true,
"strict": true,
"noEmit": true,
"esModuleInterop": true,
"resolveJsonModule": true,
"verbatimModuleSyntax": true,
"moduleDetection": "force",
"erasableSyntaxOnly": true,
"noUnusedLocals": false,
"noUnusedParameters": false,
"noFallthroughCasesInSwitch": true,
"noUncheckedSideEffectImports": true
},
"include": ["../server"]
}

158
docs/AGENT-MARKERS.md Normal file
View file

@ -0,0 +1,158 @@
# Agent Markers
Nerve parses special markers in agent responses to render rich UI elements. These markers are stripped from the visible text and replaced with interactive components.
## TTS Markers — `[tts:...]`
Makes the agent's response play back as audio.
### Format
```
[tts: Text to be spoken aloud]
```
### How It Works
1. **User sends a voice message** → Nerve prepends `[voice] ` to the text
2. **Nerve appends a system hint** to the message telling the agent to include `[tts:...]` markers in its response
3. **Agent responds** with both readable text AND a `[tts:...]` marker
4. **Nerve extracts the marker**, strips it from visible text, and sends it to the TTS engine for audio playback
5. **Fallback**: If the agent forgets the marker but the user sent a voice message, Nerve auto-speaks the full response text
### Example
Agent response:
```
The weather in Istanbul is 22°C and sunny.
[tts: The weather in Istanbul is 22 degrees and sunny.]
```
Nerve displays: "The weather in Istanbul is 22°C and sunny."
Nerve speaks: "The weather in Istanbul is 22 degrees and sunny."
### Rules for Agents
- Place `[tts:...]` at the **end** of the response
- The spoken text can differ from the written text (e.g., expand abbreviations, simplify formatting)
- Only the **first** `[tts:...]` marker is used for audio; all markers are stripped from display
- Never send **only** a TTS marker — the response must be readable as text too
- TTS markers are only expected when the user sends a voice message (indicated by `[voice]` prefix)
### Implementation
- **Injection**: `src/features/chat/operations/sendMessage.ts` → `applyVoiceTTSHint()` appends the system hint when `[voice] ` prefix is detected
- **Extraction**: `src/features/tts/useTTS.ts` → `extractTTSMarkers()` parses markers from response text
- **Fallback**: `src/contexts/ChatContext.tsx` — auto-speaks response if voice message had no `[tts:...]` marker
---
## Chart Markers — `[chart:{...}]`
Embeds interactive charts inline in the conversation.
### Format
```
[chart:{"type":"<type>","title":"<title>","data":{...}}]
```
The marker must contain valid JSON inside `[chart:{...}]`. The parser uses bracket-balanced scanning (not regex) to handle nested JSON correctly.
### Chart Types
| Type | Renderer | Data Required | Use Case |
|------|----------|--------------|----------|
| `tv` | TradingView Widget | `symbol` (no `data`) | Live financial tickers — stocks, crypto, forex, commodities |
| `line` | Lightweight Charts | `labels` + `values` or `series` | Custom time-series data |
| `area` | Lightweight Charts | `labels` + `values` or `series` | Custom time-series with gradient fill |
| `candle` | Lightweight Charts | `labels` + `candles` (OHLC) | Custom candlestick data |
| `bar` | Recharts | `labels` + `values` | Category comparisons |
| `pie` | Recharts | `labels` + `values` | Proportions |
### Examples
**TradingView live ticker** (no data needed — pulls live market data):
```
[chart:{"type":"tv","symbol":"TVC:GOLD","interval":"W","title":"Gold — Weekly"}]
```
**Line chart with custom data:**
```
[chart:{"type":"line","title":"Monthly Revenue","data":{"labels":["Jan","Feb","Mar","Apr"],"values":[4200,5800,4900,7100]}}]
```
**Multi-series line chart:**
```
[chart:{"type":"line","title":"Growth","data":{"labels":["Q1","Q2","Q3","Q4"],"series":[{"name":"Users","values":[100,250,480,720]},{"name":"Revenue","values":[10,35,90,180]}]}}]
```
**Candlestick chart with OHLC data:**
```
[chart:{"type":"candle","title":"BTC Weekly","data":{"labels":["W1","W2","W3"],"candles":[{"open":42000,"high":44000,"low":41000,"close":43500},{"open":43500,"high":45000,"low":42500,"close":44800},{"open":44800,"high":46000,"low":43000,"close":43200}]}}]
```
**Bar chart:**
```
[chart:{"type":"bar","title":"Revenue by Region","data":{"labels":["US","EU","Asia"],"values":[5200,3800,2900]}}]
```
**Pie chart:**
```
[chart:{"type":"pie","title":"Market Share","data":{"labels":["Chrome","Safari","Firefox"],"values":[65,20,15]}}]
```
### TradingView Symbols
| Asset | Symbol |
|-------|--------|
| Gold | `TVC:GOLD` |
| Silver | `TVC:SILVER` |
| Bitcoin | `BITSTAMP:BTCUSD` |
| Ethereum | `BITSTAMP:ETHUSD` |
| Apple | `NASDAQ:AAPL` |
| Tesla | `NYSE:TSLA` |
| EUR/USD | `FX:EURUSD` |
| S&P 500 | `SP:SPX` |
| US Dollar Index | `TVC:DXY` |
| Uranium | `NYMEX:UX1!` |
**Intervals:** `1` (1min), `5`, `15`, `60`, `D` (daily), `W` (weekly), `M` (monthly). Default: `W`
### Rules for Agents
- Place chart markers on their own line for best rendering
- The marker text is stripped from the visible message — add context before/after
- Keep labels short (they need to fit on chart axes)
- For real financial instruments, prefer `tv` type (live data, interactive, no manual data needed)
- For custom/computed data, use `line`/`area`/`candle`/`bar`/`pie`
- 3–12 data points is the sweet spot for readability
### How Agents Learn About Charts
Unlike TTS markers (which use runtime prompt injection), chart markers are taught to agents via the **`TOOLS.md` workspace file**. Nerve's installer can inject chart documentation into `TOOLS.md` automatically (see PR #218).
Agents that have the chart syntax in their `TOOLS.md` will naturally include `[chart:{...}]` markers when data visualization is appropriate.
### Implementation
- **Parser**: `src/features/charts/extractCharts.ts` — bracket-balanced `[chart:{...}]` extraction with JSON validation
- **Router**: `src/features/charts/InlineChart.tsx` — dispatches to correct renderer by type
- **TradingView**: `src/features/charts/TradingViewWidget.tsx` — official script injection embed
- **Lightweight Charts**: `src/features/charts/LightweightChart.tsx` — line/area/candle with dark theme
- **Recharts**: Lazy-loaded for bar/pie (bundled in `InlineChart.tsx`)
---
## Marker Processing Pipeline
When an agent response arrives, markers are processed in this order:
1. **TTS extraction** — `[tts:...]` markers are extracted and queued for audio playback
2. **Chart extraction** — `[chart:{...}]` markers are extracted and attached to the message object
3. **Image extraction** — Inline image references are extracted
4. **Markdown rendering** — Remaining text is rendered as markdown with syntax highlighting
5. **Tool result rendering** — Tool call sentinels are converted to collapsible `<details>` elements
The cleaned text (with all markers stripped) is what the user sees. Charts render as interactive components below the message text.

1026
docs/API.md Normal file

File diff suppressed because it is too large Load diff

503
docs/ARCHITECTURE.md Normal file
View file

@ -0,0 +1,503 @@
# Architecture
> **Nerve** is a web interface for OpenClaw — chat, voice input, TTS, and agent monitoring in the browser. It connects to the OpenClaw gateway over WebSocket and provides a rich UI for interacting with AI agents.
## System Diagram
```
┌──────────────────────────────────────────────────────────────────┐
│ Browser (React SPA) │
│ │
│ ┌──────────┐ ┌──────────┐ ┌───────────┐ ┌────────────────┐ │
│ │ ChatPanel│ │ Sessions │ │ Workspace │ │ Command Palette│ │
│ └────┬─────┘ └────┬─────┘ └─────┬─────┘ └────────────────┘ │
│ │ │ │ │
│ ┌────┴──────────────┴──────────────┴────────────────────────┐ │
│ │ React Contexts (Gateway, Session, Chat, Settings)│ │
│ └────────────────────────────┬──────────────────────────────┘ │
│ │ WebSocket (/ws proxy) │
└───────────────────────────────┼──────────────────────────────────┘
┌───────────────────────────────┼──────────────────────────────────┐
│ Nerve Server (Hono + Node) │ │
│ │ │
│ ┌────────────────────────────┴─────────────┐ │
│ │ WebSocket Proxy (ws-proxy.ts) │ │
│ │ - Intercepts connect.challenge │ │
│ │ - Injects device identity (Ed25519) │ │
│ └────────────────────────────┬──────────────┘ │
│ │ │
│ ┌───────────────┐ ┌────────┴─────┐ ┌───────────────────────┐ │
│ │ REST API │ │ SSE Stream │ │ Static File Server │ │
│ │ /api/* │ │ /api/events │ │ Vite build → dist/ │ │
│ └───────┬───────┘ └──────────────┘ └───────────────────────┘ │
│ │ │
│ ┌───────┴──────────────────────────────────────────────────┐ │
│ │ Services: TTS (OpenAI, Replicate, Edge), Whisper, │ │
│ │ Claude Usage, TTS Cache, Usage Tracker │ │
│ └──────────────────────────────────────────────────────────┘ │
└──────────────────────────────┬───────────────────────────────────┘
│ HTTP / WS
┌──────────┴──────────┐
│ OpenClaw Gateway │
│ (ws://127.0.0.1: │
│ 18789) │
└─────────────────────┘
```
## Frontend Structure
Built with **React 19**, **TypeScript**, **Vite**, and **Tailwind CSS v4**.
### Entry Point
| File | Purpose |
|------|---------|
| `src/main.tsx` | Mounts the React tree with the provider hierarchy: `ErrorBoundary → StrictMode → GatewayProvider → SettingsProvider → SessionProvider → ChatProvider → App` |
| `src/App.tsx` | Root layout — wires contexts to lazy-loaded panels, manages keyboard shortcuts and command palette |
### Context Providers (State Management)
All global state flows through four React contexts, nested in dependency order:
| Context | File | Responsibilities |
|---------|------|-----------------|
| **GatewayContext** | `src/contexts/GatewayContext.tsx` | WebSocket connection lifecycle, RPC method calls, event fan-out via pub/sub pattern, model/thinking status polling, activity sparkline |
| **SettingsContext** | `src/contexts/SettingsContext.tsx` | Sound, TTS provider/model, wake word, panel ratio, theme, font, telemetry/events visibility. Persists to `localStorage` |
| **SessionContext** | `src/contexts/SessionContext.tsx` | Session list (via gateway RPC), granular agent status tracking (IDLE/THINKING/STREAMING/DONE/ERROR), busy state derivation, unread session tracking, agent log, event log, session CRUD (delete, spawn, rename, abort) |
| **ChatContext** | `src/contexts/ChatContext.tsx` | Chat messages, streaming state, processing stage indicator, activity log (tool calls), send/abort/reset, infinite scroll history, TTS voice fallback |
**Data flow pattern:** Contexts subscribe to gateway events via `GatewayContext.subscribe()`. The `SessionContext` listens for `agent` and `chat` events to update granular status. The `ChatContext` listens for streaming deltas and lifecycle events to render real-time responses.
### Feature Modules
Each feature lives in `src/features/<name>/` with its own components, hooks, types, and operations.
#### `features/chat/`
The main chat interface.
| File | Purpose |
|------|---------|
| `ChatPanel.tsx` | Full chat view — message list with infinite scroll, input bar, streaming indicator, search |
| `InputBar.tsx` | Text input with voice recording, image attachment, tab completion, input history |
| `MessageBubble.tsx` | Renders individual messages (user, assistant, tool, system) with markdown |
| `ToolCallBlock.tsx` | Renders tool call blocks with name, arguments, and results |
| `DiffView.tsx` | Side-by-side diff rendering for file edits |
| `FileContentView.tsx` | Syntax-highlighted file content display |
| `ImageLightbox.tsx` | Full-screen image viewer |
| `SearchBar.tsx` | In-chat message search (Cmd+F) |
| `MemoriesSection.tsx` | Inline memory display within chat |
| `edit-blocks.ts` | Parses edit/diff blocks from tool output |
| `extractImages.ts` | Extracts image content blocks from messages |
| `image-compress.ts` | Client-side image compression before upload |
| `types.ts` | Chat-specific types (`ChatMsg`, `ImageAttachment`) |
| `utils.ts` | Chat utility functions |
| `useMessageSearch.ts` | Hook for message search filtering |
| `operations/` | Pure business logic (no React): `loadHistory.ts`, `sendMessage.ts`, `streamEventHandler.ts` |
| `components/` | Sub-components: `ActivityLog`, `ChatHeader`, `HeartbeatPulse`, `ProcessingIndicator`, `ScrollToBottomButton`, `StreamingMessage`, `ThinkingDots`, `ToolGroupBlock`, `useModelEffort` |
#### `features/sessions/`
Session management sidebar.
| File | Purpose |
|------|---------|
| `SessionList.tsx` | Hierarchical session tree with parent-child relationships |
| `SessionNode.tsx` | Individual session row with status indicator, context menu |
| `SessionInfoPanel.tsx` | Session detail panel (model, tokens, thinking level) |
| `SpawnAgentDialog.tsx` | Dialog for spawning sub-agents with task/model/thinking config |
| `sessionTree.ts` | Builds tree structure from flat session list using `parentId` |
| `statusUtils.ts` | Maps agent status to icons and labels |
#### `features/file-browser/`
Full workspace file browser with tabbed CodeMirror editor.
| File | Purpose |
|------|---------|
| `FileTreePanel.tsx` | Collapsible file tree sidebar with directory expand/collapse |
| `FileTreeNode.tsx` | Individual file/directory row with icon and indent |
| `EditorTabBar.tsx` | Tab bar for open files with close buttons |
| `EditorTab.tsx` | Single editor tab with modified indicator |
| `FileEditor.tsx` | CodeMirror 6 editor — syntax highlighting, line numbers, search, Cmd+S save |
| `TabbedContentArea.tsx` | Manages chat/editor tab switching (chat never unmounts) |
| `editorTheme.ts` | One Dark-inspired CodeMirror theme matching Nerve's dark aesthetic |
| `hooks/useFileTree.ts` | File tree data fetching and directory toggle state |
| `hooks/useOpenFiles.ts` | Open file tab management, save with mtime conflict detection |
| `utils/fileIcons.tsx` | File extension → icon mapping |
| `utils/languageMap.ts` | File extension → CodeMirror language extension mapping |
| `types.ts` | Shared types (FileNode, OpenFile, etc.) |
#### `features/workspace/`
Workspace file editor and management tabs.
| File | Purpose |
|------|---------|
| `WorkspacePanel.tsx` | Container for workspace tabs |
| `WorkspaceTabs.tsx` | Tab switcher (Memory, Config, Crons, Skills) |
| `tabs/MemoryTab.tsx` | View/edit MEMORY.md and daily files |
| `tabs/ConfigTab.tsx` | Edit workspace files (SOUL.md, TOOLS.md, USER.md, etc.) |
| `tabs/CronsTab.tsx` | Cron job management (list, create, toggle, run) |
| `tabs/CronDialog.tsx` | Cron creation/edit dialog |
| `tabs/SkillsTab.tsx` | View installed skills with eligibility status |
| `hooks/useWorkspaceFile.ts` | Fetch/save workspace files via REST API |
| `hooks/useCrons.ts` | Cron CRUD operations via REST API |
| `hooks/useSkills.ts` | Fetch skills list |
#### `features/settings/`
Settings drawer with tabbed sections.
| File | Purpose |
|------|---------|
| `SettingsDrawer.tsx` | Slide-out drawer container |
| `ConnectionSettings.tsx` | Gateway URL/token, reconnect |
| `AudioSettings.tsx` | TTS provider, model, voice, wake word |
| `AppearanceSettings.tsx` | Theme, font selection |
#### `features/tts/`
Text-to-speech integration.
| File | Purpose |
|------|---------|
| `useTTS.ts` | Core TTS hook — speaks text via server `/api/tts` endpoint. Supports OpenAI, Replicate, Edge (default) providers |
| `useTTSConfig.ts` | Server-side TTS voice configuration management |
#### `features/voice/`
Voice input and audio feedback.
| File | Purpose |
|------|---------|
| `useVoiceInput.ts` | Web Speech API integration for voice-to-text with Whisper fallback |
| `audio-feedback.ts` | Notification sounds (ping on response complete) |
#### `features/markdown/`
Markdown rendering pipeline.
| File | Purpose |
|------|---------|
| `MarkdownRenderer.tsx` | `react-markdown` with `remark-gfm`, syntax highlighting via `highlight.js` |
| `CodeBlockActions.tsx` | Copy/run buttons on code blocks |
#### `features/charts/`
Inline chart rendering with three renderers: **TradingView widgets** for live financial data, **Lightweight Charts** for custom time-series and candlestick data, and **Recharts** for bar/pie charts.
| File | Purpose |
|------|---------|
| `InlineChart.tsx` | Chart router — dispatches `[chart:{...}]` markers to the correct renderer based on `type` |
| `extractCharts.ts` | Bracket-balanced parser for `[chart:{...}]` markers. Validates chart data by type (bar, line, pie, area, candle, tv) |
| `LightweightChart.tsx` | Renders line, area, and candlestick charts using `lightweight-charts` (TradingView). Dark theme, gradient fills, crosshair, percentage change badges |
| `TradingViewWidget.tsx` | Embeds TradingView Advanced Chart widget via official script injection for real financial tickers (e.g. `TVC:GOLD`, `BITSTAMP:BTCUSD`) |
**Chart type routing:**
| Type | Renderer | Use case |
|------|----------|----------|
| `tv` | TradingView Widget | Live financial tickers (stocks, crypto, forex, commodities) |
| `line`, `area` | Lightweight Charts | Custom time-series data |
| `candle` | Lightweight Charts | Custom OHLC candlestick data |
| `bar`, `pie` | Recharts | Category comparisons, proportions |
#### `features/command-palette/`
Cmd+K command palette.
| File | Purpose |
|------|---------|
| `CommandPalette.tsx` | Fuzzy-search command list |
| `commands.ts` | Command definitions (new session, reset, theme, TTS, etc.) |
#### `features/connect/`
| File | Purpose |
|------|---------|
| `ConnectDialog.tsx` | Initial gateway connection dialog with auto-connect from `/api/connect-defaults` |
#### `features/activity/`
| File | Purpose |
|------|---------|
| `AgentLog.tsx` | Scrolling agent activity log (tool calls, lifecycle events) |
| `EventLog.tsx` | Raw gateway event stream display |
#### `features/dashboard/`
| File | Purpose |
|------|---------|
| `TokenUsage.tsx` | Token usage and cost display |
| `MemoryList.tsx` | Memory listing component |
| `useLimits.ts` | Claude Code / Codex rate limit polling |
#### `features/memory/`
| File | Purpose |
|------|---------|
| `MemoryEditor.tsx` | Inline memory editing |
| `MemoryItem.tsx` | Individual memory display with edit/delete |
| `AddMemoryDialog.tsx` | Dialog for adding new memories |
| `ConfirmDeleteDialog.tsx` | Delete confirmation |
| `useMemories.ts` | Memory CRUD operations |
### Shared Components
| Path | Purpose |
|------|---------|
| `components/TopBar.tsx` | Header with agent log, token data, event indicators |
| `components/StatusBar.tsx` | Footer with connection state, session count, sparkline, context meter |
| `components/ResizablePanels.tsx` | Draggable split layout (chat left, panels right) |
| `components/ContextMeter.tsx` | Visual context window usage bar |
| `components/ConfirmDialog.tsx` | Reusable confirmation modal |
| `components/ErrorBoundary.tsx` | Top-level error boundary |
| `components/PanelErrorBoundary.tsx` | Per-panel error boundary (isolates failures) |
| `components/NerveLogo.tsx` | SVG logo component |
| `components/skeletons/` | Loading skeleton components (Message, Session, Memory) |
| `components/ui/` | Primitives: `button`, `card`, `dialog`, `input`, `switch`, `scroll-area`, `collapsible`, `AnimatedNumber`, `InlineSelect` |
### Hooks
| Hook | File | Purpose |
|------|------|---------|
| `useWebSocket` | `hooks/useWebSocket.ts` | Core WebSocket management — connect, RPC, auto-reconnect with exponential backoff |
| `useConnectionManager` | `hooks/useConnectionManager.ts` | Auto-connect logic, credential persistence in `sessionStorage` |
| `useDashboardData` | `hooks/useDashboardData.ts` | Fetches memories and token data via REST + SSE |
| `useServerEvents` | `hooks/useServerEvents.ts` | SSE client for `/api/events` |
| `useInputHistory` | `hooks/useInputHistory.ts` | Up/down arrow input history |
| `useTabCompletion` | `hooks/useTabCompletion.ts` | Tab completion for slash commands |
| `useKeyboardShortcuts` | `hooks/useKeyboardShortcuts.ts` | Global keyboard shortcut registration |
| `useGitInfo` | `hooks/useGitInfo.ts` | Git branch/status display |
### Libraries
| File | Purpose |
|------|---------|
| `lib/constants.ts` | App constants: context window limits (with dynamic `getContextLimit()` fallback), wake/stop/cancel phrase builders, attachment limits |
| `lib/themes.ts` | Theme definitions and CSS variable application |
| `lib/fonts.ts` | Font configuration |
| `lib/formatting.ts` | Message formatting utilities |
| `lib/sanitize.ts` | HTML sanitization via DOMPurify |
| `lib/highlight.ts` | Syntax highlighting configuration |
| `lib/utils.ts` | `cn()` classname merge utility (clsx + tailwind-merge) |
| `lib/progress-colors.ts` | Color scales for progress indicators |
| `lib/text/isStructuredMarkdown.ts` | Detects structured markdown for rendering decisions |
---
## Backend Structure
Built with **Hono** (lightweight web framework), **TypeScript**, running on **Node.js ≥22**.
### Entry Point
| File | Purpose |
|------|---------|
| `server/index.ts` | Starts HTTP + HTTPS servers, sets up WebSocket proxy, file watchers, graceful shutdown |
| `server/app.ts` | Hono app definition — middleware stack, route mounting, static file serving with SPA fallback |
### Middleware Stack
Applied in order in `app.ts`:
| Middleware | File | Purpose |
|------------|------|---------|
| Error handler | `middleware/error-handler.ts` | Catches unhandled errors, returns consistent JSON. Shows stack in dev |
| Logger | Hono built-in | Request logging |
| CORS | Hono built-in + custom | Whitelist of localhost origins + `ALLOWED_ORIGINS` env var. Validates via `URL` constructor. Methods: GET, POST, PUT, PATCH, DELETE, OPTIONS |
| Security headers | `middleware/security-headers.ts` | Standard security headers (CSP, X-Frame-Options, etc.) |
| Body limit | Hono built-in | Configurable max body size (from `config.limits.maxBodyBytes`) |
| Compression | Hono built-in | gzip/brotli on all routes **except** SSE (`/api/events`) |
| Cache headers | `middleware/cache-headers.ts` | Hashed assets → immutable, API → no-cache, non-hashed static → must-revalidate |
| Rate limiting | `middleware/rate-limit.ts` | Per-IP sliding window. Separate limits for general API vs TTS/transcribe. Client ID from socket or custom header |
### API Routes
| Route | File | Methods | Purpose |
|-------|------|---------|---------|
| `/health` | `routes/health.ts` | GET | Health check with gateway connectivity probe |
| `/api/connect-defaults` | `routes/connect-defaults.ts` | GET | Pre-fill gateway URL/token for browser. Token only returned for loopback clients |
| `/api/events` | `routes/events.ts` | GET, POST | SSE stream for real-time push (memory.changed, tokens.updated, status.changed, ping). POST for test events |
| `/api/tts` | `routes/tts.ts` | POST | Text-to-speech with provider auto-selection (OpenAI → Replicate → Edge). LRU cache with TTL |
| `/api/tts/config` | `routes/tts.ts` | GET, PUT | TTS voice configuration per provider (read / partial update) |
| `/api/transcribe` | `routes/transcribe.ts` | POST | Audio transcription via OpenAI Whisper or local whisper.cpp (`STT_PROVIDER`). Multipart file upload, MIME validation |
| `/api/agentlog` | `routes/agent-log.ts` | GET, POST | Agent activity log persistence. Zod-validated entries. Mutex-protected file I/O |
| `/api/tokens` | `routes/tokens.ts` | GET | Token usage statistics — scans session transcripts, persists high water mark |
| `/api/memories` | `routes/memories.ts` | GET, POST, DELETE | Memory management — reads MEMORY.md + daily files, stores/deletes via gateway tool invocation |
| `/api/memories/section` | `routes/memories.ts` | GET, PUT | Read/replace a specific memory section by title |
| `/api/gateway/models` | `routes/gateway.ts` | GET | Available models via `openclaw models list`. Allowlist support |
| `/api/gateway/session-info` | `routes/gateway.ts` | GET | Current session model/thinking level |
| `/api/gateway/session-patch` | `routes/gateway.ts` | POST | Change model/effort for a session |
| `/api/server-info` | `routes/server-info.ts` | GET | Server time, gateway uptime, agent name |
| `/api/version` | `routes/version.ts` | GET | Package version from `package.json` |
| `/api/git-info` | `routes/git-info.ts` | GET, POST, DELETE | Git branch/status. Session workdir registration |
| `/api/workspace/:key` | `routes/workspace.ts` | GET, PUT | Read/write workspace files (strict key→file allowlist: soul, tools, identity, user, agents, heartbeat) |
| `/api/crons` | `routes/crons.ts` | GET, POST, PATCH, DELETE | Cron job CRUD via gateway tool invocation |
| `/api/crons/:id/toggle` | `routes/crons.ts` | POST | Toggle cron enabled/disabled |
| `/api/crons/:id/run` | `routes/crons.ts` | POST | Run cron job immediately |
| `/api/crons/:id/runs` | `routes/crons.ts` | GET | Cron run history |
| `/api/skills` | `routes/skills.ts` | GET | List skills via `openclaw skills list --json` |
| `/api/files` | `routes/files.ts` | GET | Serve local image files (MIME-type restricted, directory traversal blocked) |
| `/api/files/tree` | `routes/file-browser.ts` | GET | Workspace directory tree (excludes node_modules, .git, etc.) |
| `/api/files/read` | `routes/file-browser.ts` | GET | Read file contents with mtime for conflict detection |
| `/api/files/write` | `routes/file-browser.ts` | POST | Write file with mtime-based optimistic concurrency (409 on conflict) |
| `/api/claude-code-limits` | `routes/claude-code-limits.ts` | GET | Claude Code rate limits via PTY + CLI parsing |
| `/api/codex-limits` | `routes/codex-limits.ts` | GET | Codex rate limits via OpenAI API with local file fallback |
### Server Libraries
| File | Purpose |
|------|---------|
| `lib/config.ts` | Centralized configuration from env vars — ports, keys, paths, limits. Validated at startup |
| `lib/ws-proxy.ts` | WebSocket proxy — client→gateway with device identity injection (Ed25519 challenge-response) |
| `lib/device-identity.ts` | Ed25519 keypair generation/persistence for gateway auth. Stored in `~/.nerve/device-identity.json` |
| `lib/gateway-client.ts` | HTTP client for gateway tool invocation API (`/tools/invoke`) |
| `lib/file-watcher.ts` | Watches MEMORY.md, `memory/`, and workspace directory (recursive). Broadcasts `file.changed` SSE events for real-time sync |
| `lib/file-utils.ts` | File browser utilities — path validation, directory exclusions, binary file detection |
| `lib/files.ts` | Async file helpers (`readJSON`, `writeJSON`, `readText`) |
| `lib/mutex.ts` | Async mutex for serializing file read-modify-write. Includes keyed mutex variant |
| `lib/cached-fetch.ts` | Generic TTL cache with in-flight request deduplication |
| `lib/usage-tracker.ts` | Persistent token usage high water mark tracking |
| `lib/tts-config.ts` | TTS voice configuration file management |
| `lib/openclaw-bin.ts` | Resolves `openclaw` binary path (env → sibling of node → common paths → PATH) |
### Services
| File | Purpose |
|------|---------|
| `services/openai-tts.ts` | OpenAI TTS API client (gpt-4o-mini-tts, tts-1, tts-1-hd) |
| `services/replicate-tts.ts` | Replicate API client for hosted TTS models (Qwen3-TTS). WAV→MP3 via ffmpeg |
| `services/edge-tts.ts` | Microsoft Edge Read-Aloud TTS via WebSocket protocol. Free, zero-config. Includes Sec-MS-GEC token generation |
| `services/tts-cache.ts` | LRU in-memory TTS cache with TTL expiry (100 MB budget) |
| `services/openai-whisper.ts` | OpenAI Whisper transcription client |
| `services/whisper-local.ts` | Local whisper.cpp STT via `@fugood/whisper.node`. Singleton model context, auto-download from HuggingFace, GPU detection |
| `services/claude-usage.ts` | Claude Code CLI usage/limits parser via node-pty |
---
## Data Flow
### WebSocket Proxy
```
Browser WS → /ws?target=ws://gateway:18789/ws → ws-proxy.ts → OpenClaw Gateway
```
1. Client connects to `/ws` endpoint on Nerve server
2. Proxy validates target URL against `WS_ALLOWED_HOSTS` allowlist
3. Proxy opens upstream WebSocket to the gateway
4. On `connect.challenge` event, proxy intercepts the client's `connect` request and injects Ed25519 device identity (`device` block with signed nonce)
5. After handshake, all messages are transparently forwarded bidirectionally
6. Pending messages are buffered (capped at 100 messages / 1 MB) while upstream connects
### Server-Sent Events (SSE)
```
Browser → GET /api/events → SSE stream (text/event-stream)
```
Events pushed by the server:
- `memory.changed` — File watcher detects MEMORY.md or daily file changes
- `tokens.updated` — Token usage data changed
- `status.changed` — Gateway status changed
- `ping` — Keep-alive every 30 seconds
SSE is excluded from compression middleware to avoid buffering.
### REST API
REST endpoints serve two purposes:
1. **Proxy to gateway** — Routes like `/api/crons`, `/api/memories` (POST/DELETE), `/api/gateway/*` invoke gateway tools via `invokeGatewayTool()`
2. **Local server data** — Routes like `/api/tokens`, `/api/agentlog`, `/api/server-info` read from local files or process info
### Gateway RPC (via WebSocket)
The frontend calls gateway methods via `GatewayContext.rpc()`:
| Method | Purpose |
|--------|---------|
| `status` | Get current agent model, thinking level |
| `sessions.list` | List active sessions |
| `sessions.delete` | Delete a session |
| `sessions.reset` | Clear session context |
| `sessions.patch` | Rename a session |
| `chat.send` | Send a message (with idempotency key) |
| `chat.history` | Load message history |
| `chat.abort` | Abort current generation |
| `connect` | Initial handshake with auth/device identity |
### Event Types (Gateway → Client)
| Event | Payload | Purpose |
|-------|---------|---------|
| `connect.challenge` | `{ nonce }` | Auth handshake initiation |
| `chat` | `{ sessionKey, state, message?, content? }` | Chat state changes: `started`, `delta`, `final`, `error`, `aborted` |
| `agent` | `{ sessionKey, state, stream, data? }` | Agent lifecycle: `lifecycle.start/end/error`, `tool.start/result`, `assistant` stream |
| `cron` | `{ name }` | Cron job triggered |
| `exec.approval.request` | — | Exec approval requested |
| `exec.approval.resolved` | — | Exec approval granted |
| `presence` | — | Presence updates |
---
## Build System
### Development
```bash
npm run dev # Vite dev server (frontend) — port 3080
npm run dev:server # tsx watch (backend) — port 3081
```
Vite proxies `/api` and `/ws` to the backend dev server.
### Production
```bash
npm run prod # Builds frontend + backend, then starts
# Equivalent to:
npm run build # tsc -b && vite build → dist/
npm run build:server # tsc -p config/tsconfig.server.json → server-dist/
npm start # node server-dist/index.js
```
### Vite Configuration
- **Plugins:** `@vitejs/plugin-react`, `@tailwindcss/vite`
- **Path alias:** `@/` → `./src/`
- **Manual chunks:** `react-vendor`, `markdown` (react-markdown + highlight.js), `ui-vendor` (lucide-react), `utils` (clsx, tailwind-merge, dompurify)
- **HTTPS:** Auto-enabled if `certs/cert.pem` and `certs/key.pem` exist
### TypeScript Configuration
Project references with four configs:
- `config/tsconfig.app.json` — Frontend (src/)
- `config/tsconfig.node.json` — Vite/build tooling
- `config/tsconfig.server.json` — Backend (server/) → compiled to `server-dist/`
- `config/tsconfig.scripts.json` — Setup scripts
---
## Testing
**Framework:** Vitest with jsdom environment for React tests.
```bash
npm test # Run all tests
npm run test:coverage # With V8 coverage
```
### Test Files
| Test | Coverage |
|------|----------|
| `src/hooks/useWebSocket.test.ts` | WebSocket connection, RPC, reconnection |
| `src/hooks/useServerEvents.test.ts` | SSE client |
| `src/features/tts/useTTS.test.ts` | TTS hook behavior |
| `src/features/voice/useVoiceInput.test.ts` | Voice input |
| `src/features/voice/audio-feedback.test.ts` | Audio feedback |
| `src/features/sessions/unreadSessions.test.ts` | Unread tracking |
| `src/lib/formatting.test.ts` | Message formatting |
| `src/lib/constants.test.ts` | Constants validation |
| `src/lib/sanitize.test.ts` | HTML sanitization |
| `src/lib/voice-prefix.test.ts` | Voice prefix parsing |
| `server/routes/health.test.ts` | Health endpoint |
| `server/services/tts-cache.test.ts` | TTS cache LRU/TTL |
| `server/middleware/rate-limit.test.ts` | Rate limiting |
| `server/lib/mutex.test.ts` | Async mutex |
### Configuration
- **Environment:** jsdom (browser APIs mocked)
- **Setup:** `src/test/setup.ts`
- **Exclusions:** `node_modules/`, `server-dist/` (avoids duplicate compiled test files)
- **Coverage:** V8 provider, text + HTML + lcov reporters

299
docs/CODE_REVIEW.md Normal file
View file

@ -0,0 +1,299 @@
# Code Review Guide
Standards, patterns, and review checklist for the Nerve codebase.
---
## Coding Standards
### TypeScript
- **Strict mode** enabled across all tsconfig project references
- **Explicit types** on all public interfaces, context values, and hook returns
- **Discriminated unions** for message types (`GatewayEvent | GatewayRequest | GatewayResponse` via `type` field)
- **Typed event payloads** — `AgentEventPayload`, `ChatEventPayload`, `CronEventPayload` instead of `any`
- **Zod validation** on all API request bodies (server-side)
- **No `any`** — use `unknown` with type narrowing
### React
- **Functional components only** — no class components
- **`useCallback` / `useMemo`** on all callbacks and derived values passed to children or used in dependency arrays
- **`React.memo`** is not used broadly; instead, stable references via `useMemo`/`useCallback` prevent unnecessary re-renders
- **Ref-based state access** in callbacks that shouldn't trigger re-registration (e.g., `currentSessionRef`, `isGeneratingRef`, `soundEnabledRef`)
- **ESLint annotations** when intentionally breaking rules: `// eslint-disable-next-line react-hooks/set-state-in-effect -- valid: <reason>`
### Naming
- **Files:** PascalCase for components (`ChatPanel.tsx`), camelCase for hooks/utils (`useWebSocket.ts`, `helpers.ts`)
- **Contexts:** `<Name>Context` with `<Name>Provider` and `use<Name>` hook co-located in same file
- **Feature directories:** kebab-case (`command-palette/`)
- **Types:** PascalCase interfaces/types, `I` prefix NOT used
---
## Architectural Patterns
### 1. Feature-Based Directory Structure
```
src/features/
chat/
ChatPanel.tsx # Main component
components/ # Sub-components
operations/ # Pure business logic (no React)
types.ts # Feature-specific types
utils.ts # Feature utilities
sessions/
workspace/
settings/
tts/
voice/
...
```
Each feature is self-contained. Cross-feature imports go through context providers, not direct imports.
### 2. Context Provider Pattern
Every context follows the same structure:
```tsx
const MyContext = createContext<MyContextValue | null>(null);
export function MyProvider({ children }: { children: ReactNode }) {
// State, effects, callbacks
const value = useMemo<MyContextValue>(() => ({
// All exposed values
}), [/* dependencies */]);
return <MyContext.Provider value={value}>{children}</MyContext.Provider>;
}
export function useMyContext() {
const ctx = useContext(MyContext);
if (!ctx) throw new Error('useMyContext must be used within MyProvider');
return ctx;
}
```
Key characteristics:
- Context value is always `useMemo`-wrapped with explicit type annotation
- `null` default with runtime check in the hook
- Provider, context, and hook co-located in one file (ESLint `react-refresh/only-export-components` disabled with reason)
### 3. Ref-Synchronized State
For callbacks that need current state but shouldn't re-register:
```tsx
const currentSessionRef = useRef(currentSession);
useEffect(() => {
currentSessionRef.current = currentSession;
}, [currentSession]);
// In callbacks: use currentSessionRef.current instead of currentSession
const handleSend = useCallback(async (text: string) => {
await sendChatMessage({ sessionKey: currentSessionRef.current, ... });
}, [rpc]); // Note: currentSession NOT in deps
```
This pattern is used extensively in `ChatContext`, `SessionContext`, and `GatewayContext`.
### 4. Lazy Loading
Heavy components are code-split via `React.lazy`:
```tsx
const SettingsDrawer = lazy(() => import('@/features/settings/SettingsDrawer')
.then(m => ({ default: m.SettingsDrawer })));
const CommandPalette = lazy(() => import('@/features/command-palette/CommandPalette')
.then(m => ({ default: m.CommandPalette })));
const SessionList = lazy(() => import('@/features/sessions/SessionList')
.then(m => ({ default: m.SessionList })));
const WorkspacePanel = lazy(() => import('@/features/workspace/WorkspacePanel')
.then(m => ({ default: m.WorkspacePanel })));
```
Each wrapped in `<Suspense>` and `<PanelErrorBoundary>` for graceful degradation.
### 5. Operations Layer (Pure Logic Extraction)
`ChatContext` delegates to pure functions in `features/chat/operations/`:
```
operations/
index.ts # Re-exports all operations
loadHistory.ts # loadChatHistory()
sendMessage.ts # buildUserMessage(), sendChatMessage()
streamEventHandler.ts # classifyStreamEvent(), extractStreamDelta(), etc.
```
This separates React state management from business logic, making operations testable without rendering.
### 6. Event Fan-Out (Pub/Sub)
`GatewayContext` implements a subscriber pattern:
```tsx
const subscribersRef = useRef<Set<EventHandler>>(new Set());
const subscribe = useCallback((handler: EventHandler) => {
subscribersRef.current.add(handler);
return () => { subscribersRef.current.delete(handler); };
}, []);
// In onEvent:
for (const handler of subscribersRef.current) {
try { handler(msg); } catch (e) { console.error(e); }
}
```
Consumers (`SessionContext`, `ChatContext`) subscribe in `useEffect` and receive all gateway events.
### 7. Smart Session Diffing
`SessionContext.refreshSessions()` preserves object references for unchanged sessions:
```tsx
setSessions(prev => {
const prevMap = new Map(prev.map(s => [getSessionKey(s), s]));
let hasChanges = false;
const merged = newSessions.map(newSession => {
const existing = prevMap.get(key);
if (!existing) { hasChanges = true; return newSession; }
const changed = existing.state !== newSession.state || ...;
if (changed) { hasChanges = true; return newSession; }
return existing; // Preserve reference
});
return hasChanges ? merged : prev;
});
```
### 8. Server Route Pattern (Hono)
Each route file exports a Hono sub-app:
```tsx
const app = new Hono();
app.get('/api/something', rateLimitGeneral, async (c) => { ... });
export default app;
```
Routes are mounted in `app.ts` via `app.route('/', route)`.
### 9. Gateway Tool Invocation
Server routes that need gateway interaction use the shared client:
```tsx
import { invokeGatewayTool } from '../lib/gateway-client.js';
const result = await invokeGatewayTool('cron', { action: 'list' });
```
### 10. Mutex-Protected File I/O
File operations that need atomicity use the mutex:
```tsx
import { createMutex } from '../lib/mutex.js';
const withLock = createMutex();
await withLock(async () => {
const data = await readJSON(file, []);
data.push(entry);
await writeJSON(file, data);
});
```
### 11. Cached Fetch with Deduplication
Expensive operations use `createCachedFetch` which deduplicates in-flight requests:
```tsx
const fetchLimits = createCachedFetch(
() => expensiveApiCall(),
5 * 60 * 1000, // 5 min TTL
{ isValid: (result) => result.available }
);
```
---
## Server-Side Patterns
### Security
- **CORS:** Strict origin allowlist — only localhost variants and explicitly configured origins
- **Token exposure:** Gateway token only returned to loopback clients (`/api/connect-defaults`)
- **File serving:** MIME-type allowlist + directory traversal prevention + allowed prefix check
- **Body limits:** Configurable per-route (general API vs transcribe uploads)
- **Rate limiting:** Per-IP sliding window with separate limits for expensive operations
- **Credentials:** `sessionStorage` (not `localStorage`) for gateway auth — cleared on tab close
- **Input validation:** Zod schemas on all POST/PUT request bodies
### Graceful Shutdown
`server/index.ts` handles SIGTERM/SIGINT:
1. Stop file watchers
2. Close all WebSocket connections
3. Close HTTP + HTTPS servers
4. Force exit after 5s drain timeout
### Dual HTTP/HTTPS
Server runs on both HTTP (port 3080) and HTTPS (port 3443). HTTPS auto-enables if `certs/cert.pem` + `certs/key.pem` exist. HTTPS is required for:
- Microphone access (secure context)
- WSS proxy (encrypted WebSocket)
The HTTPS server manually converts Node.js `req`/`res` to `fetch` `Request`/`Response` for Hono compatibility, with special handling for SSE streaming.
---
## Review Checklist
### All PRs
- [ ] TypeScript strict — no `any`, no `@ts-ignore`
- [ ] All new API endpoints have rate limiting middleware
- [ ] All POST/PUT bodies validated with Zod
- [ ] New state in contexts is `useMemo`/`useCallback`-wrapped
- [ ] No secrets in client-side code or localStorage
- [ ] Error boundaries around lazy-loaded or side-panel components
- [ ] Tests for new utilities/hooks (at minimum)
### Frontend PRs
- [ ] New components follow feature directory structure
- [ ] Heavy components are lazy-loaded if not needed at initial render
- [ ] Callbacks use `useCallback` if passed as props or in dependency arrays
- [ ] State-setting in effects has ESLint annotation with justification
- [ ] No direct cross-feature imports (use contexts)
- [ ] Cleanup functions in `useEffect` for subscriptions/timers/RAF
- [ ] Keyboard shortcuts registered via `useKeyboardShortcuts`
### Backend PRs
- [ ] Routes export a Hono sub-app, mounted in `app.ts`
- [ ] File I/O wrapped in mutex when read-modify-write
- [ ] Gateway calls use `invokeGatewayTool()` from shared client
- [ ] Expensive fetches wrapped in `createCachedFetch`
- [ ] SSE-aware: don't break compression exclusion for `/api/events`
- [ ] CORS: new endpoints automatically covered by global middleware
- [ ] Security: file serving paths validated against allowlist
### Performance
- [ ] No unnecessary re-renders (check with React DevTools Profiler)
- [ ] Session list uses smart diffing (preserves references)
- [ ] Streaming updates use `requestAnimationFrame` batching
- [ ] Large data (history) uses infinite scroll, not full render
- [ ] Activity sparkline and polling respect `document.visibilityState`
### Accessibility
- [ ] Skip-to-content link present (`<a href="#main-chat" class="sr-only">`)
- [ ] Dialogs have proper focus management
- [ ] Keyboard navigation works for all interactive elements
- [ ] Color contrast meets WCAG AA (themes should preserve this)

262
docs/CONFIGURATION.md Normal file
View file

@ -0,0 +1,262 @@
# Configuration
Nerve is configured via a `.env` file in the project root. All variables have sensible defaults — only `GATEWAY_TOKEN` is strictly required.
---
## Setup Wizard
The interactive setup wizard is the recommended way to configure Nerve:
```bash
npm run setup # Interactive setup (5 steps)
npm run setup -- --check # Validate existing config & test gateway
npm run setup -- --defaults # Non-interactive with auto-detected values
npm run setup -- --help # Show help
```
### Wizard Steps
The wizard walks through **5 sections**:
#### 1. Gateway Connection
Connects Nerve to your OpenClaw gateway. The wizard auto-detects the gateway token from:
1. Existing `.env` (`GATEWAY_TOKEN`)
2. Environment variable `OPENCLAW_GATEWAY_TOKEN`
3. `~/.openclaw/openclaw.json` (auto-detected)
Tests the connection before proceeding. If the gateway is unreachable, you can continue anyway.
#### 2. Agent Identity
Sets the `AGENT_NAME` displayed in the UI.
#### 3. Access Mode
Determines how you'll access Nerve. The wizard auto-configures `HOST`, `ALLOWED_ORIGINS`, `WS_ALLOWED_HOSTS`, and `CSP_CONNECT_EXTRA` based on your choice:
| Mode | Bind | Description |
|------|------|-------------|
| **Localhost** | `127.0.0.1` | Only accessible from this machine. Safest option. |
| **Tailscale** | `0.0.0.0` | Accessible from your Tailscale network. Auto-detected if Tailscale is running. Sets CORS + CSP for your Tailscale IP. |
| **Network (LAN)** | `0.0.0.0` | Accessible from your local network. Prompts for your LAN IP. Sets CORS + CSP for that IP. |
| **Custom** | Manual | Full manual control: custom port, bind address, HTTPS certificate generation, CORS. |
**HTTPS (Custom mode only):** The wizard can generate self-signed certificates via `openssl` and configure `SSL_PORT`.
#### 4. TTS Configuration (Optional)
Prompts for optional API keys:
- `OPENAI_API_KEY` — enables OpenAI TTS + Whisper transcription
- `REPLICATE_API_TOKEN` — enables Qwen TTS via Replicate (warns if `ffmpeg` is missing)
Edge TTS always works without any keys.
#### 5. Advanced Settings (Optional)
Custom file paths for `MEMORY_PATH`, `MEMORY_DIR`, `SESSIONS_DIR`. Most users skip this.
### Modes Summary
| Flag | Behavior |
|------|----------|
| *(none)* | Full interactive wizard. If `.env` exists, asks whether to update or start fresh. |
| `--check` | Validates all config values, tests gateway connectivity, and exits. Non-destructive. |
| `--defaults` | Auto-detects gateway token, applies defaults for everything else, writes `.env`. No prompts. |
The wizard backs up existing `.env` files (e.g. `.env.bak.1708100000000`) before overwriting and applies `chmod 600` to both `.env` and backup files.
---
## Environment Variables
### Server
| Variable | Default | Description |
|----------|---------|-------------|
| `PORT` | `3080` | HTTP server port |
| `SSL_PORT` | `3443` | HTTPS server port (requires certificates at `certs/cert.pem` and `certs/key.pem`) |
| `HOST` | `127.0.0.1` | Bind address. Set to `0.0.0.0` for network access. **Warning:** exposes the API to the network |
```env
PORT=3080
SSL_PORT=3443
HOST=127.0.0.1
```
### Gateway (Required)
| Variable | Default | Required | Description |
|----------|---------|----------|-------------|
| `GATEWAY_TOKEN` | — | **Yes** | Authentication token for the OpenClaw gateway. Found in `~/.openclaw/openclaw.json` or via `openclaw gateway status` |
| `GATEWAY_URL` | `http://127.0.0.1:18789` | No | Gateway HTTP endpoint URL |
```env
GATEWAY_TOKEN=your-token-here
GATEWAY_URL=http://127.0.0.1:18789
```
> **Note:** `OPENCLAW_GATEWAY_TOKEN` is also accepted as a fallback for `GATEWAY_TOKEN`.
### Agent Identity
| Variable | Default | Description |
|----------|---------|-------------|
| `AGENT_NAME` | `Agent` | Display name shown in the UI header and server info |
```env
AGENT_NAME=Friday
```
### API Keys (Optional)
| Variable | Description |
|----------|-------------|
| `OPENAI_API_KEY` | Enables OpenAI TTS (multiple voices) and Whisper audio transcription |
| `REPLICATE_API_TOKEN` | Enables Replicate-hosted TTS models (e.g. Qwen TTS). Requires `ffmpeg` for WAV→MP3 |
```env
OPENAI_API_KEY=sk-...
REPLICATE_API_TOKEN=r8_...
```
TTS provider fallback chain (when no explicit provider is requested):
1. **OpenAI** — if `OPENAI_API_KEY` is set
2. **Replicate** — if `REPLICATE_API_TOKEN` is set
3. **Edge TTS** — always available, no API key needed (default for new installs)
### Speech-to-Text (STT)
| Variable | Default | Description |
|----------|---------|-------------|
| `STT_PROVIDER` | `openai` | STT provider: `openai` (requires `OPENAI_API_KEY`) or `local` (whisper.cpp, no API key needed) |
| `WHISPER_MODEL` | `tiny.en` | Local whisper model: `tiny.en` (75 MB), `base.en` (142 MB), or `small.en` (466 MB) |
| `WHISPER_MODEL_DIR` | `~/.nerve/models/` | Directory for downloaded whisper model files |
```env
# Use local speech-to-text (no API key needed)
STT_PROVIDER=local
WHISPER_MODEL=tiny.en
```
Local STT requires `ffmpeg` for audio format conversion (webm/ogg → 16kHz mono WAV). The installer handles this automatically. Models are downloaded from HuggingFace on first use.
### Network & Security
| Variable | Default | Description |
|----------|---------|-------------|
| `ALLOWED_ORIGINS` | *(localhost only)* | Additional CORS origins, comma-separated. Normalised via `URL` constructor; `"null"` origins are rejected |
| `CSP_CONNECT_EXTRA` | *(none)* | Additional CSP `connect-src` entries, space-separated. Only `http://`, `https://`, `ws://`, `wss://` schemes accepted. Semicolons and newlines are stripped to prevent directive injection |
| `WS_ALLOWED_HOSTS` | `localhost,127.0.0.1,::1` | Additional WebSocket proxy allowed hostnames, comma-separated |
| `TRUSTED_PROXIES` | `127.0.0.1,::1,::ffff:127.0.0.1` | IP addresses trusted to set `X-Forwarded-For` / `X-Real-IP` headers, comma-separated |
```env
# Tailscale example
ALLOWED_ORIGINS=http://100.64.0.5:3080
CSP_CONNECT_EXTRA=http://100.64.0.5:3080 ws://100.64.0.5:3080
WS_ALLOWED_HOSTS=100.64.0.5
# Behind nginx reverse proxy
TRUSTED_PROXIES=127.0.0.1,::1,10.0.0.1
```
### File Paths
| Variable | Default | Description |
|----------|---------|-------------|
| `MEMORY_PATH` | `~/.openclaw/workspace/MEMORY.md` | Path to the agent's long-term memory file |
| `MEMORY_DIR` | `~/.openclaw/workspace/memory/` | Directory for daily memory files (`YYYY-MM-DD.md`) |
| `SESSIONS_DIR` | `~/.openclaw/agents/main/sessions/` | Session transcript directory (scanned for token usage) |
| `USAGE_FILE` | `~/.openclaw/token-usage.json` | Persistent cumulative token usage data |
| `WORKSPACE_ROOT` | *(auto-detected)* | Allowed base directory for git workdir registration. Auto-derived from `git worktree list` or parent of `process.cwd()` |
```env
MEMORY_PATH=/custom/path/MEMORY.md
MEMORY_DIR=/custom/path/memory/
SESSIONS_DIR=/custom/path/sessions/
```
### TTS Cache
| Variable | Default | Description |
|----------|---------|-------------|
| `TTS_CACHE_TTL_MS` | `3600000` (1 hour) | Time-to-live for cached TTS audio in milliseconds |
| `TTS_CACHE_MAX` | `200` | Maximum number of cached TTS entries (in-memory LRU) |
```env
TTS_CACHE_TTL_MS=7200000
TTS_CACHE_MAX=500
```
### Development
| Variable | Description |
|----------|-------------|
| `NODE_ENV` | Set to `development` to enable the `POST /api/events/test` debug endpoint and verbose error logging |
---
## HTTPS
Nerve automatically starts an HTTPS server on `SSL_PORT` when certificates exist at:
```
certs/cert.pem # Certificate
certs/key.pem # Private key
```
Generate self-signed certificates:
```bash
mkdir -p certs
openssl req -x509 -newkey rsa:2048 \
-keyout certs/key.pem -out certs/cert.pem \
-days 365 -nodes -subj '/CN=localhost'
```
Or use the setup wizard's Custom access mode, which generates them automatically if `openssl` is available.
> **Why HTTPS?** Browser microphone access (`getUserMedia`) requires a [secure context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts). On `localhost` this works over HTTP, but network access requires HTTPS.
---
## Minimal `.env` Example
```env
GATEWAY_TOKEN=abc123def456
```
Everything else uses defaults. This is sufficient for local-only usage.
## Full `.env` Example
```env
# Gateway (required)
GATEWAY_TOKEN=abc123def456
GATEWAY_URL=http://127.0.0.1:18789
# Server
PORT=3080
SSL_PORT=3443
HOST=0.0.0.0
AGENT_NAME=Friday
# API Keys
OPENAI_API_KEY=sk-...
REPLICATE_API_TOKEN=r8_...
# Network (Tailscale example)
ALLOWED_ORIGINS=http://100.64.0.5:3080
CSP_CONNECT_EXTRA=http://100.64.0.5:3080 ws://100.64.0.5:3080
WS_ALLOWED_HOSTS=100.64.0.5
# TTS Cache
TTS_CACHE_TTL_MS=3600000
TTS_CACHE_MAX=200
# Custom Paths (optional)
MEMORY_PATH=/home/user/.openclaw/workspace/MEMORY.md
MEMORY_DIR=/home/user/.openclaw/workspace/memory/
SESSIONS_DIR=/home/user/.openclaw/agents/main/sessions/
```

341
docs/SECURITY.md Normal file
View file

@ -0,0 +1,341 @@
# Security
Nerve is designed as a **local-first** web UI for an AI agent. Its security model assumes the server runs on a trusted machine and is accessed by its owner. It is **not** designed for multi-tenant or public-internet deployment without an additional reverse proxy and authentication layer.
---
## Table of Contents
- [Threat Model](#threat-model)
- [Authentication & Access Control](#authentication--access-control)
- [CORS Policy](#cors-policy)
- [Security Headers](#security-headers)
- [Rate Limiting](#rate-limiting)
- [Input Validation](#input-validation)
- [File Serving Security](#file-serving-security)
- [WebSocket Proxy Security](#websocket-proxy-security)
- [Body Size Limits](#body-size-limits)
- [Path Traversal Prevention](#path-traversal-prevention)
- [TLS / HTTPS](#tls--https)
- [Token & Secret Handling](#token--secret-handling)
- [Client-Side Security](#client-side-security)
- [Configuration File Security](#configuration-file-security)
- [Reporting Vulnerabilities](#reporting-vulnerabilities)
---
## Threat Model
### In Scope
| Threat | Mitigation |
|--------|------------|
| **Cross-site request forgery (CSRF)** | CORS allowlist restricts cross-origin requests. Only explicitly configured origins are allowed. |
| **Cross-site scripting (XSS)** | CSP `script-src 'self'` blocks inline/injected scripts (exception: `s3.tradingview.com` for chart widgets). HTML content is sanitised with DOMPurify on the client. |
| **Clickjacking** | `X-Frame-Options: DENY` and CSP `frame-ancestors 'none'` prevent embedding in iframes. |
| **Network sniffing** | Optional HTTPS with HSTS (`max-age=31536000; includeSubDomains`). |
| **Abuse / resource exhaustion** | Per-IP rate limiting on all API endpoints. Global body size limits. Rate limit store capped at 10,000 entries. |
| **Directory traversal** | Resolved absolute paths checked against strict prefix allowlists. Symlinks resolved and re-checked. |
| **Symlink escape** | `/api/files` resolves symlinks via `fs.realpathSync()` and re-validates the real path against allowed prefixes. |
| **Gateway token exfiltration** | Token only returned via `/api/connect-defaults` to loopback clients. Remote clients receive `null`. |
| **Spoofed client IPs** | Rate limiter uses the real TCP socket address. `X-Forwarded-For` only trusted from configured `TRUSTED_PROXIES`. |
| **MIME sniffing** | `X-Content-Type-Options: nosniff` on all responses. |
| **CSP directive injection** | `CSP_CONNECT_EXTRA` is sanitised: semicolons and newlines stripped, only `http(s)://` and `ws(s)://` schemes accepted. |
| **Malformed CORS origins** | `ALLOWED_ORIGINS` entries are normalised via `new URL()`. Malformed entries and `"null"` origins are silently rejected. |
### Out of Scope
- **Multi-user authentication** — Nerve has no user accounts or login system. Access is controlled at the network level (localhost binding, firewall, VPN).
- **End-to-end encryption** — TLS covers transport; at-rest encryption of memory files or session data is not provided.
- **DDoS protection** — The in-memory rate limiter handles casual abuse but is not designed for sustained attacks. Use a reverse proxy (nginx, Cloudflare) for production exposure.
---
## Authentication & Access Control
Nerve does **not** implement its own auth layer. Security is enforced through:
1. **Localhost binding** — The server binds to `127.0.0.1` by default. Only local processes can connect.
2. **CORS allowlist** — Browsers enforce the Origin check. Only configured origins receive CORS headers.
3. **Gateway token isolation** — The sensitive `GATEWAY_TOKEN` is only exposed to loopback clients via `/api/connect-defaults`.
4. **Session storage** — The frontend stores the gateway token in `sessionStorage` (cleared when the tab closes), not `localStorage`.
When exposing Nerve to a network (`HOST=0.0.0.0`), consider:
- Using a VPN (Tailscale, WireGuard) — the setup wizard has first-class Tailscale support
- Placing Nerve behind a reverse proxy with authentication (nginx + basic auth, OAuth proxy, etc.)
- Restricting access with firewall rules
---
## CORS Policy
CORS is enforced on all requests via Hono's CORS middleware.
**Default allowed origins** (auto-configured):
- `http://localhost:{PORT}`
- `https://localhost:{SSL_PORT}`
- `http://127.0.0.1:{PORT}`
- `https://127.0.0.1:{SSL_PORT}`
**Additional origins** via `ALLOWED_ORIGINS` env var (comma-separated). Each entry is normalised through the `URL` constructor:
```env
ALLOWED_ORIGINS=http://100.64.0.5:3080,https://my-server.tailnet.ts.net:3443
```
**Allowed methods:** `GET`, `POST`, `PUT`, `DELETE`, `OPTIONS`
**Allowed headers:** `Content-Type`, `Authorization`
**Credentials:** Enabled (`credentials: true`)
Requests with no `Origin` header (same-origin, non-browser) are allowed through.
---
## Security Headers
Applied to **every response** via the `securityHeaders` middleware:
| Header | Value | Purpose |
|--------|-------|---------|
| `Content-Security-Policy` | See below | Defense-in-depth against XSS |
| `X-Frame-Options` | `DENY` | Prevent clickjacking |
| `X-Content-Type-Options` | `nosniff` | Prevent MIME type sniffing |
| `X-XSS-Protection` | `1; mode=block` | Legacy XSS filter for older browsers |
| `Strict-Transport-Security` | `max-age=31536000; includeSubDomains` | Enforce HTTPS for 1 year |
| `Referrer-Policy` | `strict-origin-when-cross-origin` | Control referrer leakage |
| `Cache-Control` | `no-store` | Default for all responses (overridden by cache middleware for assets) |
### Content Security Policy
> **Implementation note:** CSP directives are built lazily on first request (not at module import time) to avoid race conditions with `dotenv/config` load order. The computed directives are then cached for the lifetime of the process.
```
default-src 'self';
script-src 'self' https://s3.tradingview.com;
style-src 'self' 'unsafe-inline' https://fonts.googleapis.com;
font-src 'self' https://fonts.gstatic.com;
connect-src 'self' ws://localhost:* wss://localhost:* http://localhost:* https://localhost:*
ws://127.0.0.1:* wss://127.0.0.1:* http://127.0.0.1:* https://127.0.0.1:*
[CSP_CONNECT_EXTRA];
img-src 'self' data: blob:;
media-src 'self' blob:;
frame-src https://s3.tradingview.com https://www.tradingview.com
https://www.tradingview-widget.com https://s.tradingview.com;
frame-ancestors 'none';
base-uri 'self';
form-action 'self'
```
**TradingView domains:** The `script-src` and `frame-src` entries for TradingView are required for the inline `tv` chart type, which uses TradingView's official widget embed script. The script injects iframes from multiple TradingView subdomains.
The `connect-src` directive can be extended via `CSP_CONNECT_EXTRA` (space-separated). Input is sanitised:
- Semicolons (`;`) and newlines (`\r`, `\n`) are stripped to prevent directive injection
- Only entries matching `http://`, `https://`, `ws://`, or `wss://` schemes are accepted
---
## Rate Limiting
In-memory sliding window rate limiter applied to all `/api/*` routes.
### Presets
| Preset | Limit | Window | Applied To |
|--------|-------|--------|------------|
| TTS | 10 requests | 60 seconds | `POST /api/tts` |
| Transcribe | 30 requests | 60 seconds | `POST /api/transcribe` |
| General | 60 requests | 60 seconds | All other `/api/*` routes |
### Implementation Details
- **Per-client, per-path** — Each unique `clientIP:path` combination gets its own sliding window.
- **Client identification** — Uses the real TCP socket address from `getConnInfo()`. **Not** spoofable via request headers.
- **Trusted proxies** — `X-Forwarded-For` and `X-Real-IP` are only honoured when the direct connection comes from an IP in `TRUSTED_PROXIES` (default: loopback addresses only). Extend via the `TRUSTED_PROXIES` env var.
- **Store cap** — The rate limit store is capped at **10,000 entries** to prevent memory amplification from spoofed IPs (when behind a trusted proxy). When full, the oldest entry is evicted.
- **Cleanup** — Expired timestamps are purged every 5 minutes.
### Response Headers
Every response includes:
```
X-RateLimit-Limit: 60
X-RateLimit-Remaining: 57
```
When rate-limited (HTTP 429):
```
Retry-After: 42
X-RateLimit-Limit: 60
X-RateLimit-Remaining: 0
X-RateLimit-Reset: 1708100060
```
---
## Input Validation
All POST/PUT endpoints validate request bodies with [Zod](https://zod.dev/) schemas:
| Endpoint | Validated Fields |
|----------|-----------------|
| `POST /api/tts` | `text` (1–5000 chars, non-empty), `provider` (enum), `voice`, `model` |
| `PUT /api/tts/config` | Strict key allowlist per section, string values only, max 2000 chars |
| `POST /api/transcribe` | File presence, size (≤12 MB), MIME type allowlist |
| `POST /api/agentlog` | Optional typed fields (`ts`, `type`, `message`, `level`, `data`) |
| `POST /api/memories` | `text` (1–10000 chars), `section` (≤200), `category` (enum), `importance` (0–1) |
| `PUT /api/memories/section` | `title` (1–200), `content` (≤50000), `date` (YYYY-MM-DD regex) |
| `DELETE /api/memories` | `query` (1–1000), `type` (enum), `date` (YYYY-MM-DD regex) |
| `PUT /api/workspace/:key` | `content` (string, ≤100 KB), `key` checked against strict allowlist |
| `POST /api/git-info/workdir` | `sessionKey` (non-empty), `workdir` (non-empty, validated against allowed base) |
Validation errors return **HTTP 400** with the first Zod issue message as plain text or JSON.
---
## File Serving Security
The `GET /api/files` endpoint serves local image files with multiple layers of protection:
### 1. MIME Type Allowlist
Only image files are served:
| Extension | MIME Type |
|-----------|-----------|
| `.png` | `image/png` |
| `.jpg`, `.jpeg` | `image/jpeg` |
| `.gif` | `image/gif` |
| `.webp` | `image/webp` |
| `.svg` | `image/svg+xml` |
| `.avif` | `image/avif` |
Non-image file types return **403 Not an allowed file type**.
### 2. Directory Prefix Allowlist
Files are only served from these directories:
| Prefix | Source |
|--------|--------|
| `/tmp` | Hardcoded |
| `~/.openclaw` | Derived from `os.homedir()` |
| `MEMORY_DIR` | From configuration |
The request path is resolved to an absolute path via `path.resolve()`, blocking `..` traversal. The resolved path must start with one of the allowed prefixes (with a path separator check to prevent `/tmp-evil` matching `/tmp`).
### 3. Symlink Traversal Protection
After the prefix check passes, the file's **real path** is resolved via `fs.realpathSync()`. The real path is then re-checked against the same prefix allowlist. This prevents:
- Symlinks inside `/tmp` pointing to `/etc/passwd`
- Symlinks inside `~/.openclaw` pointing outside the allowed tree
If the real path falls outside allowed prefixes → **403 Access denied**.
### 4. Path Canonicalisation
The `~` prefix in input paths is expanded to `os.homedir()` before resolution, preventing home directory confusion.
---
## WebSocket Proxy Security
The WebSocket proxy (connecting the frontend to the OpenClaw gateway) restricts target hostnames:
**Default allowed hosts:** `localhost`, `127.0.0.1`, `::1`
**Extend via** `WS_ALLOWED_HOSTS` env var (comma-separated):
```env
WS_ALLOWED_HOSTS=my-server.tailnet.ts.net,100.64.0.5
```
This prevents the proxy from being used to connect to arbitrary external hosts.
---
## Body Size Limits
| Scope | Limit | Enforced By |
|-------|-------|-------------|
| Global (`/api/*`) | ~13 MB (12 MB + 1 MB overhead) | Hono `bodyLimit` middleware |
| TTS text | 5,000 characters | Zod schema |
| Transcription file | 12 MB | Application check |
| Agent log entry | 64 KB | Config constant |
| Workspace file write | 100 KB | Application check |
| Memory text | 10,000 characters | Zod schema |
| Memory section content | 50,000 characters | Zod schema |
| TTS config field | 2,000 characters | Application check |
Exceeding the global body limit returns **413 Request body too large**.
---
## Path Traversal Prevention
Multiple layers prevent directory traversal attacks:
| Route | Mechanism |
|-------|-----------|
| `/api/files` | `path.resolve()` + prefix allowlist + symlink resolution + re-check |
| `/api/memories` (date params) | Regex validation: `/^\d{4}-\d{2}-\d{2}$/` — prevents injection in file paths |
| `/api/workspace/:key` | Strict key→filename allowlist (`soul`→`SOUL.md`, etc.) — no user-controlled paths |
| `/api/git-info/workdir` | Resolved path checked against allowed base directory (derived from git worktrees or `WORKSPACE_ROOT`). Exact match or child-path check with separator guard |
---
## TLS / HTTPS
Nerve automatically starts an HTTPS server alongside HTTP when certificates are present:
```
certs/cert.pem # X.509 certificate
certs/key.pem # RSA/EC private key
```
HSTS is always sent (`max-age=31536000; includeSubDomains`), even over HTTP. Browsers that have previously visited over HTTPS will refuse HTTP connections for 1 year.
> **Microphone access** requires a [secure context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts). On `localhost` HTTP works, but network access requires HTTPS.
---
## Token & Secret Handling
| Secret | Storage | Exposure |
|--------|---------|----------|
| `GATEWAY_TOKEN` | `.env` file (chmod 600) | Only returned to loopback clients via `/api/connect-defaults`. Never logged. |
| `OPENAI_API_KEY` | `.env` file | Used server-side only. Never sent to clients. |
| `REPLICATE_API_TOKEN` | `.env` file | Used server-side only. Never sent to clients. |
| Gateway token (client) | `sessionStorage` | Cleared when browser tab closes. Not persisted to disk. |
The setup wizard applies `chmod 600` to `.env` and backup files, restricting read access to the file owner.
---
## Client-Side Security
| Measure | Details |
|---------|---------|
| **DOMPurify** | All rendered HTML (agent messages, markdown) passes through DOMPurify with a strict tag/attribute allowlist |
| **Session storage** | Gateway token stored in `sessionStorage`, not `localStorage` — cleared on tab close |
| **CSP enforcement** | `script-src 'self' https://s3.tradingview.com` blocks inline scripts and limits external scripts to TradingView chart widgets only |
| **No eval** | No use of `eval()`, `Function()`, or `innerHTML` with unsanitised content |
---
## Configuration File Security
The setup wizard:
1. Writes `.env` atomically (via temp file + rename)
2. Applies `chmod 600` to `.env` and backup files
3. Cleans up `.env.tmp` on interruption (Ctrl+C handler)
4. Backs up existing `.env` before overwriting (timestamped `.env.bak.*`)
---
## Reporting Vulnerabilities
If you find a security issue, please open a GitHub issue or contact the maintainers directly. Do not disclose vulnerabilities publicly before they are addressed.

421
docs/TROUBLESHOOTING.md Normal file
View file

@ -0,0 +1,421 @@
# Troubleshooting
Common issues and solutions for Nerve.
---
## Build Errors
### `tsc -b` fails with path alias errors
**Symptom:** `Cannot find module '@/...'` during TypeScript compilation.
**Cause:** The `@/` alias must be configured in both `tsconfig.json` (for editor) and the relevant project reference config.
**Fix:** Ensure the root `tsconfig.json` has:
```json
{
"compilerOptions": {
"baseUrl": ".",
"paths": { "@/*": ["./src/*"] }
}
}
```
### `npm run build:server` produces nothing
**Symptom:** `server-dist/` is empty or `npm start` fails with "Cannot find module".
**Cause:** Server TypeScript is compiled separately via `tsc -p config/tsconfig.server.json`.
**Fix:**
```bash
npm run build:server # Compiles server/ → server-dist/
npm start # Then runs node server-dist/index.js
```
### Chunk size warnings during `vite build`
**Symptom:** Vite warns about chunks exceeding 500 kB.
**Cause:** Heavy dependencies (highlight.js, react-markdown) are bundled.
**Info:** This is expected. The build uses manual chunks to split: `react-vendor`, `markdown`, `ui-vendor`, `utils`. The warning limit is set to 600 kB in `vite.config.ts`. If a chunk exceeds this, check for accidental imports pulling in large libraries.
### Port already in use
**Symptom:** `Port 3080 is already in use. Is another instance running?`
**Fix:**
```bash
# Find what's using the port
lsof -i :3080
# Kill it, or use a different port:
PORT=3090 npm start
```
The server detects `EADDRINUSE` and exits with a clear error (see `server/index.ts`).
---
## Gateway Connection
### "Auth failed" in ConnectDialog
**Symptom:** Connection dialog shows "Auth failed: unknown" or similar.
**Causes:**
1. Wrong gateway token
2. Gateway not running
3. Token mismatch between Nerve server config and gateway
**Fix:**
- Verify the gateway is running: `openclaw gateway status`
- Check token: the server reads `GATEWAY_TOKEN` or `OPENCLAW_GATEWAY_TOKEN` env var
- For local access, `/api/connect-defaults` auto-provides the token (loopback only)
- For remote access, the token is NOT auto-provided (security). Enter it manually in the connection dialog
### Connection drops and "SIGNAL LOST" banner
**Symptom:** Red reconnecting banner appears periodically.
**Cause:** WebSocket connection to gateway dropped. Nerve auto-reconnects with exponential backoff (1s base, 30s max, up to 50 attempts).
**Diagnosis:**
```bash
# Check gateway health
curl http://127.0.0.1:18789/health
# Check Nerve health (includes gateway probe)
curl http://127.0.0.1:3080/health
# Returns: { "status": "ok", "uptime": ..., "gateway": "ok"|"unreachable" }
```
**Fix:**
- If gateway is unreachable, restart it: `openclaw gateway restart`
- If persistent, check firewall rules or network configuration
- The client stores credentials in `sessionStorage` (cleared on tab close) — if credentials are lost, reconnect manually
### Auto-connect doesn't work
**Symptom:** ConnectDialog appears even though the gateway is running.
**Cause:** The frontend fetches `/api/connect-defaults` on mount. This endpoint only returns the token for loopback clients (127.0.0.1, ::1).
**Fix:**
- If accessing Nerve remotely (SSH tunnel, reverse proxy), you must enter the gateway URL and token manually
- Alternatively, set the gateway URL in the connection dialog — the server's WebSocket proxy handles the actual connection
---
## WebSocket Proxy
### WebSocket connects but no events arrive
**Symptom:** UI shows "connected" but sessions/messages don't update.
**Cause:** The WS proxy (`server/lib/ws-proxy.ts`) might not be injecting device identity correctly, so the gateway doesn't grant `operator.read`/`operator.write` scopes.
**Diagnosis:**
- Check server logs for `[ws-proxy] Injected device identity: ...`
- If missing, the device identity file may be corrupted
**Fix:**
```bash
# Remove and regenerate device identity
rm ~/.nerve/device-identity.json
# Restart Nerve — a new keypair will be generated
```
### "Target not allowed" WebSocket error
**Symptom:** Browser console shows WebSocket close code 1008 with "Target not allowed".
**Cause:** The gateway URL hostname is not in the `WS_ALLOWED_HOSTS` allowlist (configured in `server/lib/config.ts`).
**Fix:** By default, only `127.0.0.1`, `localhost`, and `::1` are allowed. To add a custom host:
```bash
WS_ALLOWED_HOSTS=mygateway.local npm start
```
### Messages buffered indefinitely
**Symptom:** Messages sent immediately after connecting are lost.
**Info:** The proxy buffers up to 100 messages (1 MB) while the upstream gateway connection opens. If the buffer overflows, the client is disconnected with "Too many pending messages". This is a safety limit — reduce message burst rate.
---
## TTS Issues
### No audio plays
**Symptom:** TTS is enabled but no sound on responses.
**Diagnosis tree:**
1. **Sound enabled?** Check Settings → Audio → Sound toggle is on
2. **TTS provider configured?** Check Settings → Audio → TTS Provider
3. **API key present?**
- OpenAI: requires `OPENAI_API_KEY` env var
- Replicate: requires `REPLICATE_API_TOKEN` env var
- Edge: no key needed (free)
4. **Server-side check:**
```bash
curl -X POST http://127.0.0.1:3080/api/tts \
-H "Content-Type: application/json" \
-d '{"text": "hello", "provider": "edge"}'
```
Should return audio/mpeg binary.
**Provider auto-fallback:** If no explicit provider is selected, the server tries: OpenAI (if key) → Replicate (if key) → Edge (always available).
### TTS plays old/wrong responses
**Symptom:** Audio doesn't match the displayed message.
**Cause:** TTS cache serving stale entries. The cache is an LRU with TTL expiry (configurable via `config.ttsCacheTtlMs`), 100 MB memory budget.
**Fix:** Restart the Nerve server to clear the in-memory TTS cache.
### Edge TTS fails silently
**Symptom:** Edge TTS selected but no audio. No error in UI.
**Cause:** Edge TTS uses Microsoft's speech service WebSocket. The Sec-MS-GEC token generation or the WebSocket connection may fail.
**Diagnosis:** Check server logs for `[edge-tts]` errors.
**Fix:** Edge TTS has no API key dependency, but requires outbound WebSocket access to `speech.platform.bing.com`. Ensure your network allows this.
---
## Voice Input / Wake Word
### Microphone not working
**Symptom:** Voice input button does nothing or permission denied.
**Cause:** Microphone requires a **secure context** (HTTPS or localhost).
**Fix:**
- If accessing via `http://127.0.0.1:3080` — should work (localhost is secure)
- If accessing remotely, use HTTPS:
```bash
# Generate self-signed cert
mkdir -p certs
openssl req -x509 -newkey rsa:2048 -nodes \
-keyout certs/key.pem -out certs/cert.pem -days 365 \
-subj "/CN=localhost"
# Nerve auto-detects certs and starts HTTPS on port 3443
```
### Whisper transcription fails
**Symptom:** Voice input records but transcription returns error.
**Cause:** Transcription uses OpenAI Whisper API (requires `OPENAI_API_KEY`).
**Fix:**
- Ensure `OPENAI_API_KEY` is set in `.env` or environment
- Check file size: max 12 MB (configurable in `config.limits.transcribe`)
- Check MIME type: must be one of: `audio/webm`, `audio/mp3`, `audio/mpeg`, `audio/mp4`, `audio/m4a`, `audio/wav`, `audio/ogg`, `audio/flac`
### Wake word doesn't trigger
**Symptom:** Wake word toggle is on but voice detection never activates.
**Cause:** Wake word state is managed collaboratively — the InputBar component reports wake word state to SettingsContext via `handleWakeWordState(enabled, toggleFn)`.
**Fix:**
- Ensure microphone permissions are granted
- Try toggling wake word off and on via Settings or Cmd+K command palette
- Check browser console for speech recognition errors
---
## Memory Editing
### Memory changes don't appear
**Symptom:** Added/deleted memories don't reflect in the UI.
**Cause:** Memory operations go through the gateway tool invocation (`memory_store`/`memory_delete`), then the file watcher detects changes and broadcasts an SSE event.
**Diagnosis:**
1. Check POST/DELETE to `/api/memories` returns `{ ok: true }`
2. Check server logs for `[file-watcher]` events
3. Check SSE stream: `curl -N http://127.0.0.1:3080/api/events`
**Fix:**
- If the gateway tool call fails, check gateway connectivity
- If file watcher isn't firing, the memory file path may be wrong — check `config.memoryPath`
- Manual refresh: click the refresh button in the Memory tab, or use Cmd+K → "Refresh Memory"
### Memory file path is wrong
**Symptom:** Memories show as empty even though MEMORY.md exists.
**Cause:** The server resolves memory path from config (`config.memoryPath`). The workspace path is the parent of the memory path.
**Fix:** Check and set the correct path:
```bash
# In .env
MEMORY_PATH=/path/to/.openclaw/workspace/MEMORY.md
```
---
## Session Management
### Sessions don't appear in sidebar
**Symptom:** Session list is empty or shows only the main session.
**Cause:** Sessions are fetched via gateway RPC `sessions.list` with `activeMinutes: 120` filter.
**Fix:**
- Sessions inactive for >2 hours won't appear — this is by design
- Check gateway connectivity (sessions come from the gateway, not local state)
- Force refresh: click refresh button or Cmd+K → "Refresh Sessions"
### Sub-agent spawn times out
**Symptom:** "Timed out waiting for subagent to spawn" error.
**Cause:** Spawning uses a polling approach — sends a `[spawn-subagent]` chat message to the main session, then polls `sessions.list` every 2s for up to 30s waiting for a new subagent session to appear.
**Fix:**
- The main agent must be running and able to process the spawn request
- Check that the main session isn't busy with another task
- Check gateway logs for spawn errors
### Session status stuck on "THINKING"
**Symptom:** Session shows thinking/spinning indefinitely.
**Cause:** The agent state machine transitions THINKING → STREAMING → DONE → IDLE. If a lifecycle event was missed, the status can get stuck.
**Fix:**
- Use the abort button (or Ctrl+C when generating) to reset the state
- The DONE → IDLE auto-transition happens after 3 seconds (see `doneTimeoutsRef` in SessionContext)
- Force refresh sessions to re-sync from gateway
---
## Model Switching
### Model dropdown doesn't show available models
**Symptom:** Model selector is empty or shows only the current model.
**Cause:** Models are fetched via `GET /api/gateway/models`, which runs `openclaw models list --json`.
**Fix:**
- Ensure the `openclaw` binary is in PATH (the server searches multiple locations — see `lib/openclaw-bin.ts`)
- Set `OPENCLAW_BIN` env var to the explicit path
- Check server logs for model list errors
- An allowlist can restrict visible models (configured server-side)
### Model change doesn't take effect
**Symptom:** Switched model in UI but responses still come from the old model.
**Cause:** Model/thinking changes go through `POST /api/gateway/session-patch`, which invokes the gateway's session patch API.
**Fix:**
- The change applies per-session — switching sessions will show that session's model
- Verify the patch succeeded: check for `{ ok: true }` response
- Some models may not be available for the current session type
---
## Rate Limiting
### "Too many requests" errors
**Symptom:** API returns 429 status.
**Cause:** Per-IP sliding window rate limiter. Different limits for:
- General API endpoints
- TTS synthesis (more restrictive)
- Transcription (more restrictive)
**Fix:**
- Wait for the rate limit window to reset (check `X-RateLimit-Reset` header)
- If behind a reverse proxy, ensure `X-Forwarded-For` is set correctly (the server only trusts forwarded headers from trusted proxy IPs)
---
## HTTPS / SSL
### Certificate errors
**Symptom:** Browser shows SSL warnings or refuses to connect on port 3443.
**Fix:** For development, generate a self-signed cert:
```bash
mkdir -p certs
openssl req -x509 -newkey rsa:2048 -nodes \
-keyout certs/key.pem -out certs/cert.pem -days 365 \
-subj "/CN=localhost"
```
The server auto-detects cert files at `certs/cert.pem` and `certs/key.pem`. No configuration needed — if the files exist, HTTPS starts on `config.sslPort` (default 3443).
### SSE not working over HTTPS
**Symptom:** Real-time updates (memory changes, token updates) don't arrive over HTTPS.
**Cause:** The HTTPS server has special handling for SSE responses — it streams them instead of buffering (see `server/index.ts` SSE streaming fix).
**Fix:** This should work automatically. If it doesn't, check that:
- The response content-type includes `text/event-stream`
- No intermediate reverse proxy is buffering the response
- The compression middleware correctly skips `/api/events`
---
## Development
### `npm run dev` — proxy errors
**Symptom:** API requests fail with 502 during development.
**Cause:** Vite proxies `/api` and `/ws` to the backend server. If the backend isn't running, all proxied requests fail.
**Fix:** Run both servers:
```bash
# Terminal 1
npm run dev:server # Backend on port 3081
# Terminal 2
npm run dev # Frontend on port 3080 (proxies to 3081)
```
### Tests fail with "Cannot find module"
**Symptom:** Vitest can't resolve `@/` imports.
**Fix:** The test config (`vitest.config.ts`) must have the same path alias:
```ts
resolve: {
alias: {
'@': path.resolve(__dirname, './src'),
},
},
```
Also ensure `server-dist/` is excluded from test discovery (it contains compiled `.test.js` duplicates):
```ts
test: {
exclude: ['node_modules/**', 'server-dist/**'],
}
```
---
## Known Limitations
### Desktop browsers only
Nerve is designed for desktop browsers. There is no mobile-responsive layout yet. On phones and tablets the UI will be unusable or heavily clipped. This is a known gap, tracked in [#107](https://github.com/daggerhashimoto/openclaw-nerve/issues/107).

View file

@ -0,0 +1,171 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="148 148 104 104" width="400" height="400">
<defs>
<filter id="glow" x="-100%" y="-100%" width="300%" height="300%">
<feGaussianBlur stdDeviation="4" result="blur"/>
<feComposite in="SourceGraphic" in2="blur" operator="over"/>
</filter>
<filter id="glow-lg" x="-100%" y="-100%" width="300%" height="300%">
<feGaussianBlur stdDeviation="8" result="blur"/>
<feComposite in="SourceGraphic" in2="blur" operator="over"/>
</filter>
<filter id="glow-xl" x="-100%" y="-100%" width="300%" height="300%">
<feGaussianBlur stdDeviation="12" result="blur"/>
<feMerge>
<feMergeNode in="blur"/>
<feMergeNode in="SourceGraphic"/>
</feMerge>
</filter>
</defs>
<style>
@keyframes pulse-center {
0%, 100% { opacity: 0.4; r: 14; }
25% { opacity: 1; r: 18; }
50% { opacity: 0.6; r: 15; }
}
@keyframes pulse-center-core {
0%, 100% { opacity: 0.3; }
25% { opacity: 0.9; }
50% { opacity: 0.4; }
}
@keyframes pulse-node {
0%, 100% { opacity: 0.3; r: 9; }
50% { opacity: 0.85; r: 12; }
}
@keyframes pulse-node-core {
0%, 100% { opacity: 0.15; }
50% { opacity: 0.7; }
}
@keyframes spoke-pulse {
0%, 100% { opacity: 0.15; stroke-width: 2; }
50% { opacity: 0.5; stroke-width: 3; }
}
@keyframes ring-pulse {
0%, 100% { opacity: 0.12; stroke-width: 1.5; }
50% { opacity: 0.4; stroke-width: 2.5; }
}
@keyframes ripple {
0% { r: 14; opacity: 0.6; stroke-width: 3; }
100% { r: 50; opacity: 0; stroke-width: 0.5; }
}
/* Single particle: center → top → clockwise around hex → fade → 2.5s wait */
@keyframes travel-clockwise {
0% { offset-distance: 0%; opacity: 0; }
2% { opacity: 1; }
40% { offset-distance: 100%; opacity: 1; }
43% { opacity: 0; }
100% { opacity: 0; }
}
.center-glow { animation: pulse-center 2.8s ease-in-out infinite; }
.center-core { animation: pulse-center-core 2.8s ease-in-out infinite; }
.node-glow { animation: pulse-node 2.8s ease-in-out infinite; }
.node-core { animation: pulse-node-core 2.8s ease-in-out infinite; }
.spoke { animation: spoke-pulse 2.8s ease-in-out infinite; }
.ring { animation: ring-pulse 2.8s ease-in-out infinite; }
.ripple-center { animation: ripple 2.8s ease-out infinite; }
.n0 .node-glow, .n0 .node-core { animation-delay: 0s; }
.n1 .node-glow, .n1 .node-core { animation-delay: 0.47s; }
.n2 .node-glow, .n2 .node-core { animation-delay: 0.93s; }
.n3 .node-glow, .n3 .node-core { animation-delay: 1.4s; }
.n4 .node-glow, .n4 .node-core { animation-delay: 1.87s; }
.n5 .node-glow, .n5 .node-core { animation-delay: 2.33s; }
.s0 { animation-delay: 0s; }
.s1 { animation-delay: 0.47s; }
.s2 { animation-delay: 0.93s; }
.s3 { animation-delay: 1.4s; }
.s4 { animation-delay: 1.87s; }
.s5 { animation-delay: 2.33s; }
.r0 { animation-delay: 0.2s; }
.r1 { animation-delay: 0.67s; }
.r2 { animation-delay: 1.13s; }
.r3 { animation-delay: 1.6s; }
.r4 { animation-delay: 2.07s; }
.r5 { animation-delay: 2.53s; }
.particle {
filter: url(#glow-lg);
offset-path: path('M200,200 L200,180 L217,190 L217,210 L200,220 L183,210 L183,190 L200,180');
animation: travel-clockwise 4.3s ease-in-out infinite;
}
@media (prefers-reduced-motion: reduce) {
* { animation: none !important; }
}
</style>
<!-- Background dim spokes -->
<line x1="200" y1="200" x2="200" y2="180" stroke="#301C0E" stroke-width="2" stroke-linecap="round"/>
<line x1="200" y1="200" x2="217" y2="190" stroke="#301C0E" stroke-width="2" stroke-linecap="round"/>
<line x1="200" y1="200" x2="217" y2="210" stroke="#301C0E" stroke-width="2" stroke-linecap="round"/>
<line x1="200" y1="200" x2="200" y2="220" stroke="#301C0E" stroke-width="2" stroke-linecap="round"/>
<line x1="200" y1="200" x2="183" y2="210" stroke="#301C0E" stroke-width="2" stroke-linecap="round"/>
<line x1="200" y1="200" x2="183" y2="190" stroke="#301C0E" stroke-width="2" stroke-linecap="round"/>
<!-- Background dim hex ring -->
<polygon points="200,180 217,190 217,210 200,220 183,210 183,190" fill="none" stroke="#301C0E" stroke-width="1.5" stroke-linejoin="round"/>
<!-- Glow spokes -->
<line x1="200" y1="200" x2="200" y2="180" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="spoke s0"/>
<line x1="200" y1="200" x2="217" y2="190" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="spoke s1"/>
<line x1="200" y1="200" x2="217" y2="210" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="spoke s2"/>
<line x1="200" y1="200" x2="200" y2="220" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="spoke s3"/>
<line x1="200" y1="200" x2="183" y2="210" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="spoke s4"/>
<line x1="200" y1="200" x2="183" y2="190" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="spoke s5"/>
<!-- Glow hex ring segments -->
<line x1="200" y1="180" x2="217" y2="190" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="ring r0"/>
<line x1="217" y1="190" x2="217" y2="210" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="ring r1"/>
<line x1="217" y1="210" x2="200" y2="220" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="ring r2"/>
<line x1="200" y1="220" x2="183" y2="210" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="ring r3"/>
<line x1="183" y1="210" x2="183" y2="190" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="ring r4"/>
<line x1="183" y1="190" x2="200" y2="180" stroke="#FF8C32" stroke-linecap="round" filter="url(#glow)" class="ring r5"/>
<!-- Single traveling particle: center → top → clockwise -->
<circle r="4" fill="#FF8C32" class="particle"/>
<!-- Center ripple -->
<circle cx="200" cy="200" fill="none" stroke="#FF8C32" class="ripple-center"/>
<!-- Center node -->
<circle cx="200" cy="200" r="14" fill="#FF8C32" filter="url(#glow-xl)" class="center-glow"/>
<circle cx="200" cy="200" r="7" fill="#FFDCB4" class="center-core"/>
<!-- Background dim dots -->
<circle cx="200" cy="180" r="9" fill="#301C0E"/>
<circle cx="217" cy="190" r="9" fill="#301C0E"/>
<circle cx="217" cy="210" r="9" fill="#301C0E"/>
<circle cx="200" cy="220" r="9" fill="#301C0E"/>
<circle cx="183" cy="210" r="9" fill="#301C0E"/>
<circle cx="183" cy="190" r="9" fill="#301C0E"/>
<!-- Outer nodes -->
<g class="n0">
<circle cx="200" cy="180" r="9" fill="#FF8C32" filter="url(#glow-lg)" class="node-glow"/>
<circle cx="200" cy="180" r="4.5" fill="#FFDCB4" class="node-core"/>
</g>
<g class="n1">
<circle cx="217" cy="190" r="9" fill="#FF8C32" filter="url(#glow-lg)" class="node-glow"/>
<circle cx="217" cy="190" r="4.5" fill="#FFDCB4" class="node-core"/>
</g>
<g class="n2">
<circle cx="217" cy="210" r="9" fill="#FF8C32" filter="url(#glow-lg)" class="node-glow"/>
<circle cx="217" cy="210" r="4.5" fill="#FFDCB4" class="node-core"/>
</g>
<g class="n3">
<circle cx="200" cy="220" r="9" fill="#FF8C32" filter="url(#glow-lg)" class="node-glow"/>
<circle cx="200" cy="220" r="4.5" fill="#FFDCB4" class="node-core"/>
</g>
<g class="n4">
<circle cx="183" cy="210" r="9" fill="#FF8C32" filter="url(#glow-lg)" class="node-glow"/>
<circle cx="183" cy="210" r="4.5" fill="#FFDCB4" class="node-core"/>
</g>
<g class="n5">
<circle cx="183" cy="190" r="9" fill="#FF8C32" filter="url(#glow-lg)" class="node-glow"/>
<circle cx="183" cy="190" r="4.5" fill="#FFDCB4" class="node-core"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 7.7 KiB

BIN
docs/nerve-logo-dark.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 MiB

BIN
docs/nerve-logo-light.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 MiB

BIN
docs/screenshot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 790 KiB

39
eslint.config.js Normal file
View file

@ -0,0 +1,39 @@
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'
export default defineConfig([
globalIgnores(['dist', 'server-dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
js.configs.recommended,
tseslint.configs.recommended,
reactHooks.configs.flat.recommended,
reactRefresh.configs.vite,
],
languageOptions: {
ecmaVersion: 2020,
globals: globals.browser,
},
rules: {
// Enforce exhaustive deps for useEffect, useCallback, useMemo
// This prevents stale closure bugs and ensures effects run when dependencies change
'react-hooks/exhaustive-deps': 'warn',
},
},
// Node.js scripts (setup CLI, etc.) — use Node globals, not browser
{
files: ['scripts/**/*.ts'],
languageOptions: {
globals: globals.node,
},
rules: {
'react-hooks/exhaustive-deps': 'off',
'react-refresh/only-export-components': 'off',
},
},
])

19
index.html Normal file
View file

@ -0,0 +1,19 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Nerve</title>
<link rel="icon" type="image/svg+xml" href="/favicon.svg">
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32.png">
<link rel="icon" type="image/png" sizes="256x256" href="/favicon-256.png">
<link rel="apple-touch-icon" sizes="256x256" href="/favicon-256.png">
<!-- Fonts are lazy-loaded by src/lib/fonts.ts when selected -->
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

944
install.sh Executable file
View file

@ -0,0 +1,944 @@
#!/usr/bin/env bash
# ──────────────────────────────────────────────────────────────────────
# Nerve Installer — one-command setup for the Nerve web interface
#
# Usage:
# curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh | bash
#
# Or with options:
# curl -fsSL ... | bash -s -- --dir ~/nerve --branch main
# ──────────────────────────────────────────────────────────────────────
set -euo pipefail
# ── Defaults ──────────────────────────────────────────────────────────
INSTALL_DIR="${NERVE_INSTALL_DIR:-${HOME}/nerve}"
BRANCH="master"
REPO="https://github.com/daggerhashimoto/openclaw-nerve.git"
NODE_MIN=22
SKIP_SETUP=false
DRY_RUN=false
GATEWAY_TOKEN=""
ENV_MISSING=false
# ── Colors ────────────────────────────────────────────────────────────
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
ORANGE='\033[38;5;208m'
BOLD='\033[1m'
DIM='\033[2m'
NC='\033[0m'
RAIL="${DIM}${NC}"
ok() { echo -e " ${RAIL} ${GREEN}${NC} $*"; }
warn() { echo -e " ${RAIL} ${YELLOW}${NC} $*"; }
fail() { echo -e " ${RAIL} ${RED}${NC} $*"; }
info() { echo -e " ${RAIL} ${CYAN}${NC} $*"; }
dry() { echo -e " ${RAIL} ${YELLOW}${NC} ${DIM}[dry-run]${NC} $*"; }
# ── Helpers ────────────────────────────────────────────────────────────
# Detect OS family once
IS_MAC=false; IS_DEBIAN=false; IS_FEDORA=false
if [[ "$(uname -s)" == "Darwin" ]]; then IS_MAC=true;
elif command -v apt-get &>/dev/null; then IS_DEBIAN=true;
elif command -v dnf &>/dev/null || command -v yum &>/dev/null; then IS_FEDORA=true; fi
# Display a copy-pasteable command hint
hint() { echo -e " ${RAIL}"; echo -e " ${RAIL} ${BOLD}$1${NC}"; echo -e " ${RAIL}"; }
cmd() { echo -e " ${RAIL} ${CYAN}\$ $1${NC}"; }
# Animated dots while a background process runs
# Usage: run_with_dots "message" command arg1 arg2 ...
# Sets RWD_EXIT to the command's exit code after completion.
run_with_dots() {
local msg="$1"; shift
printf " ${RAIL} ${CYAN}${NC} %s " "$msg"
"$@" &
local pid=$!
while kill -0 "$pid" 2>/dev/null; do
printf "."
sleep 1
done
wait "$pid"
RWD_EXIT=$?
echo ""
return $RWD_EXIT
}
STAGE_CURRENT=0
STAGE_TOTAL=5
stage() {
STAGE_CURRENT=$((STAGE_CURRENT + 1))
if [[ $STAGE_CURRENT -gt 1 ]]; then
echo -e " ${RAIL}"
fi
echo -e " ${ORANGE}${NC} ${ORANGE}${BOLD}${1}${NC} ${DIM}[${STAGE_CURRENT}/${STAGE_TOTAL}]${NC}"
echo -e " ${RAIL}"
}
stage_done() {
echo -e " ${RAIL}"
}
# ── Parse args ────────────────────────────────────────────────────────
while [[ $# -gt 0 ]]; do
case "$1" in
--dir) [[ $# -ge 2 ]] || { echo "Missing value for --dir"; exit 1; }; INSTALL_DIR="$2"; shift 2 ;;
--branch) [[ $# -ge 2 ]] || { echo "Missing value for --branch"; exit 1; }; BRANCH="$2"; shift 2 ;;
--repo) [[ $# -ge 2 ]] || { echo "Missing value for --repo"; exit 1; }; REPO="$2"; shift 2 ;;
--skip-setup) SKIP_SETUP=true; shift ;;
--dry-run) DRY_RUN=true; shift ;;
--gateway-token) [[ $# -ge 2 ]] || { echo "Missing value for --gateway-token"; exit 1; }; GATEWAY_TOKEN="$2"; shift 2 ;;
--help|-h)
echo "Nerve Installer"
echo ""
echo "Options:"
echo " --dir <path> Install directory (default: ~/nerve)"
echo " --branch <name> Git branch (default: master)"
echo " --repo <url> Git repo URL"
echo " --skip-setup Skip the interactive setup wizard"
echo " --gateway-token <t> Gateway token (for non-interactive installs)"
echo " --dry-run Simulate the install without changing anything"
echo " --help Show this help"
exit 0
;;
*) echo "Unknown option: $1"; exit 1 ;;
esac
done
# ── Detect interactive mode ───────────────────────────────────────────
# When piped via curl | bash, stdin is the pipe — but /dev/tty still
# provides access to the controlling terminal for interactive prompts.
# We check readable+writable (like OpenClaw's installer does).
INTERACTIVE=false
if [[ -t 0 ]]; then
INTERACTIVE=true
elif [[ -r /dev/tty && -w /dev/tty ]]; then
INTERACTIVE=true
fi
# ── Banner ────────────────────────────────────────────────────────────
echo ""
echo -e " ${ORANGE}██████ █████ ██████████ ███████████ █████ █████ ██████████${NC}"
echo -e " ${ORANGE}░░██████ ░░███ ░░███░░░░░█░░███░░░░░███ ░░███ ░░███ ░░███░░░░░█${NC}"
echo -e " ${ORANGE} ░███░███ ░███ ░███ █ ░ ░███ ░███ ░███ ░███ ░███ █ ░${NC}"
echo -e " ${ORANGE} ░███░░███░███ ░██████ ░██████████ ░███ ░███ ░██████${NC}"
echo -e " ${ORANGE} ░███ ░░██████ ░███░░█ ░███░░░░░███ ░░███ ███ ░███░░█${NC}"
echo -e " ${ORANGE} ░███ ░░█████ ░███ ░ █ ░███ ░███ ░░░█████░ ░███ ░ █${NC}"
echo -e " ${ORANGE} █████ ░░█████ ██████████ █████ █████ ░░███ ██████████${NC}"
echo -e " ${ORANGE}░░░░░ ░░░░░ ░░░░░░░░░░ ░░░░░ ░░░░░ ░░░ ░░░░░░░░░░${NC}"
echo ""
echo -e " ${DIM} Web interface for OpenClaw${NC}"
echo ""
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${BOLD} ⊘ DRY RUN — nothing will be modified${NC}"
echo ""
fi
echo -e " ${DIM}${NC}"
# ── Check: OpenClaw installed ─────────────────────────────────────────
# Succeeds when the openclaw CLI is on PATH or in a well-known install
# location (nvm / homebrew / system / volta / fnm); in the latter case
# its directory is prepended to PATH. Otherwise prints install
# instructions and exits 1.
check_openclaw() {
  if command -v openclaw &>/dev/null; then
    local version
    version=$(openclaw --version 2>/dev/null | head -1 || echo "unknown")
    ok "OpenClaw found: ${version}"
    return 0
  fi
  # Not on PATH — probe the usual install locations (globs expand here).
  local candidate
  for candidate in \
    "${HOME}/.nvm/versions/node/"*/bin/openclaw \
    /opt/homebrew/bin/openclaw \
    /usr/local/bin/openclaw \
    /usr/bin/openclaw \
    "${HOME}/.volta/bin/openclaw" \
    "${HOME}/.fnm/aliases/default/bin/openclaw"; do
    [[ -x "$candidate" ]] || continue
    ok "OpenClaw found: ${candidate}"
    # Make the binary's directory visible to the rest of this script.
    export PATH="$(dirname "$candidate"):$PATH"
    return 0
  done
  fail "OpenClaw not found"
  echo ""
  hint "Install OpenClaw:"
  cmd "npm install -g openclaw"
  echo ""
  echo -e " ${RAIL} ${DIM}Docs: https://github.com/openclaw/openclaw${NC}"
  echo ""
  exit 1
}
# ── Check: Node.js ────────────────────────────────────────────────────
# Requires Node >= NODE_MIN (presumably set near the top of the script —
# confirm when viewing the full file). Exits 1 with install/upgrade
# guidance tailored to how Node was (or should be) installed.
check_node() {
if ! command -v node &>/dev/null; then
fail "Node.js not found — version ${NODE_MIN}+ is required"
echo ""
hint "Install Node.js via nvm (recommended):"
cmd "curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash"
cmd "source ~/.bashrc"
cmd "nvm install ${NODE_MIN}"
echo ""
if $IS_MAC; then
echo -e " ${RAIL} ${DIM}Or via Homebrew: brew install node@${NODE_MIN}${NC}"
elif $IS_DEBIAN; then
echo -e " ${RAIL} ${DIM}Or via apt: https://deb.nodesource.com${NC}"
fi
echo ""
exit 1
fi
local node_ver
# Strip the leading "v" from e.g. "v22.1.0".
node_ver=$(node -v | sed 's/^v//')
local node_major
# Compare on the major version only.
node_major=$(echo "$node_ver" | cut -d. -f1)
if [[ "$node_major" -ge "$NODE_MIN" ]]; then
ok "Node.js v${node_ver} (≥${NODE_MIN} required)"
else
fail "Node.js v${node_ver} — version ${NODE_MIN}+ is required"
echo ""
# Detect how Node was installed and suggest the right upgrade
local node_path
node_path=$(which node 2>/dev/null || echo "")
if [[ "$node_path" == *".nvm/"* ]]; then
hint "Upgrade via nvm:"
cmd "nvm install ${NODE_MIN}"
cmd "nvm use ${NODE_MIN}"
elif [[ "$node_path" == *"homebrew"* || "$node_path" == *"Cellar"* ]]; then
hint "Upgrade via Homebrew:"
cmd "brew install node@${NODE_MIN}"
elif $IS_DEBIAN; then
hint "Upgrade via nvm (recommended):"
cmd "curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash"
cmd "nvm install ${NODE_MIN}"
else
hint "Upgrade Node.js:"
cmd "nvm install ${NODE_MIN}"
fi
echo ""
exit 1
fi
}
# Verify npm is available (it normally ships with Node.js); on failure,
# print recovery guidance and exit 1.
check_npm() {
  if ! command -v npm &>/dev/null; then
    fail "npm not found — it ships with Node.js"
    echo ""
    hint "Reinstall Node.js to get npm:"
    cmd "nvm install ${NODE_MIN}"
    echo ""
    echo -e " ${RAIL} ${DIM}If using a system package, npm may be separate: sudo apt install npm${NC}"
    echo ""
    exit 1
  fi
  ok "npm $(npm -v 2>/dev/null)"
}
# Verify git is available (needed to clone/update the repo); on failure,
# print per-platform install guidance and exit 1.
check_git() {
  if ! command -v git &>/dev/null; then
    fail "git not found — required to clone the repo"
    echo ""
    if $IS_MAC; then
      hint "Install git:"
      cmd "xcode-select --install"
      echo -e " ${RAIL} ${DIM}Or: brew install git${NC}"
    elif $IS_DEBIAN; then
      hint "Install git:"
      cmd "sudo apt install git"
    elif $IS_FEDORA; then
      hint "Install git:"
      cmd "sudo dnf install git"
    else
      hint "Install git:"
      cmd "sudo apt install git"
      echo -e " ${RAIL} ${DIM}Or use your system's package manager${NC}"
    fi
    echo ""
    exit 1
  fi
  ok "git $(git --version 2>/dev/null | awk '{print $3}')"
}
# ── Check: Build tools (needed for node-pty native compilation) ───────
# Verifies make + g++ are present; auto-installs on Debian/Ubuntu (apt)
# and macOS (Xcode Command Line Tools). On other systems prints manual
# instructions and exits 1.
check_build_tools() {
  if command -v make &>/dev/null && command -v g++ &>/dev/null; then
    ok "Build tools available (make, g++)"
    return 0
  fi
  warn "Build tools (make, g++) not found — required for native modules"
  # Auto-install on Debian/Ubuntu
  if command -v apt-get &>/dev/null; then
    if [[ "$DRY_RUN" == "true" ]]; then
      dry "Would install build-essential via apt"
      return 0
    fi
    # BUGFIX: apt-get needs root. Previously this ran bare apt-get, which
    # failed silently for non-root users. Use sudo when not root — matching
    # the ffmpeg install later in this script.
    local apt_sudo=""
    [[ $EUID -ne 0 ]] && apt_sudo="sudo"
    run_with_dots "Installing build tools" bash -c "${apt_sudo} DEBIAN_FRONTEND=noninteractive apt-get update -qq &>/dev/null && ${apt_sudo} DEBIAN_FRONTEND=noninteractive apt-get install -y -qq build-essential &>/dev/null"
    if command -v make &>/dev/null && command -v g++ &>/dev/null; then
      ok "Build tools installed"
      return 0
    else
      fail "Failed to install build-essential"
      # Fall through to the manual-instructions block below.
    fi
  fi
  # Auto-install on macOS via Xcode Command Line Tools
  if [[ "$(uname -s)" == "Darwin" ]]; then
    if [[ "$DRY_RUN" == "true" ]]; then
      dry "Would install Xcode Command Line Tools"
      return 0
    fi
    info "Installing Xcode Command Line Tools (this may take a few minutes)..."
    xcode-select --install 2>/dev/null || true
    # Wait for the install to complete — xcode-select --install is async (opens GUI dialog)
    printf " ${RAIL} ${CYAN}${NC} Waiting for Xcode CLT "
    until xcode-select -p &>/dev/null; do
      printf "."
      sleep 5
    done
    echo ""
    if command -v make &>/dev/null; then
      ok "Xcode Command Line Tools installed"
      return 0
    else
      fail "Xcode CLT install did not provide build tools"
    fi
  fi
  # Can't auto-install — tell the user
  echo ""
  echo -e " Install build tools manually:"
  echo -e " ${CYAN}Debian/Ubuntu:${NC} sudo apt install build-essential"
  echo -e " ${CYAN}Fedora/RHEL:${NC} sudo dnf groupinstall 'Development Tools'"
  echo -e " ${CYAN}macOS:${NC} xcode-select --install"
  echo ""
  exit 1
}
# ── Check: Gateway reachable ──────────────────────────────────────────
# Probes the OpenClaw gateway over HTTP and reports whether an auth
# token is available. Only warns on failure (does not exit) — the
# gateway can be started/configured after installation.
check_gateway() {
local gw_url="http://127.0.0.1:18789"
# Try to read from openclaw.json
local config_file="${HOME}/.openclaw/openclaw.json"
if [[ -f "$config_file" ]]; then
local port
# Shell out to node for JSON parsing — node availability was verified by
# check_node, which runs before this in the Prerequisites stage.
port=$(node -e "try{const c=JSON.parse(require('fs').readFileSync('$config_file','utf8'));console.log(c.gateway?.port??18789)}catch{console.log(18789)}" 2>/dev/null || echo "18789")
gw_url="http://127.0.0.1:${port}"
fi
# Either /health or / answering counts as reachable.
if curl -sf "${gw_url}/health" &>/dev/null || curl -sf "${gw_url}/" &>/dev/null; then
ok "OpenClaw gateway reachable at ${gw_url}"
else
warn "Gateway not reachable at ${gw_url} — start it with: openclaw gateway start"
fi
# Verify auth token exists (needed for .env generation and service connectivity)
# GATEWAY_TOKEN env var (or --gateway-token flag) takes precedence over the config file.
local gw_token="${GATEWAY_TOKEN:-}"
if [[ -z "$gw_token" && -f "$config_file" ]]; then
gw_token=$(node -e "try{const c=JSON.parse(require('fs').readFileSync('$config_file','utf8'));console.log(c.gateway?.auth?.token??'')}catch{}" 2>/dev/null || echo "")
fi
if [[ -n "$gw_token" ]]; then
ok "Gateway auth token present"
else
warn "No gateway auth token found — run: ${CYAN}openclaw onboard --install-daemon${NC}"
fi
}
# ── [1/5] Prerequisites ───────────────────────────────────────────────
stage "Prerequisites"
# Node is checked first — later checks (gateway config parsing) shell out
# to `node -e`. Each check exits on hard failure; gateway only warns.
check_node
check_npm
check_git
check_build_tools
check_openclaw
check_gateway
# ── [2/5] Clone or update ────────────────────────────────────────────
stage "Download"
if [[ "$DRY_RUN" == "true" ]]; then
if [[ -d "$INSTALL_DIR/.git" ]]; then
dry "Would update existing installation in ${INSTALL_DIR}"
dry "Would pull latest ${BRANCH}"
else
dry "Would clone ${REPO}"
dry "Would install to ${INSTALL_DIR}"
fi
else
# An existing .git directory means a prior install — update in place.
if [[ -d "$INSTALL_DIR/.git" ]]; then
cd "$INSTALL_DIR"
run_with_dots "Updating" git pull origin "$BRANCH" -q
ok "Updated to latest ${BRANCH}"
else
# Shallow single-branch clone keeps the download small.
run_with_dots "Cloning Nerve" git clone --branch "$BRANCH" --depth 1 -q "$REPO" "$INSTALL_DIR"
ok "Cloned to ${INSTALL_DIR}"
fi
# All later stages run from the install directory.
cd "$INSTALL_DIR"
fi
# ── [3/5] Install & Build ────────────────────────────────────────────
# npm ci + client/server builds with log capture and pattern-matched
# troubleshooting hints on failure. Also fetches the local whisper
# speech model and ffmpeg (both optional — failures only warn).
stage "Install & Build"
if [[ "$DRY_RUN" == "true" ]]; then
dry "Would run: npm ci"
dry "Would run: npm run build"
dry "Would run: npm run build:server"
else
npm_log=$(mktemp /tmp/nerve-npm-install-XXXXXX)
run_with_dots "Installing dependencies" bash -c "npm ci --loglevel=error > '$npm_log' 2>&1"
# RWD_EXIT is presumably the exit code captured by run_with_dots (defined
# earlier in the script) — TODO confirm when viewing the full file.
if [[ $RWD_EXIT -eq 0 ]]; then
ok "Dependencies installed"
else
fail "npm ci failed"
echo ""
# Show the last meaningful lines
echo -e " ${RAIL} ${DIM}── Last 10 lines ──${NC}"
tail -10 "$npm_log" | while IFS= read -r line; do
echo -e " ${RAIL} ${DIM}${line}${NC}"
done
echo -e " ${RAIL} ${DIM}── Full log: ${npm_log} ──${NC}"
echo ""
# Detect common failure patterns and suggest fixes
if grep -qi 'EACCES\|permission denied' "$npm_log"; then
hint "Permissions issue — try installing Node via nvm instead of system packages:"
cmd "curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash"
cmd "nvm install ${NODE_MIN}"
echo -e " ${RAIL} ${DIM}nvm installs to your home directory — no sudo needed${NC}"
elif grep -qi 'node-gyp\|gyp ERR\|make.*Error\|g++.*not found\|cc.*not found' "$npm_log"; then
hint "Native module compilation failed — install build tools:"
if $IS_MAC; then
cmd "xcode-select --install"
elif $IS_DEBIAN; then
cmd "sudo apt install build-essential"
elif $IS_FEDORA; then
cmd "sudo dnf groupinstall 'Development Tools'"
else
cmd "sudo apt install build-essential"
fi
elif grep -qi 'ERESOLVE\|peer dep\|could not resolve' "$npm_log"; then
hint "Dependency conflict — try with a clean slate:"
cmd "rm -rf node_modules package-lock.json"
cmd "npm install"
else
hint "Troubleshooting:"
echo -e " ${RAIL} ${DIM}1. Check the full log: cat ${npm_log}${NC}"
echo -e " ${RAIL} ${DIM}2. Ensure Node ${NODE_MIN}+ and build tools are installed${NC}"
echo -e " ${RAIL} ${DIM}3. Try: rm -rf node_modules && npm install${NC}"
fi
echo ""
exit 1
fi
build_log=$(mktemp /tmp/nerve-build-XXXXXX)
run_with_dots "Building client" bash -c "npm run build > '$build_log' 2>&1"
if [[ $RWD_EXIT -eq 0 ]]; then
ok "Client built"
else
fail "Client build failed"
echo ""
echo -e " ${RAIL} ${DIM}── Last 10 lines ──${NC}"
tail -10 "$build_log" | while IFS= read -r line; do
echo -e " ${RAIL} ${DIM}${line}${NC}"
done
echo -e " ${RAIL} ${DIM}── Full log: ${build_log} ──${NC}"
echo ""
hint "Troubleshooting:"
echo -e " ${RAIL} ${DIM}1. Check the full log: cat ${build_log}${NC}"
echo -e " ${RAIL} ${DIM}2. Try rebuilding: npm run build${NC}"
echo ""
exit 1
fi
# Server build appends (>>) to the same log file as the client build.
run_with_dots "Building server" bash -c "npm run build:server >> '$build_log' 2>&1"
if [[ $RWD_EXIT -eq 0 ]]; then
ok "Server built"
else
fail "Server build failed"
echo ""
echo -e " ${RAIL} ${DIM}── Last 10 lines ──${NC}"
tail -10 "$build_log" | while IFS= read -r line; do
echo -e " ${RAIL} ${DIM}${line}${NC}"
done
echo -e " ${RAIL} ${DIM}── Full log: ${build_log} ──${NC}"
echo ""
hint "Troubleshooting:"
echo -e " ${RAIL} ${DIM}1. Check the full log: cat ${build_log}${NC}"
echo -e " ${RAIL} ${DIM}2. Try rebuilding: npm run build:server${NC}"
echo ""
exit 1
fi
# Clean up temp logs on success
rm -f "$npm_log" "$build_log" 2>/dev/null
# ── Download speech model (for local voice input) ──────────────────
WHISPER_MODEL_DIR="${HOME}/.nerve/models"
WHISPER_MODEL_FILE="ggml-tiny.en.bin"
WHISPER_MODEL_PATH="${WHISPER_MODEL_DIR}/${WHISPER_MODEL_FILE}"
WHISPER_MODEL_URL="https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${WHISPER_MODEL_FILE}"
if [[ -f "$WHISPER_MODEL_PATH" ]]; then
ok "Speech model already downloaded"
else
mkdir -p "$WHISPER_MODEL_DIR"
# NOTE(review): no checksum verification on the downloaded model — consider pinning one.
run_with_dots "Downloading speech model (75MB)" bash -c "curl -fsSL -o '$WHISPER_MODEL_PATH' '$WHISPER_MODEL_URL' 2>/dev/null"
if [[ $RWD_EXIT -eq 0 ]]; then
ok "Speech model ready"
else
warn "Speech model download failed — voice input will use OpenAI API (requires OPENAI_API_KEY)"
# Remove any partial download so the next run retries cleanly.
rm -f "$WHISPER_MODEL_PATH" 2>/dev/null
fi
fi
# ── Check for ffmpeg (needed for voice input) ──────────────────────
# macOS: only suggest brew (no auto-install). Linux: attempt install via sudo.
if ! command -v ffmpeg &>/dev/null; then
if $IS_MAC; then
warn "ffmpeg not found — needed for voice input"
hint "Install with:"
cmd "brew install ffmpeg"
elif $IS_DEBIAN; then
run_with_dots "Installing ffmpeg" bash -c "sudo DEBIAN_FRONTEND=noninteractive apt-get install -y -qq ffmpeg &>/dev/null"
if [[ $RWD_EXIT -eq 0 ]]; then
ok "ffmpeg installed"
else
warn "ffmpeg install failed — voice input may not work"
fi
elif $IS_FEDORA; then
run_with_dots "Installing ffmpeg" bash -c "sudo dnf install -y -q ffmpeg &>/dev/null"
if [[ $RWD_EXIT -eq 0 ]]; then
ok "ffmpeg installed"
else
warn "ffmpeg install failed — voice input may not work"
fi
fi
fi
fi
# ── Auto-generate .env from OpenClaw gateway config ───────────────────
# Writes GATEWAY_URL / GATEWAY_TOKEN / PORT to ./.env, reading token and
# port from ~/.openclaw/openclaw.json unless GATEWAY_TOKEN is already set.
# Never overwrites an existing .env. Sets the global ENV_MISSING=true
# when no token can be found (used for the final exit code).
generate_env_from_gateway() {
  # Already have an .env? Don't overwrite.
  if [[ -f .env ]]; then
    ok "Existing .env found — keeping current configuration"
    return 0
  fi
  local gw_token="${GATEWAY_TOKEN:-}"
  local gw_port="18789"
  local config_file="${HOME}/.openclaw/openclaw.json"
  # Read from openclaw.json if no --gateway-token was passed
  if [[ -z "$gw_token" && -f "$config_file" ]]; then
    gw_token=$(node -e "try{const c=JSON.parse(require('fs').readFileSync('$config_file','utf8'));console.log(c.gateway?.auth?.token??'')}catch{}" 2>/dev/null || echo "")
    gw_port=$(node -e "try{const c=JSON.parse(require('fs').readFileSync('$config_file','utf8'));console.log(c.gateway?.port??18789)}catch{console.log(18789)}" 2>/dev/null || echo "18789")
  fi
  if [[ -n "$gw_token" ]]; then
    cat > .env <<ENVEOF
GATEWAY_URL=http://127.0.0.1:${gw_port}
GATEWAY_TOKEN=${gw_token}
PORT=3080
ENVEOF
    # SECURITY FIX: .env holds the gateway auth token — the default umask
    # typically leaves it world-readable. Restrict to the owner.
    chmod 600 .env
    ok "Generated .env from OpenClaw gateway config"
  else
    warn "Cannot auto-generate .env — no gateway token found"
    warn "Run: ${CYAN}npm run setup${NC} to configure manually"
    ENV_MISSING=true
  fi
}
# ── [4/5] Configure ──────────────────────────────────────────────────
# Decision tree: dry-run → describe only; --skip-setup → keep or
# auto-generate .env; interactive TTY → offer the wizard; otherwise
# auto-generate from gateway config.
stage "Configure"
if [[ "$DRY_RUN" == "true" ]]; then
if [[ "$SKIP_SETUP" == "true" ]]; then
dry "Would skip setup wizard (--skip-setup)"
else
dry "Would launch interactive setup wizard"
dry "Would prompt for: gateway token, port, TTS config"
fi
else
if [[ "$SKIP_SETUP" == "true" ]]; then
if [[ -f .env ]]; then
ok "Skipping setup (--skip-setup flag, .env exists)"
else
info "Skipping wizard — generating .env from gateway config..."
generate_env_from_gateway
fi
else
if [[ "$INTERACTIVE" == "true" ]]; then
if [[ -f .env ]]; then
ok "Existing .env found"
printf " ${RAIL} ${YELLOW}?${NC} Run setup wizard anyway? (y/N) "
# Read from /dev/tty, not stdin — stdin is the pipe under curl | bash.
if read -r answer < /dev/tty 2>/dev/null; then
if [[ "$(echo "$answer" | tr "[:upper:]" "[:lower:]")" == "y" ]]; then
echo ""
# NERVE_INSTALLER=1 makes the wizard match the installer's visual style.
NERVE_INSTALLER=1 npm run setup < /dev/tty 2>/dev/null || {
warn "Setup wizard failed (no TTY?) — run ${CYAN}npm run setup${NC} manually"
}
else
ok "Keeping existing configuration"
fi
else
warn "Cannot read input — run ${CYAN}npm run setup${NC} manually to reconfigure"
fi
else
NERVE_INSTALLER=1 npm run setup < /dev/tty 2>/dev/null || {
warn "Setup wizard failed — attempting auto-config from gateway..."
generate_env_from_gateway
}
fi
else
if [[ -f .env ]]; then
ok "Existing .env found — keeping current configuration"
else
info "Non-interactive mode — generating .env from gateway config..."
generate_env_from_gateway
fi
fi
fi
fi
# ── [5/5] Systemd service ────────────────────────────────────────────
stage "Service"
# Writes /etc/systemd/system/nerve.service running the server as the
# installing (non-root) user. When not running as root, only prints the
# sudo commands needed to finish the install.
setup_systemd() {
local service_file="/etc/systemd/system/nerve.service"
local node_bin
node_bin=$(which node)
local working_dir="$INSTALL_DIR"
local node_dir
node_dir=$(dirname "${node_bin}")
# Run as the installing user (who has openclaw config)
local install_user="${SUDO_USER:-${USER}}"
local install_home="${HOME}"
# If running via sudo, get the real user's home
if [[ -n "${SUDO_USER:-}" ]]; then
if command -v getent &>/dev/null; then
install_home=$(getent passwd "${SUDO_USER}" | cut -d: -f6)
else
# getent missing (minimal systems) — fall back to tilde expansion.
install_home=$(eval echo "~${SUDO_USER}")
fi
fi
# Fallback: Detect from openclaw binary location (handles root installs where openclaw is in /home/user)
# Note: glob may match multiple users — picks first (alphabetical)
if [[ "${install_user}" == "root" ]]; then
local openclaw_bin
openclaw_bin=$(command -v openclaw 2>/dev/null || echo "")
if [[ -z "$openclaw_bin" ]]; then
# Check common nvm locations
for candidate in /home/*/.nvm/versions/node/*/bin/openclaw; do
if [[ -x "$candidate" ]]; then
openclaw_bin="$candidate"
break
fi
done
fi
if [[ -n "$openclaw_bin" ]]; then
# Extract user from path like /home/username/.nvm/...
if [[ "$openclaw_bin" =~ ^/home/([^/]+)/ ]]; then
local detected_user="${BASH_REMATCH[1]}"
install_user="$detected_user"
install_home="/home/$detected_user"
info "Detected openclaw owner: ${detected_user}"
fi
fi
fi
local tmp_service
tmp_service=$(mktemp /tmp/nerve.service.XXXXXX)
# Heredoc below is the unit file verbatim — keep free of shell comments.
cat > "$tmp_service" <<EOF
[Unit]
Description=Nerve - OpenClaw Web UI
After=network.target
[Service]
Type=simple
User=${install_user}
Group=${install_user}
WorkingDirectory=${working_dir}
ExecStart=${node_bin} server-dist/index.js
EnvironmentFile=${working_dir}/.env
Environment=NODE_ENV=production
Environment=HOME=${install_home}
Environment=PATH=${node_dir}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
if [[ $EUID -eq 0 ]]; then
mv "$tmp_service" "$service_file"
# Only start the service when a .env exists — otherwise it would crash-loop.
if [[ -f "${working_dir}/.env" ]]; then
run_with_dots "Systemd service" bash -c "systemctl daemon-reload && systemctl enable nerve.service &>/dev/null && systemctl start nerve.service"
if [[ $RWD_EXIT -eq 0 ]]; then
ok "Systemd service installed and started"
else
warn "Systemd service install failed — try: sudo systemctl start nerve.service"
fi
else
systemctl daemon-reload
systemctl enable nerve.service &>/dev/null
ok "Systemd service installed (not started — run ${CYAN}npm run setup${NC} first, then ${CYAN}systemctl start nerve.service${NC})"
fi
else
# Not root: leave the unit file in /tmp and print the commands to finish.
echo ""
info "To install as a systemd service (requires sudo):"
echo ""
echo " sudo mv ${tmp_service} ${service_file}"
echo " sudo systemctl daemon-reload"
echo " sudo systemctl enable nerve.service"
echo " sudo systemctl start nerve.service"
echo ""
info "Service will run as: ${install_user}"
echo ""
fi
}
# Installs Nerve as a per-user launchd agent on macOS. A start.sh
# wrapper sources .env at runtime so config changes apply on the next
# service restart without regenerating the plist.
setup_launchd() {
local node_bin
node_bin=$(which node)
local working_dir="$INSTALL_DIR"
local plist_dir="${HOME}/Library/LaunchAgents"
local plist_file="${plist_dir}/com.nerve.server.plist"
mkdir -p "$plist_dir"
# Create a wrapper script that sources .env at runtime (not baked at install time)
# This way token/config changes in .env take effect on next service restart
local start_script="${working_dir}/start.sh"
# The plist sets PATH in EnvironmentVariables, but the wrapper also needs
# to find node if run manually. Bake the current node path as a fallback.
local node_dir_escaped
node_dir_escaped=$(dirname "${node_bin}")
cat > "$start_script" <<STARTEOF
#!/bin/bash
# Nerve start wrapper — sources .env at runtime so config changes
# take effect on restart without touching the plist
SCRIPT_DIR="\$(cd "\$(dirname "\$0")" && pwd)"
export PATH="${node_dir_escaped}:\${PATH}"
if [[ -f "\${SCRIPT_DIR}/.env" ]]; then
set -a
source "\${SCRIPT_DIR}/.env"
set +a
fi
export NODE_ENV=production
exec node "\${SCRIPT_DIR}/server-dist/index.js"
STARTEOF
chmod +x "$start_script"
cat > "$plist_file" <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.nerve.server</string>
<key>ProgramArguments</key>
<array>
<string>${start_script}</string>
</array>
<key>WorkingDirectory</key>
<string>${working_dir}</string>
<key>EnvironmentVariables</key>
<dict>
<key>PATH</key>
<string>$(dirname "${node_bin}"):/usr/local/bin:/usr/bin:/bin:/opt/homebrew/bin</string>
</dict>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>${working_dir}/nerve.log</string>
<key>StandardErrorPath</key>
<string>${working_dir}/nerve.log</string>
</dict>
</plist>
EOF
# launchctl bootstrap (modern) with fallback to load (legacy)
local uid
uid=$(id -u)
if launchctl bootstrap "gui/${uid}" "$plist_file" 2>/dev/null; then
ok "launchd service installed and started"
info "Nerve will start automatically on login"
elif launchctl load "$plist_file" 2>/dev/null; then
ok "launchd service installed and started (legacy loader)"
info "Nerve will start automatically on login"
else
# Neither loader worked (e.g. no GUI session) — leave the plist for the user.
ok "launchd plist created at ${plist_file}"
info "Load it with: launchctl load ${plist_file}"
fi
echo ""
info "Manage:"
echo " launchctl stop com.nerve.server"
echo " launchctl start com.nerve.server"
echo " launchctl unload ${plist_file}"
echo ""
}
# Platform dispatch: launchd agent on macOS, systemd unit where systemctl
# exists; otherwise no service is installed (user starts manually).
if [[ "$(uname -s)" == "Darwin" ]]; then
# ── macOS: launchd service ──────────────────────────────────────────
plist_check="${HOME}/Library/LaunchAgents/com.nerve.server.plist"
if [[ "$DRY_RUN" == "true" ]]; then
if [[ -f "$plist_check" ]]; then
dry "launchd service already exists — would restart it"
else
dry "Would create launchd service (~/Library/LaunchAgents/com.nerve.server.plist)"
fi
else
echo -e "${BOLD} Service${NC}"
echo ""
if [[ -f "$plist_check" ]]; then
info "Updating existing launchd service..."
uid=$(id -u)
# bootout (modern) or stop (legacy) — ignore failures, service may not be loaded.
launchctl bootout "gui/${uid}/com.nerve.server" 2>/dev/null || launchctl stop com.nerve.server 2>/dev/null || true
setup_launchd
elif [[ "$INTERACTIVE" == "true" ]]; then
printf " ${RAIL} ${YELLOW}?${NC} Install as a launchd service (starts on login)? (Y/n) "
if read -r answer < /dev/tty 2>/dev/null; then
# Default is Yes — anything other than "n" installs.
if [[ "$(echo "$answer" | tr "[:upper:]" "[:lower:]")" != "n" ]]; then
setup_launchd
else
ok "Skipped — start manually with: npm start"
fi
else
info "Cannot read input — installing launchd service by default"
setup_launchd
fi
else
info "Installing launchd service..."
setup_launchd
fi
echo ""
fi
elif command -v systemctl &>/dev/null; then
if [[ "$DRY_RUN" == "true" ]]; then
if [[ -f /etc/systemd/system/nerve.service ]]; then
dry "Service already exists — would restart it"
else
dry "Would prompt to install systemd service"
dry "Would create /etc/systemd/system/nerve.service"
dry "Would enable and start the service"
fi
else
echo -e "${BOLD} Systemd service${NC}"
echo ""
if [[ -f /etc/systemd/system/nerve.service ]]; then
info "Updating existing systemd service..."
# Stop the running unit before rewriting it; sudo only when not root.
if [[ $EUID -eq 0 ]]; then
systemctl stop nerve.service 2>/dev/null || true
else
sudo systemctl stop nerve.service 2>/dev/null || true
fi
setup_systemd
elif [[ "$INTERACTIVE" == "true" ]]; then
printf " ${RAIL} ${YELLOW}?${NC} Install as a systemd service? (Y/n) "
if read -r answer < /dev/tty 2>/dev/null; then
if [[ "$(echo "$answer" | tr "[:upper:]" "[:lower:]")" != "n" ]]; then
setup_systemd
else
ok "Skipped — start manually with: npm start"
fi
else
info "Cannot read input — installing systemd service by default"
setup_systemd
fi
elif [[ $EUID -eq 0 ]]; then
info "Non-interactive mode — installing systemd service automatically"
setup_systemd
else
# Non-root + non-interactive: setup_systemd only prints sudo instructions.
info "Non-interactive mode — generating systemd service file"
setup_systemd
fi
echo ""
fi
fi
# ── Done ──────────────────────────────────────────────────────────────
# Final summary: URL box, per-platform restart/log hints, and an exit
# code that reflects whether the install is actually usable.
echo -e " ${RAIL}"
echo -e " ${GREEN}${NC} ${GREEN}${BOLD}Done${NC}"
echo ""
# Detect port from .env
local_port=3080
if [[ -f "${INSTALL_DIR}/.env" ]]; then
port_val=$(grep -E "^PORT=" "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 || true)
[[ -n "$port_val" ]] && local_port="$port_val"
fi
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${BOLD}⊘ Dry run complete — nothing was modified${NC}"
echo ""
echo -e " ${DIM}Run without --dry-run to install for real.${NC}"
else
# Use the actual IP if HOST is 0.0.0.0 (network mode)
host_val=$(grep -E "^HOST=" "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 || true)
if [[ "$host_val" == "0.0.0.0" ]]; then
# hostname -I is Linux; ipconfig getifaddr en0 is the macOS fallback.
detected_ip=$(hostname -I 2>/dev/null | awk '{print $1}' || ipconfig getifaddr en0 2>/dev/null || echo "")
local_url="http://${detected_ip:-localhost}:${local_port}"
else
local_url="http://localhost:${local_port}"
fi
url_len=${#local_url}
# Box must fit both the header text and the URL, with breathing room
header_len=29 # "Open Nerve in your browser:" + padding
url_line_len=$((url_len + 4)) # "→ " + url + padding
if [[ $header_len -gt $url_line_len ]]; then
box_inner=$((header_len + 4))
else
box_inner=$((url_line_len + 4))
fi
echo ""
echo -e " ${GREEN}${BOLD}✅ Nerve installed!${NC}"
echo ""
echo -e " ${ORANGE}$(printf '─%.0s' $(seq 1 $box_inner))${NC}"
echo -e " ${ORANGE}${NC}$(printf ' %.0s' $(seq 1 $box_inner))${ORANGE}${NC}"
echo -e " ${ORANGE}${NC} ${BOLD}Open Nerve in your browser:${NC}$(printf ' %.0s' $(seq 1 $((box_inner - 29))))${ORANGE}${NC}"
echo -e " ${ORANGE}${NC} ${CYAN}${BOLD}${local_url}${NC}$(printf ' %.0s' $(seq 1 $((box_inner - url_len - 4))))${ORANGE}${NC}"
echo -e " ${ORANGE}${NC}$(printf ' %.0s' $(seq 1 $box_inner))${ORANGE}${NC}"
echo -e " ${ORANGE}$(printf '─%.0s' $(seq 1 $box_inner))${NC}"
echo ""
echo -e " ${DIM}Directory: cd ${INSTALL_DIR}${NC}"
if $IS_MAC; then
echo -e " ${DIM}Restart: launchctl stop com.nerve.server && launchctl start com.nerve.server${NC}"
echo -e " ${DIM}Logs: tail -f ${INSTALL_DIR}/nerve.log${NC}"
elif command -v systemctl &>/dev/null; then
echo -e " ${DIM}Restart: sudo systemctl restart nerve.service${NC}"
echo -e " ${DIM}Logs: sudo journalctl -u nerve.service -f${NC}"
else
echo -e " ${DIM}Start: cd ${INSTALL_DIR} && npm start${NC}"
fi
fi
echo ""
# Exit code reflects actual readiness
if [[ "$ENV_MISSING" == "true" ]] || [[ ! -f "${INSTALL_DIR}/.env" ]]; then
warn "Install complete but Nerve is not fully configured"
info "Run: cd ${INSTALL_DIR} && npm run setup"
exit 2 # partial success — installed but non-functional
fi
exit 0

13538
package-lock.json generated Normal file

File diff suppressed because it is too large Load diff

104
package.json Normal file
View file

@ -0,0 +1,104 @@
{
"name": "openclaw-nerve",
"version": "1.3.0",
"description": "Web interface for OpenClaw — chat, voice input, TTS, and agent monitoring in the browser",
"license": "MIT",
"engines": {
"node": ">=22.0.0"
},
"repository": {
"type": "git",
"url": "https://github.com/daggerhashimoto/openclaw-nerve.git"
},
"homepage": "https://github.com/daggerhashimoto/openclaw-nerve",
"keywords": [
"openclaw",
"nerve",
"chat",
"terminal",
"ai",
"agent",
"tts",
"voice"
],
"type": "module",
"scripts": {
"dev": "vite",
"dev:server": "tsx watch server/index.ts",
"build": "tsc -b && vite build",
"build:server": "tsc -p config/tsconfig.server.json --outDir server-dist --noEmit false --declaration false",
"lint": "eslint .",
"preview": "vite preview",
"start": "node server-dist/index.js",
"start:server": "tsx server/index.ts",
"prod": "npm run build && npm run build:server && node server-dist/index.js",
"setup": "tsx scripts/setup.ts",
"test": "vitest",
"test:coverage": "vitest --coverage",
"postinstall": "node -e \"try{require('fs').accessSync('.env')}catch{console.log('\\n Run \\x1b[33mnpm run setup\\x1b[0m to configure Nerve.\\n')}\""
},
"dependencies": {
"@codemirror/commands": "^6.10.2",
"@codemirror/lang-javascript": "^6.2.4",
"@codemirror/lang-json": "^6.0.2",
"@codemirror/lang-markdown": "^6.5.0",
"@codemirror/lang-python": "^6.2.1",
"@codemirror/lang-yaml": "^6.1.2",
"@codemirror/language": "^6.12.1",
"@codemirror/legacy-modes": "^6.5.2",
"@codemirror/search": "^6.6.0",
"@codemirror/state": "^6.5.4",
"@codemirror/view": "^6.39.14",
"@fugood/whisper.node": "^1.0.16",
"@hono/node-server": "^1.19.9",
"@hono/zod-validator": "^0.7.6",
"@tailwindcss/vite": "^4.1.18",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"codemirror": "^6.0.2",
"diff-sequences": "^29.6.3",
"dompurify": "^3.3.1",
"dotenv": "^17.2.4",
"highlight.js": "^11.11.1",
"hono": "^4.11.7",
"lightweight-charts": "^5.1.0",
"lucide-react": "^0.563.0",
"node-pty": "^1.1.0",
"radix-ui": "^1.4.3",
"react": "^19.2.0",
"react-dom": "^19.2.0",
"react-markdown": "^10.1.0",
"recharts": "^3.7.0",
"remark-gfm": "^4.0.1",
"tailwind-merge": "^3.4.0",
"tailwindcss": "^4.1.18",
"ws": "^8.19.0",
"zod": "^4.3.6"
},
"devDependencies": {
"@eslint/js": "^9.39.1",
"@inquirer/prompts": "^8.2.0",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.3.2",
"@testing-library/user-event": "^14.6.1",
"@types/dompurify": "^3.0.5",
"@types/node": "^24.10.1",
"@types/react": "^19.2.5",
"@types/react-dom": "^19.2.3",
"@types/ws": "^8.18.1",
"@vitejs/plugin-react": "^5.1.1",
"@vitest/coverage-v8": "^4.0.18",
"eslint": "^9.39.1",
"eslint-plugin-react-hooks": "^7.0.1",
"eslint-plugin-react-refresh": "^0.4.24",
"globals": "^16.5.0",
"jsdom": "^28.0.0",
"shadcn": "^3.8.2",
"tsx": "^4.21.0",
"tw-animate-css": "^1.4.0",
"typescript": "~5.9.3",
"typescript-eslint": "^8.46.4",
"vite": "^7.2.4",
"vitest": "^4.0.18"
}
}

BIN
public/favicon-256.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

BIN
public/favicon-32.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 949 B

36
public/favicon.svg Normal file
View file

@ -0,0 +1,36 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32">
<rect width="32" height="32" rx="6" fill="#0d1117"/>
<!-- Dim structure -->
<line x1="16" y1="16" x2="16" y2="10.4" stroke="#301C0E" stroke-width="1" stroke-linecap="round"/>
<line x1="16" y1="16" x2="20.8" y2="13.2" stroke="#301C0E" stroke-width="1" stroke-linecap="round"/>
<line x1="16" y1="16" x2="20.8" y2="18.8" stroke="#301C0E" stroke-width="1" stroke-linecap="round"/>
<line x1="16" y1="16" x2="16" y2="21.6" stroke="#301C0E" stroke-width="1" stroke-linecap="round"/>
<line x1="16" y1="16" x2="11.2" y2="18.8" stroke="#301C0E" stroke-width="1" stroke-linecap="round"/>
<line x1="16" y1="16" x2="11.2" y2="13.2" stroke="#301C0E" stroke-width="1" stroke-linecap="round"/>
<polygon points="16,10.4 20.8,13.2 20.8,18.8 16,21.6 11.2,18.8 11.2,13.2" fill="none" stroke="#301C0E" stroke-width="0.8" stroke-linejoin="round"/>
<!-- Glow spokes -->
<line x1="16" y1="16" x2="16" y2="10.4" stroke="#FF8C32" stroke-width="0.8" stroke-linecap="round" opacity="0.5"/>
<line x1="16" y1="16" x2="20.8" y2="13.2" stroke="#FF8C32" stroke-width="0.8" stroke-linecap="round" opacity="0.5"/>
<line x1="16" y1="16" x2="20.8" y2="18.8" stroke="#FF8C32" stroke-width="0.8" stroke-linecap="round" opacity="0.5"/>
<line x1="16" y1="16" x2="16" y2="21.6" stroke="#FF8C32" stroke-width="0.8" stroke-linecap="round" opacity="0.5"/>
<line x1="16" y1="16" x2="11.2" y2="18.8" stroke="#FF8C32" stroke-width="0.8" stroke-linecap="round" opacity="0.5"/>
<line x1="16" y1="16" x2="11.2" y2="13.2" stroke="#FF8C32" stroke-width="0.8" stroke-linecap="round" opacity="0.5"/>
<!-- Glow ring -->
<polygon points="16,10.4 20.8,13.2 20.8,18.8 16,21.6 11.2,18.8 11.2,13.2" fill="none" stroke="#FF8C32" stroke-width="0.8" stroke-linejoin="round" opacity="0.4"/>
<!-- Center node -->
<circle cx="16" cy="16" r="3" fill="#FF8C32" opacity="0.85"/>
<circle cx="16" cy="16" r="1.5" fill="#FFDCB4" opacity="0.9"/>
<!-- Outer nodes — all lit -->
<circle cx="16" cy="10.4" r="1.8" fill="#FF8C32" opacity="0.8"/>
<circle cx="16" cy="10.4" r="0.9" fill="#FFDCB4" opacity="0.7"/>
<circle cx="20.8" cy="13.2" r="1.8" fill="#FF8C32" opacity="0.8"/>
<circle cx="20.8" cy="13.2" r="0.9" fill="#FFDCB4" opacity="0.7"/>
<circle cx="20.8" cy="18.8" r="1.8" fill="#FF8C32" opacity="0.8"/>
<circle cx="20.8" cy="18.8" r="0.9" fill="#FFDCB4" opacity="0.7"/>
<circle cx="16" cy="21.6" r="1.8" fill="#FF8C32" opacity="0.8"/>
<circle cx="16" cy="21.6" r="0.9" fill="#FFDCB4" opacity="0.7"/>
<circle cx="11.2" cy="18.8" r="1.8" fill="#FF8C32" opacity="0.8"/>
<circle cx="11.2" cy="18.8" r="0.9" fill="#FFDCB4" opacity="0.7"/>
<circle cx="11.2" cy="13.2" r="1.8" fill="#FF8C32" opacity="0.8"/>
<circle cx="11.2" cy="13.2" r="0.9" fill="#FFDCB4" opacity="0.7"/>
</svg>

After

Width:  |  Height:  |  Size: 2.8 KiB

View file

@ -0,0 +1,9 @@
pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}/*!
Theme: GitHub Dark Dimmed
Description: Dark dimmed theme as seen on github.com
Author: github.com
Maintainer: @Hirse
Updated: 2021-05-15
Colors taken from GitHub's CSS
*/.hljs{color:#adbac7;background:#22272e}.hljs-doctag,.hljs-keyword,.hljs-meta .hljs-keyword,.hljs-template-tag,.hljs-template-variable,.hljs-type,.hljs-variable.language_{color:#f47067}.hljs-title,.hljs-title.class_,.hljs-title.class_.inherited__,.hljs-title.function_{color:#dcbdfb}.hljs-attr,.hljs-attribute,.hljs-literal,.hljs-meta,.hljs-number,.hljs-operator,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-id,.hljs-variable{color:#6cb6ff}.hljs-meta .hljs-string,.hljs-regexp,.hljs-string{color:#96d0ff}.hljs-built_in,.hljs-symbol{color:#f69d50}.hljs-code,.hljs-comment,.hljs-formula{color:#768390}.hljs-name,.hljs-quote,.hljs-selector-pseudo,.hljs-selector-tag{color:#8ddb8c}.hljs-subst{color:#adbac7}.hljs-section{color:#316dca;font-weight:700}.hljs-bullet{color:#eac55f}.hljs-emphasis{color:#adbac7;font-style:italic}.hljs-strong{color:#adbac7;font-weight:700}.hljs-addition{color:#b4f1b4;background-color:#1b4721}.hljs-deletion{color:#ffd8d3;background-color:#78191b}

10
public/hljs/github.min.css vendored Normal file
View file

@ -0,0 +1,10 @@
pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}/*!
Theme: GitHub
Description: Light theme as seen on github.com
Author: github.com
Maintainer: @Hirse
Updated: 2021-05-15
Outdated base version: https://github.com/primer/github-syntax-light
Current colors taken from GitHub's CSS
*/.hljs{color:#24292e;background:#fff}.hljs-doctag,.hljs-keyword,.hljs-meta .hljs-keyword,.hljs-template-tag,.hljs-template-variable,.hljs-type,.hljs-variable.language_{color:#d73a49}.hljs-title,.hljs-title.class_,.hljs-title.class_.inherited__,.hljs-title.function_{color:#6f42c1}.hljs-attr,.hljs-attribute,.hljs-literal,.hljs-meta,.hljs-number,.hljs-operator,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-id,.hljs-variable{color:#005cc5}.hljs-meta .hljs-string,.hljs-regexp,.hljs-string{color:#032f62}.hljs-built_in,.hljs-symbol{color:#e36209}.hljs-code,.hljs-comment,.hljs-formula{color:#6a737d}.hljs-name,.hljs-quote,.hljs-selector-pseudo,.hljs-selector-tag{color:#22863a}.hljs-subst{color:#24292e}.hljs-section{color:#005cc5;font-weight:700}.hljs-bullet{color:#735c0f}.hljs-emphasis{color:#24292e;font-style:italic}.hljs-strong{color:#24292e;font-weight:700}.hljs-addition{color:#22863a;background-color:#f0fff4}.hljs-deletion{color:#b31d28;background-color:#ffeef0}

73
scripts/lib/banner.ts Normal file
View file

@ -0,0 +1,73 @@
/**
 * Banner and formatting utilities for the setup CLI.
 *
 * When NERVE_INSTALLER=1 (called from install.sh), uses the same
 * rail + dot visual style as the installer for seamless continuity.
 */
import { readFileSync } from 'node:fs';
import { resolve } from 'node:path';

// True when this script was launched from install.sh.
const isInstaller = Boolean(process.env.NERVE_INSTALLER);

// Dim vertical rail glyph that prefixes each wizard output line.
const rail = ` \x1b[2m│\x1b[0m`;

/** Inquirer theme that continues the rail in installer mode */
export const promptTheme = isInstaller ? { prefix: rail } : {};

/** Read package version from package.json (falls back to '0.0.0'). */
export function getVersion(): string {
  let version = '0.0.0';
  try {
    const raw = readFileSync(resolve(process.cwd(), 'package.json'), 'utf-8');
    const parsed = JSON.parse(raw);
    version = parsed.version || '0.0.0';
  } catch {
    // Missing or malformed package.json — keep the placeholder version.
  }
  return version;
}

/** Print the setup welcome banner (no-op, removed) */
export function printBanner(): void {
  // Intentionally empty: kept so existing call sites don't need changes.
}

/** Emit a single rail-prefixed line. */
function railLine(body: string): void {
  console.log(`${rail} ${body}`);
}

/** Print a numbered section header */
export function section(num: number, total: number, title: string): void {
  if (isInstaller) {
    // Sub-step within the installer's Configure stage — lighter style
    console.log(rail);
    railLine(`\x1b[38;5;208m▸\x1b[0m \x1b[1m${title}\x1b[0m`);
    console.log(rail);
    return;
  }
  // Standalone mode — show numbered sections with rail
  if (num > 1) console.log(rail);
  console.log(` \x1b[38;5;208m●\x1b[0m \x1b[38;5;208m\x1b[1m${title}\x1b[0m \x1b[2m[${num}/${total}]\x1b[0m`);
  console.log(rail);
}

/** Print a success message with green checkmark */
export function success(msg: string): void {
  railLine(`\x1b[32m✓\x1b[0m ${msg}`);
}

/** Print a warning message with orange indicator */
export function warn(msg: string): void {
  railLine(`\x1b[38;5;208m⚠\x1b[0m ${msg}`);
}

/** Print a failure message with red X */
export function fail(msg: string): void {
  railLine(`\x1b[31m✗\x1b[0m ${msg}`);
}

/** Print an info message with cyan indicator */
export function info(msg: string): void {
  railLine(`\x1b[36m○\x1b[0m ${msg}`);
}

/** Print a dim/muted message */
export function dim(msg: string): void {
  railLine(`\x1b[2m${msg}\x1b[0m`);
}

40
scripts/lib/cert-gen.ts Normal file
View file

@ -0,0 +1,40 @@
/**
 * Self-signed TLS certificate generator for HTTPS support.
 */
import { execSync } from 'node:child_process';
import { mkdirSync, existsSync, chmodSync } from 'node:fs';
import { resolve } from 'node:path';

/** Outcome of a certificate-generation attempt. */
export interface CertResult {
  ok: boolean;
  message: string;
}

/**
 * Generate a self-signed certificate for localhost HTTPS.
 * Creates certs/cert.pem and certs/key.pem in the project root.
 *
 * Idempotent: returns success without touching anything when both files
 * already exist. Requires the `openssl` binary on PATH.
 *
 * @param projectRoot - Absolute path of the project root directory.
 * @returns ok=true with a human-readable message on success; ok=false
 *   with the underlying error message when openssl fails or is missing.
 */
export function generateSelfSignedCert(projectRoot: string): CertResult {
  const certsDir = resolve(projectRoot, 'certs');
  const certPath = resolve(certsDir, 'cert.pem');
  const keyPath = resolve(certsDir, 'key.pem');
  if (existsSync(certPath) && existsSync(keyPath)) {
    return { ok: true, message: 'Certificates already exist at certs/' };
  }
  try {
    mkdirSync(certsDir, { recursive: true });
    // Double quotes around -subj: cmd.exe does not strip single quotes, so a
    // single-quoted subject would be embedded literally on Windows.
    execSync(
      `openssl req -x509 -newkey rsa:2048 -keyout "${keyPath}" -out "${certPath}" ` +
        `-days 365 -nodes -subj "/CN=localhost"`,
      { stdio: 'pipe', timeout: 15_000 },
    );
    // Private key should not be world-readable; chmod is best-effort
    // (it has no effect on Windows).
    try { chmodSync(keyPath, 0o600); } catch { /* non-fatal */ }
    return { ok: true, message: 'Self-signed certificate generated at certs/' };
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    return { ok: false, message: `Certificate generation failed: ${msg}` };
  }
}

168
scripts/lib/env-writer.ts Normal file
View file

@ -0,0 +1,168 @@
/**
 * .env file generator — reads, writes, and backs up .env files.
 */
import { readFileSync, writeFileSync, renameSync, copyFileSync, existsSync, unlinkSync, chmodSync } from 'node:fs';

/** All supported env config keys. */
export interface EnvConfig {
  GATEWAY_URL?: string;
  GATEWAY_TOKEN?: string;
  AGENT_NAME?: string;
  PORT?: string;
  HOST?: string;
  SSL_PORT?: string;
  OPENAI_API_KEY?: string;
  REPLICATE_API_TOKEN?: string;
  ALLOWED_ORIGINS?: string;
  CSP_CONNECT_EXTRA?: string;
  WS_ALLOWED_HOSTS?: string;
  MEMORY_PATH?: string;
  MEMORY_DIR?: string;
  SESSIONS_DIR?: string;
  USAGE_FILE?: string;
  TTS_CACHE_TTL_MS?: string;
  TTS_CACHE_MAX?: string;
  VITE_PORT?: string;
}

/** Default values (matching server/lib/config.ts). */
export const DEFAULTS: Record<string, string> = {
  GATEWAY_URL: 'http://127.0.0.1:18789',
  AGENT_NAME: 'Agent',
  PORT: '3080',
  HOST: '127.0.0.1',
  SSL_PORT: '3443',
  TTS_CACHE_TTL_MS: '3600000',
  TTS_CACHE_MAX: '200',
};

/**
 * Generate a clean .env file.
 * Only writes values that differ from defaults (keeps .env minimal).
 * Always writes GATEWAY_TOKEN since it has no default.
 *
 * @param config - Key/value pairs collected by the wizard (or loaded
 *   from an existing .env).
 * @returns The full text content for the .env file.
 */
export function generateEnvContent(config: EnvConfig): string {
  const lines: string[] = [
    '# Nerve Configuration',
    '# Generated by `npm run setup`',
    `# ${new Date().toISOString()}`,
    '',
  ];
  // Gateway (always written — most important)
  lines.push('# OpenClaw Gateway');
  if (config.GATEWAY_URL && config.GATEWAY_URL !== DEFAULTS.GATEWAY_URL) {
    lines.push(`GATEWAY_URL=${config.GATEWAY_URL}`);
  }
  lines.push(`GATEWAY_TOKEN=${config.GATEWAY_TOKEN || ''}`);
  lines.push('');
  // Agent
  if (config.AGENT_NAME && config.AGENT_NAME !== DEFAULTS.AGENT_NAME) {
    lines.push('# Agent');
    lines.push(`AGENT_NAME=${config.AGENT_NAME}`);
    lines.push('');
  }
  // Server — always write PORT for clarity (even if default)
  const serverLines: string[] = [];
  serverLines.push(`PORT=${config.PORT || DEFAULTS.PORT}`);
  if (config.HOST && config.HOST !== DEFAULTS.HOST) {
    serverLines.push(`HOST=${config.HOST}`);
  }
  if (config.SSL_PORT && config.SSL_PORT !== DEFAULTS.SSL_PORT) {
    serverLines.push(`SSL_PORT=${config.SSL_PORT}`);
  }
  lines.push('# Server');
  lines.push(...serverLines);
  lines.push('');
  // API Keys
  const keyLines: string[] = [];
  if (config.OPENAI_API_KEY) keyLines.push(`OPENAI_API_KEY=${config.OPENAI_API_KEY}`);
  if (config.REPLICATE_API_TOKEN) keyLines.push(`REPLICATE_API_TOKEN=${config.REPLICATE_API_TOKEN}`);
  if (keyLines.length > 0) {
    lines.push('# API Keys');
    lines.push(...keyLines);
    lines.push('');
  }
  // Advanced
  const advLines: string[] = [];
  if (config.ALLOWED_ORIGINS) advLines.push(`ALLOWED_ORIGINS=${config.ALLOWED_ORIGINS}`);
  if (config.CSP_CONNECT_EXTRA) advLines.push(`CSP_CONNECT_EXTRA=${config.CSP_CONNECT_EXTRA}`);
  if (config.WS_ALLOWED_HOSTS) advLines.push(`WS_ALLOWED_HOSTS=${config.WS_ALLOWED_HOSTS}`);
  if (config.MEMORY_PATH) advLines.push(`MEMORY_PATH=${config.MEMORY_PATH}`);
  if (config.MEMORY_DIR) advLines.push(`MEMORY_DIR=${config.MEMORY_DIR}`);
  if (config.SESSIONS_DIR) advLines.push(`SESSIONS_DIR=${config.SESSIONS_DIR}`);
  if (config.USAGE_FILE) advLines.push(`USAGE_FILE=${config.USAGE_FILE}`);
  // Preserve tuning keys loaded from an existing .env — these were previously
  // never written back, so re-running setup silently dropped them.
  if (config.TTS_CACHE_TTL_MS && config.TTS_CACHE_TTL_MS !== DEFAULTS.TTS_CACHE_TTL_MS) {
    advLines.push(`TTS_CACHE_TTL_MS=${config.TTS_CACHE_TTL_MS}`);
  }
  if (config.TTS_CACHE_MAX && config.TTS_CACHE_MAX !== DEFAULTS.TTS_CACHE_MAX) {
    advLines.push(`TTS_CACHE_MAX=${config.TTS_CACHE_MAX}`);
  }
  if (config.VITE_PORT) advLines.push(`VITE_PORT=${config.VITE_PORT}`);
  if (advLines.length > 0) {
    lines.push('# Advanced');
    lines.push(...advLines);
    lines.push('');
  }
  return lines.join('\n');
}

/**
 * Write .env file atomically (write .env.tmp then rename).
 * Restricts permissions to 0600 where the platform supports it.
 */
export function writeEnvFile(envPath: string, config: EnvConfig): void {
  const content = generateEnvContent(config);
  const tmpPath = envPath + '.tmp';
  writeFileSync(tmpPath, content, 'utf-8');
  renameSync(tmpPath, envPath);
  try { chmodSync(envPath, 0o600); } catch { /* non-fatal on Windows */ }
}

/**
 * Parse an existing .env file into key-value pairs.
 * Blank lines, comment lines, and keys with empty values are skipped.
 */
export function loadExistingEnv(envPath: string): EnvConfig {
  const content = readFileSync(envPath, 'utf-8');
  const config: Record<string, string> = {};
  for (const line of content.split('\n')) {
    const trimmed = line.trim();
    if (!trimmed || trimmed.startsWith('#')) continue;
    const eqIdx = trimmed.indexOf('=');
    if (eqIdx > 0) {
      const key = trimmed.slice(0, eqIdx).trim();
      const value = trimmed.slice(eqIdx + 1).trim();
      if (value) config[key] = value;
    }
  }
  return config as EnvConfig;
}

/**
 * Backup existing .env file before overwriting.
 * Uses timestamped backup if .env.backup already exists.
 *
 * @returns The path the backup was written to.
 */
export function backupExistingEnv(envPath: string): string {
  const backupPath = `${envPath}.backup`;
  if (existsSync(backupPath)) {
    // Don't clobber a prior backup — append today's date instead.
    const dated = `${backupPath}.${new Date().toISOString().slice(0, 10)}`;
    copyFileSync(envPath, dated);
    try { chmodSync(dated, 0o600); } catch { /* non-fatal */ }
    return dated;
  }
  copyFileSync(envPath, backupPath);
  try { chmodSync(backupPath, 0o600); } catch { /* non-fatal */ }
  return backupPath;
}

/**
 * Clean up .env.tmp if it exists (interrupted setup).
 */
export function cleanupTmp(envPath: string): void {
  const tmpPath = envPath + '.tmp';
  try {
    if (existsSync(tmpPath)) {
      unlinkSync(tmpPath);
    }
  } catch {
    // ignore cleanup failures
  }
}

View file

@ -0,0 +1,164 @@
/**
 * Auto-detect gateway token from the local OpenClaw configuration.
 *
 * Reads ~/.openclaw/openclaw.json and extracts the gateway auth token.
 * This avoids requiring users to manually copy-paste the token during setup.
 */
import { readFileSync, writeFileSync, existsSync } from 'node:fs';
import { execSync } from 'node:child_process';
import { join } from 'node:path';
import os from 'node:os';

// $HOME takes precedence over os.homedir() so sandboxes can redirect it.
const HOME = process.env.HOME || os.homedir();
const OPENCLAW_CONFIG = join(HOME, '.openclaw', 'openclaw.json');

/** The slice of ~/.openclaw/openclaw.json this module reads and writes. */
interface OpenClawConfig {
  gateway?: {
    port?: number;
    bind?: string;
    auth?: {
      mode?: string;
      token?: string;
    };
    controlUi?: {
      allowedOrigins?: string[];
    };
  };
  [key: string]: unknown;
}

export interface DetectedGateway {
  token: string | null;
  url: string | null;
}

/**
 * Attempt to auto-detect gateway configuration from the local OpenClaw install.
 * Returns null values for anything that can't be detected.
 */
export function detectGatewayConfig(): DetectedGateway {
  if (!existsSync(OPENCLAW_CONFIG)) {
    return { token: null, url: null };
  }
  try {
    const parsed = JSON.parse(readFileSync(OPENCLAW_CONFIG, 'utf-8')) as OpenClawConfig;
    const gateway = parsed.gateway;
    return {
      token: gateway?.auth?.token || null,
      // Derive URL from port — always use 127.0.0.1 since Nerve connects locally
      url: `http://127.0.0.1:${gateway?.port || 18789}`,
    };
  } catch {
    // Config exists but can't be parsed — return nulls
    return { token: null, url: null };
  }
}

/**
 * Check if the OPENCLAW_GATEWAY_TOKEN environment variable is already set.
 * This is the standard env var that OpenClaw itself uses.
 */
export function getEnvGatewayToken(): string | null {
  const token = process.env.OPENCLAW_GATEWAY_TOKEN;
  return token ? token : null;
}

export interface GatewayPatchResult {
  ok: boolean;
  message: string;
  configPath: string;
}

/** Outcome reported by a config mutation callback. */
type MutationOutcome = { message: string; write: boolean };

/**
 * Shared read-mutate-write helper for patching ~/.openclaw/openclaw.json.
 * The callback mutates the parsed config in place and says whether the
 * file must be written back.
 */
function patchConfigFile(mutate: (config: OpenClawConfig) => MutationOutcome): GatewayPatchResult {
  const result: GatewayPatchResult = { ok: false, message: '', configPath: OPENCLAW_CONFIG };
  if (!existsSync(OPENCLAW_CONFIG)) {
    result.message = `Config not found: ${OPENCLAW_CONFIG}`;
    return result;
  }
  try {
    const config = JSON.parse(readFileSync(OPENCLAW_CONFIG, 'utf-8')) as OpenClawConfig;
    const outcome = mutate(config);
    if (outcome.write) {
      writeFileSync(OPENCLAW_CONFIG, JSON.stringify(config, null, 2) + '\n');
    }
    result.ok = true;
    result.message = outcome.message;
  } catch (err) {
    result.message = `Failed to patch config: ${err instanceof Error ? err.message : String(err)}`;
  }
  return result;
}

/**
 * Patch gateway.bind to the given value (e.g. 'lan' for 0.0.0.0).
 */
export function patchGatewayBind(bind: string): GatewayPatchResult {
  return patchConfigFile((config) => {
    config.gateway = config.gateway || {};
    config.gateway.bind = bind;
    return { message: `Set gateway.bind to "${bind}"`, write: true };
  });
}

/**
 * Patch the OpenClaw gateway config to allow external origins.
 * Adds the given origin to gateway.controlUi.allowedOrigins (deduped).
 * Returns a result indicating success/failure.
 */
export function patchGatewayAllowedOrigins(origin: string): GatewayPatchResult {
  return patchConfigFile((config) => {
    config.gateway = config.gateway || {};
    config.gateway.controlUi = config.gateway.controlUi || {};
    const origins = config.gateway.controlUi.allowedOrigins || [];
    if (origins.includes(origin)) {
      // Already present — report success without rewriting the file.
      return { message: `Origin already allowed: ${origin}`, write: false };
    }
    origins.push(origin);
    config.gateway.controlUi.allowedOrigins = origins;
    return { message: `Added ${origin} to gateway.controlUi.allowedOrigins`, write: true };
  });
}

/**
 * Attempt to restart the OpenClaw gateway so config changes take effect.
 * Tries `openclaw gateway restart` first, falls back to kill + start.
 */
export function restartGateway(): { ok: boolean; message: string } {
  try {
    execSync('openclaw gateway restart', { timeout: 15000, stdio: 'pipe' });
    return { ok: true, message: 'Gateway restarted' };
  } catch {
    // CLI restart failed — kill the process and rely on a supervisor
    // (if any) to bring it back up.
    try {
      execSync('pkill -f "openclaw gateway" || true', { timeout: 5000, stdio: 'pipe' });
      return { ok: true, message: 'Gateway process killed (should auto-restart if supervised)' };
    } catch {
      return { ok: false, message: 'Could not restart gateway — restart it manually' };
    }
  }
}

View file

@ -0,0 +1,74 @@
/**
 * Prerequisite checker — verifies Node.js version, npm, ffmpeg, openssl.
 */
import { execSync } from 'node:child_process';
import { success, warn, fail } from './banner.js';

/** Results of all prerequisite probes. */
export interface PrereqResult {
  nodeOk: boolean;
  nodeVersion: string;
  npmOk: boolean;
  ffmpegOk: boolean;
  opensslOk: boolean;
  tailscaleOk: boolean;
  tailscaleIp: string | null;
}

/** Check all prerequisites and print results. */
export function checkPrerequisites(opts?: { quiet?: boolean }): PrereqResult {
  const quiet = opts?.quiet ?? false;
  // Routes a message through the given printer unless quiet mode is on.
  const report = (printer: (msg: string) => void, msg: string): void => {
    if (!quiet) printer(msg);
  };
  if (!quiet) console.log(' Checking prerequisites...');

  const nodeVersion = process.version;
  const nodeOk = parseInt(nodeVersion.slice(1), 10) >= 22;
  report(
    nodeOk ? success : fail,
    nodeOk
      ? `Node.js ${nodeVersion} (≥22 required)`
      : `Node.js ${nodeVersion} — version 22 or later is required`,
  );

  const npmOk = commandExists('npm');
  report(npmOk ? success : fail, npmOk ? 'npm available' : 'npm not found');

  const ffmpegOk = commandExists('ffmpeg');
  report(
    ffmpegOk ? success : warn,
    ffmpegOk
      ? 'ffmpeg found (optional, for Qwen TTS)'
      : 'ffmpeg not found (optional — needed for Qwen TTS WAV→MP3)',
  );

  const opensslOk = commandExists('openssl');
  report(
    opensslOk ? success : warn,
    opensslOk
      ? 'openssl found (for HTTPS cert generation)'
      : 'openssl not found (optional — needed for self-signed HTTPS certs)',
  );

  const tailscaleOk = commandExists('tailscale');
  let tailscaleIp: string | null = null;
  if (tailscaleOk) {
    try {
      tailscaleIp = execSync('tailscale ip -4 2>/dev/null', { timeout: 3000 }).toString().trim() || null;
    } catch { /* not connected */ }
    report(
      tailscaleIp ? success : warn,
      tailscaleIp ? `Tailscale detected (${tailscaleIp})` : 'Tailscale installed but not connected',
    );
  }

  return { nodeOk, nodeVersion, npmOk, ffmpegOk, opensslOk, tailscaleOk, tailscaleIp };
}
/** Check if a command exists on the system (cross-platform). */
function commandExists(cmd: string): boolean {
  // `which` is POSIX-only; cmd.exe/PowerShell ship `where` instead.
  const probe = process.platform === 'win32' ? 'where' : 'which';
  try {
    execSync(`${probe} ${cmd}`, { stdio: 'pipe', timeout: 3000 });
    return true;
  } catch {
    return false;
  }
}

59
scripts/lib/validators.ts Normal file
View file

@ -0,0 +1,59 @@
/**
* Input validation functions for the setup CLI.
*/
import net from 'node:net';
/** Check if a string is a valid HTTP(S) URL. */
export function isValidUrl(url: string): boolean {
  let protocol: string;
  try {
    protocol = new URL(url).protocol;
  } catch {
    // Not parseable as a URL at all.
    return false;
  }
  return protocol === 'http:' || protocol === 'https:';
}
/** Check if a port number is valid (1–65535). */
export function isValidPort(port: number): boolean {
  if (!Number.isInteger(port)) return false;
  return port >= 1 && port <= 65535;
}
/**
 * Check if a port is available for binding.
 * Resolves true when a throwaway server can listen on host:port,
 * false when binding fails (e.g. address in use). Never rejects.
 */
export async function isPortAvailable(port: number, host: string = '127.0.0.1'): Promise<boolean> {
  return new Promise((settle) => {
    const probe = net.createServer();
    probe.once('error', () => settle(false));
    probe.once('listening', () => {
      probe.close();
      settle(true);
    });
    probe.listen(port, host);
  });
}
/** Test if the OpenClaw gateway is reachable at the given URL. */
export async function testGatewayConnection(url: string): Promise<{ ok: boolean; message: string }> {
  try {
    // 5s cap so a dead host doesn't hang the wizard.
    const resp = await fetch(`${url}/health`, { signal: AbortSignal.timeout(5000) });
    return resp.ok
      ? { ok: true, message: 'Gateway reachable' }
      : { ok: false, message: `Gateway returned HTTP ${resp.status}` };
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    return { ok: false, message: `Cannot reach gateway: ${msg}` };
  }
}
/** Loose validation for OpenAI API key format. */
export function isValidOpenAIKey(key: string): boolean {
  // OpenAI keys start with sk- and are long
  return key.length > 20 && key.startsWith('sk-');
}
/** Loose validation for Replicate API token format. */
export function isValidReplicateToken(token: string): boolean {
  // Replicate tokens are typically r8_ prefixed or just long alphanumeric,
  // so only a minimum length is enforced here.
  return token.length >= 11;
}

851
scripts/setup.ts Normal file
View file

@ -0,0 +1,851 @@
/**
* Interactive setup wizard for Nerve.
* Guides users through first-time configuration.
*
* Usage:
* npm run setup # Interactive setup
* npm run setup -- --check # Validate existing config
* npm run setup -- --defaults # Non-interactive with defaults
*/
// NOTE: tokens are intentionally shown unmasked in confirmation prompts
// so users can verify the detected value; no masking helper exists here.
import { existsSync } from 'node:fs';
import { resolve } from 'node:path';
import { networkInterfaces } from 'node:os';
import { input, password, confirm, select } from '@inquirer/prompts';
import { printBanner, section, success, warn, fail, info, dim, promptTheme } from './lib/banner.js';
import { checkPrerequisites, type PrereqResult } from './lib/prereq-check.js';
import {
isValidUrl,
isValidPort,
testGatewayConnection,
isValidOpenAIKey,
isValidReplicateToken,
} from './lib/validators.js';
import {
writeEnvFile,
backupExistingEnv,
loadExistingEnv,
cleanupTmp,
DEFAULTS,
type EnvConfig,
} from './lib/env-writer.js';
import { generateSelfSignedCert } from './lib/cert-gen.js';
import { detectGatewayConfig, getEnvGatewayToken, patchGatewayAllowedOrigins, patchGatewayBind, restartGateway } from './lib/gateway-detect.js';
// Resolved paths for the project root and its .env file.
const PROJECT_ROOT = resolve(process.cwd());
const ENV_PATH = resolve(PROJECT_ROOT, '.env');
// Number of wizard sections shown in interactive mode.
const TOTAL_SECTIONS = 5;
// CLI flags (everything after `npm run setup --`).
const args = process.argv.slice(2);
const isHelp = args.includes('--help') || args.includes('-h');
const isCheck = args.includes('--check');
const isDefaults = args.includes('--defaults');
// ── Ctrl+C handler ───────────────────────────────────────────────────
// Remove any half-written .env.tmp so an interrupted run leaves no residue.
process.on('SIGINT', () => {
  cleanupTmp(ENV_PATH);
  console.log('\n\n Setup cancelled.\n');
  process.exit(130); // 128 + SIGINT(2), the conventional exit code
});
// ── Main ─────────────────────────────────────────────────────────────
/**
 * Entry point for the setup wizard.
 *
 * Flow: --help text → banner → prerequisite check → load any existing
 * .env → dispatch to --check / --defaults modes → otherwise run the
 * interactive wizard, then back up and write the new .env and print
 * a summary.
 */
async function main(): Promise<void> {
  if (isHelp) {
    console.log(`
Usage: npm run setup [options]
Options:
--check Validate existing .env config and test gateway connection
--defaults Non-interactive setup using auto-detected values
--help, -h Show this help message
The setup wizard guides you through 5 steps:
1. Gateway Connection connect to your OpenClaw gateway
2. Agent Identity set your agent's display name
3. Access Mode local, Tailscale, LAN, or custom
4. TTS Configuration optional text-to-speech API keys
5. Advanced Settings custom file paths (most users skip this)
Examples:
npm run setup # Interactive setup
npm run setup -- --check # Validate existing config
npm run setup -- --defaults # Auto-configure with detected values
`);
    return;
  }
  printBanner(); // no-ops when NERVE_INSTALLER is set
  // Clean up stale .env.tmp from previous interrupted runs
  cleanupTmp(ENV_PATH);
  // Prerequisite checks (skip verbose output when called from installer — already checked)
  const prereqs = checkPrerequisites({ quiet: !!process.env.NERVE_INSTALLER });
  if (!prereqs.nodeOk) {
    // Node version is the only hard requirement — abort early.
    console.log('');
    fail('Node.js ≥ 22 is required. Please upgrade and try again.');
    process.exit(1);
  }
  // Load existing config as defaults
  const hasExisting = existsSync(ENV_PATH);
  const existing: EnvConfig = hasExisting ? loadExistingEnv(ENV_PATH) : {};
  if (hasExisting) {
    info('Found existing .env configuration');
  } else {
    info('No existing .env found — starting fresh setup');
  }
  // --check mode: validate and exit
  if (isCheck) {
    await runCheck(existing);
    return;
  }
  // --defaults mode: non-interactive
  if (isDefaults) {
    await runDefaults(existing);
    return;
  }
  // If .env exists, ask whether to update or start fresh
  // (Skip this when called from install.sh — the installer already asked)
  if (hasExisting && existing.GATEWAY_TOKEN && !process.env.NERVE_INSTALLER) {
    const action = await select({
      theme: promptTheme,
      message: 'What would you like to do?',
      choices: [
        { name: 'Update existing configuration', value: 'update' },
        { name: 'Start fresh', value: 'fresh' },
        { name: 'Cancel', value: 'cancel' },
      ],
    });
    if (action === 'cancel') {
      console.log('\n Setup cancelled.\n');
      return;
    }
    if (action === 'fresh') {
      // Empty the loaded config in place so collectInteractive sees a
      // blank slate (the same object reference is passed below).
      Object.keys(existing).forEach((k) => delete (existing as Record<string, unknown>)[k]);
    }
  }
  // Run interactive setup
  const config = await collectInteractive(existing, prereqs);
  // Write .env
  if (hasExisting) {
    // Back up before overwriting; path is displayed relative to the project root.
    const backupPath = backupExistingEnv(ENV_PATH);
    info(`Previous config backed up to ${backupPath.replace(PROJECT_ROOT + '/', '')}`);
  }
  writeEnvFile(ENV_PATH, config);
  console.log('');
  success('Configuration written to .env');
  printSummary(config);
  // When invoked from install.sh, build is already done — skip misleading "next steps"
  if (!process.env.NERVE_INSTALLER) {
    printNextSteps(config);
  }
}
// ── Interactive setup ────────────────────────────────────────────────
async function collectInteractive(
existing: EnvConfig,
prereqs: PrereqResult,
): Promise<EnvConfig> {
const config: EnvConfig = { ...existing };
// ── 1/5: Gateway Connection ──────────────────────────────────────
section(1, TOTAL_SECTIONS, 'Gateway Connection');
dim('Nerve connects to your OpenClaw gateway.');
dim('Make sure the gateway is running before continuing.');
console.log('');
// Auto-detect gateway config
const detected = detectGatewayConfig();
const envToken = getEnvGatewayToken();
// Determine default token (priority: existing > env > detected)
const defaultToken = existing.GATEWAY_TOKEN || envToken || detected.token || '';
const defaultUrl = existing.GATEWAY_URL || detected.url || DEFAULTS.GATEWAY_URL;
if (detected.token && !existing.GATEWAY_TOKEN) {
success('Auto-detected gateway token from ~/.openclaw/openclaw.json');
}
if (envToken && !existing.GATEWAY_TOKEN && !detected.token) {
success('Found OPENCLAW_GATEWAY_TOKEN in environment');
}
config.GATEWAY_URL = await input({
theme: promptTheme,
message: 'Gateway URL',
default: defaultUrl,
validate: (val) => {
if (!isValidUrl(val)) return 'Please enter a valid HTTP(S) URL';
return true;
},
});
// If we have an auto-detected token, offer to use it
if (defaultToken && !existing.GATEWAY_TOKEN) {
const useDetected = await confirm({
theme: promptTheme,
message: `Use detected token (${defaultToken})?`,
default: true,
});
if (useDetected) {
config.GATEWAY_TOKEN = defaultToken;
} else {
config.GATEWAY_TOKEN = await password({
theme: promptTheme,
message: 'Gateway Auth Token (required)',
validate: (val) => {
if (!val || !val.trim()) return 'Gateway token is required';
return true;
},
});
}
} else if (existing.GATEWAY_TOKEN) {
// Existing token — offer to keep it
const keepExisting = await confirm({
theme: promptTheme,
message: `Keep existing gateway token (${existing.GATEWAY_TOKEN})?`,
default: true,
});
if (keepExisting) {
config.GATEWAY_TOKEN = existing.GATEWAY_TOKEN;
} else {
config.GATEWAY_TOKEN = await password({
theme: promptTheme,
message: 'Gateway Auth Token (required)',
validate: (val) => {
if (!val || !val.trim()) return 'Gateway token is required';
return true;
},
});
}
} else {
dim('Find your token in ~/.openclaw/openclaw.json or run: openclaw gateway status');
config.GATEWAY_TOKEN = await password({
theme: promptTheme,
message: 'Gateway Auth Token (required)',
validate: (val) => {
if (!val || !val.trim()) return 'Gateway token is required';
return true;
},
});
}
// Test connection
const rail = ` \x1b[2m│\x1b[0m`;
const testPrefix = process.env.NERVE_INSTALLER ? `${rail} ` : ' ';
process.stdout.write(`${testPrefix}Testing connection... `);
const gwTest = await testGatewayConnection(config.GATEWAY_URL!);
if (gwTest.ok) {
console.log(`\x1b[32m✓\x1b[0m ${gwTest.message}`);
} else {
console.log(`\x1b[31m✗\x1b[0m ${gwTest.message}`);
dim(' Start it with: openclaw gateway start');
const proceed = await confirm({
theme: promptTheme,
message: 'Gateway is unreachable. Continue with this URL anyway?',
default: false,
});
if (!proceed) {
console.log('\n Start your gateway with: \x1b[36mopenclaw gateway start\x1b[0m');
console.log(' Then re-run: \x1b[36mnpm run setup\x1b[0m\n');
process.exit(1);
}
}
// ── 2/5: Agent Identity ──────────────────────────────────────────
section(2, TOTAL_SECTIONS, 'Agent Identity');
config.AGENT_NAME = await input({
theme: promptTheme,
message: 'Agent display name',
default: existing.AGENT_NAME || DEFAULTS.AGENT_NAME,
});
// ── 3/5: Access Mode ──────────────────────────────────────────────
section(3, TOTAL_SECTIONS, 'How will you access Nerve?');
// Build access mode choices dynamically
type AccessMode = 'local' | 'tailscale' | 'network' | 'custom';
const accessChoices: { name: string; value: AccessMode; description: string }[] = [
{ name: 'This machine only (localhost)', value: 'local', description: 'Safest — only accessible from this computer' },
];
if (prereqs.tailscaleIp) {
accessChoices.push({
name: `Via Tailscale (${prereqs.tailscaleIp})`,
value: 'tailscale',
description: 'Access from any device on your Tailscale network — secure, no port forwarding needed',
});
}
accessChoices.push(
{ name: 'From other devices on my network', value: 'network', description: 'Opens to LAN — you may need to configure your firewall' },
{ name: 'Custom setup (I know what I\'m doing)', value: 'custom', description: 'Manual port, bind address, HTTPS, CORS configuration' },
);
const accessMode = await select<AccessMode>({
theme: promptTheme,
message: 'How will you connect to Nerve?',
choices: accessChoices,
});
const port = existing.PORT || DEFAULTS.PORT;
config.PORT = port;
// Helper: offer HTTPS setup for non-localhost access modes (voice input needs secure context)
async function offerHttpsSetup(remoteIp: string): Promise<void> {
console.log('');
warn('Voice input (microphone) requires HTTPS on non-localhost connections.');
dim('Browsers block microphone access over plain HTTP for security.');
console.log('');
const enableHttps = await confirm({
theme: promptTheme,
message: 'Enable HTTPS? (recommended for voice input)',
default: true,
});
if (enableHttps) {
let certsReady = false;
if (prereqs.opensslOk) {
const certResult = generateSelfSignedCert(PROJECT_ROOT);
if (certResult.ok) {
success(certResult.message);
certsReady = true;
} else {
fail(certResult.message);
}
} else {
warn('openssl not found — cannot generate self-signed certificate');
dim('Install openssl and run: mkdir -p certs && openssl req -x509 -newkey rsa:2048 \\');
dim(' -keyout certs/key.pem -out certs/cert.pem -days 365 -nodes -subj "/CN=localhost"');
}
if (certsReady) {
const sslPort = existing.SSL_PORT || DEFAULTS.SSL_PORT;
config.SSL_PORT = sslPort;
// Add HTTPS origins to CORS and CSP
const httpsUrl = `https://${remoteIp}:${sslPort}`;
const existingOrigins = config.ALLOWED_ORIGINS || '';
config.ALLOWED_ORIGINS = existingOrigins ? `${existingOrigins},${httpsUrl}` : httpsUrl;
const existingCsp = config.CSP_CONNECT_EXTRA || '';
config.CSP_CONNECT_EXTRA = existingCsp
? `${existingCsp} ${httpsUrl} wss://${remoteIp}:${sslPort}`
: `${httpsUrl} wss://${remoteIp}:${sslPort}`;
success(`HTTPS will be available at ${httpsUrl}`);
dim('Note: Self-signed certs will show a browser warning on first visit — click "Advanced" → "Proceed"');
} else {
warn('HTTPS disabled — voice input will only work on localhost');
}
} else {
dim('Voice input will only work when accessing Nerve from localhost');
}
}
if (accessMode === 'local') {
config.HOST = '127.0.0.1';
success(`Nerve will be available at http://localhost:${port}`);
} else if (accessMode === 'tailscale') {
config.HOST = '0.0.0.0';
const tsIp = prereqs.tailscaleIp!;
const tsUrl = `http://${tsIp}:${port}`;
config.ALLOWED_ORIGINS = tsUrl;
config.WS_ALLOWED_HOSTS = tsIp;
config.CSP_CONNECT_EXTRA = `${tsUrl} ws://${tsIp}:${port}`;
success(`Nerve will be available at ${tsUrl}`);
dim('Accessible from any device on your Tailscale network');
await offerHttpsSetup(tsIp);
} else if (accessMode === 'network') {
config.HOST = '0.0.0.0';
// Auto-detect LAN IP
const detectedIp = (() => {
const nets = networkInterfaces();
for (const addrs of Object.values(nets)) {
for (const addr of addrs ?? []) {
if (!addr.internal && addr.family === 'IPv4') return addr.address;
}
}
return null;
})();
const lanIp = await input({
theme: promptTheme,
message: 'Your LAN IP address',
default: detectedIp || '',
validate: (val) => {
if (!val.trim()) return 'IP address is required for network access';
if (!/^\d{1,3}(\.\d{1,3}){3}$/.test(val.trim())) return 'Enter a valid IPv4 address';
return true;
},
});
const ip = lanIp.trim();
const lanUrl = `http://${ip}:${port}`;
config.ALLOWED_ORIGINS = lanUrl;
config.WS_ALLOWED_HOSTS = ip;
config.CSP_CONNECT_EXTRA = `${lanUrl} ws://${ip}:${port}`;
success(`Nerve will be available at ${lanUrl}`);
dim('Make sure your firewall allows traffic on port ' + port);
dim('Need access from multiple devices? Add more origins to ALLOWED_ORIGINS in .env');
await offerHttpsSetup(ip);
} else {
// Custom — full manual control
const portStr = await input({
theme: promptTheme,
message: 'HTTP port',
default: existing.PORT || DEFAULTS.PORT,
validate: (val) => {
const n = parseInt(val, 10);
if (!isValidPort(n)) return 'Please enter a valid port (165535)';
return true;
},
});
config.PORT = portStr;
config.HOST = await input({
theme: promptTheme,
message: 'Bind address (127.0.0.1 = local only, 0.0.0.0 = all interfaces)',
default: existing.HOST || DEFAULTS.HOST,
});
// HTTPS
const enableHttps = await confirm({
theme: promptTheme,
message: 'Enable HTTPS? (needed for microphone access over network)',
default: false,
});
if (enableHttps) {
let certsReady = false;
if (prereqs.opensslOk) {
const certResult = generateSelfSignedCert(PROJECT_ROOT);
if (certResult.ok) {
success(certResult.message);
certsReady = true;
} else {
fail(certResult.message);
}
} else {
warn('openssl not found — cannot generate self-signed certificate');
dim('Install openssl and run: mkdir -p certs && openssl req -x509 -newkey rsa:2048 \\');
dim(' -keyout certs/key.pem -out certs/cert.pem -days 365 -nodes -subj "/CN=localhost"');
}
if (certsReady) {
config.SSL_PORT = await input({
theme: promptTheme,
message: 'SSL port',
default: existing.SSL_PORT || DEFAULTS.SSL_PORT,
validate: (val) => {
const n = parseInt(val, 10);
if (!isValidPort(n)) return 'Please enter a valid port (165535)';
if (n === parseInt(config.PORT || DEFAULTS.PORT, 10)) return 'SSL port must differ from HTTP port';
return true;
},
});
// Add HTTPS/WSS origins to CORS and CSP when bound to a non-loopback address
const customHost = config.HOST || DEFAULTS.HOST;
if (customHost !== '127.0.0.1' && customHost !== 'localhost' && customHost !== '::1') {
const httpsUrl = `https://${customHost}:${config.SSL_PORT}`;
config.ALLOWED_ORIGINS = config.ALLOWED_ORIGINS
? `${config.ALLOWED_ORIGINS},${httpsUrl}`
: httpsUrl;
config.CSP_CONNECT_EXTRA = config.CSP_CONNECT_EXTRA
? `${config.CSP_CONNECT_EXTRA} ${httpsUrl} wss://${customHost}:${config.SSL_PORT}`
: `${httpsUrl} wss://${customHost}:${config.SSL_PORT}`;
}
} else {
warn('HTTPS disabled — no certificates available');
dim('You can generate certs manually and add SSL_PORT to .env later');
}
}
}
// ── Patch gateway for external access ─────────────────────────────
if (accessMode !== 'local') {
const nervePort = config.PORT || DEFAULTS.PORT;
// Extract the real IP — 0.0.0.0 isn't a valid origin for browsers
let accessIp = config.HOST === '0.0.0.0'
? (config.ALLOWED_ORIGINS?.split(',')[0]?.replace(/^https?:\/\//, '').replace(/:\d+$/, '') || 'localhost')
: (config.HOST || 'localhost');
if (accessIp === '0.0.0.0') {
// Detect actual LAN IP as fallback
const nets = networkInterfaces();
for (const addrs of Object.values(nets)) {
for (const addr of addrs ?? []) {
if (!addr.internal && addr.family === 'IPv4') { accessIp = addr.address; break; }
}
if (accessIp !== '0.0.0.0') break;
}
}
const nerveOrigin = `http://${accessIp}:${nervePort}`;
const sslPort = config.SSL_PORT;
const nerveHttpsOrigin = sslPort ? `https://${accessIp}:${sslPort}` : null;
console.log('');
warn('External access requires updating the OpenClaw gateway config.');
dim('Without this, the gateway will reject WebSocket connections from Nerve.');
console.log('');
dim(' This will:');
dim(` 1. Set gateway.bind to "lan" (listen on all interfaces)`);
dim(` 2. Add ${nerveOrigin} to gateway.controlUi.allowedOrigins`);
if (nerveHttpsOrigin) {
dim(` 3. Add ${nerveHttpsOrigin} to gateway.controlUi.allowedOrigins`);
}
dim(` Config file: ~/.openclaw/openclaw.json`);
console.log('');
const patchGateway = await confirm({
theme: promptTheme,
message: 'Update OpenClaw gateway config to allow Nerve connections?',
default: true,
});
if (patchGateway) {
const bindResult = patchGatewayBind('lan');
if (bindResult.ok) {
success(bindResult.message);
} else {
warn(bindResult.message);
}
const httpResult = patchGatewayAllowedOrigins(nerveOrigin);
if (httpResult.ok) {
success(httpResult.message);
} else {
warn(httpResult.message);
dim('You can manually add the origin to gateway.controlUi.allowedOrigins in ~/.openclaw/openclaw.json');
}
if (nerveHttpsOrigin) {
const httpsResult = patchGatewayAllowedOrigins(nerveHttpsOrigin);
if (httpsResult.ok) {
success(httpsResult.message);
} else {
warn(httpsResult.message);
}
}
// Auto-restart gateway to apply changes
const restartResult = restartGateway();
if (restartResult.ok) {
success(restartResult.message);
} else {
warn(restartResult.message);
}
} else {
warn('Skipped — you may see "origin not allowed" errors in Nerve.');
dim('To fix later, add the origin to gateway.controlUi.allowedOrigins in ~/.openclaw/openclaw.json');
}
}
// ── 4/5: TTS ─────────────────────────────────────────────────────
section(4, TOTAL_SECTIONS, 'Text-to-Speech (optional)');
dim('Edge TTS is always available (free, no API key needed).');
dim('Add API keys below for higher-quality alternatives.');
console.log('');
const openaiKey = await password({
theme: promptTheme,
message: 'OpenAI API Key (press Enter to skip)',
});
if (openaiKey && openaiKey.trim()) {
if (isValidOpenAIKey(openaiKey.trim())) {
config.OPENAI_API_KEY = openaiKey.trim();
success('OpenAI API key accepted (enables TTS + Whisper transcription)');
} else {
warn('Key doesn\'t look like a standard OpenAI key (expected sk-...)');
const useAnyway = await confirm({
theme: promptTheme,
message: 'Use this key anyway?',
default: true,
});
if (useAnyway) {
config.OPENAI_API_KEY = openaiKey.trim();
}
}
}
const replicateToken = await password({
theme: promptTheme,
message: 'Replicate API Token (press Enter to skip)',
});
if (replicateToken && replicateToken.trim()) {
if (isValidReplicateToken(replicateToken.trim())) {
config.REPLICATE_API_TOKEN = replicateToken.trim();
success('Replicate token accepted (enables Qwen TTS)');
if (!prereqs.ffmpegOk) {
warn('ffmpeg not found — Qwen TTS requires it for WAV→MP3 conversion');
}
} else {
warn('Token seems too short');
const useAnyway = await confirm({
theme: promptTheme,
message: 'Use this token anyway?',
default: true,
});
if (useAnyway) {
config.REPLICATE_API_TOKEN = replicateToken.trim();
}
}
}
// ── 5/5: Advanced Settings ────────────────────────────────────────
section(5, TOTAL_SECTIONS, 'Advanced Settings (optional)');
const configureAdvanced = await confirm({
theme: promptTheme,
message: 'Customize file paths? (most users should skip this)',
default: false,
});
if (configureAdvanced) {
const memPath = await input({
theme: promptTheme,
message: 'Custom memory file path (or Enter for default)',
default: existing.MEMORY_PATH || '',
});
if (memPath.trim()) config.MEMORY_PATH = memPath.trim();
const memDir = await input({
theme: promptTheme,
message: 'Custom memory directory path (or Enter for default)',
default: existing.MEMORY_DIR || '',
});
if (memDir.trim()) config.MEMORY_DIR = memDir.trim();
const sessDir = await input({
theme: promptTheme,
message: 'Custom sessions directory (or Enter for default)',
default: existing.SESSIONS_DIR || '',
});
if (sessDir.trim()) config.SESSIONS_DIR = sessDir.trim();
} else {
// Preserve any existing advanced settings on update
if (existing.MEMORY_PATH) config.MEMORY_PATH = existing.MEMORY_PATH;
if (existing.MEMORY_DIR) config.MEMORY_DIR = existing.MEMORY_DIR;
if (existing.SESSIONS_DIR) config.SESSIONS_DIR = existing.SESSIONS_DIR;
if (existing.USAGE_FILE) config.USAGE_FILE = existing.USAGE_FILE;
}
return config;
}
// ── Summary and next steps ───────────────────────────────────────────
/**
 * Print a configuration summary after setup completes.
 *
 * Two layouts: when NERVE_INSTALLER is set, a "rail" style that continues the
 * parent installer's dim `│` gutter; otherwise a standalone boxed table.
 * Values fall back to DEFAULTS when unset; the HTTPS row only appears when a
 * certificate exists on disk.
 */
function printSummary(config: EnvConfig): void {
  const gwUrl = config.GATEWAY_URL || DEFAULTS.GATEWAY_URL;
  const agentName = config.AGENT_NAME || DEFAULTS.AGENT_NAME;
  const port = config.PORT || DEFAULTS.PORT;
  const sslPort = config.SSL_PORT || DEFAULTS.SSL_PORT;
  const host = config.HOST || DEFAULTS.HOST;
  // HTTPS is considered enabled iff the self-signed cert was generated
  const hasCerts = existsSync(resolve(PROJECT_ROOT, 'certs', 'cert.pem'));
  // Label reflects which TTS backends have credentials; Edge needs none
  let ttsProvider = 'Edge (free)';
  if (config.OPENAI_API_KEY && config.REPLICATE_API_TOKEN) {
    ttsProvider = 'OpenAI + Replicate + Edge';
  } else if (config.OPENAI_API_KEY) {
    ttsProvider = 'OpenAI + Edge (fallback)';
  } else if (config.REPLICATE_API_TOKEN) {
    ttsProvider = 'Replicate + Edge (fallback)';
  }
  const hostLabel = host === '127.0.0.1' ? '127.0.0.1 (local only)' : `${host} (network)`;
  if (process.env.NERVE_INSTALLER) {
    // Rail-style summary — stays inside the installer's visual flow
    const r = ` \x1b[2m│\x1b[0m`;
    console.log('');
    console.log(`${r} \x1b[2mGateway${' '.repeat(4)}\x1b[0m${gwUrl}`);
    console.log(`${r} \x1b[2mAgent${' '.repeat(6)}\x1b[0m${agentName}`);
    console.log(`${r} \x1b[2mHTTP${' '.repeat(7)}\x1b[0m:${port}`);
    if (hasCerts) {
      console.log(`${r} \x1b[2mHTTPS${' '.repeat(6)}\x1b[0m:${sslPort}`);
    }
    console.log(`${r} \x1b[2mTTS${' '.repeat(8)}\x1b[0m${ttsProvider}`);
    console.log(`${r} \x1b[2mHost${' '.repeat(7)}\x1b[0m${hostLabel}`);
  } else {
    // Standalone mode — boxed summary
    // NOTE(review): padEnd widths assume values fit within the 41-char box;
    // a long gateway URL will push the right border out of alignment
    console.log('');
    console.log(' \x1b[2m┌─────────────────────────────────────────┐\x1b[0m');
    console.log(` \x1b[2m│\x1b[0m Gateway ${gwUrl.padEnd(28)}\x1b[2m│\x1b[0m`);
    console.log(` \x1b[2m│\x1b[0m Agent ${agentName.padEnd(28)}\x1b[2m│\x1b[0m`);
    console.log(` \x1b[2m│\x1b[0m HTTP :${port.padEnd(27)}\x1b[2m│\x1b[0m`);
    if (hasCerts) {
      console.log(` \x1b[2m│\x1b[0m HTTPS :${sslPort.padEnd(27)}\x1b[2m│\x1b[0m`);
    }
    console.log(` \x1b[2m│\x1b[0m TTS ${ttsProvider.padEnd(28)}\x1b[2m│\x1b[0m`);
    console.log(` \x1b[2m│\x1b[0m Host ${hostLabel.padEnd(28)}\x1b[2m│\x1b[0m`);
    console.log(' \x1b[2m└─────────────────────────────────────────┘\x1b[0m');
  }
}
/** Print the follow-up commands to run after setup completes. */
function printNextSteps(config: EnvConfig): void {
  const port = config.PORT || DEFAULTS.PORT;
  console.log('');
  console.log(' \x1b[1mNext steps:\x1b[0m');
  console.log(` Development: \x1b[36mnpm run dev\x1b[0m && \x1b[36mnpm run dev:server\x1b[0m`);
  console.log(` Production: \x1b[36mnpm run prod\x1b[0m`);
  console.log('');
  // NOTE(review): always prints localhost even when HOST is a LAN address —
  // consider showing the bound host for network installs
  console.log(` Open \x1b[36mhttp://localhost:${port}\x1b[0m in your browser.`);
  console.log('');
}
// ── --check mode ─────────────────────────────────────────────────────
/**
 * `--check` mode: validate the existing configuration without changing it.
 *
 * Hard errors (missing token, invalid URL/port) increment `errors` and cause
 * exit code 1; everything else is reported as warning/info only. The gateway
 * connectivity probe is advisory — an unreachable gateway does not fail the
 * check.
 */
async function runCheck(config: EnvConfig): Promise<void> {
  console.log('');
  console.log(' \x1b[1mValidating configuration...\x1b[0m');
  console.log('');
  let errors = 0;
  // Gateway token — the only hard-required setting
  if (config.GATEWAY_TOKEN) {
    success('GATEWAY_TOKEN is set');
  } else {
    fail('GATEWAY_TOKEN is missing (required)');
    errors++;
  }
  // Gateway URL — syntactic check first, then a live reachability probe
  const gwUrl = config.GATEWAY_URL || DEFAULTS.GATEWAY_URL;
  if (isValidUrl(gwUrl)) {
    success(`GATEWAY_URL is valid: ${gwUrl}`);
    // Test connectivity (warning only — the gateway may simply be down)
    process.stdout.write(' Testing gateway connection... ');
    const gwTest = await testGatewayConnection(gwUrl);
    if (gwTest.ok) {
      console.log(`\x1b[32m✓\x1b[0m ${gwTest.message}`);
    } else {
      console.log(`\x1b[33m⚠\x1b[0m ${gwTest.message}`);
    }
  } else {
    fail(`GATEWAY_URL is invalid: ${gwUrl}`);
    errors++;
  }
  // Port must parse to a valid TCP port
  const port = parseInt(config.PORT || DEFAULTS.PORT, 10);
  if (isValidPort(port)) {
    success(`PORT is valid: ${port}`);
  } else {
    fail(`PORT is invalid: ${config.PORT}`);
    errors++;
  }
  // TTS keys are optional — Edge TTS always works without them
  if (config.OPENAI_API_KEY) {
    success('OPENAI_API_KEY is set (OpenAI TTS + Whisper enabled)');
  } else {
    info('OPENAI_API_KEY not set (Edge TTS will be used as fallback)');
  }
  if (config.REPLICATE_API_TOKEN) {
    success('REPLICATE_API_TOKEN is set (Qwen TTS enabled)');
  } else {
    info('REPLICATE_API_TOKEN not set');
  }
  // Host binding — 0.0.0.0 is allowed but worth a warning
  const host = config.HOST || DEFAULTS.HOST;
  if (host === '0.0.0.0') {
    warn('HOST is 0.0.0.0 — server is accessible from the network');
  } else {
    success(`HOST: ${host}`);
  }
  // HTTPS certs — presence of cert.pem implies setup generated the pair
  if (existsSync(resolve(PROJECT_ROOT, 'certs', 'cert.pem'))) {
    success('HTTPS certificates found at certs/');
  } else {
    info('No HTTPS certificates (HTTP only)');
  }
  console.log('');
  if (errors > 0) {
    fail(`${errors} issue(s) found. Run \x1b[36mnpm run setup\x1b[0m to fix.`);
    process.exit(1);
  } else {
    success('Configuration looks good!');
  }
  console.log('');
}
// ── --defaults mode ──────────────────────────────────────────────────
/**
 * `--defaults` mode: produce a working .env without any prompts.
 *
 * The gateway token is the only hard requirement — taken from the environment
 * or auto-detected from the OpenClaw config; everything else falls back to
 * DEFAULTS. Values already present in `existing` are preserved.
 */
async function runDefaults(existing: EnvConfig): Promise<void> {
  console.log('');
  info('Non-interactive mode — using defaults where possible');
  console.log('');
  const config: EnvConfig = { ...existing };
  // Gateway token: env var takes precedence over the detected OpenClaw config
  if (!config.GATEWAY_TOKEN) {
    const detected = detectGatewayConfig();
    const token = getEnvGatewayToken() || detected.token;
    if (!token) {
      fail('GATEWAY_TOKEN is required but could not be auto-detected');
      console.log(' Set OPENCLAW_GATEWAY_TOKEN in your environment, or run setup interactively.');
      console.log('');
      process.exit(1);
    }
    config.GATEWAY_TOKEN = token;
    success('Auto-detected gateway token');
  }
  // Fill remaining gaps with shipped defaults
  config.GATEWAY_URL ||= DEFAULTS.GATEWAY_URL;
  config.AGENT_NAME ||= DEFAULTS.AGENT_NAME;
  config.PORT ||= DEFAULTS.PORT;
  config.HOST ||= DEFAULTS.HOST;
  // Back up any previous .env before overwriting it
  if (existsSync(ENV_PATH)) {
    const backup = backupExistingEnv(ENV_PATH);
    info(`Previous config backed up to ${backup.replace(PROJECT_ROOT + '/', '')}`);
  }
  writeEnvFile(ENV_PATH, config);
  success('Configuration written to .env');
  printSummary(config);
  console.log('');
}
// ── Run ──────────────────────────────────────────────────────────────
main().catch((err) => {
  // ExitPromptError is thrown when user presses Ctrl+C during a prompt.
  // Optional chaining throughout: a promise can reject with null/undefined,
  // and `err.name` / `err.message` on those would throw a second TypeError
  // that masks the real failure.
  if (err?.name === 'ExitPromptError') {
    cleanupTmp(ENV_PATH);
    console.log('\n\n Setup cancelled.\n');
    process.exit(130);
  }
  console.error('\n Setup failed:', err?.message || err);
  cleanupTmp(ENV_PATH);
  process.exit(1);
});

133
server/app.ts Normal file
View file

@ -0,0 +1,133 @@
/**
* Hono app definition + middleware stack.
*
* Assembles all middleware (CORS, security headers, body limits, compression,
* cache-control) and mounts every API route under `/api/`. Also serves the
* Vite-built SPA from `dist/` with a catch-all fallback to `index.html`.
* @module
*/
import { Hono } from 'hono';
import { logger } from 'hono/logger';
import { cors } from 'hono/cors';
import { compress } from 'hono/compress';
import { bodyLimit } from 'hono/body-limit';
import { serveStatic } from '@hono/node-server/serve-static';
import { cacheHeaders } from './middleware/cache-headers.js';
import { errorHandler } from './middleware/error-handler.js';
import { securityHeaders } from './middleware/security-headers.js';
import { config } from './lib/config.js';
import healthRoutes from './routes/health.js';
import ttsRoutes from './routes/tts.js';
import transcribeRoutes from './routes/transcribe.js';
import agentLogRoutes from './routes/agent-log.js';
import tokensRoutes from './routes/tokens.js';
import memoriesRoutes from './routes/memories.js';
import eventsRoutes from './routes/events.js';
import serverInfoRoutes from './routes/server-info.js';
import codexLimitsRoutes from './routes/codex-limits.js';
import claudeCodeLimitsRoutes from './routes/claude-code-limits.js';
import versionRoutes from './routes/version.js';
import gatewayRoutes from './routes/gateway.js';
import connectDefaultsRoutes from './routes/connect-defaults.js';
import workspaceRoutes from './routes/workspace.js';
import cronsRoutes from './routes/crons.js';
import sessionsRoutes from './routes/sessions.js';
import apiKeysRoutes from './routes/api-keys.js';
import skillsRoutes from './routes/skills.js';
import filesRoutes from './routes/files.js';
import voicePhrasesRoutes from './routes/voice-phrases.js';
import fileBrowserRoutes from './routes/file-browser.js';
// activity routes removed — tab dropped from workspace panel
const app = new Hono();

// ── CORS — only allow requests from known local origins ──────────────
// Seed the allowlist with the loopback origins for both ports.
const ALLOWED_ORIGINS = new Set<string>();
for (const localHost of ['localhost', '127.0.0.1']) {
  ALLOWED_ORIGINS.add(`http://${localHost}:${config.port}`);
  ALLOWED_ORIGINS.add(`https://${localHost}:${config.sslPort}`);
}
// Additional origins come from the ALLOWED_ORIGINS env var (comma-separated).
// Each entry is normalized through the URL constructor so malformed values are
// dropped; the literal "null" origin is rejected outright.
for (const rawEntry of (process.env.ALLOWED_ORIGINS ?? '').split(',')) {
  const candidate = rawEntry.trim();
  if (!candidate || candidate === 'null') continue;
  try {
    ALLOWED_ORIGINS.add(new URL(candidate).origin);
  } catch {
    // skip malformed origins
  }
}
// ── Middleware ────────────────────────────────────────────────────────
app.onError(errorHandler);
app.use('*', logger());
app.use(
  '*',
  cors({
    origin: (origin) => {
      // No Origin header: allow only when bound to localhost (same-origin / non-browser).
      // When network-exposed (HOST=0.0.0.0), reject to prevent server-to-server CSRF.
      if (!origin) {
        const isLocal = config.host === '127.0.0.1' || config.host === 'localhost' || config.host === '::1';
        return isLocal ? origin : null;
      }
      return ALLOWED_ORIGINS.has(origin) ? origin : null;
    },
    credentials: true,
    allowMethods: ['GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS'],
    allowHeaders: ['Content-Type', 'Authorization'],
  }),
);
app.use('*', securityHeaders);
// Body-size cap applies to API routes only; static assets are GET-only
app.use(
  '/api/*',
  bodyLimit({
    maxSize: config.limits.maxBodyBytes,
    onError: (c) => c.text('Request body too large', 413),
  }),
);
// Apply compression to all routes except SSE (compression buffers chunks and breaks streaming).
// The compress() factory is hoisted so the middleware is built once at startup
// instead of being re-created on every single request.
const compressMiddleware = compress();
app.use('*', async (c, next) => {
  if (c.req.path === '/api/events') return next();
  return compressMiddleware(c, next);
});
app.use('*', cacheHeaders);
// ── API routes ───────────────────────────────────────────────────────
// Each route module is mounted at the root — presumably each sub-app declares
// its own full `/api/...` paths; confirm in the individual route files.
const routes = [
  healthRoutes, ttsRoutes, transcribeRoutes, agentLogRoutes,
  tokensRoutes, memoriesRoutes, eventsRoutes, serverInfoRoutes,
  codexLimitsRoutes, claudeCodeLimitsRoutes, versionRoutes,
  gatewayRoutes, connectDefaultsRoutes,
  workspaceRoutes, cronsRoutes, sessionsRoutes, skillsRoutes, filesRoutes, apiKeysRoutes,
  voicePhrasesRoutes, fileBrowserRoutes,
];
for (const route of routes) app.route('/', route);
// ── Static files + SPA fallback ──────────────────────────────────────
// NOTE(review): './dist/' is resolved relative to process.cwd(), not this
// file — assumes the server is launched from the project root; confirm in
// the npm run scripts.
app.use('/assets/*', serveStatic({ root: './dist/' }));
// Serve static files but skip API routes
app.use('*', async (c, next) => {
  if (c.req.path.startsWith('/api/')) return next();
  return serveStatic({ root: './dist/' })(c, next);
});
// SPA fallback — serve index.html for non-API routes (client-side routing)
app.get('*', async (c, next) => {
  if (c.req.path.startsWith('/api/')) return next();
  return serveStatic({ root: './dist/', path: 'index.html' })(c, next);
});
export default app;

184
server/index.ts Normal file
View file

@ -0,0 +1,184 @@
/**
* Nerve server entry point.
*
* Starts HTTP and optional HTTPS servers (for secure-context features like
* microphone access), sets up WebSocket proxying to the OpenClaw gateway,
* starts file watchers, and registers graceful shutdown handlers.
* @module
*/
import fs from 'node:fs';
import path from 'node:path';
import https from 'node:https';
import { fileURLToPath } from 'node:url';
import { serve } from '@hono/node-server';
import app from './app.js';
import { releaseWhisperContext } from './services/whisper-local.js';
import { config, validateConfig, printStartupBanner, probeGateway } from './lib/config.js';
import { setupWebSocketProxy, closeAllWebSockets } from './lib/ws-proxy.js';
import { startFileWatcher, stopFileWatcher } from './lib/file-watcher.js';
// ── Startup banner + validation ──────────────────────────────────────
// Resolve the package version from package.json (one directory above server/).
const moduleDir = path.dirname(fileURLToPath(import.meta.url));
const packageJsonPath = path.resolve(moduleDir, '..', 'package.json');
const packageMeta = JSON.parse(fs.readFileSync(packageJsonPath, 'utf-8')) as { version?: string };
const pkgVersion: string = packageMeta.version || '0.0.0';
printStartupBanner(pkgVersion);
validateConfig();
// ── Start file watchers ──────────────────────────────────────────────
startFileWatcher();
// ── HTTP server ──────────────────────────────────────────────────────
const httpServer = serve(
  {
    fetch: app.fetch,
    port: config.port,
    hostname: config.host,
  },
  (info) => {
    console.log(`\x1b[33m[openclaw-ui]\x1b[0m http://${config.host}:${info.port}`);
  },
);
// Friendly error on port conflict.
// NOTE(review): serve() returns @hono/node-server's ServerType; the cast to
// net.Server to attach .on('error') assumes the default node HTTP adapter —
// confirm against the @hono/node-server docs.
(httpServer as unknown as import('node:net').Server).on('error', (err: NodeJS.ErrnoException) => {
  if (err.code === 'EADDRINUSE') {
    console.error(`\x1b[31m[openclaw-ui]\x1b[0m Port ${config.port} is already in use. Is another instance running?`);
    process.exit(1);
  }
  // Anything other than a port conflict is unexpected — crash loudly
  throw err;
});
// Set up WS proxy on HTTP server (for remote access without SSL)
setupWebSocketProxy(httpServer as unknown as import('node:http').Server);
// Non-blocking gateway health check — fire-and-forget; probeGateway catches
// all failures internally, so this floating promise can never reject
probeGateway();
// ── HTTPS server (for secure context — microphone access, WSS proxy) ─
// Only started when both cert and key files exist on disk.
let sslServer: https.Server | undefined;
if (fs.existsSync(config.certPath) && fs.existsSync(config.keyPath)) {
  const sslOptions = {
    cert: fs.readFileSync(config.certPath),
    key: fs.readFileSync(config.keyPath),
  };
  const MAX_BODY_BYTES = config.limits.maxBodyBytes;
  sslServer = https.createServer(sslOptions, async (req, res) => {
    // Convert Node req/res to fetch Request and forward to Hono
    const protocol = 'https';
    const host = req.headers.host || `localhost:${config.sslPort}`;
    const url = new URL(req.url || '/', `${protocol}://${host}`);
    // Read body with size limit — rejects with 413 before buffering the whole
    // payload (mirrors the bodyLimit middleware on the HTTP path)
    const chunks: Buffer[] = [];
    let totalBytes = 0;
    for await (const chunk of req) {
      totalBytes += (chunk as Buffer).length;
      if (totalBytes > MAX_BODY_BYTES) {
        res.writeHead(413, { 'Content-Type': 'text/plain' });
        res.end('Request body too large');
        return;
      }
      chunks.push(chunk as Buffer);
    }
    const body = chunks.length > 0 ? Buffer.concat(chunks) : undefined;
    // Copy incoming headers into a fetch Headers object; array values are
    // repeated headers and must be appended individually
    const headers = new Headers();
    for (const [key, value] of Object.entries(req.headers)) {
      if (value) {
        if (Array.isArray(value)) {
          for (const v of value) headers.append(key, v);
        } else {
          headers.set(key, value);
        }
      }
    }
    const request = new Request(url.toString(), {
      method: req.method,
      headers,
      // GET/HEAD must not carry a body per the fetch spec
      body: req.method !== 'GET' && req.method !== 'HEAD' ? body : undefined,
      duplex: 'half',
    });
    try {
      // Pass the Node.js IncomingMessage as env.incoming so @hono/node-server's
      // getConnInfo() can read the real socket remote address (fixes rate limiting on HTTPS).
      const response = await app.fetch(request, { incoming: req });
      // NOTE(review): Object.fromEntries collapses repeated response headers
      // (e.g. multiple Set-Cookie) into a single entry — confirm no route
      // relies on duplicate headers
      const responseHeaders = Object.fromEntries(response.headers.entries());
      const contentType = response.headers.get('content-type') || '';
      // Stream SSE responses instead of buffering (Fix #6: SSE over HTTPS)
      if (contentType.includes('text/event-stream') && response.body) {
        res.writeHead(response.status, responseHeaders);
        const reader = response.body.getReader();
        const pump = async () => {
          while (true) {
            const { done, value } = await reader.read();
            if (done) { res.end(); return; }
            // Stop pumping once the client side is gone
            if (!res.writable) { reader.cancel(); return; }
            res.write(value);
          }
        };
        pump().catch(() => res.end());
        // Client disconnect: release the Hono response stream
        req.on('close', () => reader.cancel());
        return;
      }
      // Buffer non-streaming responses normally
      res.writeHead(response.status, responseHeaders);
      const arrayBuf = await response.arrayBuffer();
      res.end(Buffer.from(arrayBuf));
    } catch (err) {
      console.error('[https] error:', (err as Error).message);
      if (!res.headersSent) {
        res.writeHead(500);
      }
      res.end('Internal Server Error');
    }
  });
  sslServer.listen(config.sslPort, config.host, () => {
    console.log(`\x1b[33m[openclaw-ui]\x1b[0m https://${config.host}:${config.sslPort}`);
  });
  // WSS upgrades go through the same proxy as the HTTP server
  setupWebSocketProxy(sslServer);
}
// ── Graceful shutdown ────────────────────────────────────────────────
/**
 * Graceful shutdown: stop watchers, drop WebSocket clients, release the local
 * whisper context, and close both listeners. An unref'd 5-second timer
 * force-exits if connections refuse to drain.
 */
function shutdown(signal: string) {
  console.log(`\n[openclaw-ui] ${signal} received, shutting down...`);
  stopFileWatcher();
  closeAllWebSockets();
  // Best-effort — a whisper teardown failure must not block shutdown
  void releaseWhisperContext().catch(() => {});
  httpServer.close(() => {
    console.log('[openclaw-ui] HTTP server closed');
  });
  sslServer?.close(() => {
    console.log('[openclaw-ui] HTTPS server closed');
  });
  // Give connections 5s to drain, then force exit
  const forceExit = setTimeout(() => {
    console.log('[openclaw-ui] Force exit');
    process.exit(0);
  }, 5000);
  forceExit.unref();
}
for (const sig of ['SIGTERM', 'SIGINT'] as const) {
  process.on(sig, () => shutdown(sig));
}

View file

@ -0,0 +1,58 @@
/**
* Generic TTL cache with in-flight request deduplication.
*
* Used by rate-limit endpoints (`/api/codex-limits`, `/api/claude-code-limits`)
* to avoid redundant expensive fetches when multiple clients hit the same
* endpoint concurrently. Failed fetches use a shorter TTL (30 s) so retries
* happen sooner.
* @module
*/
const DEFAULT_TTL_MS = 5 * 60 * 1000; // 5 minutes

interface CacheSlot<T> {
  /** Last fetched value. May legitimately be null/undefined for some T. */
  data: T | null;
  /** True once any fetch has completed — lets nullish results be cached too. */
  filled: boolean;
  /** Timestamp (ms) of the last completed fetch. */
  ts: number;
  /** TTL applied to the current `data` (shorter after invalid results). */
  ttl: number;
  /** Pending fetch shared by all concurrent callers, or null when idle. */
  inFlight: Promise<T> | null;
}
/**
 * Create a cached async fetcher.
 *
 * Concurrent callers share a single in-flight promise. Results that fail the
 * optional `isValid` check are still cached, but with a 30 s TTL so a retry
 * happens sooner. Rejected fetches are never cached — the next call retries.
 *
 * Note: a `filled` flag (not a null check) decides cache freshness, so a
 * fetcher that legitimately resolves to null/undefined is cached correctly
 * instead of being re-fetched on every call.
 *
 * @param fetcher the expensive function to cache
 * @param ttlMs cache lifetime in ms (default 5 min)
 * @param opts optional validity predicate controlling the failure TTL
 * @returns a function that returns cached or freshly-fetched data
 */
export function createCachedFetch<T>(
  fetcher: () => Promise<T>,
  ttlMs: number = DEFAULT_TTL_MS,
  opts?: { isValid?: (result: T) => boolean },
): () => Promise<T> {
  const slot: CacheSlot<T> = { data: null, filled: false, ts: 0, ttl: ttlMs, inFlight: null };
  const FAILURE_TTL_MS = 30_000; // retry failures after 30s, not 5min
  return async () => {
    const now = Date.now();
    // `filled` guarantees `data` holds a real result (possibly null for nullable T)
    if (slot.filled && now - slot.ts < slot.ttl) return slot.data as T;
    if (!slot.inFlight) {
      slot.inFlight = fetcher().then(
        (result) => {
          const valid = opts?.isValid ? opts.isValid(result) : true;
          slot.data = result;
          slot.filled = true;
          slot.ts = Date.now();
          slot.ttl = valid ? ttlMs : FAILURE_TTL_MS;
          slot.inFlight = null;
          return result;
        },
        (err) => {
          // Rejections clear the in-flight slot so the next call retries
          slot.inFlight = null;
          throw err;
        },
      );
    }
    return slot.inFlight;
  };
}

158
server/lib/config.ts Normal file
View file

@ -0,0 +1,158 @@
/**
* Server configuration all env vars, paths, and constants.
*
* Single source of truth for every tuneable value in the Nerve server.
* Validated at startup via {@link validateConfig}. Also exports the
* startup banner printer and a non-blocking gateway health probe.
* @module
*/
import 'dotenv/config';
import fs from 'node:fs';
import path from 'node:path';
import os from 'node:os';
import { fileURLToPath } from 'node:url';
import { DEFAULT_GATEWAY_URL, DEFAULT_PORT, DEFAULT_SSL_PORT, DEFAULT_HOST, WHISPER_MODEL_FILES } from './constants.js';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
// Two directory levels up from server/lib/ → repository root
const PROJECT_ROOT = path.resolve(__dirname, '..', '..');
// Prefer $HOME (easy to override in tests/containers); fall back to os.homedir()
const HOME = process.env.HOME || os.homedir();
/**
 * Immutable server configuration, resolved once at import time from env vars
 * with shipped defaults. Problems are reported later by validateConfig().
 */
export const config = {
  port: Number(process.env.PORT || DEFAULT_PORT),
  sslPort: Number(process.env.SSL_PORT || DEFAULT_SSL_PORT),
  // Bind address — defaults to localhost for safety; set HOST=0.0.0.0 for remote access
  host: process.env.HOST || DEFAULT_HOST,
  openaiApiKey: process.env.OPENAI_API_KEY || '',
  replicateApiToken: process.env.REPLICATE_API_TOKEN || '',
  // Speech-to-text
  // NOTE(review): the cast trusts STT_PROVIDER to be 'local' | 'openai';
  // any other value passes through unvalidated — confirm callers tolerate it
  sttProvider: (process.env.STT_PROVIDER || 'local') as 'local' | 'openai',
  whisperModel: process.env.WHISPER_MODEL || 'tiny.en',
  whisperModelDir: process.env.WHISPER_MODEL_DIR || path.join(HOME, '.nerve', 'models'),
  // Gateway connection
  gatewayUrl: process.env.GATEWAY_URL || DEFAULT_GATEWAY_URL,
  // GATEWAY_TOKEN wins; OPENCLAW_GATEWAY_TOKEN is the documented fallback name
  gatewayToken: process.env.GATEWAY_TOKEN || process.env.OPENCLAW_GATEWAY_TOKEN || '',
  // Agent identity (used in UI)
  agentName: process.env.AGENT_NAME || 'Agent',
  home: HOME,
  // Paths (configurable via env, with OpenClaw defaults)
  dist: path.join(PROJECT_ROOT, 'dist'),
  agentLogPath: path.join(PROJECT_ROOT, 'agent-log.json'),
  memoryPath: process.env.MEMORY_PATH || path.join(HOME, '.openclaw', 'workspace', 'MEMORY.md'),
  memoryDir: process.env.MEMORY_DIR || path.join(HOME, '.openclaw', 'workspace', 'memory'),
  sessionsDir: process.env.SESSIONS_DIR || path.join(HOME, '.openclaw', 'agents', 'main', 'sessions'),
  usageFile: process.env.USAGE_FILE || path.join(HOME, '.openclaw', 'token-usage.json'),
  certPath: path.join(PROJECT_ROOT, 'certs', 'cert.pem'),
  keyPath: path.join(PROJECT_ROOT, 'certs', 'key.pem'),
  // NOTE(review): consumer not visible from this file — presumably used to
  // spawn bunx-based helper tools; confirm against the services that read it
  bunPath: path.join(HOME, '.bun', 'bin', 'bunx'),
  // Limits
  limits: {
    tts: 64 * 1024, // 64 KB
    agentLog: 64 * 1024, // 64 KB
    transcribe: 12 * 1024 * 1024, // 12 MB
    /** Global max request body size (transcribe + 1 MB overhead) */
    maxBodyBytes: 12 * 1024 * 1024 + 1024 * 1024, // ~13 MB
  },
  // Agent log
  agentLogMax: 200,
  // TTS cache
  ttsCacheTtlMs: Number(process.env.TTS_CACHE_TTL_MS || 3_600_000), // 1 hour
  ttsCacheMax: Number(process.env.TTS_CACHE_MAX || 200),
} as const;
/** WebSocket proxy allowed hostnames (extend via WS_ALLOWED_HOSTS env var, comma-separated) */
export const WS_ALLOWED_HOSTS = new Set<string>(['localhost', '127.0.0.1', '::1']);
for (const rawHost of (process.env.WS_ALLOWED_HOSTS ?? '').split(',')) {
  const host = rawHost.trim();
  if (host) WS_ALLOWED_HOSTS.add(host);
}
/** Resolve the TTS provider label for the startup banner. */
function ttsProviderLabel(): string {
  const hasOpenai = Boolean(config.openaiApiKey);
  const hasReplicate = Boolean(config.replicateApiToken);
  if (hasOpenai && hasReplicate) return 'OpenAI + Replicate + Edge';
  if (hasOpenai) return 'OpenAI + Edge';
  if (hasReplicate) return 'Replicate + Edge';
  return 'Edge (free)';
}
/** Resolve the STT provider label for the startup banner. */
function sttProviderLabel(): string {
  if (config.sttProvider !== 'openai') return `Local (${config.whisperModel})`;
  // OpenAI mode without a key is a misconfiguration worth surfacing in the banner
  return config.openaiApiKey ? 'OpenAI Whisper' : 'OpenAI (no key!)';
}
/** Print startup banner with version and config summary. */
export function printStartupBanner(version: string): void {
  const banner = [
    `\n \x1b[33m⚡ Nerve v${version}\x1b[0m`,
    ` Agent: ${config.agentName} | TTS: ${ttsProviderLabel()} | STT: ${sttProviderLabel()}`,
    ` Gateway: ${config.gatewayUrl}`,
  ];
  for (const line of banner) console.log(line);
}
/** Non-blocking gateway health check at startup. Never throws. */
export async function probeGateway(): Promise<void> {
  const healthUrl = `${config.gatewayUrl}/health`;
  try {
    // 3-second timeout so a dead gateway doesn't stall startup logging
    const resp = await fetch(healthUrl, { signal: AbortSignal.timeout(3000) });
    if (!resp.ok) {
      console.warn(` \x1b[33m⚠\x1b[0m Gateway returned HTTP ${resp.status}`);
      return;
    }
    console.log(' \x1b[32m✓\x1b[0m Gateway reachable');
  } catch {
    console.warn(' \x1b[33m⚠\x1b[0m Gateway unreachable — is it running?');
  }
}
/**
 * Log startup warnings and validate critical configuration.
 *
 * Never throws or exits — every problem is reported as a console warning so
 * the server still starts in a degraded mode.
 */
export function validateConfig(): void {
  // Critical: gateway token is the only required config
  if (!config.gatewayToken) {
    console.warn(
      '\n \x1b[33m⚠ GATEWAY_TOKEN is not set\x1b[0m\n' +
      ' Gateway API calls (memories, models, session info) will fail.\n' +
      ' Run \x1b[36mnpm run setup\x1b[0m to configure, or set GATEWAY_TOKEN in .env\n',
    );
  }
  // Informational warnings — missing keys only disable optional features
  if (!config.openaiApiKey) {
    console.warn('[config] ⚠ OPENAI_API_KEY not set — OpenAI TTS/Whisper unavailable (Edge TTS still works)');
  }
  if (!config.replicateApiToken) {
    console.warn('[config] ⚠ REPLICATE_API_TOKEN not set — Qwen TTS unavailable');
  }
  if (config.host === '0.0.0.0') {
    console.warn(
      '[config] ⚠ Server binds to 0.0.0.0 — API is accessible from the network.\n' +
      ' Set HOST=127.0.0.1 for local-only access.',
    );
  }
  // STT validation — local mode needs the ggml model file on disk
  if (config.sttProvider === 'local') {
    const modelFile = WHISPER_MODEL_FILES[config.whisperModel];
    if (modelFile) {
      const modelPath = path.join(config.whisperModelDir, modelFile);
      try {
        // accessSync is the cheapest existence/permission probe
        fs.accessSync(modelPath);
      } catch {
        console.warn(
          `[config] ⚠ Whisper model not found at ${modelPath}\n` +
          ` Local STT unavailable. Re-run installer or set STT_PROVIDER=openai`,
        );
      }
    } else {
      console.warn(`[config] ⚠ Unknown Whisper model: ${config.whisperModel}`);
    }
  }
}

39
server/lib/constants.ts Normal file
View file

@ -0,0 +1,39 @@
/**
* Server-side constants no hardcoded values in service files.
* External API URLs, paths, and defaults all live here.
* Override via env vars where noted.
*/
// ─── External API base URLs ───────────────────────────────────────────────────
// Override for proxies, self-hosted endpoints, or API-compatible alternatives.
export const OPENAI_BASE_URL = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
export const REPLICATE_BASE_URL = process.env.REPLICATE_BASE_URL || 'https://api.replicate.com/v1';
// ─── API endpoints (derived from base URLs) ──────────────────────────────────
export const OPENAI_TTS_URL = `${OPENAI_BASE_URL}/audio/speech`;
export const OPENAI_WHISPER_URL = `${OPENAI_BASE_URL}/audio/transcriptions`;
export const REPLICATE_QWEN_TTS_URL = `${REPLICATE_BASE_URL}/models/qwen/qwen3-tts/predictions`;
// ─── Default connection ──────────────────────────────────────────────────────
// Gateway defaults target a local OpenClaw install; ports are Nerve's own.
export const DEFAULT_GATEWAY_URL = 'http://127.0.0.1:18789';
export const DEFAULT_GATEWAY_WS = 'ws://127.0.0.1:18789';
export const DEFAULT_PORT = 3080;
export const DEFAULT_SSL_PORT = 3443;
// Localhost-only by default for safety (see config.host)
export const DEFAULT_HOST = '127.0.0.1';
// ─── Codex integration ──────────────────────────────────────────────────────
export const CODEX_DIR = process.env.CODEX_DIR || '.codex';
// ─── Whisper STT models (HuggingFace) ────────────────────────────────────────
export const WHISPER_MODELS_BASE_URL = 'https://huggingface.co/ggerganov/whisper.cpp/resolve/main';
// Model name (WHISPER_MODEL env value) → ggml file name on HuggingFace
export const WHISPER_MODEL_FILES: Record<string, string> = {
  'tiny.en': 'ggml-tiny.en.bin',
  'base.en': 'ggml-base.en.bin',
  'small.en': 'ggml-small.en.bin',
};
export const WHISPER_DEFAULT_MODEL = 'tiny.en';

View file

@ -0,0 +1,164 @@
/**
* Device identity for OpenClaw gateway WebSocket authentication.
*
* OpenClaw 2026.2.14+ requires device identity (Ed25519 keypair + signed challenge)
* for WS connections to receive `operator.read` / `operator.write` scopes.
*
* The keypair is generated once and persisted to `~/.nerve/device-identity.json`.
* On subsequent starts the same identity is reused, avoiding re-pairing.
* @module
*/
import crypto from 'node:crypto';
import fs from 'node:fs';
import path from 'node:path';
/** In-memory device identity; persisted as JSON via getDeviceIdentity(). */
interface DeviceIdentity {
  deviceId: string; // sha256 hex of the raw public key (when generated here; loaded files are trusted as-is)
  publicKeyRaw: Buffer; // 32-byte raw Ed25519 public key
  publicKeyB64url: string; // base64url-encoded raw public key
  privateKeyPem: string; // PEM-encoded private key for signing
}
// Per-process memoization — identity is loaded/generated at most once
let cached: DeviceIdentity | null = null;
/** Resolve the identity file path, creating its directory if needed. */
function identityPath(): string {
  // NERVE_DATA_DIR wins; otherwise ~/.nerve (cwd/.nerve when HOME is unset)
  const baseDir =
    process.env.NERVE_DATA_DIR || path.join(process.env.HOME || process.cwd(), '.nerve');
  if (!fs.existsSync(baseDir)) {
    // 0o700: the directory will hold a private key — owner-only access
    fs.mkdirSync(baseDir, { recursive: true, mode: 0o700 });
  }
  return path.join(baseDir, 'device-identity.json');
}
/** Load or generate a persistent Ed25519 device identity */
export function getDeviceIdentity(): DeviceIdentity {
  // Memoized — safe for callers to invoke on every connect
  if (cached) return cached;
  const idPath = identityPath();
  // Try loading existing identity
  if (fs.existsSync(idPath)) {
    try {
      const stored = JSON.parse(fs.readFileSync(idPath, 'utf-8'));
      // Minimal schema check; any missing field falls through to regeneration
      if (stored.publicKeyB64url && stored.privateKeyPem && stored.deviceId) {
        cached = {
          deviceId: stored.deviceId,
          publicKeyRaw: Buffer.from(stored.publicKeyB64url, 'base64url'),
          publicKeyB64url: stored.publicKeyB64url,
          privateKeyPem: stored.privateKeyPem,
        };
        console.log(`[device-identity] Loaded existing identity: ${cached.deviceId.substring(0, 12)}`);
        return cached;
      }
    } catch (err) {
      // Corrupt/unreadable file: warn and fall through to a fresh keypair
      console.warn('[device-identity] Failed to load identity, regenerating:', (err as Error).message);
    }
  }
  // Generate new Ed25519 keypair
  const { publicKey, privateKey } = crypto.generateKeyPairSync('ed25519');
  const pubDer = publicKey.export({ type: 'spki', format: 'der' });
  // SPKI DER = fixed algorithm header + 32-byte raw key; keep the trailing 32 bytes
  const rawPub = pubDer.slice(-32); // Ed25519 SPKI has 12-byte header
  const pubB64url = rawPub.toString('base64url');
  // Device ID is derived from the public key, so it is stable across restarts
  const deviceId = crypto.createHash('sha256').update(rawPub).digest('hex');
  const privateKeyPem = privateKey.export({ type: 'pkcs8', format: 'pem' }) as string;
  cached = {
    deviceId,
    publicKeyRaw: rawPub,
    publicKeyB64url: pubB64url,
    privateKeyPem,
  };
  // Persist identity (mode 0o600 — the file contains the private key)
  const stored = {
    deviceId,
    publicKeyB64url: pubB64url,
    privateKeyPem,
    createdAt: new Date().toISOString(),
  };
  fs.writeFileSync(idPath, JSON.stringify(stored, null, 2) + '\n', { mode: 0o600 });
  console.log(`[device-identity] Generated new identity: ${deviceId.substring(0, 12)}… → ${idPath}`);
  return cached;
}
/**
 * Build the signing payload for a connect request (v2 protocol).
 *
 * Format: v2|deviceId|clientId|clientMode|role|scopes|signedAtMs|token|nonce
 * (scopes are comma-joined; signedAtMs is rendered in decimal).
 */
export function buildSigningPayload(params: {
  deviceId: string;
  clientId: string;
  clientMode: string;
  role: string;
  scopes: string[];
  signedAtMs: number;
  token: string;
  nonce: string;
}): string {
  const scopeList = params.scopes.join(',');
  return `v2|${params.deviceId}|${params.clientId}|${params.clientMode}|${params.role}|${scopeList}|${params.signedAtMs}|${params.token}|${params.nonce}`;
}
/** Sign `payload` with the device's Ed25519 private key; returns a base64url signature. */
export function signPayload(privateKeyPem: string, payload: string): string {
  const privateKey = crypto.createPrivateKey(privateKeyPem);
  // Ed25519 is a one-shot algorithm — pass null instead of a digest name.
  const signature = crypto.sign(null, Buffer.from(payload, 'utf8'), privateKey);
  return signature.toString('base64url');
}
/**
 * Create the `device` object to inject into a connect request.
 *
 * Call after receiving the connect.challenge nonce, passing the client's
 * connect params (clientId, clientMode, role, scopes, token).
 */
export function createDeviceBlock(params: {
  clientId: string;
  clientMode: string;
  role: string;
  scopes: string[];
  token: string;
  nonce: string;
}): {
  id: string;
  publicKey: string;
  signature: string;
  signedAt: number;
  nonce: string;
} {
  const { deviceId, publicKeyB64url, privateKeyPem } = getDeviceIdentity();
  const signedAt = Date.now();
  // Sign the canonical v2 payload with the persistent device key.
  const signature = signPayload(
    privateKeyPem,
    buildSigningPayload({
      deviceId,
      clientId: params.clientId,
      clientMode: params.clientMode,
      role: params.role,
      scopes: params.scopes,
      signedAtMs: signedAt,
      token: params.token,
      nonce: params.nonce,
    }),
  );
  return {
    id: deviceId,
    publicKey: publicKeyB64url,
    signature,
    signedAt,
    nonce: params.nonce,
  };
}

118
server/lib/file-utils.ts Normal file
View file

@ -0,0 +1,118 @@
/**
* Shared file utilities for the file browser.
*
* Path validation, exclusion lists, binary detection, and workspace
* path resolution. Used by both the file-browser API routes and
* the extended file watcher.
* @module
*/
import path from 'node:path';
import fs from 'node:fs/promises';
import { config } from './config.js';
// ── Exclusion rules ──────────────────────────────────────────────────
// Exact names never shown in the tree (deps, VCS, build output, secrets).
const EXCLUDED_NAMES = new Set([
  'node_modules', '.git', 'dist', 'build', 'server-dist', 'certs',
  '.env', 'agent-log.json',
]);
// Name patterns never shown in the tree.
const EXCLUDED_PATTERNS = [
  /^\.env(\.|$)/, // .env, .env.local, .env.production, etc.
  /\.log$/,
];
// Extensions treated as binary (not editable as text).
const BINARY_EXTENSIONS = new Set([
  '.png', '.jpg', '.jpeg', '.gif', '.webp', '.avif', '.svg', '.ico',
  '.mp3', '.mp4', '.wav', '.ogg', '.flac', '.webm',
  '.zip', '.tar', '.gz', '.bz2', '.7z', '.rar',
  '.pdf', '.doc', '.docx', '.xls', '.xlsx',
  '.exe', '.dll', '.so', '.dylib',
  '.woff', '.woff2', '.ttf', '.eot',
  '.sqlite', '.db',
]);
/** True when a file/directory name should be excluded from the tree. */
export function isExcluded(name: string): boolean {
  return EXCLUDED_NAMES.has(name) || EXCLUDED_PATTERNS.some((pattern) => pattern.test(name));
}
/** True when a file extension indicates binary content. */
export function isBinary(name: string): boolean {
  const ext = path.extname(name).toLowerCase();
  return BINARY_EXTENSIONS.has(ext);
}
// ── Workspace root ───────────────────────────────────────────────────
/** Resolve the workspace root directory (parent of MEMORY.md). */
export function getWorkspaceRoot(): string {
  // config.memoryPath points at the MEMORY.md file itself; its directory is the workspace.
  return path.dirname(config.memoryPath);
}
// ── Path validation ──────────────────────────────────────────────────
/** Max file size for reading/writing (1 MB). */
export const MAX_FILE_SIZE = 1_048_576;
/**
 * Validate and resolve a relative path to an absolute path within the workspace.
 *
 * Returns the resolved absolute path, or `null` if:
 * - The path escapes the workspace root (traversal)
 * - The path resolves through a symlink to outside the workspace
 * - Any path segment is excluded
 *
 * For write operations where the file may not exist yet, pass
 * `options.allowNonExistent` — the parent directory is validated instead.
 */
export async function resolveWorkspacePath(
  relativePath: string,
  options?: { allowNonExistent?: boolean },
): Promise<string | null> {
  const root = getWorkspaceRoot();
  // Block obvious traversal attempts. Compare against '..' exactly and the
  // '../' prefix (rather than a bare startsWith('..')) so legitimate names
  // like '..foo' inside the workspace are not wrongly rejected.
  const normalized = path.normalize(relativePath);
  if (
    path.isAbsolute(normalized)
    || normalized === '..'
    || normalized.startsWith('..' + path.sep)
  ) {
    return null;
  }
  // Check each path segment for exclusions
  const segments = normalized.split(path.sep);
  if (segments.some(seg => seg && isExcluded(seg))) {
    return null;
  }
  const resolved = path.resolve(root, normalized);
  // Must be within workspace root
  if (!resolved.startsWith(root + path.sep) && resolved !== root) {
    return null;
  }
  // Resolve symlinks and re-check (defends against symlink escape)
  try {
    const real = await fs.realpath(resolved);
    if (!real.startsWith(root + path.sep) && real !== root) {
      return null;
    }
    return real;
  } catch {
    // File doesn't exist
    if (!options?.allowNonExistent) return null;
    // For new files, validate the parent directory instead
    const parent = path.dirname(resolved);
    try {
      const realParent = await fs.realpath(parent);
      if (!realParent.startsWith(root + path.sep) && realParent !== root) {
        return null;
      }
      return resolved;
    } catch {
      return null;
    }
  }
}

137
server/lib/file-watcher.ts Normal file
View file

@ -0,0 +1,137 @@
/**
* File watcher for workspace files.
*
* Watches `MEMORY.md`, the `memory/` directory, and the full workspace
* directory for changes. Broadcasts SSE events so the UI can react:
* - `memory.changed` for backward compat (memory panel refresh)
* - `file.changed` for file browser (editor reload / AI lock)
*
* Per-source debouncing prevents duplicate events from a single save.
* @module
*/
import path from 'node:path';
import { watch, type FSWatcher } from 'node:fs';
import { existsSync } from 'node:fs';
import { broadcast } from '../routes/events.js';
import { config } from './config.js';
import { isExcluded, isBinary } from './file-utils.js';
let memoryWatcher: FSWatcher | null = null;
let memoryDirWatcher: FSWatcher | null = null;
let workspaceWatcher: FSWatcher | null = null;

// Per-source debounce to avoid multiple events for a single save.
// (Separate timers so MEMORY.md changes don't suppress daily file changes.)
const lastBroadcastBySource = new Map<string, number>();
const DEBOUNCE_MS = 500;
const MAX_SOURCES = 500;

/** True when `source` hasn't broadcast within the debounce window; records the broadcast time. */
function shouldBroadcast(source: string): boolean {
  const now = Date.now();
  const previous = lastBroadcastBySource.get(source);
  if (previous !== undefined && now - previous < DEBOUNCE_MS) return false;
  // Crude memory bound: wipe the whole map once the cap is reached.
  if (lastBroadcastBySource.size >= MAX_SOURCES) lastBroadcastBySource.clear();
  lastBroadcastBySource.set(source, now);
  return true;
}
/**
 * Start watching workspace files for changes.
 * Call this during server startup.
 *
 * Sets up three watchers:
 * - MEMORY.md (dedicated, so the memory panel refreshes immediately)
 * - memory/ for daily *.md files
 * - the whole workspace (recursive, where the platform supports it)
 */
export function startFileWatcher(): void {
  const workspaceRoot = path.dirname(config.memoryPath);
  // Watch MEMORY.md
  if (existsSync(config.memoryPath)) {
    try {
      memoryWatcher = watch(config.memoryPath, (eventType) => {
        if (eventType === 'change' && shouldBroadcast('MEMORY.md')) {
          console.log('[file-watcher] MEMORY.md changed');
          broadcast('memory.changed', {
            source: 'file',
            file: 'MEMORY.md'
          });
          broadcast('file.changed', { path: 'MEMORY.md' });
        }
      });
      console.log('[file-watcher] Watching MEMORY.md');
    } catch (err) {
      console.error('[file-watcher] Failed to watch MEMORY.md:', (err as Error).message);
    }
  }
  // Watch memory/ directory for daily files
  if (existsSync(config.memoryDir)) {
    try {
      memoryDirWatcher = watch(config.memoryDir, (eventType, filename) => {
        // BUGFIX: these template literals previously contained the literal text
        // "$(unknown)" instead of interpolating the filename, so every daily
        // file shared one debounce key and file.changed reported a bogus path.
        if (filename?.endsWith('.md') && shouldBroadcast(`daily:${filename}`)) {
          console.log(`[file-watcher] ${filename} changed`);
          broadcast('memory.changed', {
            source: 'file',
            file: filename
          });
          broadcast('file.changed', { path: `memory/${filename}` });
        }
      });
      console.log('[file-watcher] Watching memory/ directory');
    } catch (err) {
      console.error('[file-watcher] Failed to watch memory/:', (err as Error).message);
    }
  }
  // Watch entire workspace directory (recursive where supported)
  if (existsSync(workspaceRoot)) {
    try {
      workspaceWatcher = watch(workspaceRoot, { recursive: true }, (_eventType, filename) => {
        if (!filename) return;
        // Normalize path separators (Windows compat)
        const normalized = filename.replace(/\\/g, '/');
        // Skip excluded directories/files, dotfiles, and binaries
        const segments = normalized.split('/');
        if (segments.some(seg => seg && (isExcluded(seg) || seg.startsWith('.')))) return;
        if (isBinary(normalized)) return;
        // Skip memory files — already handled by dedicated watchers above
        if (normalized === 'MEMORY.md' || normalized.startsWith('memory/')) return;
        if (shouldBroadcast(`workspace:${normalized}`)) {
          console.log(`[file-watcher] workspace: ${normalized} changed`);
          broadcast('file.changed', { path: normalized });
        }
      });
      console.log('[file-watcher] Watching workspace directory (recursive)');
    } catch (err) {
      // recursive: true may not be supported on all Linux kernels
      console.warn('[file-watcher] Recursive workspace watch failed (expected on some Linux versions):', (err as Error).message);
      console.warn('[file-watcher] File browser will still work — reload manually or via Ctrl+S');
    }
  }
}
/**
 * Stop watching files and release all watcher handles.
 * Call this during graceful shutdown; safe to call when nothing is being watched.
 */
export function stopFileWatcher(): void {
  memoryWatcher?.close();
  memoryWatcher = null;
  memoryDirWatcher?.close();
  memoryDirWatcher = null;
  workspaceWatcher?.close();
  workspaceWatcher = null;
}

46
server/lib/files.ts Normal file
View file

@ -0,0 +1,46 @@
/**
* Async file helpers for reading/writing JSON and text files.
*
* All functions swallow `ENOENT` errors and return a caller-supplied fallback,
* making them safe to use before files exist.
* @module
*/
import fs from 'node:fs/promises';
/**
 * Read and parse a JSON file.
 *
 * Missing files (`ENOENT`) are expected and silent; any other failure
 * (corruption, permissions, …) is logged. Returns `fallback` on any error.
 */
export async function readJSON<T>(filePath: string, fallback: T): Promise<T> {
  try {
    return JSON.parse(await fs.readFile(filePath, 'utf8')) as T;
  } catch (err) {
    const code = (err as { code?: string }).code;
    if (code !== 'ENOENT') {
      console.warn(`[files] readJSON failed for ${filePath}:`, (err as Error).message);
    }
    return fallback;
  }
}
/**
 * Write `data` to `filePath` as pretty-printed (2-space) JSON.
 * Filesystem errors propagate to the caller.
 */
export async function writeJSON(filePath: string, data: unknown): Promise<void> {
  const json = JSON.stringify(data, null, 2);
  await fs.writeFile(filePath, json);
}
/**
 * Read a text file as UTF-8.
 *
 * Missing files (`ENOENT`) are silent; other failures are logged.
 * Returns `fallback` on any error.
 */
export async function readText(filePath: string, fallback: string = ''): Promise<string> {
  try {
    return await fs.readFile(filePath, 'utf8');
  } catch (err) {
    const code = (err as { code?: string }).code;
    if (code !== 'ENOENT') {
      console.warn(`[files] readText failed for ${filePath}:`, (err as Error).message);
    }
    return fallback;
  }
}

View file

@ -0,0 +1,52 @@
/**
* Shared gateway tool invocation client.
*
* Provides a single {@link invokeGatewayTool} function used by route handlers
* (crons, memories, gateway, etc.) to call OpenClaw gateway tools via its
* `POST /tools/invoke` HTTP API. Eliminates duplication across route files.
* @module
*/
import { config } from './config.js';
const { gatewayUrl: GATEWAY_URL, gatewayToken: GATEWAY_TOKEN } = config;

// Default per-request timeout for tool invocations.
const DEFAULT_TIMEOUT_MS = 15_000;

// Envelope returned by the gateway's POST /tools/invoke endpoint.
interface ToolsInvokeResponse {
  ok: boolean;
  result?: unknown;
  error?: { message: string };
}

/**
 * Invoke a gateway tool via the HTTP API.
 *
 * @param tool - Tool name (e.g. 'cron', 'memory_store', 'sessions_list')
 * @param args - Tool arguments
 * @param timeoutMs - Request timeout in milliseconds (default: 15s)
 * @throws When the HTTP request fails, times out, or the gateway reports an error.
 */
export async function invokeGatewayTool(
  tool: string,
  args: Record<string, unknown>,
  timeoutMs = DEFAULT_TIMEOUT_MS,
): Promise<unknown> {
  const headers: Record<string, string> = { 'Content-Type': 'application/json' };
  // Bearer auth only when a token is configured.
  if (GATEWAY_TOKEN) headers['Authorization'] = `Bearer ${GATEWAY_TOKEN}`;
  const body = JSON.stringify({ tool, args, sessionKey: 'main' });
  const response = await fetch(`${GATEWAY_URL}/tools/invoke`, {
    method: 'POST',
    headers,
    body,
    signal: AbortSignal.timeout(timeoutMs),
  });
  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Gateway tool invoke failed: ${response.status} ${text}`);
  }
  const parsed = (await response.json()) as ToolsInvokeResponse;
  if (!parsed.ok) throw new Error(parsed.error?.message || 'Tool invocation failed');
  return parsed.result;
}

97
server/lib/mutex.test.ts Normal file
View file

@ -0,0 +1,97 @@
/** Tests for the async mutex (serialisation, error recovery, independence). */
import { describe, it, expect } from 'vitest';
import { createMutex } from './mutex.js';
describe('createMutex', () => {
  it('should serialize concurrent operations', async () => {
    const withLock = createMutex();
    const order: number[] = [];
    // Launch 3 concurrent tasks — they should execute in order
    // (task 1 sleeps longest, so FIFO acquisition — not completion speed — decides the order)
    const p1 = withLock(async () => {
      await delay(30);
      order.push(1);
      return 'first';
    });
    const p2 = withLock(async () => {
      await delay(10);
      order.push(2);
      return 'second';
    });
    const p3 = withLock(async () => {
      order.push(3);
      return 'third';
    });
    const results = await Promise.all([p1, p2, p3]);
    expect(results).toEqual(['first', 'second', 'third']);
    expect(order).toEqual([1, 2, 3]);
  });
  it('should return the value from the locked function', async () => {
    const withLock = createMutex();
    const result = await withLock(async () => 42);
    expect(result).toBe(42);
  });
  it('should propagate errors without breaking the lock', async () => {
    const withLock = createMutex();
    // First call throws
    await expect(
      withLock(async () => {
        throw new Error('boom');
      }),
    ).rejects.toThrow('boom');
    // Second call should still work (lock released after error)
    const result = await withLock(async () => 'recovered');
    expect(result).toBe('recovered');
  });
  it('should handle rapid sequential calls', async () => {
    const withLock = createMutex();
    let counter = 0;
    // Simulate read-modify-write race condition that the mutex prevents:
    // each task reads the counter, yields, then writes the incremented value.
    const promises = Array.from({ length: 10 }, () =>
      withLock(async () => {
        const current = counter;
        await delay(1);
        counter = current + 1;
      }),
    );
    await Promise.all(promises);
    expect(counter).toBe(10); // Without mutex this would likely be < 10
  });
  it('should create independent mutexes', async () => {
    const lock1 = createMutex();
    const lock2 = createMutex();
    const order: string[] = [];
    // lock1 holds for 50ms, lock2 should not wait for it
    const p1 = lock1(async () => {
      await delay(50);
      order.push('lock1');
    });
    const p2 = lock2(async () => {
      order.push('lock2');
    });
    await Promise.all([p1, p2]);
    // lock2 should finish before lock1 since they're independent
    expect(order[0]).toBe('lock2');
    expect(order[1]).toBe('lock1');
  });
});
/** Resolve after `ms` milliseconds (test helper). */
function delay(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}

43
server/lib/mutex.ts Normal file
View file

@ -0,0 +1,43 @@
/**
* Simple async mutex for serializing file read-modify-write operations.
* @module
*/
/**
 * Create an independent mutex instance.
 *
 * Returns a `withLock` function: call it with an async callback to guarantee
 * that only one callback executes at a time for this mutex. Callbacks acquire
 * the lock in call order (FIFO); a throwing callback still releases the lock.
 */
export function createMutex() {
  // Tail of the wait queue — resolves when the most recent holder releases.
  let tail: Promise<void> = Promise.resolve();
  return async function withLock<T>(fn: () => Promise<T>): Promise<T> {
    let unlock!: () => void;
    const previous = tail;
    // Enqueue ourselves before awaiting, so later callers line up behind us.
    tail = new Promise<void>((resolve) => { unlock = resolve; });
    await previous;
    try {
      return await fn();
    } finally {
      unlock();
    }
  };
}
/**
 * Keyed mutex — one lock per key string, created lazily on first use.
 * Usage: await withMutex('memory-file', () => appendToMemoryFile(...));
 * NOTE(review): entries are never evicted; fine for a small, fixed key set.
 */
const mutexes = new Map<string, ReturnType<typeof createMutex>>();
export async function withMutex<T>(key: string, fn: () => Promise<T>): Promise<T> {
  const existing = mutexes.get(key);
  if (existing) return existing(fn);
  const created = createMutex();
  mutexes.set(key, created);
  return created(fn);
}

View file

@ -0,0 +1,44 @@
/**
* Resolve the `openclaw` binary path.
*
* Checks (in order):
* 1. `OPENCLAW_BIN` env var (explicit override)
* 2. Sibling of current Node binary (nvm, fnm, volta)
* 3. Common system paths (`/opt/homebrew/bin`, `/usr/local/bin`, etc.)
* 4. Falls back to bare `'openclaw'` (relies on `PATH`)
*
* Result is cached after the first call.
* @module
*/
import { accessSync, constants } from 'node:fs';
import { homedir } from 'node:os';
// Memoized result — resolution runs at most once per process.
let cached: string | null = null;
/**
 * Resolve the `openclaw` binary path (search order in the module docs).
 * Falls back to the bare name 'openclaw' when nothing executable is found.
 */
export function resolveOpenclawBin(): string {
  if (cached) return cached;
  // Explicit override wins unconditionally.
  if (process.env.OPENCLAW_BIN) {
    cached = process.env.OPENCLAW_BIN;
    return cached;
  }
  const home = process.env.HOME || homedir();
  // Directory containing the running node binary (nvm/fnm/volta install siblings there).
  // NOTE(review): the /node$ pattern assumes a POSIX binary name — confirm if Windows matters.
  const nodeBin = process.execPath.replace(/\/node$/, '');
  const candidates = [
    `${nodeBin}/openclaw`, // same dir as current node (nvm, fnm, volta)
    '/opt/homebrew/bin/openclaw', // macOS Apple Silicon (Homebrew)
    '/usr/local/bin/openclaw', // macOS Intel (Homebrew) / global npm
    '/usr/bin/openclaw', // system package (Linux)
    `${home}/.npm-global/bin/openclaw`, // custom npm prefix (npm set prefix)
    `${home}/.local/bin/openclaw`, // pip-style local bin
    `${home}/.volta/bin/openclaw`, // volta
    `${home}/.fnm/aliases/default/bin/openclaw`, // fnm
  ];
  for (const candidate of candidates) {
    try {
      accessSync(candidate, constants.X_OK);
      cached = candidate;
      return cached;
    } catch {
      // missing or not executable — try the next candidate
    }
  }
  console.warn('[openclaw-bin] Could not find openclaw binary. Checked:', candidates.join(', '),
    '— Set OPENCLAW_BIN env var to fix. Falling back to bare "openclaw" (requires PATH).');
  cached = 'openclaw';
  return cached;
}

132
server/lib/tts-config.ts Normal file
View file

@ -0,0 +1,132 @@
/**
 * TTS voice configuration — reads/writes a JSON config file.
*
* All voice-related settings (OpenAI, Qwen/Replicate, Edge) live here
* instead of env vars or hardcoded values. On first run, default settings
* are written to `<PROJECT_ROOT>/tts-config.json`. Subsequent reads merge
* the on-disk config with defaults so new fields are always present.
* @module
*/
import fs from 'node:fs';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
// ESM has no __dirname; reconstruct it from the module URL.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
// This module lives two levels below the project root.
const PROJECT_ROOT = path.resolve(__dirname, '..', '..');
const CONFIG_PATH = path.join(PROJECT_ROOT, 'tts-config.json');
export interface TTSVoiceConfig {
  /** Qwen / Replicate TTS settings */
  qwen: {
    /** TTS mode: 'voice_design' or 'custom_voice' */
    mode: string;
    /** Language for synthesis */
    language: string;
    /** Preset speaker name (for custom_voice mode) */
    speaker: string;
    /** Voice description (for voice_design mode) */
    voiceDescription: string;
    /** Style/emotion instruction */
    styleInstruction: string;
  };
  /** OpenAI TTS settings */
  openai: {
    /** OpenAI TTS model (gpt-4o-mini-tts, tts-1, tts-1-hd) */
    model: string;
    /** Voice name (nova, alloy, echo, fable, onyx, shimmer) */
    voice: string;
    /** Natural language instructions for how the voice should sound */
    instructions: string;
  };
  /** Edge TTS settings */
  edge: {
    /** Voice name (e.g. en-US-AriaNeural, en-GB-SoniaNeural) */
    voice: string;
  };
}
// Built-in defaults — merged under the on-disk config on every read so newly
// added fields always have a value (see getTTSConfig).
const DEFAULTS: TTSVoiceConfig = {
  qwen: {
    mode: 'voice_design',
    language: 'English',
    speaker: 'Serena',
    voiceDescription:
      'Female native English speaker in her mid-20s with a bright, warm voice. Clear American accent, pleasant mid-range pitch, friendly and personable.',
    styleInstruction:
      'Speak in a natural, conversational tone with a light touch of warmth. Not too serious, not too bubbly — just genuinely friendly.',
  },
  openai: {
    model: 'gpt-4o-mini-tts',
    voice: 'nova',
    instructions:
      'Speak naturally and conversationally, like a real person. Warm, friendly tone with a slight British accent. Keep it casual and relaxed, not robotic or overly formal.',
  },
  edge: {
    voice: 'en-US-AriaNeural',
  },
};
// Process-level cache; refreshed by saveTTSConfig/updateTTSConfig.
let cached: TTSVoiceConfig | null = null;
/** Load TTS config from disk, merging with defaults for any missing fields. */
export function getTTSConfig(): TTSVoiceConfig {
  if (cached) return cached;
  try {
    if (fs.existsSync(CONFIG_PATH)) {
      const onDisk = JSON.parse(fs.readFileSync(CONFIG_PATH, 'utf-8'));
      // Merge on-disk values over defaults so newly introduced fields get defaults.
      const merged = deepMerge(DEFAULTS, onDisk) as TTSVoiceConfig;
      cached = merged;
      return merged;
    }
  } catch (err) {
    console.warn('[tts-config] Failed to read config, using defaults:', (err as Error).message);
  }
  // First run — write defaults to disk
  cached = { ...DEFAULTS };
  saveTTSConfig(cached);
  return cached;
}
/** Persist `cfg` to disk (pretty-printed) and update the in-memory cache. */
export function saveTTSConfig(cfg: TTSVoiceConfig): void {
  cached = cfg;
  try {
    const serialized = JSON.stringify(cfg, null, 2) + '\n';
    fs.writeFileSync(CONFIG_PATH, serialized, 'utf-8');
  } catch (err) {
    // Cache is still updated; only persistence failed.
    console.error('[tts-config] Failed to write config:', (err as Error).message);
  }
}
/** Apply a partial config `patch` (deep merge), persist, and return the result. */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function updateTTSConfig(patch: Record<string, any>): TTSVoiceConfig {
  const merged = deepMerge(getTTSConfig(), patch) as TTSVoiceConfig;
  saveTTSConfig(merged);
  return merged;
}
/**
 * Simple deep merge (target ← source). Recurses only when BOTH sides are
 * plain objects; everything else (arrays, primitives, null) overwrites.
 * `undefined` source values are skipped so they never erase a setting.
 *
 * Keys that could tamper with object prototypes ('__proto__', 'constructor',
 * 'prototype') are ignored — the source may come from a hand-edited JSON file.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function deepMerge(target: any, source: any): any {
  const result = { ...target };
  for (const key of Object.keys(source)) {
    // Guard against prototype pollution via malicious/corrupted config keys.
    if (key === '__proto__' || key === 'constructor' || key === 'prototype') continue;
    const sv = source[key];
    const tv = target[key];
    const bothPlainObjects =
      typeof sv === 'object' && sv !== null && !Array.isArray(sv) &&
      typeof tv === 'object' && tv !== null && !Array.isArray(tv);
    if (bothPlainObjects) {
      result[key] = deepMerge(tv, sv);
    } else if (sv !== undefined) {
      result[key] = sv;
    }
  }
  return result;
}

View file

@ -0,0 +1,72 @@
/**
* Persistent token usage tracker.
*
* Stores a high-water mark of token input/output counts and cost that
 * survives session compaction. Values only ever increase — a session
* reset won't lose accumulated usage data. Uses async `fs/promises`
* for non-blocking I/O and a mutex to prevent concurrent write races.
* @module
*/
import fs from 'node:fs/promises';
import path from 'node:path';
import { config } from './config.js';
import { createMutex } from './mutex.js';
// Location of the persisted usage JSON, supplied by server config.
const USAGE_FILE = config.usageFile;
// On-disk shape of the usage file.
interface UsageData {
  totalInput: number;   // high-water mark of input tokens
  totalOutput: number;  // high-water mark of output tokens
  totalCost: number;    // high-water mark of accumulated cost
  lastUpdated: string;  // ISO timestamp of the last write ('' until first save)
}
// Starting point when the file is missing or unreadable.
const DEFAULT_USAGE: UsageData = {
  totalInput: 0,
  totalOutput: 0,
  totalCost: 0,
  lastUpdated: '',
};
// Single mutex serializing all read-modify-write cycles on the usage file.
const withLock = createMutex();
/** Read persisted usage; any failure (missing or corrupt file) yields zeroed defaults. */
async function loadUsage(): Promise<UsageData> {
  try {
    return JSON.parse(await fs.readFile(USAGE_FILE, 'utf8')) as UsageData;
  } catch {
    // File doesn't exist or is invalid — return defaults
    return { ...DEFAULT_USAGE };
  }
}
/** Persist usage data (stamping lastUpdated), creating the directory if needed. */
async function saveUsage(data: UsageData): Promise<void> {
  data.lastUpdated = new Date().toISOString();
  // Ensure the containing directory exists before writing
  await fs.mkdir(path.dirname(USAGE_FILE), { recursive: true });
  const json = JSON.stringify(data, null, 2);
  await fs.writeFile(USAGE_FILE, json);
}
/**
 * Update with current tokscale totals.
 * Only increases, never decreases (survives compaction).
 * Serialized via mutex to prevent read-modify-write races.
 */
export async function updateUsage(
  input: number,
  output: number,
  cost: number,
): Promise<UsageData> {
  return withLock(async () => {
    const data = await loadUsage();
    // High-water mark: raise each counter only when the new total exceeds it
    if (data.totalInput < input) data.totalInput = input;
    if (data.totalOutput < output) data.totalOutput = output;
    if (data.totalCost < cost) data.totalCost = cost;
    await saveUsage(data);
    return data;
  });
}

View file

@ -0,0 +1,46 @@
/**
 * Voice phrase configuration — reads from voice-phrases.json.
* Stop, cancel, and wake phrases live here instead of hardcoded in constants.
*
* Config file: <PROJECT_ROOT>/voice-phrases.json
*/
import fs from 'node:fs';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
// ESM has no __dirname; reconstruct it from the module URL.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
// This module lives two levels below the project root.
const PROJECT_ROOT = path.resolve(__dirname, '..', '..');
const CONFIG_PATH = path.join(PROJECT_ROOT, 'voice-phrases.json');
// Shape of voice-phrases.json.
export interface VoicePhrases {
  stopPhrases: string[];   // phrases recognized as "stop" commands
  cancelPhrases: string[]; // phrases recognized as "cancel" commands
}
// Built-in defaults used when the config file is missing or malformed.
const DEFAULTS: VoicePhrases = {
  stopPhrases: ["boom", "i'm done", "im done", "all right i'm done", "alright i'm done", "that's it", "thats it", "send it", "done"],
  cancelPhrases: ['cancel', 'never mind', 'nevermind'],
};
// Cache invalidated by the file's mtime (see getVoicePhrases).
let cached: VoicePhrases | null = null;
let cachedMtime = 0;
/** Read voice phrases, re-reading the file whenever its mtime changes. */
export function getVoicePhrases(): VoicePhrases {
  try {
    const stat = fs.statSync(CONFIG_PATH);
    // Serve the cache while the file is unchanged on disk.
    if (cached && stat.mtimeMs === cachedMtime) return cached;
    const raw = JSON.parse(fs.readFileSync(CONFIG_PATH, 'utf-8'));
    // Per-field validation: anything non-array falls back to its default.
    const stopPhrases = Array.isArray(raw.stopPhrases) ? raw.stopPhrases : DEFAULTS.stopPhrases;
    const cancelPhrases = Array.isArray(raw.cancelPhrases) ? raw.cancelPhrases : DEFAULTS.cancelPhrases;
    cached = { stopPhrases, cancelPhrases };
    cachedMtime = stat.mtimeMs;
    return cached;
  } catch {
    // File missing or invalid — use defaults
    return DEFAULTS;
  }
}

227
server/lib/ws-proxy.ts Normal file
View file

@ -0,0 +1,227 @@
/**
 * WebSocket proxy — bridges browser clients to the OpenClaw gateway.
*
* Clients connect to `ws(s)://host:port/ws?target=<gateway-ws-url>` and this
* module opens a corresponding connection to the gateway, relaying messages
* bidirectionally. During the initial handshake it intercepts the
* `connect.challenge` event and injects an Ed25519-signed device identity
* block so the gateway grants `operator.read` / `operator.write` scopes.
* @module
*/
import type { Server as HttpsServer } from 'node:https';
import type { Server as HttpServer } from 'node:http';
import { WebSocket, WebSocketServer } from 'ws';
import type { IncomingMessage } from 'node:http';
import type { Duplex } from 'node:stream';
import { WS_ALLOWED_HOSTS } from './config.js';
import { createDeviceBlock, getDeviceIdentity } from './device-identity.js';
/** Active WSS instances — tracked so graceful shutdown can close them all. */
const activeWssInstances: WebSocketServer[] = [];
/** Close every client connection and server, then forget all tracked instances. */
export function closeAllWebSockets(): void {
  // splice(0) empties the array and hands back the old contents in one step.
  for (const wss of activeWssInstances.splice(0)) {
    for (const client of wss.clients) client.close(1001, 'Server shutting down');
    wss.close();
  }
}
/**
 * Set up the WS/WSS proxy on an HTTP or HTTPS server.
 * Proxies ws(s)://host:port/ws?target=ws://gateway/ws to the OpenClaw gateway.
 *
 * Handshake interception: the gateway's `connect.challenge` nonce is captured
 * from the gateway→client stream, and the first client `connect` request has
 * an Ed25519-signed device block injected before being forwarded.
 */
export function setupWebSocketProxy(server: HttpServer | HttpsServer): void {
  const wss = new WebSocketServer({ noServer: true });
  activeWssInstances.push(wss);
  // Eagerly load device identity at startup (generates the keypair on first run)
  getDeviceIdentity();
  server.on('upgrade', (req: IncomingMessage, socket: Duplex, head: Buffer) => {
    if (req.url?.startsWith('/ws')) {
      wss.handleUpgrade(req, socket, head, (ws) => wss.emit('connection', ws, req));
    } else {
      // Not our endpoint — drop the raw socket
      socket.destroy();
    }
  });
  wss.on('connection', (clientWs: WebSocket, req: IncomingMessage) => {
    // The base URL only provides a parse context; we just need the query string
    const url = new URL(req.url || '/', 'https://localhost');
    const target = url.searchParams.get('target');
    console.log(`[ws-proxy] New connection: target=${target}`);
    if (!target) {
      clientWs.close(1008, 'Missing ?target= param');
      return;
    }
    let targetUrl: URL;
    try {
      targetUrl = new URL(target);
    } catch {
      clientWs.close(1008, 'Invalid target URL');
      return;
    }
    // SSRF guard: only ws/wss schemes and an allow-listed gateway host
    if (!['ws:', 'wss:'].includes(targetUrl.protocol) || !WS_ALLOWED_HOSTS.has(targetUrl.hostname)) {
      console.warn(`[ws-proxy] Rejected: target not allowed: ${target}`);
      clientWs.close(1008, 'Target not allowed');
      return;
    }
    // Forward origin header for gateway auth
    const isEncrypted = !!(req.socket as unknown as { encrypted?: boolean }).encrypted;
    const scheme = isEncrypted ? 'https' : 'http';
    const clientOrigin = req.headers.origin || `${scheme}://${req.headers.host}`;
    const gwWs = new WebSocket(targetUrl.toString(), {
      headers: { Origin: clientOrigin },
    });
    // State machine for connect handshake interception:
    // nonce arrives from the gateway; the first client 'connect' is rewritten.
    let challengeNonce: string | null = null;
    let handshakeComplete = false;
    // Buffer client messages until gateway connection is open (with cap)
    const MAX_PENDING_MESSAGES = 100;
    const MAX_PENDING_BYTES = 1024 * 1024; // 1 MB
    const pendingMessages: { data: Buffer | string; isBinary: boolean }[] = [];
    let pendingBytes = 0;
    clientWs.on('message', (data: Buffer | string, isBinary: boolean) => {
      if (gwWs.readyState !== WebSocket.OPEN) {
        // Gateway not ready yet — queue, but keep memory bounded
        const size = typeof data === 'string' ? Buffer.byteLength(data) : data.length;
        if (pendingMessages.length >= MAX_PENDING_MESSAGES || pendingBytes + size > MAX_PENDING_BYTES) {
          clientWs.close(1008, 'Too many pending messages');
          return;
        }
        pendingBytes += size;
        pendingMessages.push({ data, isBinary });
        return;
      }
      // Intercept connect request to inject device identity
      if (!handshakeComplete && !isBinary && challengeNonce) {
        try {
          const msg = JSON.parse(data.toString());
          if (msg.type === 'req' && msg.method === 'connect' && msg.params) {
            const modified = injectDeviceIdentity(msg, challengeNonce);
            gwWs.send(JSON.stringify(modified));
            handshakeComplete = true; // Only intercept the first connect
            return;
          }
        } catch {
          // Not JSON or parse error — pass through
        }
      }
      gwWs.send(isBinary ? data : data.toString());
    });
    // Register gateway→client relay IMMEDIATELY (before open) to avoid
    // dropping messages that arrive between readyState=OPEN and the 'open' callback.
    gwWs.on('message', (data: Buffer | string, isBinary: boolean) => {
      // Intercept connect.challenge to capture nonce
      if (!handshakeComplete && !isBinary) {
        try {
          const msg = JSON.parse(data.toString());
          if (msg.type === 'event' && msg.event === 'connect.challenge' && msg.payload?.nonce) {
            challengeNonce = msg.payload.nonce;
          }
        } catch { /* ignore */ }
      }
      if (clientWs.readyState === WebSocket.OPEN) {
        clientWs.send(isBinary ? data : data.toString());
      }
    });
    gwWs.on('open', () => {
      // Flush buffered messages
      for (const msg of pendingMessages) {
        // Check for connect request in buffered messages too
        if (!handshakeComplete && !msg.isBinary && challengeNonce) {
          try {
            const parsed = JSON.parse(msg.data.toString());
            if (parsed.type === 'req' && parsed.method === 'connect' && parsed.params) {
              const modified = injectDeviceIdentity(parsed, challengeNonce);
              gwWs.send(JSON.stringify(modified));
              handshakeComplete = true;
              continue;
            }
          } catch { /* pass through */ }
        }
        gwWs.send(msg.isBinary ? msg.data : msg.data.toString());
      }
      pendingMessages.length = 0;
    });
    // Either side failing or closing tears down the other half of the bridge.
    gwWs.on('error', (err) => {
      console.error('[ws-proxy] Gateway error:', err.message);
      clientWs.close();
    });
    gwWs.on('close', (code, reason) => {
      console.log(`[ws-proxy] Gateway closed: code=${code}, reason=${reason?.toString()}`);
      clientWs.close();
    });
    clientWs.on('close', (code, reason) => {
      console.log(`[ws-proxy] Client closed: code=${code}, reason=${reason?.toString()}`);
      gwWs.close();
    });
    clientWs.on('error', (err) => {
      console.error('[ws-proxy] Client error:', err.message);
      gwWs.close();
    });
  });
}
/**
* Inject Nerve's device identity into a connect request.
* This adds the `device` block with Ed25519 signature so the gateway
* grants operator.read/operator.write scopes.
*/
/** Shape of the `params` object on an incoming `connect` request frame. */
interface ConnectParams {
  client?: { id?: string; mode?: string };   // caller-supplied client identity
  role?: string;                             // requested role (defaults to 'operator')
  scopes?: string[];                         // requested permission scopes
  auth?: { token?: string };                 // gateway auth token, if provided
}
/**
 * Inject Nerve's device identity into a `connect` request frame.
 *
 * Preserves the caller's client id/mode, role, scopes and token, guarantees
 * the scope list contains `operator.read`/`operator.write`, and attaches the
 * signed `device` block (via createDeviceBlock) so the gateway grants those
 * scopes.
 *
 * @param msg   Parsed `connect` request frame; a shallow copy is returned.
 * @param nonce Challenge nonce captured from the gateway's `connect.challenge` event.
 * @returns A new message object with updated `params.scopes` and `params.device`.
 */
function injectDeviceIdentity(msg: Record<string, unknown>, nonce: string): Record<string, unknown> {
  const params = (msg.params || {}) as ConnectParams;
  const clientId = params.client?.id || 'webchat-ui';
  const clientMode = params.client?.mode || 'webchat';
  const role = params.role || 'operator';
  const scopes = params.scopes || ['operator.admin', 'operator.read', 'operator.write'];
  const token = params.auth?.token || '';
  // Ensure scopes include read/write; Set dedupes while keeping insertion order.
  const scopeSet = new Set(scopes);
  scopeSet.add('operator.read');
  scopeSet.add('operator.write');
  // Spread of Set<string> is already string[] — no type assertion needed.
  const finalScopes = [...scopeSet];
  const device = createDeviceBlock({
    clientId,
    clientMode,
    role,
    scopes: finalScopes,
    token,
    nonce,
  });
  console.log(`[ws-proxy] Injected device identity: ${device.id.substring(0, 12)}`);
  return {
    ...msg,
    params: {
      ...params,
      scopes: finalScopes,
      device,
    },
  };
}

View file

@ -0,0 +1,40 @@
/**
* Cache-Control middleware for static assets.
*
* - API routes and `/health`: `no-cache`
* - Vite-hashed assets (e.g. `index-Pbmes8jg.js`): immutable, 1-year max-age
* - Other static files: `must-revalidate`
* @module
*/
import type { MiddlewareHandler } from 'hono';
/**
* Pattern matching hashed filenames from Vite builds.
* e.g. index-Pbmes8jg.js, style-CsmNuK-P.css
* Vite uses base64url-ish hashes (mixed case, digits, hyphens, underscores).
*/
const HASHED_ASSET_RE = /-[a-zA-Z0-9_-]{6,}\.\w+$/;
/**
 * Set Cache-Control per response class: API/health → no-cache, hashed
 * bundles → immutable 1-year, other static files → revalidate.
 */
export const cacheHeaders: MiddlewareHandler = async (c, next) => {
  await next();
  const requestPath = c.req.path;
  if (requestPath === '/health' || requestPath.startsWith('/api/')) {
    // API routes and the health probe must never be cached.
    c.header('Cache-Control', 'no-cache');
  } else if (HASHED_ASSET_RE.test(requestPath)) {
    // Content-hashed bundles never change: cache for a year.
    c.header('Cache-Control', 'public, max-age=31536000, immutable');
  } else if (requestPath !== '/' && requestPath.includes('.')) {
    // Plain static files (index.html, favicon, …) must be revalidated.
    c.header('Cache-Control', 'public, max-age=0, must-revalidate');
  }
};

View file

@ -0,0 +1,23 @@
/**
* Global error handler middleware.
*
* Catches unhandled errors thrown by route handlers and returns a consistent
* response: JSON `{ error }` for `/api/*` routes, plain text otherwise.
* In development mode, stack traces are logged to stderr.
* @module
*/
import type { ErrorHandler } from 'hono';
const isDev = process.env.NODE_ENV !== 'production';
/**
 * Global error handler: logs the error (stack only in dev) and returns a
 * generic 500 so internals are never leaked to clients. API routes get a
 * JSON body; everything else gets plain text.
 */
export const errorHandler: ErrorHandler = (err, c) => {
  console.error('[server] unhandled error:', err.message || err);
  if (isDev && err.stack) {
    console.error('[server] stack:', err.stack);
  }
  // Fixed: the old check `startsWith('/api/') || startsWith('/api')` was
  // redundant (the first is subsumed by the second) and also matched
  // unrelated paths like '/apifoo'. Match the /api prefix precisely.
  const path = c.req.path;
  if (path === '/api' || path.startsWith('/api/')) {
    return c.json({ error: 'Internal server error' }, 500);
  }
  return c.text('Internal server error', 500);
};

View file

@ -0,0 +1,187 @@
/** Tests for the sliding-window rate limiter middleware. */
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { Hono } from 'hono';
import { rateLimit, getClientId } from './rate-limit.js';
/**
* The rate limiter uses a module-level Map, so we use unique client IPs
* per test to avoid cross-test contamination.
*
* In test environments, getConnInfo() is unavailable (no real socket), so
* directIp falls back to 'unknown'. Since 'unknown' is not a trusted proxy,
* forwarded headers (x-forwarded-for / x-real-ip) are NOT trusted.
*
* To test per-client rate limiting, we override getClientId at the middleware
* level by injecting a client ID via a custom header that the test helper
* reads. In production, getConnInfo provides the real socket IP.
*
* For these tests, we use a middleware that sets a custom client id from
* a test-only header, simulating distinct clients.
*/
let testCounter = 0;
/** Produce a client id unique to this test run (counter + timestamp). */
function uniqueIp() {
  testCounter += 1;
  return `test-${testCounter}.${Date.now()}`;
}
/**
 * Test helper: inject a per-client override by setting c.set('rateLimitClientId', ...)
 * before the rate limiter runs. Clients are distinguished via the
 * x-test-client-id header instead of a real socket address.
 */
function clientIdOverride() {
  return async (c: Parameters<typeof getClientId>[0], next: () => Promise<void>) => {
    const overrideId = c.req.header('x-test-client-id');
    if (overrideId) {
      c.set('rateLimitClientId' as never, overrideId as never);
    }
    await next();
  };
}
describe('rate-limit middleware', () => {
  // Fake timers let window-expiry tests advance time deterministically.
  beforeEach(() => {
    vi.useFakeTimers();
  });
  afterEach(() => {
    vi.useRealTimers();
  });
  // App factory: the override middleware runs before the limiter so each
  // test can choose its own client id via the x-test-client-id header.
  function createApp(maxRequests: number, windowMs: number) {
    const app = new Hono();
    const limiter = rateLimit({ maxRequests, windowMs });
    app.use('/test', clientIdOverride());
    app.use('/test', limiter);
    app.get('/test', (c) => c.text('ok'));
    return app;
  }
  // Request options carrying the per-test client id.
  function req(ip: string) {
    return { headers: { 'x-test-client-id': ip } };
  }
  it('should allow requests under the limit', async () => {
    const app = createApp(3, 60_000);
    const ip = uniqueIp();
    const res = await app.request('/test', req(ip));
    expect(res.status).toBe(200);
    expect(await res.text()).toBe('ok');
    expect(res.headers.get('X-RateLimit-Limit')).toBe('3');
    expect(res.headers.get('X-RateLimit-Remaining')).toBe('2');
  });
  it('should return 429 when limit exceeded', async () => {
    const app = createApp(2, 60_000);
    const ip = uniqueIp();
    await app.request('/test', req(ip));
    await app.request('/test', req(ip));
    const res = await app.request('/test', req(ip));
    expect(res.status).toBe(429);
    expect(await res.text()).toBe('Too many requests. Please try again later.');
    expect(res.headers.get('X-RateLimit-Remaining')).toBe('0');
    expect(res.headers.get('Retry-After')).toBeTruthy();
    expect(res.headers.get('X-RateLimit-Reset')).toBeTruthy();
  });
  it('should track remaining count correctly', async () => {
    const app = createApp(5, 60_000);
    const ip = uniqueIp();
    const res1 = await app.request('/test', req(ip));
    expect(res1.headers.get('X-RateLimit-Remaining')).toBe('4');
    const res2 = await app.request('/test', req(ip));
    expect(res2.headers.get('X-RateLimit-Remaining')).toBe('3');
    const res3 = await app.request('/test', req(ip));
    expect(res3.headers.get('X-RateLimit-Remaining')).toBe('2');
  });
  it('should reset after window expires', async () => {
    const app = createApp(2, 1000);
    const ip = uniqueIp();
    await app.request('/test', req(ip));
    await app.request('/test', req(ip));
    let res = await app.request('/test', req(ip));
    expect(res.status).toBe(429);
    // Advance just past the 1s window so both earlier timestamps expire.
    vi.advanceTimersByTime(1001);
    res = await app.request('/test', req(ip));
    expect(res.status).toBe(200);
    expect(res.headers.get('X-RateLimit-Remaining')).toBe('1');
  });
  it('should rate limit per path', async () => {
    // Buckets are keyed by clientId:path, so /a and /b count independently.
    const app = new Hono();
    const limiter = rateLimit({ maxRequests: 1, windowMs: 60_000 });
    app.use('/a', clientIdOverride());
    app.use('/b', clientIdOverride());
    app.use('/a', limiter);
    app.use('/b', limiter);
    app.get('/a', (c) => c.text('a'));
    app.get('/b', (c) => c.text('b'));
    const ip = uniqueIp();
    await app.request('/a', req(ip));
    const resA = await app.request('/a', req(ip));
    expect(resA.status).toBe(429);
    const resB = await app.request('/b', req(ip));
    expect(resB.status).toBe(200);
  });
  it('should isolate rate limits between different clients', async () => {
    const app = createApp(1, 60_000);
    const ip1 = uniqueIp();
    const ip2 = uniqueIp();
    await app.request('/test', req(ip1));
    const blocked = await app.request('/test', req(ip1));
    expect(blocked.status).toBe(429);
    const allowed = await app.request('/test', req(ip2));
    expect(allowed.status).toBe(200);
  });
  it('should use sliding window (partial expiry)', async () => {
    const app = createApp(3, 1000);
    const ip = uniqueIp();
    // t=0: first request
    await app.request('/test', req(ip));
    // t=500: second request
    vi.advanceTimersByTime(500);
    await app.request('/test', req(ip));
    // t=700: third request
    vi.advanceTimersByTime(200);
    await app.request('/test', req(ip));
    // t=700: should be blocked (3/3 used)
    let res = await app.request('/test', req(ip));
    expect(res.status).toBe(429);
    // t=1001: first request expires, one slot opens
    vi.advanceTimersByTime(301);
    res = await app.request('/test', req(ip));
    expect(res.status).toBe(200);
  });
  it('should fall back to "unknown" when no client id is available', async () => {
    const app = new Hono();
    const limiter = rateLimit({ maxRequests: 100, windowMs: 60_000 });
    app.use('/test', limiter);
    app.get('/test', (c) => c.text('ok'));
    // No x-test-client-id header, no real socket → falls back to 'unknown'
    const res = await app.request('/test');
    expect(res.status).toBe(200);
    // All anonymous clients share the 'unknown' bucket
  });
});

View file

@ -0,0 +1,160 @@
/**
* Simple in-memory rate limiter middleware for Hono.
*
* Uses a sliding-window approach keyed by `clientIP:path`. Includes automatic
* periodic cleanup, a hard cap on store size to prevent memory amplification
* from spoofed IPs, and configurable trusted-proxy support for `X-Forwarded-For`.
*
* Presets exported: {@link rateLimitTTS}, {@link rateLimitTranscribe}, {@link rateLimitGeneral}.
* @module
*/
import type { Context, Next } from 'hono';
import { getConnInfo } from '@hono/node-server/conninfo';
/** Per-(client,path) bucket: request timestamps inside the sliding window. */
interface RateLimitEntry {
  timestamps: number[];
}
/** Options accepted by {@link rateLimit}. */
interface RateLimitConfig {
  /** Maximum requests allowed in the window */
  maxRequests: number;
  /** Time window in milliseconds */
  windowMs: number;
}
const store = new Map<string, RateLimitEntry>();
/** Hard cap on store size to prevent memory amplification from spoofed IPs */
const MAX_STORE_SIZE = 10_000;
// Cleanup old entries every 5 minutes
const CLEANUP_INTERVAL = 5 * 60 * 1000;
const MAX_WINDOW_MS = 60 * 1000; // largest window used by any preset
/** Drop timestamps older than the largest window; delete buckets that empty out. */
function cleanup(): void {
  const cutoff = Date.now() - MAX_WINDOW_MS;
  for (const [bucketKey, bucket] of store) {
    const live = bucket.timestamps.filter((ts) => ts > cutoff);
    if (live.length === 0) {
      store.delete(bucketKey);
    } else {
      bucket.timestamps = live;
    }
  }
}
// Interval-based cleanup so entries don't pile up during idle periods
const cleanupInterval = setInterval(cleanup, CLEANUP_INTERVAL);
cleanupInterval.unref(); // don't keep process alive just for cleanup
/**
 * Trusted proxy IPs that are allowed to set X-Forwarded-For / X-Real-IP.
 * Default: loopback only. Extend via TRUSTED_PROXIES env (comma-separated).
 */
const TRUSTED_PROXIES = new Set(['127.0.0.1', '::1', '::ffff:127.0.0.1']);
// Allow additional trusted proxies via env
for (const candidate of (process.env.TRUSTED_PROXIES ?? '').split(',')) {
  const extra = candidate.trim();
  if (extra) TRUSTED_PROXIES.add(extra);
}
/**
 * Get client identifier from request.
 *
 * Prefers a middleware-injected override, then the real TCP socket address
 * (via getConnInfo — not spoofable). Forwarded headers (X-Forwarded-For,
 * X-Real-IP) are honoured only when the socket address is a trusted proxy.
 */
export function getClientId(c: Context): string {
  // Allow middleware-injected override (for testing / custom client identification)
  const override = c.get('rateLimitClientId' as never) as string | undefined;
  if (override) return override;
  // Real TCP socket remote address (not spoofable).
  let directIp = 'unknown';
  try {
    directIp = getConnInfo(c).remote.address || 'unknown';
  } catch {
    // getConnInfo may fail in test environments — fall back to 'unknown'
  }
  // Only trust forwarded headers when the socket belongs to a known proxy.
  if (!TRUSTED_PROXIES.has(directIp)) return directIp;
  // X-Forwarded-For (standard) first, then X-Real-IP (nginx convention).
  const forwarded = c.req.header('x-forwarded-for');
  if (forwarded) return forwarded.split(',')[0].trim();
  return c.req.header('x-real-ip') || directIp;
}
/**
* Create a rate limiting middleware
*/
export function rateLimit(config: RateLimitConfig) {
const { maxRequests, windowMs } = config;
return async (c: Context, next: Next) => {
const clientId = getClientId(c);
const path = c.req.path;
const key = `${clientId}:${path}`;
const now = Date.now();
const cutoff = now - windowMs;
let entry = store.get(key);
if (!entry) {
// Enforce store size cap — evict oldest entry if full
if (store.size >= MAX_STORE_SIZE) {
const oldestKey = store.keys().next().value;
if (oldestKey !== undefined) store.delete(oldestKey);
}
entry = { timestamps: [] };
store.set(key, entry);
}
// Remove old timestamps outside the window
entry.timestamps = entry.timestamps.filter((ts) => ts > cutoff);
if (entry.timestamps.length >= maxRequests) {
const oldestTs = entry.timestamps[0];
const retryAfter = Math.ceil((oldestTs + windowMs - now) / 1000);
c.header('Retry-After', String(retryAfter));
c.header('X-RateLimit-Limit', String(maxRequests));
c.header('X-RateLimit-Remaining', '0');
c.header('X-RateLimit-Reset', String(Math.ceil((oldestTs + windowMs) / 1000)));
return c.text('Too many requests. Please try again later.', 429);
}
// Add current timestamp
entry.timestamps.push(now);
// Set rate limit headers
c.header('X-RateLimit-Limit', String(maxRequests));
c.header('X-RateLimit-Remaining', String(maxRequests - entry.timestamps.length));
await next();
};
}
// NOTE: keep each preset's windowMs ≤ MAX_WINDOW_MS (used by cleanup()) so the
// periodic sweep never discards timestamps that are still inside a live window.
/** Preset: 10 requests per minute (for expensive operations like TTS) */
export const rateLimitTTS = rateLimit({ maxRequests: 10, windowMs: 60 * 1000 });
/** Preset: 30 requests per minute (for transcription) */
export const rateLimitTranscribe = rateLimit({ maxRequests: 30, windowMs: 60 * 1000 });
/** Preset: 60 requests per minute (for general API calls like memories) */
export const rateLimitGeneral = rateLimit({ maxRequests: 60, windowMs: 60 * 1000 });

View file

@ -0,0 +1,96 @@
/**
* Security headers middleware.
*
* Adds essential security headers to all responses:
* - Content-Security-Policy (CSP)
* - X-Frame-Options
* - X-Content-Type-Options
* - Strict-Transport-Security (HSTS)
* - Referrer-Policy
* - X-XSS-Protection
*/
import type { MiddlewareHandler } from 'hono';
/**
* Content Security Policy
*
* - default-src 'self': Only allow resources from same origin by default
* - script-src 'self': Only allow scripts from same origin
* - style-src: Allow self, inline styles (needed for some UI libraries), and Google Fonts
* - font-src: Allow self and Google Fonts CDN
* - connect-src: Allow self and WebSocket connections to localhost
* - img-src: Allow self, data URIs, and blob URLs (for generated images)
* - frame-ancestors 'none': Prevent framing (like X-Frame-Options: DENY)
*/
// Build connect-src dynamically: always include localhost, plus any extra CSP sources
const baseConnectSrc = "'self' ws://localhost:* wss://localhost:* http://localhost:* https://localhost:* ws://127.0.0.1:* wss://127.0.0.1:* http://127.0.0.1:* https://127.0.0.1:*";
/**
 * Build the CSP directive string lazily — env vars may not be loaded at
 * import time (dotenv/config runs in config.ts which may be imported after
 * this module). Memoised after the first call.
 */
let _cspDirectives: string | null = null;
function getCspDirectives(): string {
  if (_cspDirectives) return _cspDirectives;
  // CSP_CONNECT_EXTRA: space-separated additional connect-src entries, e.g.
  // "wss://your-server.example.com:3443 https://your-server.example.com:3443".
  // Sanitize: strip semicolons and CR/LF (directive injection) and keep only
  // http(s)/ws(s) URLs.
  const extraTokens = (process.env.CSP_CONNECT_EXTRA ?? '')
    .replace(/[;\r\n]/g, '')
    .trim()
    .split(/\s+/)
    .filter((token) => /^(https?|wss?):\/\//.test(token));
  const connectSrc = extraTokens.length > 0
    ? `${baseConnectSrc} ${extraTokens.join(' ')}`
    : baseConnectSrc;
  _cspDirectives = [
    "default-src 'self'",
    "script-src 'self' https://s3.tradingview.com",
    "style-src 'self' 'unsafe-inline' https://fonts.googleapis.com",
    "font-src 'self' https://fonts.gstatic.com",
    `connect-src ${connectSrc}`,
    "img-src 'self' data: blob:",
    "media-src 'self' blob:", // Allow blob: URLs for TTS audio playback
    "frame-src https://s3.tradingview.com https://www.tradingview.com https://www.tradingview-widget.com https://s.tradingview.com",
    "frame-ancestors 'none'",
    "base-uri 'self'",
    "form-action 'self'",
  ].join('; ');
  return _cspDirectives;
}
/**
 * Apply security headers to every response. The CSP is built lazily (it is
 * env-dependent); HSTS is sent only in production; `Cache-Control: no-store`
 * is a default that route-specific middleware may have already overridden.
 */
export const securityHeaders: MiddlewareHandler = async (c, next) => {
  await next();
  c.header('Content-Security-Policy', getCspDirectives()); // defense in depth against XSS
  c.header('X-Frame-Options', 'DENY');                      // prevent clickjacking
  c.header('X-Content-Type-Options', 'nosniff');            // prevent MIME sniffing
  c.header('X-XSS-Protection', '1; mode=block');            // legacy filter for older browsers
  if (process.env.NODE_ENV === 'production') {
    // Enforce HTTPS for one year, subdomains included.
    c.header('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');
  }
  c.header('Referrer-Policy', 'strict-origin-when-cross-origin');
  // Default to no caching unless another middleware already set a policy.
  if (!c.res.headers.get('Cache-Control')) {
    c.header('Cache-Control', 'no-store');
  }
};

View file

@ -0,0 +1,76 @@
/**
* GET/POST /api/agentlog Agent log persistence.
*
* GET: Returns the full agent log as JSON array.
* POST: Appends an entry (validated with Zod).
*/
import { Hono } from 'hono';
import { zValidator } from '@hono/zod-validator';
import { z } from 'zod';
import { config } from '../lib/config.js';
import { readJSON, writeJSON } from '../lib/files.js';
import { createMutex } from '../lib/mutex.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import type { AgentLogEntry } from '../types.js';
// Serialises read→modify→write cycles on the agent log file.
const withLock = createMutex();
const app = new Hono();
/** Validation schema for agent log entries — all fields optional. */
const agentLogSchema = z.object({
  ts: z.number().optional(),      // epoch ms; the POST handler fills in Date.now() when absent
  type: z.string().optional(),
  message: z.string().optional(),
  level: z.string().optional(),
  icon: z.string().optional(),
  text: z.string().optional(),
  data: z.record(z.string(), z.unknown()).optional(),
});
/** Read the full agent log (the `[]` argument is presumably the fallback when the file is missing — confirm readJSON's contract). */
async function readAgentLog(): Promise<AgentLogEntry[]> {
  return readJSON<AgentLogEntry[]>(config.agentLogPath, []);
}
/** Persist the log, keeping only the newest `config.agentLogMax` entries. */
async function writeAgentLog(entries: AgentLogEntry[]): Promise<void> {
  const trimmed = entries.slice(-config.agentLogMax);
  await writeJSON(config.agentLogPath, trimmed);
}
/** GET /api/agentlog — return the persisted log (empty array on read failure). */
app.get('/api/agentlog', async (c) => {
  try {
    return c.json(await readAgentLog());
  } catch (err) {
    console.error('[agentlog] read error:', (err as Error).message);
    return c.json([]);
  }
});
/** POST /api/agentlog — validate and append one entry under the write lock. */
app.post(
  '/api/agentlog',
  rateLimitGeneral,
  zValidator('json', agentLogSchema, (result, c) => {
    if (result.success) return;
    const msg = result.error.issues[0]?.message || 'Invalid log entry';
    return c.json({ ok: false, error: msg }, 400);
  }),
  async (c) => {
    try {
      const entry = c.req.valid('json') as AgentLogEntry;
      // The mutex prevents interleaved read/append/write cycles from losing entries.
      await withLock(async () => {
        const entries = await readAgentLog();
        entries.push({ ...entry, ts: entry.ts || Date.now() });
        await writeAgentLog(entries);
      });
      return c.json({ ok: true });
    } catch (err) {
      console.error('[agentlog] write error:', (err as Error).message);
      return c.json({ ok: false, error: 'Failed to write log' }, 500);
    }
  },
);
export default app;

88
server/routes/api-keys.ts Normal file
View file

@ -0,0 +1,88 @@
/**
* API key management read status and write keys to .env
*
* GET /api/keys returns which keys are configured (booleans, never exposes values)
* PUT /api/keys accepts key values, writes to .env, updates runtime config
* @module
*/
import { Hono } from 'hono';
import fs from 'node:fs';
import path from 'node:path';
import { config } from '../lib/config.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
const app = new Hono();
/** Path to the .env file in the project root. */
const ENV_PATH = path.resolve(process.cwd(), '.env');
/**
 * Write a key=value pair back to .env, preserving comments and line order.
 *
 * The value arrives from an HTTP request body (PUT /api/keys), so CR/LF are
 * stripped before writing — otherwise a crafted value could inject extra
 * lines (and thus arbitrary variables) into the .env file.
 *
 * @param key   Env variable name (hard-coded by callers, not user input).
 * @param value Raw value; newlines are removed defensively.
 */
function writeEnvKey(key: string, value: string): void {
  // Defense against .env line injection from untrusted input.
  const safeValue = value.replace(/[\r\n]/g, '');
  let lines: string[] = [];
  try {
    lines = fs.readFileSync(ENV_PATH, 'utf8').split('\n');
  } catch { /* file doesn't exist yet */ }
  let found = false;
  const updated = lines.map((line) => {
    const trimmed = line.trim();
    // Leave comments and blank lines untouched.
    if (trimmed.startsWith('#') || !trimmed) return line;
    const eqIdx = trimmed.indexOf('=');
    if (eqIdx < 0) return line;
    const lineKey = trimmed.slice(0, eqIdx).trim();
    if (lineKey === key) {
      found = true;
      return `${key}=${safeValue}`;
    }
    return line;
  });
  if (!found) {
    updated.push(`${key}=${safeValue}`);
  }
  fs.writeFileSync(ENV_PATH, updated.join('\n'));
}
/** GET /api/keys — report whether each API key is configured (never the values). */
app.get('/api/keys', rateLimitGeneral, (c) =>
  c.json({
    openaiKeySet: Boolean(config.openaiApiKey),
    replicateKeySet: Boolean(config.replicateApiToken),
  }),
);
/** PUT /api/keys — save API keys to .env and update runtime config */
app.put('/api/keys', rateLimitGeneral, async (c) => {
  try {
    const body = await c.req.json() as Record<string, string>;
    const results: string[] = [];
    // Supported keys: [raw body value, env var name, runtime config property].
    const updates: Array<[string | undefined, string, string]> = [
      [body.openaiKey, 'OPENAI_API_KEY', 'openaiApiKey'],
      [body.replicateToken, 'REPLICATE_API_TOKEN', 'replicateApiToken'],
    ];
    for (const [raw, envKey, configKey] of updates) {
      if (raw === undefined) continue;
      const val = raw.trim();
      writeEnvKey(envKey, val);
      // Update runtime config (cast away readonly for hot-reload)
      (config as Record<string, unknown>)[configKey] = val;
      results.push(val ? `${envKey} saved` : `${envKey} cleared`);
    }
    return c.json({
      ok: true,
      message: results.join(', ') || 'No changes',
      openaiKeySet: !!config.openaiApiKey,
      replicateKeySet: !!config.replicateApiToken,
    });
  } catch {
    return c.text('Invalid request', 400);
  }
});
export default app;

View file

@ -0,0 +1,142 @@
/**
* GET /api/claude-code-limits Claude Code rate limit information.
*
* Spawns the Claude CLI via PTY (see {@link getClaudeUsage}), captures the
* `/usage` output, and normalises reset timestamps to epoch-ms so the
* frontend doesn't need to parse Claude's human-readable time strings.
* Results are cached via {@link createCachedFetch} (5 min TTL, 30 s on failure).
* @module
*/
import { Hono } from 'hono';
import { createCachedFetch } from '../lib/cached-fetch.js';
import { getClaudeUsage } from '../services/claude-usage.js';
const app = new Hono();
// ── Types ────────────────────────────────────────────────────────────
/** One rate-limit window exactly as reported by the Claude CLI. */
interface RawLimitWindow {
  used_percent: number;
  left_percent: number;
  resets_at: string; // human-readable from CLI, e.g. "7:59pm (UTC)"
}
/** Window with the reset time additionally normalised to epoch-ms. */
interface NormalisedLimitWindow {
  used_percent: number;
  left_percent: number;
  resets_at_epoch: number | null; // null when the CLI string can't be parsed
  resets_at_raw: string;          // original CLI string, kept for display
}
/** Response shape for GET /api/claude-code-limits. */
interface ClaudeCodeLimitsResponse {
  available: boolean;
  session_limit?: NormalisedLimitWindow;
  weekly_limit?: NormalisedLimitWindow;
  error?: string;
}
// ── Reset-time parser ────────────────────────────────────────────────
// Claude CLI outputs times like "7:59pm", "1am", "Feb 13, 6:59pm" — all UTC.
const MONTHS: Record<string, number> = {
  Jan: 0, Feb: 1, Mar: 2, Apr: 3, May: 4, Jun: 5,
  Jul: 6, Aug: 7, Sep: 8, Oct: 9, Nov: 10, Dec: 11,
};
/**
 * Convert a Claude CLI reset string to an epoch-ms timestamp (UTC), or null
 * when the string matches none of the known formats.
 *
 * Times without a date mean the next occurrence (today, or tomorrow if
 * already past); dated times assume the current year, rolling to next year
 * when more than a week in the past.
 */
function parseClaudeResetToEpochMs(raw: string): number | null {
  if (!raw) return null;
  // Strip parenthesised suffixes such as "(UTC)".
  const s = raw.replace(/\s*\([^)]*\)\s*/g, '').trim();
  // 12h → 24h conversion ("12am" → 0, "12pm" → 12).
  const to24h = (hour: string, meridiem: string): number =>
    (parseInt(hour, 10) % 12) + (meridiem.toLowerCase() === 'pm' ? 12 : 0);
  // Next occurrence of HH:MM UTC — today, or tomorrow if already past.
  const nextDailyUtc = (h24: number, minute: number): number => {
    const now = new Date();
    let ts = Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), now.getUTCDate(), h24, minute);
    if (ts <= Date.now()) ts += 86_400_000;
    return ts;
  };
  // Dated time in the current year; more than a week in the past → next year.
  const datedUtc = (monthName: string, day: string, h24: number, minute: number): number | null => {
    const mon = MONTHS[monthName];
    if (mon == null) return null;
    const y = new Date().getUTCFullYear();
    let ts = Date.UTC(y, mon, parseInt(day, 10), h24, minute);
    if (ts <= Date.now() - 7 * 86_400_000) ts = Date.UTC(y + 1, mon, parseInt(day, 10), h24, minute);
    return ts;
  };
  // "7:59pm", "7:59 pm"
  const hhmm = s.match(/^(\d{1,2}):(\d{2})\s*(am|pm)$/i);
  if (hhmm) return nextDailyUtc(to24h(hhmm[1], hhmm[3]), parseInt(hhmm[2], 10));
  // "1am", "7pm"
  const hOnly = s.match(/^(\d{1,2})\s*(am|pm)$/i);
  if (hOnly) return nextDailyUtc(to24h(hOnly[1], hOnly[2]), 0);
  // "Feb 13, 6:59pm"
  const dt = s.match(/^([A-Za-z]{3})\s+(\d{1,2}),\s*(\d{1,2}):(\d{2})\s*(am|pm)$/i);
  if (dt) return datedUtc(dt[1], dt[2], to24h(dt[3], dt[5]), parseInt(dt[4], 10));
  // "Feb 13, 7pm"
  const dh = s.match(/^([A-Za-z]{3})\s+(\d{1,2}),\s*(\d{1,2})\s*(am|pm)$/i);
  if (dh) return datedUtc(dh[1], dh[2], to24h(dh[3], dh[4]), 0);
  return null;
}
/** Attach the parsed epoch reset time alongside the raw CLI string. */
function normaliseWindow(w: RawLimitWindow): NormalisedLimitWindow {
  const { used_percent, left_percent, resets_at } = w;
  return {
    used_percent,
    left_percent,
    resets_at_epoch: parseClaudeResetToEpochMs(resets_at),
    resets_at_raw: resets_at,
  };
}
// ── Fetch + cache ────────────────────────────────────────────────────
/** Run the CLI usage probe and normalise both limit windows; never throws. */
async function getClaudeCodeLimits(): Promise<ClaudeCodeLimitsResponse> {
  try {
    const raw = await getClaudeUsage();
    const { available, session_limit, weekly_limit, error } = raw;
    return {
      available,
      session_limit: session_limit ? normaliseWindow(session_limit) : undefined,
      weekly_limit: weekly_limit ? normaliseWindow(weekly_limit) : undefined,
      error,
    };
  } catch (error) {
    console.error('Error fetching Claude Code limits:', error);
    return {
      available: false,
      error: error instanceof Error ? error.message : 'Unknown error',
    };
  }
}
// Cache results; only successful probes (available=true) are reused.
const getClaudeCodeLimitsCached = createCachedFetch(getClaudeCodeLimits, undefined, {
  isValid: (r) => r.available,
});
// ── Route ────────────────────────────────────────────────────────────
app.get('/api/claude-code-limits', async (c) => {
  try {
    return c.json(await getClaudeCodeLimitsCached());
  } catch (error) {
    console.error('Error in claude-code-limits endpoint:', error);
    return c.json({ available: false, error: 'Failed to fetch Claude Code limits' }, 500);
  }
});
export default app;

View file

@ -0,0 +1,235 @@
/**
* GET /api/codex-limits Codex (OpenAI) rate limit information.
*
* Fetches usage data from the OpenAI WHAM API using a locally-stored access
* token (`~/.codex/auth.json`). Falls back to parsing the most recent local
* `.jsonl` session files if the API is unreachable. Results are cached via
* {@link createCachedFetch} (5 min TTL, 30 s on failure).
* @module
*/
import { Hono } from 'hono';
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import { CODEX_DIR } from '../lib/constants.js';
import { createCachedFetch } from '../lib/cached-fetch.js';
const app = new Hono();
// ── Types ────────────────────────────────────────────────────────────
/** One rate-limit window (5-hour or weekly). */
interface LimitWindow {
  used_percent: number;
  left_percent: number;
  resets_at: number | null;           // epoch seconds
  resets_at_formatted: string | null; // human-readable (en-US, 24h clock)
}
/** Credit balance summary. */
interface Credits {
  has_credits: boolean;
  unlimited: boolean;
  balance: number | null;
}
/** Parsed rate-limit data, regardless of source (API or local sessions). */
interface RateLimits {
  five_hour_limit: LimitWindow;
  weekly_limit: LimitWindow;
  credits: Credits;
  plan_type: string | null;
}
/** Response shape for GET /api/codex-limits. */
interface CodexLimitsResponse {
  available: boolean;
  source?: string; // 'api' | 'local' when available
  error?: string;
  five_hour_limit?: LimitWindow;
  weekly_limit?: LimitWindow;
  credits?: Credits;
  plan_type?: string | null;
}
/** Raw payload from the OpenAI WHAM usage endpoint. */
interface OpenAIUsageResponse {
  plan_type: string;
  rate_limit: {
    primary_window: { used_percent: number; reset_at: number };
    secondary_window: { used_percent: number; reset_at: number };
  };
  credits: { has_credits: boolean; unlimited: boolean; balance: string };
}
// ── Helpers ──────────────────────────────────────────────────────────
/** Epoch-seconds → "HH:MM" (24h clock, en-US locale). */
function formatTime(epoch: number): string {
  const opts: Intl.DateTimeFormatOptions = { hour: '2-digit', minute: '2-digit', hour12: false };
  return new Date(epoch * 1000).toLocaleTimeString('en-US', opts);
}
/** Epoch-seconds → short date + "HH:MM" (24h clock, en-US locale). */
function formatDateTime(epoch: number): string {
  const opts: Intl.DateTimeFormatOptions = {
    hour: '2-digit',
    minute: '2-digit',
    hour12: false,
    day: '2-digit',
    month: 'short',
  };
  return new Date(epoch * 1000).toLocaleString('en-US', opts);
}
/** Build a LimitWindow from a used percentage and an optional epoch-seconds reset. */
function buildLimitWindow(usedPercent: number, resetAt: number | null, includeDate = false): LimitWindow {
  // Truthiness check (matches callers passing 0/null for "no reset time").
  const formatted = resetAt
    ? (includeDate ? formatDateTime(resetAt) : formatTime(resetAt))
    : null;
  return {
    used_percent: usedPercent,
    left_percent: 100 - usedPercent,
    resets_at: resetAt,
    resets_at_formatted: formatted,
  };
}
// ── Access token ─────────────────────────────────────────────────────
/** Read the Codex access token from <home>/<CODEX_DIR>/auth.json; null if absent or unreadable. */
async function getAccessToken(): Promise<string | null> {
  try {
    const authPath = path.join(os.homedir(), CODEX_DIR, 'auth.json');
    try {
      await fs.promises.access(authPath);
    } catch {
      return null; // no auth file present
    }
    const parsed = JSON.parse(await fs.promises.readFile(authPath, 'utf-8'));
    return parsed?.tokens?.access_token || null;
  } catch {
    return null; // unreadable file or malformed JSON
  }
}
// ── API fetch ────────────────────────────────────────────────────────
/**
 * Query the OpenAI WHAM usage endpoint. Returns null on any failure
 * (non-2xx, 10s timeout, network error) so callers can fall back to the
 * local-session parser.
 */
async function fetchFromAPI(token: string): Promise<RateLimits | null> {
  try {
    const response = await fetch('https://chatgpt.com/backend-api/wham/usage', {
      headers: { Authorization: `Bearer ${token}`, Accept: 'application/json' },
      signal: AbortSignal.timeout(10_000),
    });
    if (!response.ok) {
      console.error(`[codex-limits] API returned ${response.status}`);
      return null;
    }
    const data = (await response.json()) as OpenAIUsageResponse;
    const primary = data.rate_limit?.primary_window;
    const secondary = data.rate_limit?.secondary_window;
    const balanceRaw = data.credits?.balance;
    return {
      five_hour_limit: buildLimitWindow(primary?.used_percent || 0, primary?.reset_at || null),
      weekly_limit: buildLimitWindow(secondary?.used_percent || 0, secondary?.reset_at || null, true),
      credits: {
        has_credits: data.credits?.has_credits || false,
        unlimited: data.credits?.unlimited || false,
        balance: balanceRaw ? parseFloat(balanceRaw) : null,
      },
      plan_type: data.plan_type || null,
    };
  } catch (error) {
    console.error('[codex-limits] API fetch failed:', error);
    return null;
  }
}
// ── Local session fallback ───────────────────────────────────────────
/**
 * Scan local Codex session logs (<home>/<CODEX_DIR>/sessions/**/*.jsonl) for
 * the most recent `token_count` event carrying rate-limit data. Checks the
 * 10 newest files, reading each bottom-up so the latest event wins.
 * Returns null when nothing usable is found.
 */
async function parseLocalSessions(): Promise<RateLimits | null> {
  const codexDir = path.join(os.homedir(), CODEX_DIR, 'sessions');
  try {
    await fs.promises.access(codexDir);
  } catch {
    return null;
  }
  const sessionFiles: string[] = [];
  // Recursively collect every .jsonl file under the sessions directory.
  async function findJsonlFiles(dir: string) {
    try {
      const entries = await fs.promises.readdir(dir, { withFileTypes: true });
      for (const entry of entries) {
        const fullPath = path.join(dir, entry.name);
        if (entry.isDirectory()) await findJsonlFiles(fullPath);
        else if (entry.isFile() && entry.name.endsWith('.jsonl')) sessionFiles.push(fullPath);
      }
    } catch { /* ignore permission errors */ }
  }
  await findJsonlFiles(codexDir);
  // Sort by mtime async
  const fileStats = await Promise.all(
    sessionFiles.map(async (f) => ({ file: f, mtime: (await fs.promises.stat(f)).mtime.getTime() }))
  );
  fileStats.sort((a, b) => b.mtime - a.mtime); // newest first
  for (const { file: sessionFile } of fileStats.slice(0, 10)) {
    try {
      const lines = (await fs.promises.readFile(sessionFile, 'utf-8')).split('\n').filter(Boolean);
      // Walk backwards: the last token_count event in a file is the freshest.
      for (let i = lines.length - 1; i >= 0; i--) {
        try {
          const entry = JSON.parse(lines[i]);
          if (
            entry.type !== 'event_msg' ||
            entry.payload?.type !== 'token_count' ||
            !entry.payload?.rate_limits
          ) continue;
          const rl = entry.payload.rate_limits;
          const primary = rl.primary || {};
          const secondary = rl.secondary || {};
          return {
            five_hour_limit: buildLimitWindow(primary.used_percent || 0, primary.resets_at || null),
            weekly_limit: buildLimitWindow(secondary.used_percent || 0, secondary.resets_at || null, true),
            credits: rl.credits || { has_credits: false, unlimited: false, balance: null },
            plan_type: rl.plan_type || null,
          };
        } catch { continue; }
      }
    } catch { continue; }
  }
  return null;
}
// ── Cached fetcher ───────────────────────────────────────────────────
/** Prefer live API data; fall back to the newest local session logs. */
async function getCodexLimits(): Promise<CodexLimitsResponse> {
  const token = await getAccessToken();
  const apiLimits = token ? await fetchFromAPI(token) : null;
  if (apiLimits) return { available: true, source: 'api', ...apiLimits };
  const localLimits = await parseLocalSessions();
  if (localLimits) return { available: true, source: 'local', ...localLimits };
  return { available: false, error: 'No Codex data found' };
}
const getCodexLimitsCached = createCachedFetch(getCodexLimits, undefined, {
isValid: (r) => r.available,
});
// ── Route ────────────────────────────────────────────────────────────
/** GET /api/codex-limits — cached usage-limit snapshot for the UI. */
app.get('/api/codex-limits', async (c) => {
  try {
    const limits = await getCodexLimitsCached();
    return c.json(limits);
  } catch (error) {
    console.error('Error fetching Codex limits:', error);
    return c.json({ available: false, error: 'Failed to fetch Codex limits' }, 500);
  }
});
export default app;

View file

@ -0,0 +1,51 @@
/**
* GET /api/connect-defaults Provides gateway connection defaults for the browser.
*
* The ConnectDialog in the frontend needs the WebSocket URL and auth token.
* Instead of requiring users to enter these manually in the browser,
* this endpoint exposes the server's configured gateway URL and token
* so the frontend can pre-fill (or auto-connect).
*
* Security: The gateway token is only returned to loopback clients.
* Remote clients receive the wsUrl and agentName but token is null.
*/
import { Hono } from 'hono';
import { getConnInfo } from '@hono/node-server/conninfo';
import { config } from '../lib/config.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
const LOOPBACK_RE = /^(127\.\d+\.\d+\.\d+|::1|::ffff:127\.\d+\.\d+\.\d+)$/;
const app = new Hono();
/** GET /api/connect-defaults — gateway URL for everyone, token for loopback only. */
app.get('/api/connect-defaults', rateLimitGeneral, (c) => {
  // Resolve the client address; getConnInfo can throw in some environments.
  let remoteIp = '';
  try {
    remoteIp = getConnInfo(c).remote?.address ?? '';
  } catch {
    // fallback: not available in some test environments
  }
  const isLoopback = LOOPBACK_RE.test(remoteIp);
  // Translate the configured HTTP gateway URL into its WebSocket form.
  const gwUrl = config.gatewayUrl;
  let wsUrl: string;
  try {
    const parsed = new URL(gwUrl);
    wsUrl = `${parsed.protocol === 'https:' ? 'wss:' : 'ws:'}//${parsed.host}/ws`;
  } catch {
    // Unparseable URL: best-effort scheme swap.
    wsUrl = gwUrl.replace(/^http/, 'ws');
  }
  // The auth token is disclosed to loopback clients only.
  return c.json({
    wsUrl,
    token: isLoopback ? (config.gatewayToken || null) : null,
    agentName: config.agentName,
  });
});
export default app;

184
server/routes/crons.ts Normal file
View file

@ -0,0 +1,184 @@
/**
* Cron API Routes proxy to OpenClaw gateway
*
* GET /api/crons List all cron jobs
* POST /api/crons Create a new cron job
* PATCH /api/crons/:id Update a cron job
* DELETE /api/crons/:id Delete a cron job
* POST /api/crons/:id/toggle Toggle enabled/disabled
* POST /api/crons/:id/run Run a cron job immediately
* GET /api/crons/:id/runs Get run history
*/
import { Hono } from 'hono';
import { z } from 'zod';
import { invokeGatewayTool } from '../lib/gateway-client.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
// A schedule is one of three shapes, discriminated by `kind`:
// a one-shot timestamp ("at"), a fixed interval in milliseconds
// ("every", with an optional anchor), or a cron expression ("cron")
// with an optional timezone string.
const scheduleSchema = z.union([
  z.object({ kind: z.literal('at'), at: z.string() }),
  z.object({ kind: z.literal('every'), everyMs: z.number(), anchorMs: z.number().optional() }),
  z.object({ kind: z.literal('cron'), expr: z.string(), tz: z.string().optional() }),
]);
// Job payload, discriminated by `kind`: either a plain system-event text
// or an agent turn with optional model/thinking/timeout overrides.
const payloadSchema = z.union([
  z.object({ kind: z.literal('systemEvent'), text: z.string() }),
  z.object({ kind: z.literal('agentTurn'), message: z.string(), model: z.string().optional(), thinking: z.string().optional(), timeoutSeconds: z.number().optional() }),
]);
// Optional delivery settings — all fields optional and pass-through;
// the gateway applies its own defaults.
const deliverySchema = z.object({
  mode: z.enum(['none', 'announce']).optional(),
  channel: z.string().optional(),
  to: z.string().optional(),
  bestEffort: z.boolean().optional(),
}).optional();
// Body schema for POST /api/crons. Every field is optional so the
// gateway can supply defaults; flat legacy fields are accepted alongside
// the structured schedule/payload/delivery shapes.
const cronJobSchema = z.object({
  job: z.object({
    name: z.string().min(1).max(200).optional(),
    schedule: scheduleSchema.optional(),
    payload: payloadSchema.optional(),
    delivery: deliverySchema,
    sessionTarget: z.enum(['main', 'isolated']).optional(),
    enabled: z.boolean().optional(),
    notify: z.boolean().optional(),
    // Legacy compat — Nerve may send these flat fields
    prompt: z.string().max(10000).optional(),
    model: z.string().max(200).optional(),
    thinkingLevel: z.string().max(50).optional(),
    channel: z.string().max(200).optional(),
  }),
});
// Body schema for PATCH /api/crons/:id — same field set as creation,
// wrapped in `patch` instead of `job`.
const cronPatchSchema = z.object({
  patch: z.object({
    name: z.string().min(1).max(200).optional(),
    schedule: scheduleSchema.optional(),
    payload: payloadSchema.optional(),
    delivery: deliverySchema,
    sessionTarget: z.enum(['main', 'isolated']).optional(),
    enabled: z.boolean().optional(),
    notify: z.boolean().optional(),
    prompt: z.string().max(10000).optional(),
    model: z.string().max(200).optional(),
    thinkingLevel: z.string().max(50).optional(),
    channel: z.string().max(200).optional(),
  }),
});
const app = new Hono();
// Manual runs can be slow; give them a longer gateway timeout than usual.
const GATEWAY_RUN_TIMEOUT_MS = 60_000;
/** GET /api/crons — list every job, including disabled ones. */
app.get('/api/crons', rateLimitGeneral, async (c) => {
  try {
    const result = await invokeGatewayTool('cron', { action: 'list', includeDisabled: true });
    return c.json({ ok: true, result });
  } catch (err) {
    console.error('[crons] list error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
/**
 * POST /api/crons — create a new cron job.
 *
 * The body is validated with zod; legacy flat fields (prompt/model/…)
 * are passed through for backward compatibility.
 */
app.post('/api/crons', rateLimitGeneral, async (c) => {
  try {
    const raw = await c.req.json();
    const parsed = cronJobSchema.safeParse(raw);
    if (!parsed.success) return c.json({ ok: false, error: parsed.error.issues[0]?.message || 'Invalid body' }, 400);
    const body = parsed.data;
    // Request bodies can contain user-authored prompt text; dumping them
    // on every create is debug-only behavior, so gate it to development.
    if (process.env.NODE_ENV === 'development') {
      console.log('[crons] add raw input:', JSON.stringify(raw, null, 2));
      console.log('[crons] add parsed job:', JSON.stringify(body.job, null, 2));
    }
    const result = await invokeGatewayTool('cron', {
      action: 'add',
      job: body.job,
    });
    return c.json({ ok: true, result });
  } catch (err) {
    console.error('[crons] add error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
/** PATCH /api/crons/:id — update an existing job. */
app.patch('/api/crons/:id', rateLimitGeneral, async (c) => {
  const id = c.req.param('id');
  try {
    const parsed = cronPatchSchema.safeParse(await c.req.json());
    if (!parsed.success) {
      return c.json({ ok: false, error: parsed.error.issues[0]?.message || 'Invalid body' }, 400);
    }
    const result = await invokeGatewayTool('cron', {
      action: 'update',
      jobId: id,
      patch: parsed.data.patch,
    });
    return c.json({ ok: true, result });
  } catch (err) {
    console.error('[crons] update error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
/** DELETE /api/crons/:id — remove a job. */
app.delete('/api/crons/:id', rateLimitGeneral, async (c) => {
  const id = c.req.param('id');
  try {
    const result = await invokeGatewayTool('cron', { action: 'remove', jobId: id });
    return c.json({ ok: true, result });
  } catch (err) {
    console.error('[crons] remove error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
/**
 * POST /api/crons/:id/toggle — set a job's enabled flag.
 * The desired state comes from the request body ({ enabled: boolean });
 * a missing or unparseable body defaults to enabling the job.
 */
app.post('/api/crons/:id/toggle', rateLimitGeneral, async (c) => {
  const id = c.req.param('id');
  try {
    const { enabled } = await c.req.json<{ enabled: boolean }>().catch(() => ({ enabled: true }));
    const result = await invokeGatewayTool('cron', {
      action: 'update',
      jobId: id,
      patch: { enabled },
    });
    return c.json({ ok: true, result });
  } catch (err) {
    console.error('[crons] toggle error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
/** POST /api/crons/:id/run — trigger an immediate run (longer timeout). */
app.post('/api/crons/:id/run', rateLimitGeneral, async (c) => {
  const id = c.req.param('id');
  try {
    const result = await invokeGatewayTool(
      'cron',
      { action: 'run', jobId: id },
      GATEWAY_RUN_TIMEOUT_MS,
    );
    return c.json({ ok: true, result });
  } catch (err) {
    console.error('[crons] run error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
/** GET /api/crons/:id/runs — the 10 most recent run records for a job. */
app.get('/api/crons/:id/runs', rateLimitGeneral, async (c) => {
  const id = c.req.param('id');
  try {
    const result = await invokeGatewayTool('cron', { action: 'runs', jobId: id, limit: 10 });
    return c.json({ ok: true, result });
  } catch (err) {
    console.error('[crons] runs error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
export default app;

124
server/routes/events.ts Normal file
View file

@ -0,0 +1,124 @@
/**
* Server-Sent Events (SSE) endpoint for real-time push updates.
*
* GET /api/events SSE stream for real-time updates
* POST /api/events/test Debug: broadcast a test event
*
* Event types:
* - memory.changed Memory file was modified
* - tokens.updated Token usage changed
* - status.changed Gateway status changed
* - ping Keep-alive (every 30s)
*/
import { Hono } from 'hono';
import { streamSSE } from 'hono/streaming';
import { EventEmitter } from 'node:events';
// Hono sub-app; mounted by the main server.
const app = new Hono();
// ── Broadcaster (singleton) ──────────────────────────────────────────
/** Envelope delivered to every connected SSE client. */
export interface SSEEvent {
  event: string;
  data: unknown;
  ts: number;
}
/**
 * Process-wide fan-out hub for SSE messages. A single shared EventEmitter
 * lets any module push an update to all connected clients at once.
 */
class SSEBroadcaster extends EventEmitter {
  private static instance: SSEBroadcaster;
  private constructor() {
    super();
    // Each connected client registers one 'message' listener.
    this.setMaxListeners(100);
  }
  static getInstance(): SSEBroadcaster {
    if (!SSEBroadcaster.instance) {
      SSEBroadcaster.instance = new SSEBroadcaster();
    }
    return SSEBroadcaster.instance;
  }
  /** Stamp the payload with the current time and fan it out. */
  broadcast(event: string, data: unknown = {}): void {
    const payload: SSEEvent = { event, data, ts: Date.now() };
    this.emit('message', payload);
  }
}
export const broadcaster = SSEBroadcaster.getInstance();
/** Convenience: broadcast an event to all connected SSE clients. */
export function broadcast(event: string, data: unknown = {}): void {
  broadcaster.broadcast(event, data);
}
// ── SSE stream ───────────────────────────────────────────────────────
const PING_INTERVAL_MS = 30_000;
/**
 * GET /api/events — long-lived SSE stream.
 *
 * Each client subscribes to the broadcaster; a ping every 30s keeps
 * intermediaries from closing the idle connection. The handler resolves
 * only when the client disconnects or a write fails.
 *
 * Fixes over the naive version:
 * - `stream.writeSSE` is async; a bare call inside a sync try/catch never
 *   surfaces rejections, so failed writes are handled via `.catch(...)`.
 * - `pingTimer` is declared before `disconnect()` can run, avoiding a
 *   temporal-dead-zone ReferenceError if a broadcast arrives while the
 *   initial "connected" write is still awaiting.
 */
app.get('/api/events', async (c) => {
  c.header('Content-Type', 'text/event-stream');
  c.header('Cache-Control', 'no-cache');
  c.header('Connection', 'keep-alive');
  c.header('X-Accel-Buffering', 'no');
  return streamSSE(c, async (stream) => {
    let connected = true;
    let resolveDisconnect: (() => void) | undefined;
    let pingTimer: ReturnType<typeof setInterval> | undefined;
    function disconnect() {
      if (!connected) return;
      connected = false;
      if (pingTimer !== undefined) clearInterval(pingTimer);
      broadcaster.off('message', onMessage);
      resolveDisconnect?.();
    }
    const onMessage = (payload: SSEEvent) => {
      if (!connected) return;
      // writeSSE returns a Promise — handle rejection instead of relying
      // on a synchronous try/catch that can never fire.
      stream
        .writeSSE({ event: payload.event, data: JSON.stringify(payload) })
        .catch(() => disconnect());
    };
    broadcaster.on('message', onMessage);
    await stream.writeSSE({
      event: 'connected',
      data: JSON.stringify({ event: 'connected', ts: Date.now() }),
    });
    pingTimer = setInterval(() => {
      if (!connected) { clearInterval(pingTimer); return; }
      stream
        .writeSSE({ event: 'ping', data: JSON.stringify({ event: 'ping', ts: Date.now() }) })
        .catch(() => disconnect());
    }, PING_INTERVAL_MS);
    stream.onAbort(() => disconnect());
    // Keep stream open until client disconnects (no polling needed)
    await new Promise<void>((resolve) => {
      resolveDisconnect = resolve;
      if (!connected) resolve();
    });
  });
});
// ── Debug endpoint (dev only) ────────────────────────────────────────
// Registered only in development: lets you trigger an SSE broadcast by
// hand to verify the stream end-to-end.
if (process.env.NODE_ENV === 'development') {
  app.post('/api/events/test', async (c) => {
    // Tolerate an empty/invalid body so `curl -X POST` works bare.
    const body = await c.req.json().catch(() => ({})) as Record<string, unknown>;
    const event = (body.event as string) || 'test';
    const data = body.data || { message: 'Test broadcast' };
    broadcast(event, data);
    return c.json({ ok: true, event, data });
  });
}
export default app;

View file

@ -0,0 +1,303 @@
/**
* File browser API routes.
*
* Provides directory tree listing and file reading for the workspace
* file browser UI. All paths are relative to the workspace root
* (~/.openclaw/workspace/) and validated against traversal + exclusion rules.
*
* GET /api/files/tree List directory entries (lazy, depth-limited)
* GET /api/files/read Read a text file's content
* PUT /api/files/write Write/update a text file
* @module
*/
import { Hono } from 'hono';
import fs from 'node:fs/promises';
import path from 'node:path';
import {
getWorkspaceRoot,
resolveWorkspacePath,
isExcluded,
isBinary,
MAX_FILE_SIZE,
} from '../lib/file-utils.js';
const app = new Hono();
// ── Types ────────────────────────────────────────────────────────────
/** One node in the tree returned by GET /api/files/tree. */
interface TreeEntry {
  name: string;
  path: string; // relative to workspace root
  type: 'file' | 'directory';
  size?: number; // bytes, files only
  mtime?: number; // epoch ms
  binary?: boolean; // true for binary files
  children?: TreeEntry[] | null; // null = not loaded, [] = empty dir
}
// ── Helpers ──────────────────────────────────────────────────────────
/**
 * List one directory level, recursing while `depth` > 1. Excluded names
 * and dotfiles (except .nerveignore) are skipped; unreadable directories
 * and unstat-able files are silently omitted.
 */
async function listDirectory(
  dirPath: string,
  basePath: string,
  depth: number,
): Promise<TreeEntry[]> {
  let items;
  try {
    items = await fs.readdir(dirPath, { withFileTypes: true });
  } catch {
    return [];
  }
  // Directories first, then case-insensitive alphabetical order.
  items.sort((a, b) => {
    if (a.isDirectory() !== b.isDirectory()) return a.isDirectory() ? -1 : 1;
    return a.name.localeCompare(b.name, undefined, { sensitivity: 'base' });
  });
  const entries: TreeEntry[] = [];
  for (const item of items) {
    if (isExcluded(item.name)) continue;
    if (item.name.startsWith('.') && item.name !== '.nerveignore') continue;
    const relativePath = basePath ? path.join(basePath, item.name) : item.name;
    const fullPath = path.join(dirPath, item.name);
    if (item.isDirectory()) {
      // children === null signals "not loaded yet" to the UI.
      const children = depth > 1
        ? await listDirectory(fullPath, relativePath, depth - 1)
        : null;
      entries.push({ name: item.name, path: relativePath, type: 'directory', children });
    } else if (item.isFile()) {
      try {
        const stat = await fs.stat(fullPath);
        entries.push({
          name: item.name,
          path: relativePath,
          type: 'file',
          size: stat.size,
          mtime: Math.floor(stat.mtimeMs),
          binary: isBinary(item.name) || undefined,
        });
      } catch {
        // Skip files we can't stat (e.g. broken symlinks).
      }
    }
  }
  return entries;
}
// ── GET /api/files/tree ──────────────────────────────────────────────
/**
 * List workspace directory entries.
 * Query: path (subdirectory, default root) and depth (clamped to 1..5).
 */
app.get('/api/files/tree', async (c) => {
  const root = getWorkspaceRoot();
  const subPath = c.req.query('path') || '';
  const requestedDepth = Number(c.req.query('depth')) || 1;
  const depth = Math.min(Math.max(requestedDepth, 1), 5);
  let targetDir = root;
  if (subPath) {
    const resolved = await resolveWorkspacePath(subPath);
    if (!resolved) {
      return c.json({ ok: false, error: 'Invalid path' }, 400);
    }
    targetDir = resolved;
    // The target must exist and be a directory.
    try {
      const stat = await fs.stat(targetDir);
      if (!stat.isDirectory()) {
        return c.json({ ok: false, error: 'Not a directory' }, 400);
      }
    } catch {
      return c.json({ ok: false, error: 'Directory not found' }, 404);
    }
  }
  const entries = await listDirectory(targetDir, subPath, depth);
  return c.json({ ok: true, root: subPath || '.', entries });
});
// ── GET /api/files/read ──────────────────────────────────────────────
/** Read a workspace text file. Binary files and files over 1MB are rejected. */
app.get('/api/files/read', async (c) => {
  const filePath = c.req.query('path');
  if (!filePath) {
    return c.json({ ok: false, error: 'Missing path parameter' }, 400);
  }
  const resolved = await resolveWorkspacePath(filePath);
  if (!resolved) {
    return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
  }
  // Never serve binary content as text.
  if (isBinary(path.basename(resolved))) {
    return c.json({ ok: false, error: 'Binary file', binary: true }, 415);
  }
  let stat;
  try {
    stat = await fs.stat(resolved);
  } catch {
    return c.json({ ok: false, error: 'File not found' }, 404);
  }
  if (!stat.isFile()) {
    return c.json({ ok: false, error: 'Not a file' }, 400);
  }
  if (stat.size > MAX_FILE_SIZE) {
    return c.json({ ok: false, error: `File too large (${(stat.size / 1024).toFixed(0)}KB, max 1MB)` }, 413);
  }
  try {
    const content = await fs.readFile(resolved, 'utf-8');
    return c.json({
      ok: true,
      content,
      size: stat.size,
      mtime: Math.floor(stat.mtimeMs),
    });
  } catch {
    return c.json({ ok: false, error: 'Failed to read file' }, 500);
  }
});
// ── PUT /api/files/write ─────────────────────────────────────────────
/**
 * Write or create a text file inside the workspace.
 *
 * Body: { path, content, expectedMtime? }. When expectedMtime is given,
 * the write is rejected with 409 if the file changed since it was read.
 * Returns the new mtime on success.
 */
app.put('/api/files/write', async (c) => {
  let body: { path?: string; content?: string; expectedMtime?: number };
  try {
    body = await c.req.json();
  } catch {
    return c.json({ ok: false, error: 'Invalid JSON body' }, 400);
  }
  const { path: filePath, content, expectedMtime } = body;
  if (!filePath || typeof filePath !== 'string') {
    return c.json({ ok: false, error: 'Missing path' }, 400);
  }
  if (typeof content !== 'string') {
    return c.json({ ok: false, error: 'Missing or invalid content' }, 400);
  }
  // Measure UTF-8 bytes, not UTF-16 code units: MAX_FILE_SIZE is a byte
  // limit and multi-byte characters would otherwise slip past it.
  if (Buffer.byteLength(content, 'utf-8') > MAX_FILE_SIZE) {
    return c.json({ ok: false, error: 'Content too large (max 1MB)' }, 413);
  }
  const resolved = await resolveWorkspacePath(filePath, { allowNonExistent: true });
  if (!resolved) {
    return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
  }
  if (isBinary(path.basename(resolved))) {
    return c.json({ ok: false, error: 'Cannot write binary files' }, 415);
  }
  // Conflict detection: check mtime if expectedMtime provided
  if (typeof expectedMtime === 'number') {
    try {
      const stat = await fs.stat(resolved);
      const currentMtime = Math.floor(stat.mtimeMs);
      if (currentMtime !== expectedMtime) {
        return c.json({
          ok: false,
          error: 'File was modified since you loaded it',
          currentMtime,
        }, 409);
      }
    } catch {
      // File doesn't exist yet — no conflict possible
    }
  }
  // Create the parent directory and write — both inside the try so a
  // mkdir failure maps to a JSON 500 instead of an unhandled throw.
  try {
    await fs.mkdir(path.dirname(resolved), { recursive: true });
    await fs.writeFile(resolved, content, 'utf-8');
    const stat = await fs.stat(resolved);
    return c.json({
      ok: true,
      mtime: Math.floor(stat.mtimeMs),
    });
  } catch {
    return c.json({ ok: false, error: 'Failed to write file' }, 500);
  }
});
// ── GET /api/files/raw ───────────────────────────────────────────────
// Extension → MIME type map for the raw image endpoint.
const MIME_TYPES: Record<string, string> = {
  '.png': 'image/png',
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.gif': 'image/gif',
  '.webp': 'image/webp',
  '.avif': 'image/avif',
  '.svg': 'image/svg+xml',
  '.ico': 'image/x-icon',
};
// The supported-extension set is derived from the MIME map so the two
// can never drift apart.
const IMAGE_EXTENSIONS = new Set(Object.keys(MIME_TYPES));
/** Check if a file is a supported image. */
export function isImage(name: string): boolean {
  const ext = path.extname(name).toLowerCase();
  return IMAGE_EXTENSIONS.has(ext);
}
/** Serve a raw workspace image (max 10MB) with its proper MIME type. */
app.get('/api/files/raw', async (c) => {
  const filePath = c.req.query('path');
  if (!filePath) {
    return c.json({ ok: false, error: 'Missing path parameter' }, 400);
  }
  const resolved = await resolveWorkspacePath(filePath);
  if (!resolved) {
    return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
  }
  const mime = MIME_TYPES[path.extname(resolved).toLowerCase()];
  if (!mime) {
    return c.json({ ok: false, error: 'Unsupported file type' }, 415);
  }
  try {
    const stat = await fs.stat(resolved);
    if (!stat.isFile()) {
      return c.json({ ok: false, error: 'Not a file' }, 400);
    }
    // Cap at 10MB for images
    if (stat.size > 10_485_760) {
      return c.json({ ok: false, error: 'File too large (max 10MB)' }, 413);
    }
    const buffer = await fs.readFile(resolved);
    return new Response(buffer, {
      headers: {
        'Content-Type': mime,
        'Content-Length': String(stat.size),
        'Cache-Control': 'no-cache',
      },
    });
  } catch {
    return c.json({ ok: false, error: 'Failed to read file' }, 500);
  }
});
export default app;

82
server/routes/files.ts Normal file
View file

@ -0,0 +1,82 @@
/**
* GET /api/files?path=<encoded-path> serve local image files.
*
* Security:
* - Image MIME types only (png, jpg, gif, webp, svg, avif)
* - Directory traversal blocked (resolve + prefix check)
* - Restricted to allowed directory prefixes
*/
import { Hono } from 'hono';
import fs from 'node:fs';
import path from 'node:path';
import os from 'node:os';
import { config } from '../lib/config.js';
const app = new Hono();
// Extension → MIME type map; also acts as the image-only allowlist —
// any extension missing here is rejected outright.
const MIME_MAP: Record<string, string> = {
  '.png': 'image/png',
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.gif': 'image/gif',
  '.webp': 'image/webp',
  '.svg': 'image/svg+xml',
  '.avif': 'image/avif',
};
/** Directories we allow serving files from. */
// Note: config.memoryDir may be falsy; filter(Boolean) drops it then.
function allowedPrefixes(): string[] {
  const home = os.homedir();
  return [
    '/tmp',
    path.join(home, '.openclaw'),
    config.memoryDir,
  ].filter(Boolean);
}
/**
 * GET /api/files?path=… — serve a single local image file.
 *
 * Checks run in order: MIME allowlist, prefix allowlist on the resolved
 * path, then realpath + a second prefix check so a symlink inside an
 * allowed directory cannot point outside it.
 */
app.get('/api/files', async (c) => {
  const rawPath = c.req.query('path');
  if (!rawPath) return c.text('Missing path parameter', 400);
  // Resolve to absolute, blocking traversal
  const resolved = path.resolve(rawPath.replace(/^~/, os.homedir()));
  const ext = path.extname(resolved).toLowerCase();
  // MIME check — images only
  const mime = MIME_MAP[ext];
  if (!mime) return c.text('Not an allowed file type', 403);
  // Directory prefix check
  const prefixes = allowedPrefixes();
  const allowed = prefixes.some((prefix) => resolved.startsWith(prefix + path.sep) || resolved === prefix);
  if (!allowed) return c.text('Access denied', 403);
  // Resolve symlinks and re-check prefix to prevent symlink traversal
  let realPath: string;
  try {
    realPath = await fs.promises.realpath(resolved);
  } catch {
    return c.text('Not found', 404);
  }
  const realAllowed = prefixes.some((prefix) => realPath.startsWith(prefix + path.sep) || realPath === prefix);
  if (!realAllowed) return c.text('Access denied', 403);
  try {
    const data = await fs.promises.readFile(realPath);
    return new Response(data, {
      status: 200,
      headers: {
        'Content-Type': mime,
        'Cache-Control': 'public, max-age=3600',
        'Content-Length': String(data.length),
        // Force download for SVGs to prevent stored XSS via embedded <script> tags
        ...(ext === '.svg' ? { 'Content-Disposition': 'attachment' } : {}),
      },
    });
  } catch {
    return c.text('Failed to read file', 500);
  }
});
export default app;

343
server/routes/gateway.ts Normal file
View file

@ -0,0 +1,343 @@
/**
* Gateway API Routes
*
* GET /api/gateway/models Returns available models via `openclaw models list`.
* Respects allowlist if configured; falls back to all available.
* GET /api/gateway/session-info Returns the current session's runtime info (model, thinking level).
* POST /api/gateway/session-patch Change model/effort for a session via HTTP (reliable fallback).
*
* Response (models): { models: Array<{ id: string; label: string; provider: string }> }
* Response (session-info): { model?: string; thinking?: string }
* Response (session-patch): { ok: boolean; model?: string; thinking?: string; error?: string }
*/
import { Hono } from 'hono';
import { execFile } from 'node:child_process';
import { homedir } from 'node:os';
import { z } from 'zod';
import { invokeGatewayTool } from '../lib/gateway-client.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import { resolveOpenclawBin } from '../lib/openclaw-bin.js';
// Hono sub-app; mounted by the main server.
const app = new Hono();
// Upper bound (ms) for gateway tool invocations and CLI calls below.
const GATEWAY_TIMEOUT_MS = 8_000;
/** One selectable model as presented to the UI model picker. */
export interface GatewayModelInfo {
  id: string;
  label: string;
  provider: string;
}
// ─── Model catalog via `openclaw models list` CLI ──────────────────────────────
/** How long to cache the model catalog (ms). Models don't change often. */
const MODEL_CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes
interface ModelCache {
  models: GatewayModelInfo[];
  fetchedAt: number;
}
let modelCache: ModelCache | null = null;
/** Shape of one entry in the CLI's JSON output. */
interface CliModelEntry {
  key: string;
  name?: string;
  available?: boolean;
}
interface CliModelsOutput {
  models?: CliModelEntry[];
}
/** Parse CLI JSON output into GatewayModelInfo[].
 * When `configuredOnly` is true, include all models regardless of `available` flag
 * (user explicitly configured them). Otherwise filter to available only. */
function parseModelsOutput(stdout: string, configuredOnly = false): GatewayModelInfo[] {
  const data = JSON.parse(stdout) as CliModelsOutput;
  const raw = Array.isArray(data.models) ? data.models : [];
  const out = raw
    .filter((m) => m.key && (configuredOnly || m.available))
    .map(({ key }) => {
      // "provider/rest-of-id" split; a key without a slash is its own
      // provider, and an empty provider segment maps to 'unknown'.
      const slash = key.indexOf('/');
      return {
        id: key,
        label: slash >= 0 ? (key.slice(slash + 1) || key) : key,
        provider: slash >= 0 ? (key.slice(0, slash) || 'unknown') : key,
      };
    });
  return out.sort((a, b) => a.id.localeCompare(b.id));
}
const openclawBin = resolveOpenclawBin();
/** Directory containing the node binary — needed in PATH for `#!/usr/bin/env node` shims. */
const nodeBinDir = process.execPath.replace(/\/node$/, '');
/**
 * Infer the HOME directory for openclaw execution.
 * When the server runs as root but openclaw is installed under a user
 * account (e.g. /home/username/.nvm/...), that user's HOME must be used
 * so openclaw can find its config at ~/.openclaw/config.yaml.
 *
 * Extracts home from paths like:
 *   /home/username/.nvm/...   → /home/username
 *   /Users/username/.nvm/...  → /Users/username
 *
 * Falls back to process.env.HOME if extraction fails.
 */
function inferOpenclawHome(): string {
  // A binary under /home/<user>/… or /Users/<user>/… pins HOME to that user.
  const userRoot = /^(\/home\/[^/]+|\/Users\/[^/]+)/.exec(openclawBin);
  if (userRoot) return userRoot[1];
  // Otherwise run with the current user's home directory.
  return process.env.HOME || homedir();
}
const openclawHome = inferOpenclawHome();
/** Run `openclaw models list` with the given args. */
function runModelsList(args: string[]): Promise<string> {
  const options = {
    timeout: GATEWAY_TIMEOUT_MS,
    maxBuffer: 2 * 1024 * 1024,
    env: {
      ...process.env,
      // Run under the inferred user HOME and with node's bin dir on PATH
      // so `#!/usr/bin/env node` shims resolve.
      HOME: openclawHome,
      PATH: `${nodeBinDir}:${process.env.PATH || '/usr/bin:/bin'}`
    },
  };
  return new Promise((resolve, reject) => {
    execFile(openclawBin, ['models', 'list', ...args], options, (err, stdout) => {
      if (err) {
        reject(err);
      } else {
        resolve(stdout);
      }
    });
  });
}
/**
 * Fetch models available for the model selector.
 *
 * Strategy (works for any OpenClaw install):
 *   1. `openclaw models list --json` — configured/allowlisted models.
 *   2. If that yields none, `openclaw models list --all --json`
 *      filtered to available models.
 *
 * This respects `agents.defaults.models` when configured, and gracefully
 * shows all available models when it isn't.
 */
async function execOpenclawModels(): Promise<GatewayModelInfo[]> {
  try {
    // Configured models first — included regardless of `available`,
    // since the user explicitly allowlisted them.
    const configured = parseModelsOutput(await runModelsList(['--json']), true);
    if (configured.length > 0) return configured;
    // No allowlist: fall back to every available model.
    const allModels = parseModelsOutput(await runModelsList(['--all', '--json']), false);
    if (allModels.length === 0) {
      console.warn('[gateway/models] openclaw models list returned 0 models.',
        `Binary: ${openclawBin}, PATH includes: ${nodeBinDir}`);
    }
    return allModels;
  } catch (err) {
    console.warn('[gateway/models] openclaw models list failed.',
      `Binary: ${openclawBin}, Error: ${(err as Error).message}`);
    return [];
  }
}
/** Get models from cache or fetch fresh. */
async function getModelCatalog(): Promise<GatewayModelInfo[]> {
  const cached = modelCache;
  if (cached && Date.now() - cached.fetchedAt < MODEL_CACHE_TTL_MS) {
    return cached.models;
  }
  const models = await execOpenclawModels();
  // Cache only non-empty results so a transient failure is retried.
  if (models.length > 0) {
    modelCache = { models, fetchedAt: Date.now() };
  }
  return models;
}
/** GET /api/gateway/models — cached model catalog for the UI picker. */
app.get('/api/gateway/models', rateLimitGeneral, async (c) => {
  return c.json({ models: await getModelCatalog() });
});
/**
 * Extract the current session's thinking/effort level from gateway status.
 * Looks in common locations: top-level thinking, agent.thinking,
 * config.thinking, and finally falls back to parsing the runtime string
 * (e.g. "thinking=medium"). Returns the lowercased level, or null.
 */
function extractThinking(payload: unknown): string | null {
  if (!payload || typeof payload !== 'object') return null;
  const p = payload as Record<string, unknown>;
  const agentObj = p.agent as Record<string, unknown> | undefined;
  const cfgObj = p.config as Record<string, unknown> | undefined;
  // Direct fields, checked in priority order.
  for (const value of [p.thinking, agentObj?.thinking, cfgObj?.thinking]) {
    if (typeof value === 'string' && value.trim()) {
      return value.trim().toLowerCase();
    }
  }
  // Parse from runtime string (e.g. "thinking=medium")
  const runtime = p.runtime || agentObj?.runtime;
  if (typeof runtime === 'string') {
    const found = /thinking=(\w+)/.exec(runtime);
    if (found) return found[1].toLowerCase();
  }
  return null;
}
/**
 * Extract the current session's model from gateway status.
 * Checks model/defaultModel at the top level, then agent.model and
 * config.model, then parses the runtime string (e.g. "model=provider/x").
 */
function extractSessionModel(payload: unknown): string | null {
  if (!payload || typeof payload !== 'object') return null;
  const p = payload as Record<string, unknown>;
  const agentObj = p.agent as Record<string, unknown> | undefined;
  const cfgObj = p.config as Record<string, unknown> | undefined;
  for (const value of [p.model, p.defaultModel, agentObj?.model, cfgObj?.model]) {
    if (typeof value === 'string' && value.trim()) {
      return value.trim();
    }
  }
  // Parse from runtime string (e.g. "model=anthropic/claude-opus-4-6")
  const runtime = p.runtime || agentObj?.runtime;
  if (typeof runtime === 'string') {
    const found = /model=(\S+)/.exec(runtime);
    if (found) return found[1];
  }
  return null;
}
/**
 * GET /api/gateway/session-info — model + thinking level for a session.
 * Primary source is sessions_list (per-session state); global status
 * tools are a best-effort fallback that may return global defaults.
 */
app.get('/api/gateway/session-info', rateLimitGeneral, async (c) => {
  const sessionKey = c.req.query('sessionKey') || 'agent:main:main';
  const info: { model?: string; thinking?: string } = {};
  try {
    const result = await invokeGatewayTool('sessions_list', { activeMinutes: 120, limit: 50 }, GATEWAY_TIMEOUT_MS) as Record<string, unknown>;
    type SessionRow = { sessionKey?: string; key?: string; model?: string; thinking?: string; thinkingLevel?: string };
    const sessions = (result?.sessions as SessionRow[]) || [];
    const row = sessions.find((s) => (s.sessionKey || s.key) === sessionKey);
    if (row) {
      if (row.model) info.model = row.model;
      const level = row.thinking || row.thinkingLevel;
      if (level) info.thinking = level.toLowerCase();
    }
    if (info.model || info.thinking) return c.json(info);
  } catch (err) {
    console.warn(`[gateway/session-info] sessions_list failed:`, (err as Error).message);
  }
  // Fallback: try global status tools (less accurate — returns global defaults, not per-session)
  for (const tool of ['gateway_status', 'status', 'session_status']) {
    try {
      const result = await invokeGatewayTool(tool, {}, GATEWAY_TIMEOUT_MS);
      const thinking = extractThinking(result);
      const model = extractSessionModel(result);
      if (thinking && !info.thinking) info.thinking = thinking;
      if (model && !info.model) info.model = model;
      if (info.thinking && info.model) return c.json(info);
    } catch (err) {
      console.warn(`[gateway/session-info] ${tool} failed:`, (err as Error).message);
    }
  }
  return c.json(info);
});
// ─── Session patch via HTTP (reliable fallback for WS RPC) ─────────────────────
// Body for POST /api/gateway/session-patch. All fields are optional;
// a missing sessionKey defaults to the main agent session in the handler.
const sessionPatchSchema = z.object({
  sessionKey: z.string().max(200).optional(),
  model: z.string().max(200).optional(),
  thinkingLevel: z.string().max(50).nullable().optional(),
});
type SessionPatchBody = z.infer<typeof sessionPatchSchema>;
/**
 * POST /api/gateway/session-patch
 *
 * Changes model and/or thinking level for a session. Uses the `session_status`
 * tool for model changes (proven reliable) and `sessions_list` + gateway WS RPC
 * fallback for thinking level.
 *
 * This exists as a reliable HTTP fallback when the frontend's direct WS RPC
 * (`sessions.patch`) fails due to proxy issues, reconnection races, etc.
 *
 * Responses:
 *   200 { ok: true, model?, thinking? }  — applied
 *   400 { ok: false, error }             — malformed body
 *   501 { ok: false, error }             — thinking-only change (unsupported here)
 *   502 { ok: false, error }             — gateway rejected the model change
 */
app.post('/api/gateway/session-patch', rateLimitGeneral, async (c) => {
  let body: SessionPatchBody;
  try {
    const raw = await c.req.json();
    const parsed = sessionPatchSchema.safeParse(raw);
    if (!parsed.success) {
      return c.json({ ok: false, error: parsed.error.issues[0]?.message || 'Invalid body' }, 400);
    }
    body = parsed.data;
  } catch {
    return c.json({ ok: false, error: 'Invalid JSON body' }, 400);
  }
  const sessionKey = body.sessionKey || 'agent:main:main';
  const result: { ok: boolean; model?: string; thinking?: string; error?: string } = { ok: true };
  // Change model via session_status tool (reliable — uses HTTP tools/invoke)
  if (body.model) {
    try {
      const statusResult = await invokeGatewayTool(
        'session_status',
        { model: body.model, sessionKey },
        GATEWAY_TIMEOUT_MS,
      ) as Record<string, unknown>;
      // Extract confirmed model from response
      const details = statusResult?.details as Record<string, unknown> | undefined;
      if (details?.changedModel === false && details?.statusText) {
        // session_status returns changedModel:false when model is already set or change failed
        // Parse the model from status text as confirmation
        const statusText = details.statusText as string;
        const modelMatch = statusText.match(/Model:\s*(\S+)/);
        // Fall back to the requested model when the status text is unparseable.
        result.model = modelMatch?.[1] || body.model;
      } else {
        result.model = body.model;
      }
    } catch (err) {
      console.warn('[gateway/session-patch] session_status model change failed:', (err as Error).message);
      result.ok = false;
      result.error = `Model change failed: ${(err as Error).message}`;
      return c.json(result, 502);
    }
  }
  // Thinking level changes are NOT supported via this HTTP endpoint.
  // The gateway's session_status tool doesn't accept thinkingLevel.
  // The frontend should use the WS RPC (sessions.patch) for thinking changes.
  if (body.thinkingLevel !== undefined && !body.model) {
    return c.json({ ok: false, error: 'Thinking level changes are only supported via WebSocket RPC' }, 501);
  } else if (body.thinkingLevel !== undefined) {
    // Model change succeeded above, but note thinking was not applied
    result.thinking = undefined;
  }
  return c.json(result);
});
export default app;

View file

@ -0,0 +1,71 @@
/** Tests for the GET /health endpoint and its gateway probe. */
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { Hono } from 'hono';
describe('GET /health', () => {
  let originalFetch: typeof globalThis.fetch;
  beforeEach(() => {
    // Stash the real fetch (tests replace globalThis.fetch with stubs) and
    // clear the module registry so health.js is re-imported fresh in each
    // test — after the stub has been installed.
    originalFetch = globalThis.fetch;
    vi.resetModules();
  });
  afterEach(() => {
    // Restore the real fetch and undo all vi.fn() stubs between tests.
    globalThis.fetch = originalFetch;
    vi.restoreAllMocks();
  });
  // Import the route module fresh and mount it on a bare Hono app.
  // Deliberately a helper (not a top-level import) so the import happens
  // AFTER each test has stubbed globalThis.fetch.
  async function importHealthApp() {
    const mod = await import('./health.js');
    const app = new Hono();
    app.route('/', mod.default);
    return app;
  }
  it('should return status ok and uptime', async () => {
    // Mock fetch to simulate gateway being reachable
    globalThis.fetch = vi.fn().mockResolvedValue({ ok: true });
    const app = await importHealthApp();
    const res = await app.request('/health');
    expect(res.status).toBe(200);
    const json = (await res.json()) as Record<string, unknown>;
    expect(json.status).toBe('ok');
    expect(typeof json.uptime).toBe('number');
    expect(json.gateway).toBe('ok');
  });
  it('should report gateway unreachable when fetch fails', async () => {
    globalThis.fetch = vi.fn().mockRejectedValue(new Error('ECONNREFUSED'));
    const app = await importHealthApp();
    const res = await app.request('/health');
    // Endpoint itself stays healthy (200) even when the gateway probe fails.
    expect(res.status).toBe(200);
    const json = (await res.json()) as Record<string, unknown>;
    expect(json.status).toBe('ok');
    expect(json.gateway).toBe('unreachable');
  });
  it('should report gateway unreachable when response is not ok', async () => {
    // Gateway answered, but with a non-2xx status.
    globalThis.fetch = vi.fn().mockResolvedValue({ ok: false, status: 503 });
    const app = await importHealthApp();
    const res = await app.request('/health');
    const json = (await res.json()) as Record<string, unknown>;
    expect(json.gateway).toBe('unreachable');
  });
  it('should call gateway health endpoint with abort signal', async () => {
    globalThis.fetch = vi.fn().mockResolvedValue({ ok: true });
    const app = await importHealthApp();
    await app.request('/health');
    expect(globalThis.fetch).toHaveBeenCalledTimes(1);
    const callArgs = (globalThis.fetch as ReturnType<typeof vi.fn>).mock.calls[0];
    expect(callArgs[0]).toContain('/health');
    // The route wires its timeout through an AbortSignal option.
    expect(callArgs[1]).toHaveProperty('signal');
  });
});

25
server/routes/health.ts Normal file
View file

@ -0,0 +1,25 @@
/**
* GET /health Health check endpoint.
* Includes optional gateway connectivity probe.
*/
import { Hono } from 'hono';
import { config } from '../lib/config.js';
const app = new Hono();

/**
 * GET /health — liveness endpoint.
 * Always answers 200 with `status: 'ok'`; the `gateway` field reflects a
 * short probe of the gateway's own /health endpoint.
 */
app.get('/health', async (c) => {
  const gateway = await probeGateway();
  return c.json({ status: 'ok', uptime: process.uptime(), gateway });
});

/** Probe the gateway's /health with a 3 s timeout; never throws. */
async function probeGateway(): Promise<'ok' | 'unreachable'> {
  try {
    const res = await fetch(`${config.gatewayUrl}/health`, {
      signal: AbortSignal.timeout(3000),
    });
    return res.ok ? 'ok' : 'unreachable';
  } catch {
    // gateway unreachable — not a server failure
    return 'unreachable';
  }
}
export default app;

561
server/routes/memories.ts Normal file
View file

@ -0,0 +1,561 @@
/**
* Memory API Routes
*
* GET /api/memories Parsed memory data from MEMORY.md + daily files
* POST /api/memories Store a new memory via gateway RPC
* DELETE /api/memories Delete a memory via gateway RPC
*
* Response: Array of { type: "section"|"item"|"daily", text, date? }
*/
import { Hono } from 'hono';
import { zValidator } from '@hono/zod-validator';
import { z } from 'zod';
import fs from 'node:fs/promises';
import path from 'node:path';
import { config } from '../lib/config.js';
import { invokeGatewayTool } from '../lib/gateway-client.js';
import { readText } from '../lib/files.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import { broadcast } from './events.js';
import { withMutex } from '../lib/mutex.js';
import type { MemoryItem } from '../types.js';
const app = new Hono();
/* Gateway tool invocation via shared client */
/**
 * Validation schema for creating a memory (POST /api/memories).
 * `text` must be non-blank after trimming; `importance` is a 0..1 weight
 * forwarded to the gateway's vector store.
 */
const createMemorySchema = z.object({
  text: z
    .string()
    .min(1, 'Text is required')
    .max(10000, 'Text too long (max 10000 chars)')
    .refine((s) => s.trim().length > 0, 'Text cannot be empty'),
  section: z.string().max(200, 'Section name too long').optional(),
  category: z.enum(['preference', 'fact', 'decision', 'entity', 'other']).optional(),
  importance: z.number().min(0).max(1).optional(),
});
/**
 * Validation schema for deleting a memory (DELETE /api/memories).
 * `query` is a section title (type 'section'/'daily') or the exact item
 * text (type 'item'); `date` selects a daily file for type 'daily'.
 */
const deleteMemorySchema = z.object({
  query: z.string().min(1, 'Query is required').max(1000, 'Query too long'),
  type: z.enum(['section', 'item', 'daily']).optional(),
  date: z.string().regex(/^\d{4}-\d{2}-\d{2}$/, 'Date must be YYYY-MM-DD').optional(),
});
/**
 * Collapse runs of consecutive blank (whitespace-only) lines down to a single
 * blank line. The first blank of each run is kept, including a leading blank.
 */
function cleanBlankLines(lines: string[]): string[] {
  const out: string[] = [];
  for (const line of lines) {
    const prev = out[out.length - 1];
    const consecutiveBlanks = line.trim() === '' && prev !== undefined && prev.trim() === '';
    if (!consecutiveBlanks) out.push(line);
  }
  return out;
}
/**
 * Remove a `## <sectionTitle>` section — the header plus every line up to
 * (but not including) the next `## ` header — from the given lines.
 *
 * @returns The remaining lines, or null when no such section exists.
 */
function deleteSectionFromLines(lines: string[], sectionTitle: string): string[] | null {
  const isHeader = (line: string) => line.trim().startsWith('## ');
  const start = lines.findIndex(
    (line) => isHeader(line) && line.trim().slice(3).trim() === sectionTitle,
  );
  if (start === -1) return null; // Section not found
  // Section runs until the next header, or end of file.
  let end = start + 1;
  while (end < lines.length && !isHeader(lines[end])) end++;
  return [...lines.slice(0, start), ...lines.slice(end)];
}
/**
 * Remove every bullet / numbered-list line whose cleaned text (list marker,
 * `**` bold markers and backticks stripped) exactly equals `itemText`.
 *
 * @returns The filtered lines, or null when nothing matched.
 */
function deleteItemFromLines(lines: string[], itemText: string): string[] | null {
  const matchesItem = (line: string): boolean => {
    const trimmed = line.trim();
    // Only bullet points ("- x", "* x") or numbered entries ("1. x") qualify.
    const isListEntry = /^[-*]\s+/.test(trimmed) || /^\d+\.\s/.test(trimmed);
    if (!isListEntry) return false;
    const cleaned = trimmed
      .replace(/^[-*]\s+|^\d+\.\s+/, '')
      .replace(/\*\*/g, '')
      .replace(/`/g, '');
    return cleaned.trim() === itemText;
  };
  const kept = lines.filter((line) => !matchesItem(line));
  return kept.length === lines.length ? null : kept; // null = nothing removed
}
/** Options for deleteMemory(). */
interface DeleteOptions {
  text: string; // Section title (types 'section'/'daily') or exact item text (type 'item')
  type?: 'section' | 'item' | 'daily';
  date?: string; // For daily files: YYYY-MM-DD
}
/**
 * Delete from MEMORY.md or daily files.
 * - type 'section': remove the section header AND all items until the next section
 * - type 'item': remove just that one line from MEMORY.md
 * - type 'daily': remove the section from the daily file (memory/YYYY-MM-DD.md)
 *
 * Never throws — failures are logged and reported as { deleted: false }.
 */
async function deleteMemory(opts: DeleteOptions): Promise<{ deleted: boolean; file?: string }> {
  const { text, type, date } = opts;
  const notDeleted = { deleted: false };
  try {
    // Daily deletions target memory/<date>.md; everything else edits MEMORY.md.
    const filePath =
      type === 'daily' && date ? path.join(config.memoryDir, `${date}.md`) : config.memoryPath;
    const content = await readText(filePath);
    if (!content) return notDeleted;

    const lines = content.split('\n');
    const remaining =
      type === 'section' || type === 'daily'
        ? deleteSectionFromLines(lines, text) // whole section: header + body
        : deleteItemFromLines(lines, text); // single bullet point
    if (!remaining) return notDeleted; // nothing matched

    // Collapse blank-line runs left behind by the removal, then persist.
    await fs.writeFile(filePath, cleanBlankLines(remaining).join('\n'), 'utf-8');
    return { deleted: true, file: path.basename(filePath) };
  } catch (err) {
    console.error('[memories] Failed to delete:', (err as Error).message);
    return notDeleted;
  }
}
// invokeGatewayTool imported from shared module
/**
 * Append a bullet point to MEMORY.md under the given section heading.
 * If the section doesn't exist, create it at the end of the file.
 *
 * @param text - Memory text, written verbatim as `- <text>`.
 * @param section - Section title without the `## ` prefix. Matched
 *   case-insensitively against existing headers (see note below).
 */
async function appendToMemoryFile(text: string, section: string): Promise<void> {
  const filePath = config.memoryPath;
  let content = '';
  try {
    content = await fs.readFile(filePath, 'utf-8');
  } catch {
    // File doesn't exist yet — start fresh
    content = '# MEMORY.md\n';
  }
  const lines = content.split('\n');
  const sectionHeader = `## ${section}`;
  // Find the section
  let sectionStart = -1;
  let sectionEnd = lines.length; // default: end of file
  for (let i = 0; i < lines.length; i++) {
    const trimmed = lines[i].trim();
    if (sectionStart === -1) {
      // NOTE(review): case-INsensitive match here, unlike the exact match in
      // deleteSectionFromLines — "## Facts" and "## facts" are treated as one
      // section when appending but not when deleting. Confirm intended.
      if (trimmed.toLowerCase() === sectionHeader.toLowerCase()) {
        sectionStart = i;
      }
    } else {
      // Found section, look for next section header
      if (trimmed.startsWith('## ')) {
        sectionEnd = i;
        break;
      }
    }
  }
  const bulletLine = `- ${text}`;
  if (sectionStart === -1) {
    // Section doesn't exist — append new section at end of file.
    // trimEnd + explicit newlines guarantee exactly one blank line before
    // the new header and a trailing newline after the bullet.
    const trimmedEnd = content.trimEnd();
    const newContent = `${trimmedEnd}\n\n${sectionHeader}\n${bulletLine}\n`;
    await fs.writeFile(filePath, newContent, 'utf-8');
  } else {
    // Section exists — find the last non-blank line within the section to append after
    let insertAt = sectionEnd;
    // Walk backwards from sectionEnd to find last content line in section;
    // if the section body is all blank, insertAt stays at sectionEnd.
    for (let i = sectionEnd - 1; i > sectionStart; i--) {
      if (lines[i].trim() !== '') {
        insertAt = i + 1;
        break;
      }
    }
    // Insert the bullet line
    lines.splice(insertAt, 0, bulletLine);
    const cleaned = cleanBlankLines(lines);
    await fs.writeFile(filePath, cleaned.join('\n'), 'utf-8');
  }
}
/**
 * GET /api/memories — parsed view of MEMORY.md plus recent daily-file headers.
 * Response: array of { type: 'section'|'item'|'daily', text, date? }.
 */
app.get('/api/memories', rateLimitGeneral, async (c) => {
  const memories: MemoryItem[] = [];

  // MEMORY.md: every `## ` header becomes a section; every list entry an item.
  const content = await readText(config.memoryPath);
  if (content) {
    for (const rawLine of content.split('\n')) {
      const line = rawLine.trim();
      if (line.startsWith('## ')) {
        memories.push({ type: 'section', text: line.slice(3).trim() });
        continue;
      }
      const isListEntry = /^[-*]\s+/.test(line) || /^\d+\.\s/.test(line);
      if (!isListEntry) continue;
      // Strip the list marker plus `**` bold markers and backticks.
      const text = line
        .replace(/^[-*]\s+|^\d+\.\s+/, '')
        .replace(/\*\*/g, '')
        .replace(/`/g, '');
      if (text.length > 0) {
        memories.push({ type: 'item', text });
      }
    }
  }

  // The 7 most recent daily files (filename sort, newest first): headers only.
  try {
    const dailyFiles = (await fs.readdir(config.memoryDir))
      .filter((name) => name.endsWith('.md'))
      .sort()
      .reverse()
      .slice(0, 7);
    for (const name of dailyFiles) {
      const daily = await readText(path.join(config.memoryDir, name));
      for (const rawLine of daily.split('\n')) {
        const line = rawLine.trim();
        if (line.startsWith('## ')) {
          memories.push({
            type: 'daily',
            date: name.replace('.md', ''),
            text: line.slice(3).trim(),
          });
        }
      }
    }
  } catch {
    // Memory dir may not exist — that's fine
  }
  return c.json(memories);
});
/**
 * GET /api/memories/section — raw markdown body of one section.
 *
 * Query params:
 * - title: Section title (required)
 * - date: For daily files, the date (YYYY-MM-DD). Omit for MEMORY.md
 *
 * Returns: { ok: true, content: string } or { ok: false, error: string }
 */
app.get('/api/memories/section', rateLimitGeneral, async (c) => {
  const title = c.req.query('title');
  const date = c.req.query('date');
  if (!title) {
    return c.json({ ok: false, error: 'Missing title parameter' }, 400);
  }
  // Strict date check doubles as path-traversal protection for the filename.
  if (date && !/^\d{4}-\d{2}-\d{2}$/.test(date)) {
    return c.json({ ok: false, error: 'Invalid date format (expected YYYY-MM-DD)' }, 400);
  }
  try {
    const filePath = date ? path.join(config.memoryDir, `${date}.md`) : config.memoryPath;
    const content = await readText(filePath);
    if (!content) {
      return c.json({ ok: false, error: 'File not found' }, 404);
    }

    const lines = content.split('\n');
    const isHeader = (line: string) => line.trim().startsWith('## ');
    const start = lines.findIndex(
      (line) => isHeader(line) && line.trim().slice(3).trim() === title,
    );
    if (start === -1) {
      return c.json({ ok: false, error: 'Section not found' }, 404);
    }
    // Section body runs until the next header or end of file.
    let end = start + 1;
    while (end < lines.length && !isHeader(lines[end])) end++;

    // Body only — the header line itself is excluded.
    const sectionContent = lines.slice(start + 1, end).join('\n').trim();
    return c.json({ ok: true, content: sectionContent });
  } catch (err) {
    console.error('[memories] GET section error:', (err as Error).message);
    return c.json({ ok: false, error: 'Failed to read memory section' }, 500);
  }
});
/**
 * POST /api/memories Store a new memory
 *
 * Body: { text: string, section?: string, category?: string, importance?: number }
 *
 * Writes the memory as a bullet point to MEMORY.md under the given section
 * (the authoritative, user-visible copy), then best-effort mirrors it into
 * the gateway's LanceDB for vector search, then notifies SSE clients.
 */
app.post(
  '/api/memories',
  rateLimitGeneral,
  zValidator('json', createMemorySchema, (result, c) => {
    if (!result.success) {
      const msg = result.error.issues[0]?.message || 'Invalid request';
      return c.json({ ok: false, error: msg }, 400);
    }
  }),
  async (c) => {
    try {
      const body = c.req.valid('json');
      const trimmedText = body.text.trim();
      // Strip newlines so a crafted section name can't inject extra markdown lines.
      const safeSection = (body.section ?? '').replace(/[\r\n]/g, ' ').trim();
      const section = safeSection || 'General';
      // 1. Write to MEMORY.md (primary display source). Serialized under the
      //    shared 'memory-file' mutex so concurrent writers can't interleave
      //    their read-modify-write cycles.
      await withMutex('memory-file', () => appendToMemoryFile(trimmedText, section));
      // 2. Also store in gateway LanceDB (for vector search) — best effort
      try {
        await invokeGatewayTool('memory_store', {
          text: trimmedText,
          category: body.category || 'other',
          importance: body.importance ?? 0.7,
        });
      } catch (err) {
        // Gateway store is best-effort; file write is what matters
        console.warn('[memories] Gateway memory_store failed (non-fatal):', (err as Error).message);
      }
      // Broadcast memory change to all SSE clients
      broadcast('memory.changed', { source: 'api', action: 'create', section });
      return c.json({ ok: true, result: { written: true, section } });
    } catch (err) {
      console.error('[memories] POST error:', (err as Error).message);
      return c.json({ ok: false, error: 'Failed to store memory' }, 500);
    }
  },
);
/**
 * PUT /api/memories/section Update a section's content
 *
 * Body: { title: string, content: string, date?: string }
 *
 * Replaces everything between the `## <title>` header and the next `## `
 * header; the header line itself is preserved.
 */
const updateSectionSchema = z.object({
  title: z.string().min(1, 'Title is required').max(200, 'Title too long'),
  content: z.string().max(50000, 'Content too long'),
  date: z.string().regex(/^\d{4}-\d{2}-\d{2}$/, 'Date must be YYYY-MM-DD').optional(),
});
app.put(
  '/api/memories/section',
  rateLimitGeneral,
  zValidator('json', updateSectionSchema, (result, c) => {
    if (!result.success) {
      const msg = result.error.issues[0]?.message || 'Invalid request';
      return c.json({ ok: false, error: msg }, 400);
    }
  }),
  async (c) => {
    try {
      const { title, content, date } = c.req.valid('json');
      // Determine which file to edit: daily file when a date is given,
      // otherwise MEMORY.md. (date is schema-validated, so it is path-safe.)
      let filePath: string;
      if (date) {
        filePath = path.join(config.memoryDir, `${date}.md`);
      } else {
        filePath = config.memoryPath;
      }
      // The whole read-modify-write cycle runs under the shared file mutex.
      // The closure returns a discriminated result (`ok` literal) so the
      // HTTP status mapping stays outside the critical section.
      const result = await withMutex('memory-file', async () => {
        const fileContent = await readText(filePath);
        if (!fileContent) {
          return { ok: false as const, error: 'File not found', status: 404 as const };
        }
        const lines = fileContent.split('\n');
        let sectionStart = -1;
        let sectionEnd = lines.length;
        // Find the section (exact title match on `## ` headers)
        for (let i = 0; i < lines.length; i++) {
          const trimmed = lines[i].trim();
          if (sectionStart === -1) {
            if (trimmed.startsWith('## ') && trimmed.slice(3).trim() === title) {
              sectionStart = i;
            }
          } else {
            if (trimmed.startsWith('## ')) {
              sectionEnd = i;
              break;
            }
          }
        }
        if (sectionStart === -1) {
          return { ok: false as const, error: 'Section not found', status: 404 as const };
        }
        // Replace the section content (keep the header, replace everything until next section)
        const newLines = [
          ...lines.slice(0, sectionStart + 1), // Everything before section + section header
          content, // New content
          '', // Blank line before next section
          ...lines.slice(sectionEnd), // Everything from next section onwards
        ];
        // Clean up multiple consecutive blank lines
        const cleaned = cleanBlankLines(newLines);
        await fs.writeFile(filePath, cleaned.join('\n'), 'utf-8');
        return { ok: true as const };
      });
      if (!result.ok) {
        return c.json({ ok: false, error: result.error }, result.status);
      }
      // Broadcast memory change to all SSE clients
      broadcast('memory.changed', {
        source: 'api',
        action: 'update',
        file: path.basename(filePath),
        section: title
      });
      return c.json({
        ok: true,
        result: {
          updated: true,
          file: path.basename(filePath),
          section: title
        }
      });
    } catch (err) {
      console.error('[memories] PUT section error:', (err as Error).message);
      return c.json({ ok: false, error: 'Failed to update memory section' }, 500);
    }
  },
);
/**
 * DELETE /api/memories Delete a memory from MEMORY.md (or a daily file)
 *
 * Body: { query: string, type?: 'section' | 'item' | 'daily', date?: string }
 */
app.delete(
  '/api/memories',
  rateLimitGeneral,
  zValidator('json', deleteMemorySchema, (result, c) => {
    if (result.success) return;
    const msg = result.error.issues[0]?.message || 'Invalid request';
    return c.json({ ok: false, error: msg }, 400);
  }),
  async (c) => {
    try {
      const { query, type, date } = c.req.valid('json');
      // Serialize with other MEMORY.md writers via the shared mutex.
      const outcome = await withMutex('memory-file', () =>
        deleteMemory({ text: query, type, date }),
      );
      if (!outcome.deleted) {
        const file = type === 'daily' ? `memory/${date}.md` : 'MEMORY.md';
        return c.json({ ok: false, error: `Memory not found in ${file}` }, 404);
      }
      // Broadcast memory change to all SSE clients
      broadcast('memory.changed', {
        source: 'api',
        action: 'delete',
        file: outcome.file
      });
      return c.json({
        ok: true,
        result: {
          deleted: 1,
          source: 'file',
          file: outcome.file,
          type: type || 'item'
        }
      });
    } catch (err) {
      console.error('[memories] DELETE error:', (err as Error).message);
      return c.json({ ok: false, error: 'Failed to delete memory' }, 500);
    }
  },
);
export default app;

View file

@ -0,0 +1,83 @@
/**
* GET /api/server-info Server time and gateway uptime info.
*
* Returns `serverTime` (epoch ms), `gatewayStartedAt` (epoch ms), `timezone`,
* and `agentName` so the frontend can show a real-time server clock and true
* gateway uptime. Gateway start time is derived from `/proc` on Linux and
* cached for 30 s.
* @module
*/
import { Hono } from 'hono';
import { execFile } from 'node:child_process';
import fs from 'node:fs';
import os from 'node:os';
import { config } from '../lib/config.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
const app = new Hono();
// /proc parsing below only works on Linux.
const isLinux = os.platform() === 'linux';
/** Tick rate on Linux — SC_CLK_TCK is virtually always 100 (hard-coded, not read at runtime; TODO confirm if targeting exotic kernels) */
const CLK_TCK = 100;
// Cache gateway start time (only changes on restart)
let gatewayStartedAtCache: number | null = null;
let cacheTs = 0; // epoch ms of the last successful cache fill
const CACHE_TTL = 30_000; // 30 s
/**
 * Determine when the OpenClaw gateway process started (Linux only).
 *
 * Uses `pgrep` to find the gateway PID, then reads `/proc/<pid>/stat`
 * to extract the start time in clock ticks, converting to epoch ms
 * via the system boot time (`btime`) from `/proc/stat`. Result is cached
 * for 30 s; on failure, a previously cached value is returned if present.
 *
 * @returns Epoch ms of gateway start, or `null` on non-Linux / if not running.
 */
async function getGatewayStartedAt(): Promise<number | null> {
  if (!isLinux) return null; // /proc and pgrep are Linux-only
  const now = Date.now();
  if (gatewayStartedAtCache && now - cacheTs < CACHE_TTL) return gatewayStartedAtCache;
  try {
    // pgrep -f matches against the full command line; the first PID wins.
    // NOTE(review): the pattern is truncated ('openclaw-gatewa') — presumably
    // deliberate, but confirm it cannot match unrelated processes.
    const pidStr = await new Promise<string>((resolve, reject) => {
      execFile('pgrep', ['-f', 'openclaw-gatewa'], { timeout: 2000 }, (err, stdout) => {
        // pgrep exits non-zero when nothing matches — lands here as err.
        if (err) return reject(err);
        resolve(stdout.trim().split('\n')[0] || '');
      });
    });
    if (!pidStr) return null;
    const stat = await fs.promises.readFile(`/proc/${pidStr}/stat`, 'utf8');
    // Parse starttime (field 22, 0-indexed 21) after the comm field.
    // comm can contain spaces/parens, so find the last ')' first.
    const afterComm = stat.slice(stat.lastIndexOf(')') + 2);
    const startTimeTicks = parseInt(afterComm.split(' ')[19], 10); // field 22 = index 19 after pid+comm
    // btime is the system boot time in epoch seconds; starttime above is
    // measured in clock ticks since that boot.
    const procStat = await fs.promises.readFile('/proc/stat', 'utf8');
    const btimeLine = procStat.split('\n').find((l) => l.startsWith('btime'));
    if (!btimeLine) return null;
    const btime = parseInt(btimeLine.split(' ')[1], 10);
    const startSecs = btime + startTimeTicks / CLK_TCK;
    gatewayStartedAtCache = Math.round(startSecs * 1000);
    cacheTs = now;
    return gatewayStartedAtCache;
  } catch {
    return gatewayStartedAtCache; // return stale if available (null otherwise)
  }
}
app.get('/api/server-info', rateLimitGeneral, async (c) => {
return c.json({
serverTime: Date.now(),
gatewayStartedAt: await getGatewayStartedAt(),
timezone: Intl.DateTimeFormat().resolvedOptions().timeZone,
agentName: config.agentName,
});
});
export default app;

95
server/routes/sessions.ts Normal file
View file

@ -0,0 +1,95 @@
/**
* Sessions API Routes
*
* GET /api/sessions/:id/model Read the actual model used in a session from its transcript.
*
* The gateway's sessions.list returns the agent default model, not the model
* actually used in a cron-run session (where payload.model overrides it).
* This endpoint reads the session transcript to find the real model.
*/
import { Hono } from 'hono';
import { createReadStream } from 'node:fs';
import { createInterface } from 'node:readline';
import { join } from 'node:path';
import { access, readdir } from 'node:fs/promises';
import { config } from '../lib/config.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
const app = new Hono();

/** Resolve the transcript path for a session ID, checking both active and deleted files. */
async function findTranscript(sessionId: string): Promise<string | null> {
  const dir = config.sessionsDir;
  const activePath = join(dir, `${sessionId}.jsonl`);
  try {
    await access(activePath);
    return activePath;
  } catch {
    // Fall through: one-shot cron transcripts get renamed to *.jsonl.deleted*.
  }
  let candidates: string[];
  try {
    candidates = await readdir(dir);
  } catch {
    return null; // sessions dir doesn't exist
  }
  const deleted = candidates.find((name) => name.startsWith(`${sessionId}.jsonl.deleted`));
  return deleted ? join(dir, deleted) : null;
}
/**
 * Scan the start of a JSONL transcript for a `model_change` entry and return
 * its `modelId`. Only the first 10 lines are examined — the entry is always
 * written near the top. Resolves null when absent, malformed, or unreadable.
 */
async function readModelFromTranscript(filePath: string): Promise<string | null> {
  return new Promise((resolve) => {
    const stream = createReadStream(filePath, { encoding: 'utf-8' });
    const reader = createInterface({ input: stream, crlfDelay: Infinity });
    let seen = 0;
    let settled = false;
    // Single exit point: resolve once, then tear down the stream.
    const finish = (model: string | null) => {
      if (settled) return;
      settled = true;
      reader.close();
      stream.destroy();
      resolve(model);
    };
    reader.on('error', () => finish(null));
    reader.on('close', () => finish(null));
    reader.on('line', (raw) => {
      if (settled) return;
      seen += 1;
      try {
        const entry = JSON.parse(raw);
        if (entry.type === 'model_change' && entry.modelId) {
          finish(entry.modelId);
          return;
        }
      } catch {
        // skip malformed lines
      }
      // Stop after 10 lines — model_change is always near the top.
      if (seen >= 10) finish(null);
    });
  });
}
/**
 * GET /api/sessions/:id/model — the model actually used by a session.
 *
 * Reads the session transcript (active or deleted) because the gateway's
 * sessions.list reports the agent default model, not a per-run override.
 *
 * @returns { ok: true, model: string | null } on success; 400 for a
 *   malformed ID, 404 when no transcript exists.
 */
app.get('/api/sessions/:id/model', rateLimitGeneral, async (c) => {
  const sessionId = c.req.param('id');
  // Session IDs are UUIDs — enforce the canonical 8-4-4-4-12 shape rather
  // than the previous loose "36 hex/dash chars" check, which admitted junk
  // like a string of 36 dashes. Also keeps the ID safe to splice into a
  // filesystem path.
  const uuidRe = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/;
  if (!uuidRe.test(sessionId)) {
    return c.json({ ok: false, error: 'Invalid session ID' }, 400);
  }
  const transcriptPath = await findTranscript(sessionId);
  if (!transcriptPath) {
    return c.json({ ok: false, error: 'Transcript not found' }, 404);
  }
  const modelId = await readModelFromTranscript(transcriptPath);
  return c.json({ ok: true, model: modelId });
});
export default app;

82
server/routes/skills.ts Normal file
View file

@ -0,0 +1,82 @@
/**
* Skills API Routes
*
* GET /api/skills List all skills via `openclaw skills list --json`
*/
import { Hono } from 'hono';
import { execFile } from 'node:child_process';
import { dirname } from 'node:path';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import { resolveOpenclawBin } from '../lib/openclaw-bin.js';
const app = new Hono();
// Hard cap on `openclaw skills list` runtime.
const SKILLS_TIMEOUT_MS = 15_000;
/** Ensure PATH includes the directory of the current Node binary (for #!/usr/bin/env node shims under systemd) */
const nodeDir = dirname(process.execPath);
const enrichedEnv = { ...process.env, PATH: `${nodeDir}:${process.env.PATH || ''}` };
/** Unmet requirements reported per skill by the CLI. */
interface SkillMissing {
  bins?: string[];
  anyBins?: string[];
  env?: string[];
  config?: string[];
  os?: string[];
}
/** One skill entry as emitted by `openclaw skills list --json`. */
interface RawSkill {
  name: string;
  description: string;
  emoji: string;
  eligible: boolean;
  disabled: boolean;
  blockedByAllowlist: boolean;
  source: string;
  bundled: boolean;
  homepage?: string;
  missing?: SkillMissing;
}
/** Top-level JSON shape of the CLI output. */
interface SkillsOutput {
  workspaceDir?: string;
  managedSkillsDir?: string;
  skills?: RawSkill[];
}
/**
 * Run `openclaw skills list --json` and return the parsed skill list.
 * Never rejects — exec failures and parse failures are logged and yield [].
 */
function execOpenclawSkills(): Promise<RawSkill[]> {
  const options = {
    timeout: SKILLS_TIMEOUT_MS,
    maxBuffer: 2 * 1024 * 1024,
    env: enrichedEnv, // PATH includes the Node binary dir for env-shebang shims
  };
  return new Promise((resolve) => {
    execFile(resolveOpenclawBin(), ['skills', 'list', '--json'], options, (err, stdout) => {
      if (err) {
        console.warn('[skills] openclaw skills list failed:', err.message);
        resolve([]);
        return;
      }
      let parsed: SkillsOutput;
      try {
        parsed = JSON.parse(stdout) as SkillsOutput;
      } catch (parseErr) {
        console.warn('[skills] Failed to parse openclaw output:', (parseErr as Error).message);
        resolve([]);
        return;
      }
      resolve(Array.isArray(parsed.skills) ? parsed.skills : []);
    });
  });
}
/** GET /api/skills — list all skills known to the openclaw CLI. */
app.get('/api/skills', rateLimitGeneral, async (c) => {
  try {
    const skills = await execOpenclawSkills();
    return c.json({ ok: true, skills });
  } catch (err) {
    // Defensive guard — execOpenclawSkills resolves [] on failure, so this
    // path should be unreachable in practice.
    console.error('[skills] list error:', (err as Error).message);
    return c.json({ ok: false, error: (err as Error).message }, 502);
  }
});
export default app;

160
server/routes/tokens.ts Normal file
View file

@ -0,0 +1,160 @@
/**
* GET /api/tokens Token usage statistics with persistent tracking.
*
* Scans `.jsonl` session transcript files in the sessions directory for
* accumulated cost and token data, aggregated by provider. Results are
* cached for 60 s and also persisted via the usage tracker (high-water mark).
* @module
*/
import { Hono } from 'hono';
import fs from 'node:fs/promises';
import path from 'node:path';
import readline from 'node:readline';
import { createReadStream } from 'node:fs';
import { updateUsage } from '../lib/usage-tracker.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import { config } from '../lib/config.js';
const app = new Hono();
// ── Types ────────────────────────────────────────────────────────────
/** Per-provider accumulated usage totals. */
interface ProviderStats {
  cost: number;
  messages: number;
  input: number;
  output: number;
  cacheRead: number;
  errors: number;
}
/** Aggregated scan result: overall totals plus a per-provider breakdown. */
interface SessionCostData {
  totalCost: number;
  totalInput: number;
  totalOutput: number;
  totalMessages: number;
  entries: Array<{
    source: string; // provider name
    cost: number;
    messageCount: number;
    inputTokens: number;
    outputTokens: number;
    cacheReadTokens: number;
    errorCount: number;
  }>;
}
// ── Session cost scanning (cached 60s) ───────────────────────────────
const EMPTY_COST_DATA: SessionCostData = { totalCost: 0, totalInput: 0, totalOutput: 0, totalMessages: 0, entries: [] };
const COST_CACHE_TTL = 60_000;
// ts === 0 means "never scanned"; see scanSessionCosts.
let costCache: { data: SessionCostData; ts: number } = { data: EMPTY_COST_DATA, ts: 0 };
/** Create a fresh, all-zero per-provider accumulator. */
function newProviderStats(): ProviderStats {
  const zeroed: ProviderStats = {
    cost: 0,
    messages: 0,
    input: 0,
    output: 0,
    cacheRead: 0,
    errors: 0,
  };
  return zeroed;
}
/**
 * Scan all `.jsonl` session files and aggregate token usage by provider.
 * Results are cached for {@link COST_CACHE_TTL} ms.
 *
 * Entries of type 'error' increment the provider's error count; entries of
 * type 'message' contribute cost/token totals. Messages from the internal
 * 'openclaw' provider, or without usage/provider data, are skipped.
 */
async function scanSessionCosts(): Promise<SessionCostData> {
  const now = Date.now();
  if (costCache.ts && now - costCache.ts < COST_CACHE_TTL) return costCache.data;
  const costByProvider: Record<string, ProviderStats> = {};
  let totalCost = 0, totalInput = 0, totalOutput = 0, totalMessages = 0;
  try {
    const files = (await fs.readdir(config.sessionsDir)).filter((f) => f.endsWith('.jsonl'));
    for (const file of files) {
      try {
        // Stream line-by-line — transcripts can be large.
        const rl = readline.createInterface({
          input: createReadStream(path.join(config.sessionsDir, file)),
          crlfDelay: Infinity,
        });
        for await (const line of rl) {
          try {
            const entry = JSON.parse(line);
            if (entry.type === 'error') {
              // Errors carry the provider at top level or nested in message.
              const provider = entry.provider || entry.message?.provider || 'unknown';
              (costByProvider[provider] ??= newProviderStats()).errors++;
              continue;
            }
            if (entry.type !== 'message') continue;
            const msg = entry.message;
            if (!msg?.usage || !msg.provider || msg.provider === 'openclaw') continue;
            const { usage, provider = 'unknown' } = msg;
            const cost = usage.cost?.total || 0;
            const input = usage.input || 0;
            const output = usage.output || 0;
            // Both camelCase and snake_case spellings occur in transcripts.
            const cacheRead = usage.cacheRead || usage.cache_read || 0;
            totalCost += cost;
            totalInput += input;
            totalOutput += output;
            totalMessages++;
            const stats = costByProvider[provider] ??= newProviderStats();
            stats.cost += cost;
            stats.messages++;
            stats.input += input;
            stats.output += output;
            stats.cacheRead += cacheRead;
          } catch { /* skip malformed lines */ }
        }
      } catch { /* skip unreadable files */ }
    }
  } catch { /* sessions dir might not exist yet */ }
  // Round currency values to 4 decimal places for stable JSON output.
  const round4 = (n: number) => Math.round(n * 10000) / 10000;
  const entries = Object.entries(costByProvider)
    .map(([source, d]) => ({
      source,
      cost: round4(d.cost),
      messageCount: d.messages,
      inputTokens: d.input,
      outputTokens: d.output,
      cacheReadTokens: d.cacheRead,
      errorCount: d.errors,
    }))
    .sort((a, b) => b.cost - a.cost); // most expensive provider first
  const result: SessionCostData = {
    totalCost: round4(totalCost),
    totalInput,
    totalOutput,
    totalMessages,
    entries,
  };
  costCache = { data: result, ts: now };
  return result;
}
// ── Route ────────────────────────────────────────────────────────────
/** GET /api/tokens — scanned session costs plus persistent high-water totals. */
app.get('/api/tokens', rateLimitGeneral, async (c) => {
  const scanned = await scanSessionCosts();
  const persistent = await updateUsage(scanned.totalInput, scanned.totalOutput, scanned.totalCost);
  const { totalInput, totalOutput, totalCost, lastUpdated } = persistent;
  return c.json({
    ...scanned,
    persistent: { totalInput, totalOutput, totalCost, lastUpdated },
    updatedAt: Date.now(),
  });
});
export default app;

131
server/routes/transcribe.ts Normal file
View file

@ -0,0 +1,131 @@
/**
* POST /api/transcribe Audio transcription.
*
* Routes to local Whisper (default, no API key needed) or OpenAI Whisper API.
* Body: multipart/form-data with a "file" field containing audio data.
* Response: { text: string }
*/
import { Hono } from 'hono';
import { config } from '../lib/config.js';
import { transcribe as transcribeOpenAI } from '../services/openai-whisper.js';
import { transcribeLocal, isModelAvailable, getActiveModel, setWhisperModel, getDownloadProgress, getSystemInfo } from '../services/whisper-local.js';
import { rateLimitTranscribe } from '../middleware/rate-limit.js';
// Max upload size for transcription requests (bytes).
const MAX_FILE_SIZE = config.limits.transcribe; // 12 MB
/** MIME types accepted for transcription */
const ALLOWED_AUDIO_TYPES = new Set([
  'audio/webm',
  'audio/mp3',
  'audio/mpeg',
  'audio/mp4',
  'audio/m4a',
  'audio/wav',
  'audio/x-wav',
  'audio/ogg',
  'audio/flac',
  'audio/x-flac',
]);
const app = new Hono();
/**
 * POST /api/transcribe — multipart audio upload → { text }.
 * Provider is chosen by config.sttProvider: 'openai' requires an API key,
 * anything else uses local Whisper.
 */
app.post('/api/transcribe', rateLimitTranscribe, async (c) => {
  try {
    const form = await c.req.parseBody();
    const file = form['file'];
    // instanceof check also rejects missing / string-valued fields.
    if (!(file instanceof File)) {
      return c.text('No file found in request', 400);
    }
    if (file.size > MAX_FILE_SIZE) {
      return c.text(`File too large (max ${MAX_FILE_SIZE / 1024 / 1024}MB)`, 413);
    }
    // Only reject when a type is present AND unknown; untyped uploads pass.
    if (file.type && !ALLOWED_AUDIO_TYPES.has(file.type)) {
      return c.text(`Unsupported audio format: ${file.type}`, 415);
    }
    const fileData = Buffer.from(await file.arrayBuffer());
    const filename = file.name || 'audio.webm';
    // Route to configured STT provider
    const useOpenAI = config.sttProvider === 'openai';
    if (useOpenAI && !config.openaiApiKey) {
      return c.text('OpenAI API key not configured. Set OPENAI_API_KEY in .env or switch to STT_PROVIDER=local', 500);
    }
    const result = useOpenAI
      ? await transcribeOpenAI(fileData, filename, file.type || 'audio/webm')
      : await transcribeLocal(fileData, filename);
    if (!result.ok) {
      return c.text(result.message, result.status as 400 | 500);
    }
    return c.json({ text: result.text });
  } catch (err) {
    console.error('[transcribe] error:', (err as Error).message || err);
    return c.text('Transcription failed', 500);
  }
});
/** GET /api/transcribe/config — current STT provider info + download progress */
app.get('/api/transcribe/config', (c) => {
  const download = getDownloadProgress();
  const { hasGpu } = getSystemInfo();
  // Build the model catalog from a declarative size table.
  const modelSizes: Array<[string, string]> = [
    ['tiny.en', '75MB'],
    ['base.en', '142MB'],
    ['small.en', '466MB'],
  ];
  const availableModels = Object.fromEntries(
    modelSizes.map(([id, size]) => [id, { size, ready: isModelAvailable(id) }]),
  );
  return c.json({
    provider: config.sttProvider,
    model: getActiveModel(),
    // Remote providers need no local model files.
    modelReady: config.sttProvider === 'local' ? isModelAvailable() : true,
    openaiKeySet: !!config.openaiApiKey,
    replicateKeySet: !!config.replicateApiToken,
    hasGpu,
    availableModels,
    download: download
      ? {
          model: download.model,
          downloading: download.downloading,
          percent: download.percent,
          error: download.error,
        }
      : null,
  });
});
/** PUT /api/transcribe/config — switch STT provider or model at runtime */
app.put('/api/transcribe/config', async (c) => {
  try {
    const { provider, model } = await c.req.json() as { model?: string; provider?: string };
    const notes: string[] = [];
    // Switch provider
    if (provider === 'local' || provider === 'openai') {
      // config is typed read-only; runtime mutation is intentional for live switching.
      (config as Record<string, unknown>).sttProvider = provider;
      notes.push(`Provider set to ${provider}`);
    }
    // Switch model
    if (model) {
      const outcome = await setWhisperModel(model);
      if (!outcome.ok) return c.text(outcome.message, 400);
      notes.push(outcome.message);
    }
    return c.json({
      provider: config.sttProvider,
      model: getActiveModel(),
      message: notes.join(', ') || 'No changes',
    });
  } catch {
    return c.text('Invalid request', 400);
  }
});

export default app;

170
server/routes/tts.ts Normal file
View file

@ -0,0 +1,170 @@
/**
* POST /api/tts Text-to-speech synthesis.
*
* Supports OpenAI TTS, Replicate (Qwen, etc.), and Edge TTS (free, zero-config).
* Body: { text: string, provider?: string, model?: string, voice?: string }
* Response: audio/mpeg binary
*
* Provider selection priority:
* - Explicit provider choice is always honoured
* - "openai" OpenAI TTS (requires OPENAI_API_KEY)
* - "replicate" Replicate-hosted models (requires REPLICATE_API_TOKEN)
* - "edge" Microsoft Edge Read-Aloud TTS (free, no key needed)
* - Auto fallback: openai (if key) replicate (if key) edge (always available)
*
* Backward compat: provider "qwen" is treated as replicate + model "qwen-tts".
*/
import { Hono } from 'hono';
import { zValidator } from '@hono/zod-validator';
import { z } from 'zod';
import crypto from 'node:crypto';
import { config } from '../lib/config.js';
import { getTTSConfig, updateTTSConfig } from '../lib/tts-config.js';
import { getTtsCache, setTtsCache } from '../services/tts-cache.js';
import { synthesizeOpenAI } from '../services/openai-tts.js';
import { synthesizeReplicate } from '../services/replicate-tts.js';
import { synthesizeEdge } from '../services/edge-tts.js';
import { rateLimitTTS, rateLimitGeneral } from '../middleware/rate-limit.js';
import type { ContentfulStatusCode } from 'hono/utils/http-status';
const app = new Hono();
// Upper bound on synthesizable text length (enforced by the schema below).
const MAX_TEXT_LENGTH = 5000;
// Request body schema for POST /api/tts.
const ttsSchema = z.object({
  text: z
    .string()
    .min(1, 'Text is required')
    .max(MAX_TEXT_LENGTH, `Text too long (max ${MAX_TEXT_LENGTH} chars)`)
    .refine((s) => s.trim().length > 0, 'Text cannot be empty or whitespace'),
  voice: z.string().optional(),
  // Accept both old ("qwen") and new ("replicate") values
  provider: z.enum(['openai', 'replicate', 'qwen', 'edge']).optional(),
  model: z.string().optional(),
});
/** Wrap an audio buffer in a 200 response with the given MIME type (default MP3). */
function audioResponse(buf: Buffer, contentType = 'audio/mpeg'): Response {
  const headers = { 'Content-Type': contentType };
  return new Response(buf, { status: 200, headers });
}
app.post(
  '/api/tts',
  rateLimitTTS,
  // Validate the body early; surface the first zod issue as a plain-text 400.
  zValidator('json', ttsSchema, (result, c) => {
    if (!result.success) {
      return c.text(result.error.issues[0]?.message || 'Invalid request', 400);
    }
  }),
  async (c) => {
    try {
      const { text, voice: rawVoice, provider: rawProvider, model: rawModel } = c.req.valid('json');
      // Normalize "qwen" → "replicate" + model "qwen-tts" for backward compat
      const isLegacyQwen = rawProvider === 'qwen';
      const provider = isLegacyQwen ? 'replicate' : rawProvider;
      const model = rawModel || (isLegacyQwen ? 'qwen-tts' : undefined);
      // Voice is passed through — each provider resolves its own default from config
      const voice = rawVoice;
      // Resolve effective provider: explicit > openai (if key) > replicate (if key) > edge
      const useReplicate =
        provider === 'replicate' ||
        (!provider && !config.openaiApiKey && !!config.replicateApiToken);
      const useEdge =
        provider === 'edge' ||
        (!provider && !config.openaiApiKey && !config.replicateApiToken);
      const effectiveProvider = useEdge ? 'edge' : useReplicate ? 'replicate' : 'openai';
      console.log(`[tts] provider=${effectiveProvider} voice=${voice} text="${text.slice(0, 50)}..."`);
      // Cache key includes provider + model + voice for proper isolation.
      // NOTE: md5 is acceptable here — it is a cache key, not a security boundary.
      const hash = crypto
        .createHash('md5')
        .update(`${effectiveProvider}:${model || ''}:${voice}:${text}`)
        .digest('hex');
      const cached = getTtsCache(hash);
      if (cached) {
        // Detect WAV (starts with "RIFF") vs MP3 for correct content type
        const cachedCt = cached.length > 4 && cached.toString('ascii', 0, 4) === 'RIFF' ? 'audio/wav' : 'audio/mpeg';
        return audioResponse(cached, cachedCt);
      }
      // Dispatch to the resolved provider implementation.
      let result;
      if (effectiveProvider === 'edge') {
        result = await synthesizeEdge(text, voice);
      } else if (effectiveProvider === 'replicate') {
        result = await synthesizeReplicate(text, { model, voice });
      } else {
        result = await synthesizeOpenAI(text, voice, model);
      }
      if (!result.ok) {
        return c.text(result.message, result.status as ContentfulStatusCode);
      }
      // Replicate returns WAV with an explicit contentType; the others default to MP3.
      const ct = 'contentType' in result ? (result as { contentType: string }).contentType : 'audio/mpeg';
      setTtsCache(hash, result.buf);
      return audioResponse(result.buf, ct);
    } catch (err) {
      console.error('[tts] error:', (err as Error).message || err);
      return c.text('TTS failed', 500);
    }
  },
);
// ---------------------------------------------------------------------------
// TTS voice config API — read & update tts-config.json
// ---------------------------------------------------------------------------
/** GET /api/tts/config — return current TTS voice config */
app.get('/api/tts/config', rateLimitGeneral, (c) => c.json(getTTSConfig()));
/** Allowed top-level keys and their allowed child keys (all must be strings) */
const TTS_CONFIG_SCHEMA: Record<string, string[]> = {
  qwen: ['mode', 'language', 'speaker', 'voiceDescription', 'styleInstruction'],
  openai: ['model', 'voice', 'instructions'],
  edge: ['voice'],
};

/**
 * Validate a TTS config patch.
 * Returns an error string for the first violation found, or null when valid.
 */
function validateTTSPatch(patch: unknown): string | null {
  const isPlainObject = (v: unknown): v is Record<string, unknown> =>
    typeof v === 'object' && v !== null && !Array.isArray(v);
  if (!isPlainObject(patch)) return 'Body must be a JSON object';
  for (const [section, value] of Object.entries(patch)) {
    if (!(section in TTS_CONFIG_SCHEMA)) return `Unknown section: "${section}"`;
    if (!isPlainObject(value)) return `"${section}" must be an object`;
    const allowedKeys = TTS_CONFIG_SCHEMA[section];
    for (const [field, fieldValue] of Object.entries(value)) {
      if (!allowedKeys.includes(field)) return `Unknown key: "${section}.${field}"`;
      if (typeof fieldValue !== 'string') return `"${section}.${field}" must be a string`;
      if (fieldValue.length > 2000) return `"${section}.${field}" exceeds max length (2000)`;
    }
  }
  return null;
}
/** PUT /api/tts/config — partial update TTS voice config */
app.put('/api/tts/config', rateLimitGeneral, async (c) => {
  try {
    const patch = await c.req.json();
    const problem = validateTTSPatch(patch);
    return problem ? c.text(problem, 400) : c.json(updateTTSConfig(patch));
  } catch (err) {
    console.error('[tts-config] update error:', (err as Error).message);
    return c.text('Invalid config', 400);
  }
});

export default app;

21
server/routes/version.ts Normal file
View file

@ -0,0 +1,21 @@
/**
* GET /api/version Returns the application version from package.json.
*/
import { Hono } from 'hono';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import { readFileSync } from 'node:fs';
import { resolve, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
// Resolve this module's directory (ESM has no __dirname builtin).
const __dirname = dirname(fileURLToPath(import.meta.url));
// Read package.json once at module load; path is relative to server/routes/.
const pkg = JSON.parse(readFileSync(resolve(__dirname, '../../package.json'), 'utf-8')) as {
  version: string;
  name: string;
};
const app = new Hono();
// GET /api/version — static version/name info, rate-limited like other read endpoints.
app.get('/api/version', rateLimitGeneral, (c) => c.json({ version: pkg.version, name: pkg.name }));
export default app;

View file

@ -0,0 +1,15 @@
/**
* GET /api/voice-phrases Returns configured stop/cancel phrases.
* Client uses these for voice recognition instead of hardcoded values.
*/
import { Hono } from 'hono';
import { getVoicePhrases } from '../lib/voice-phrases.js';
const app = new Hono();

/** GET /api/voice-phrases — stop/cancel phrases for client-side voice recognition. */
app.get('/api/voice-phrases', (c) => c.json(getVoicePhrases()));

export default app;

View file

@ -0,0 +1,89 @@
/**
* Workspace file API Routes
*
* GET /api/workspace/:key Read a workspace file by key
* PUT /api/workspace/:key Write a workspace file by key
*
* Strict allowlist of keys files. No directory traversal.
*/
import { Hono } from 'hono';
import fs from 'node:fs/promises';
import path from 'node:path';
import { config } from '../lib/config.js';
import { readText } from '../lib/files.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
const app = new Hono();

/** Workspace base directory — parent of memoryPath */
const workspacePath = path.dirname(config.memoryPath);

/** Strict allowlist mapping key → filename */
const FILE_MAP: Record<string, string> = {
  soul: 'SOUL.md',
  tools: 'TOOLS.md',
  identity: 'IDENTITY.md',
  user: 'USER.md',
  agents: 'AGENTS.md',
  heartbeat: 'HEARTBEAT.md',
};

/** Map an allowlisted key to its absolute path, or null for unknown keys. */
function resolveFile(key: string): string | null {
  const filename = FILE_MAP[key];
  return filename ? path.join(workspacePath, filename) : null;
}
/** GET /api/workspace/:key — read one allowlisted workspace file. */
app.get('/api/workspace/:key', rateLimitGeneral, async (c) => {
  const filePath = resolveFile(c.req.param('key'));
  if (!filePath) return c.json({ ok: false, error: 'Unknown file key' }, 400);
  const exists = await fs.access(filePath).then(() => true, () => false);
  if (!exists) return c.json({ ok: false, error: 'File not found' }, 404);
  return c.json({ ok: true, content: await readText(filePath) });
});
/**
 * PUT /api/workspace/:key — write one allowlisted workspace file.
 * Body: { content: string } (max 100KB). Creates parent directories as needed.
 */
app.put('/api/workspace/:key', rateLimitGeneral, async (c) => {
  const filePath = resolveFile(c.req.param('key'));
  if (!filePath) return c.json({ ok: false, error: 'Unknown file key' }, 400);
  // FIX: malformed JSON previously threw out of the handler and surfaced as a
  // generic 500 — report it as a client error instead, like the other checks.
  let body: { content?: unknown };
  try {
    body = await c.req.json<{ content: string }>();
  } catch {
    return c.json({ ok: false, error: 'Invalid JSON body' }, 400);
  }
  if (typeof body.content !== 'string') {
    return c.json({ ok: false, error: 'Missing content field' }, 400);
  }
  if (body.content.length > 100_000) {
    return c.json({ ok: false, error: 'Content too large (max 100KB)' }, 400);
  }
  try {
    await fs.mkdir(path.dirname(filePath), { recursive: true });
    await fs.writeFile(filePath, body.content, 'utf-8');
    return c.json({ ok: true });
  } catch (err) {
    console.error('[workspace] PUT error:', (err as Error).message);
    return c.json({ ok: false, error: 'Failed to write file' }, 500);
  }
});
/**
 * GET /api/workspace — list available workspace file keys and whether each
 * backing file currently exists on disk.
 */
app.get('/api/workspace', rateLimitGeneral, async (c) => {
  // Stat all allowlisted files in parallel — the checks are independent,
  // so there is no reason to await them one at a time.
  const files = await Promise.all(
    Object.entries(FILE_MAP).map(async ([key, filename]) => {
      const filePath = path.join(workspacePath, filename);
      const exists = await fs.access(filePath).then(() => true, () => false);
      return { key, filename, exists };
    }),
  );
  return c.json({ ok: true, files });
});
export default app;

View file

@ -0,0 +1,235 @@
/**
* Claude Code usage limits spawns Claude CLI via node-pty, sends `/usage`,
* and parses the output.
*
* The flow:
* 1. Spawn `claude` in a PTY (needed for its interactive TUI)
* 2. Wait for the ready prompt (handling workspace trust prompts if shown)
* 3. Send `/usage` + Enter
* 4. Parse the ANSI-stripped output for session and weekly usage percentages
*
* Exported {@link getClaudeUsage} returns raw usage data consumed by the
* `/api/claude-code-limits` route, which normalises reset timestamps.
* @module
*/
import * as nodePty from 'node-pty';
import { existsSync } from 'node:fs';
import { homedir, tmpdir } from 'node:os';
import { join } from 'node:path';
// ── Types ────────────────────────────────────────────────────────────
/** One usage window (session or weekly) as parsed from Claude's /usage output. */
interface RawLimitWindow {
  used_percent: number;
  left_percent: number;
  // Human-readable reset time exactly as Claude printed it (e.g. "10:59pm (Europe/Berlin)");
  // normalised to a real timestamp by the /api/claude-code-limits route.
  resets_at: string;
}
/** Raw result of a /usage probe; when `available` is false, `error` explains why. */
export interface RawClaudeLimits {
  available: boolean;
  session_limit?: RawLimitWindow | null;
  weekly_limit?: RawLimitWindow | null;
  error?: string;
}
// ── Helpers ──────────────────────────────────────────────────────────
/** Strip ANSI/OSC/CSI escape sequences and carriage returns from PTY output. */
function stripAnsi(s: string): string {
  // Applied in order; CSI must be removed before the generic two-char rule.
  const patterns: RegExp[] = [
    // OSC sequences: ESC ] ... (terminated by BEL or ST)
    // eslint-disable-next-line no-control-regex
    /\x1B\].*?(?:\x07|\x1B\\)/g,
    // CSI sequences: ESC [ <params> <intermediates> <final byte>
    // eslint-disable-next-line no-control-regex
    /\x1B\[[\x30-\x3F]*[\x20-\x2F]*[\x40-\x7E]/g,
    // Other two-char escape sequences (e.g. ESC =, ESC >)
    // eslint-disable-next-line no-control-regex
    /\x1B[^[\]].?/g,
    // Carriage returns
    /\r/g,
  ];
  let out = s;
  for (const re of patterns) out = out.replace(re, '');
  return out;
}
/** Prefer the user-local Claude CLI install; fall back to PATH lookup. */
function resolveClaudeBin(): string {
  const localInstall = join(homedir(), '.local', 'bin', 'claude');
  return existsSync(localInstall) ? localInstall : 'claude';
}
/** Promise-based delay helper. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}
/**
 * Poll the buffer until `test` returns true, checking every `intervalMs`.
 * Returns true if matched, false on timeout.
 */
async function pollFor(
  getBuffer: () => string,
  test: (clean: string) => boolean,
  timeoutMs: number,
  intervalMs = 500,
): Promise<boolean> {
  const deadline = Date.now() + timeoutMs;
  do {
    if (test(stripAnsi(getBuffer()))) return true;
    await sleep(intervalMs);
  } while (Date.now() < deadline);
  return false;
}
/** Check if the buffer contains Claude's ready prompt (❯ or >). */
function hasReadyPrompt(clean: string): boolean {
  // BUG FIX: the prompt glyph had been lost from the source, leaving
  // `clean.includes('')` — which is always true, so the ready check never
  // actually waited. Claude shows ❯ (U+276F) or a bare `>` line when ready.
  return clean.includes('\u276F') || /^>\s*$/m.test(clean);
}
/** Check if the buffer contains a workspace trust prompt */
function hasTrustPrompt(clean: string): boolean {
  const needles = ['trust this folder', 'quick safety check', 'accessing workspace'];
  const haystack = clean.toLowerCase();
  return needles.some((needle) => haystack.includes(needle));
}
// ── Main ─────────────────────────────────────────────────────────────
/**
 * Spawn the Claude CLI in a PTY, drive it through `/usage`, and parse the
 * session/weekly usage percentages out of the ANSI-stripped output.
 * Never throws: failures are reported as { available: false, error }.
 * The PTY is always killed before returning.
 */
export async function getClaudeUsage(): Promise<RawClaudeLimits> {
  const claudeBin = resolveClaudeBin();
  let pty: nodePty.IPty | null = null;
  let buffer = '';
  try {
    // A real PTY is required — the claude CLI renders an interactive TUI.
    pty = nodePty.spawn(claudeBin, [], {
      name: 'xterm-256color',
      cols: 200,
      rows: 50,
      cwd: tmpdir(),
      env: process.env as Record<string, string>,
    });
    // Accumulate all raw output; pollFor() re-strips ANSI on every check.
    pty.onData((data: string) => {
      buffer += data;
    });
    const getBuffer = () => buffer;
    // Step 1: Wait for either a trust prompt or the ready prompt (max 15s)
    const gotInitial = await pollFor(
      getBuffer,
      (clean) => hasTrustPrompt(clean) || hasReadyPrompt(clean),
      15_000,
    );
    if (!gotInitial) {
      return { available: false, error: 'Timeout waiting for Claude to start' };
    }
    // Step 2: If trust prompt, accept it (Enter) and wait for ready prompt
    if (hasTrustPrompt(stripAnsi(buffer))) {
      pty.write('\r');
      const gotReady = await pollFor(getBuffer, hasReadyPrompt, 15_000);
      if (!gotReady) {
        return { available: false, error: 'Timeout waiting for Claude after trust prompt' };
      }
    }
    // Step 3: Send /usage command
    pty.write('/usage\r');
    await sleep(1000);
    pty.write('\r'); // confirmation Enter
    // Step 4: Poll for usage data to appear (max 10s)
    const gotUsage = await pollFor(
      getBuffer,
      (clean) => /\d+%\s*used/i.test(clean) || /hit\s*your\s*limit/i.test(clean),
      10_000,
    );
    if (!gotUsage) {
      console.error('Claude usage: no usage data found in output. Buffer (stripped):', stripAnsi(buffer).slice(-2000));
      return { available: false, error: 'No usage data found in Claude output' };
    }
    // Parse the collected output
    const output = stripAnsi(buffer);
    let sessionUsed: number | null = null;
    let sessionResets: string | null = null;
    let weeklyUsed: number | null = null;
    let weeklyResets: string | null = null;
    // Panel format — may be multi-line or single-line after ANSI stripping:
    // Single: "Current session ..... 42%used Resets 10:59pm (Europe/Berlin)"
    // Multi:  "Current session\n ..... 42% used\n Resets 10:59pm"
    // Use [\s\S]*? to match across line boundaries; "Rese?t?s?" tolerates
    // glyph loss from the ANSI stripping.
    const mPanelSess = output.match(
      /Current\s*session[\s\S]*?(\d+)%\s*used[\s\S]*?Rese?t?s?\s*([^\n]+)/i,
    );
    if (mPanelSess) {
      sessionUsed = parseInt(mPanelSess[1], 10);
      sessionResets = mPanelSess[2].trim();
    }
    // Weekly — same flexible matching across line boundaries
    const mPanelWeek = output.match(
      /Current\s*week(?:\s*\([^)]*\))?[\s\S]*?(\d+)%\s*used[\s\S]*?Rese?t?s?\s*([^\n]+)/i,
    );
    if (mPanelWeek) {
      weeklyUsed = parseInt(mPanelWeek[1], 10);
      weeklyResets = mPanelWeek[2].trim();
    }
    // Footer fallbacks — older/alternate CLI output prints one-line summaries;
    // these intentionally overwrite the panel values when both are present.
    for (const line of output.split('\n')) {
      const sessionMatch = line.match(/used\s*(\d+)%\s*of\s*your\s*session\s*limit.*?rese?ts?\s*([^·]+)/i);
      if (sessionMatch) {
        sessionUsed = parseInt(sessionMatch[1], 10);
        sessionResets = sessionMatch[2].trim();
      }
      const hitMatch = line.match(/hit\s*your\s*limit.*?rese?ts?\s*([^·]+)/i);
      if (hitMatch) {
        sessionUsed = 100;
        sessionResets = hitMatch[1].trim();
      }
    }
    // resets_at is still the human-readable string; the route layer normalises it.
    // The non-null assertions hold because *Resets is always set alongside *Used.
    return {
      available: sessionUsed !== null || weeklyUsed !== null,
      session_limit:
        sessionUsed !== null
          ? {
              used_percent: sessionUsed,
              left_percent: 100 - sessionUsed,
              resets_at: sessionResets!,
            }
          : null,
      weekly_limit:
        weeklyUsed !== null
          ? {
              used_percent: weeklyUsed,
              left_percent: 100 - weeklyUsed,
              resets_at: weeklyResets!,
            }
          : null,
    };
  } catch (error) {
    console.error('Error fetching Claude usage via PTY:', error);
    return {
      available: false,
      error: error instanceof Error ? error.message : 'Unknown error',
    };
  } finally {
    // Always reap the PTY — a leaked claude process would keep holding a session.
    if (pty) {
      try {
        pty.kill();
      } catch {
        // already dead
      }
    }
  }
}

181
server/services/edge-tts.ts Normal file
View file

@ -0,0 +1,181 @@
/**
* Edge TTS free text-to-speech via Microsoft Edge's speech service.
*
* Implements the WebSocket protocol directly (no npm dependency beyond `ws`).
* Uses the same endpoint the Edge browser's Read Aloud feature talks to.
* Includes Sec-MS-GEC token generation for anti-abuse auth.
*
* Default voice options:
* - en-US-AriaNeural, en-US-GuyNeural, en-US-JennyNeural (American)
* - en-GB-SoniaNeural, en-GB-RyanNeural (British)
* @module
*/
import { WebSocket } from 'ws';
import crypto from 'node:crypto';
// Edge Read-Aloud WebSocket endpoint (the same service the Edge browser uses).
const BASE_URL =
  'speech.platform.bing.com/consumer/speech/synthesize/readaloud';
// Publicly-known client token used by Edge itself; required as a query param.
const TRUSTED_CLIENT_TOKEN = '6A5AA1D4EAFF4E9FB37E23D68491D6F4';
// Browser version advertised in the User-Agent and Sec-MS-GEC-Version values.
const CHROMIUM_FULL_VERSION = '143.0.3650.75';
const CHROMIUM_MAJOR_VERSION = CHROMIUM_FULL_VERSION.split('.')[0];
const SEC_MS_GEC_VERSION = `1-${CHROMIUM_FULL_VERSION}`;
import { getTTSConfig } from '../lib/tts-config.js';
const DEFAULT_VOICE = 'en-US-AriaNeural';
// Windows epoch offset: seconds between 1601-01-01 and 1970-01-01
const WIN_EPOCH = 11644473600;
/** Random request/connection ID: a UUIDv4 with the dashes removed. */
function uuid(): string {
  return crypto.randomUUID().split('-').join('');
}
/** Escape the five XML special characters so text embeds safely in SSML. */
function escapeXml(text: string): string {
  const entities: Record<string, string> = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&apos;',
  };
  // The regex only matches mapped characters, so the fallback never fires.
  return text.replace(/[&<>"']/g, (ch) => entities[ch] ?? ch);
}
/**
 * Generate the Sec-MS-GEC token required by Microsoft's anti-abuse system.
 * Based on: https://github.com/rany2/edge-tts/blob/master/src/edge_tts/drm.py
 */
function generateSecMsGec(): string {
  // Unix seconds → Windows file-time epoch, floored to a 5-minute bucket,
  // then expressed in 100-nanosecond ticks (Windows FILETIME units).
  const unixSeconds = Date.now() / 1000;
  const winSeconds = unixSeconds + WIN_EPOCH;
  const bucketed = winSeconds - (winSeconds % 300);
  const fileTimeTicks = bucketed * 1e7;
  // SHA-256 over "<ticks><trusted client token>", upper-cased hex.
  return crypto
    .createHash('sha256')
    .update(`${fileTimeTicks.toFixed(0)}${TRUSTED_CLIENT_TOKEN}`, 'ascii')
    .digest('hex')
    .toUpperCase();
}
/** Assemble the wss:// URL with auth token, GEC token, and a fresh connection ID. */
function buildWsUrl(): string {
  const query = [
    `TrustedClientToken=${TRUSTED_CLIENT_TOKEN}`,
    `Sec-MS-GEC=${generateSecMsGec()}`,
    `Sec-MS-GEC-Version=${SEC_MS_GEC_VERSION}`,
    `ConnectionId=${uuid()}`,
  ].join('&');
  return `wss://${BASE_URL}/edge/v1?${query}`;
}
/**
 * Synthesize `text` to MP3 via Edge's Read-Aloud WebSocket service.
 *
 * @param text  Plain text to speak (escaped into SSML).
 * @param voice Edge voice name; defaults to the configured voice or en-US-AriaNeural.
 * @returns The concatenated MP3 buffer, or an { ok: false } error object.
 */
export async function synthesizeEdge(
  text: string,
  voice?: string,
): Promise<
  { ok: true; buf: Buffer } | { ok: false; message: string; status: number }
> {
  const effectiveVoice = voice || getTTSConfig().edge.voice || DEFAULT_VOICE;
  console.log(`[edge-tts] Starting synthesis, voice=${effectiveVoice}`);
  try {
    const buf = await new Promise<Buffer>((resolve, reject) => {
      // Guards against double settlement across the message/error/close/timeout paths.
      let settled = false;
      const muid = crypto.randomBytes(16).toString('hex').toUpperCase();
      const ws = new WebSocket(buildWsUrl(), {
        host: 'speech.platform.bing.com',
        origin: 'chrome-extension://jdiccldimpdaibmpdkjnbmckianbfold',
        headers: {
          'User-Agent': `Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/${CHROMIUM_MAJOR_VERSION}.0.0.0 Safari/537.36 Edg/${CHROMIUM_MAJOR_VERSION}.0.0.0`,
          'Pragma': 'no-cache',
          'Cache-Control': 'no-cache',
          'Cookie': `muid=${muid};`,
        },
      });
      const audioData: Buffer[] = [];
      const timeout = setTimeout(() => {
        settled = true;
        ws.close();
        reject(new Error('Edge TTS timeout after 30s'));
      }, 30_000);
      ws.on('message', (rawData: Buffer, isBinary: boolean) => {
        if (!isBinary) {
          // Text frames carry protocol metadata; "turn.end" marks completion.
          const data = rawData.toString('utf8');
          if (data.includes('turn.end')) {
            settled = true;
            clearTimeout(timeout);
            resolve(Buffer.concat(audioData));
            ws.close();
          }
          return;
        }
        // Binary frames: audio payload follows the "Path:audio\r\n" header.
        const separator = 'Path:audio\r\n';
        const idx = rawData.indexOf(separator);
        if (idx >= 0) {
          audioData.push(rawData.subarray(idx + separator.length));
        }
      });
      ws.on('error', (err) => {
        console.error('[edge-tts] WebSocket error:', err.message);
        settled = true;
        clearTimeout(timeout);
        reject(err);
      });
      // FIX: if the server closes the socket before sending "turn.end" (e.g. an
      // auth rejection that emits no 'error'), fail fast instead of hanging for
      // the full 30s timeout.
      ws.on('close', () => {
        if (!settled) {
          settled = true;
          clearTimeout(timeout);
          reject(new Error('Edge TTS connection closed before synthesis completed'));
        }
      });
      const speechConfig = JSON.stringify({
        context: {
          synthesis: {
            audio: {
              metadataoptions: {
                sentenceBoundaryEnabled: false,
                wordBoundaryEnabled: false,
              },
              outputFormat: 'audio-24khz-48kbitrate-mono-mp3',
            },
          },
        },
      });
      const configMessage =
        `X-Timestamp:${new Date().toString()}\r\n` +
        `Content-Type:application/json; charset=utf-8\r\n` +
        `Path:speech.config\r\n\r\n${speechConfig}`;
      ws.on('open', () => {
        console.log('[edge-tts] WebSocket connected, sending config...');
        ws.send(configMessage, { compress: true }, (err) => {
          if (err) {
            settled = true;
            clearTimeout(timeout);
            return reject(err);
          }
          // SSML request: voice + neutral prosody wrapping the escaped text.
          const ssmlMessage =
            `X-RequestId:${uuid()}\r\nContent-Type:application/ssml+xml\r\n` +
            `X-Timestamp:${new Date().toString()}Z\r\nPath:ssml\r\n\r\n` +
            `<speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis' xml:lang='en-US'>` +
            `<voice name='${escapeXml(effectiveVoice)}'><prosody pitch='+0Hz' rate='+0%' volume='+0%'>` +
            `${escapeXml(text)}</prosody></voice></speak>`;
          ws.send(ssmlMessage, { compress: true }, (ssmlErr) => {
            if (ssmlErr) {
              settled = true;
              clearTimeout(timeout);
              reject(ssmlErr);
            }
          });
        });
      });
    });
    if (buf.length === 0) {
      return { ok: false, message: 'Edge TTS returned empty audio', status: 500 };
    }
    return { ok: true, buf };
  } catch (err) {
    console.error('[edge-tts] error:', (err as Error).message);
    return {
      ok: false,
      message: `Edge TTS failed: ${(err as Error).message}`,
      status: 502,
    };
  }
}

View file

@ -0,0 +1,67 @@
/**
* OpenAI TTS provider.
*
* Supports multiple models: gpt-4o-mini-tts (default), tts-1, tts-1-hd.
* Voice and model settings are read from `tts-config.json` via {@link getTTSConfig}.
* @module
*/
import { config } from '../lib/config.js';
import { getTTSConfig } from '../lib/tts-config.js';
import { OPENAI_TTS_URL } from '../lib/constants.js';
/** Successful synthesis: raw MP3 bytes. */
export interface OpenAITTSResult {
  ok: true;
  buf: Buffer;
}
/** Failed synthesis: HTTP-ish status plus the upstream error body. */
export interface OpenAITTSError {
  ok: false;
  status: number;
  message: string;
}
/**
* Generate speech via OpenAI TTS API.
* Returns the audio buffer on success, or an error object.
*/
export async function synthesizeOpenAI(
text: string,
voice?: string,
model?: string,
): Promise<OpenAITTSResult | OpenAITTSError> {
if (!config.openaiApiKey) {
return { ok: false, status: 500, message: 'OpenAI API key not configured' };
}
const ttsConfig = getTTSConfig().openai;
const effectiveModel = model || ttsConfig.model;
const effectiveVoice = voice || ttsConfig.voice;
const resp = await fetch(OPENAI_TTS_URL, {
method: 'POST',
headers: {
Authorization: `Bearer ${config.openaiApiKey}`,
'Content-Type': 'application/json',
},
body: JSON.stringify({
model: effectiveModel,
voice: effectiveVoice,
input: text,
response_format: 'mp3',
// instructions only supported by gpt-4o-mini-tts
...(effectiveModel === 'gpt-4o-mini-tts' && {
instructions: ttsConfig.instructions,
}),
}),
});
if (!resp.ok) {
const errBody = await resp.text();
console.error('[tts:openai] API error:', resp.status, errBody);
return { ok: false, status: resp.status, message: errBody };
}
const buf = Buffer.from(await resp.arrayBuffer());
return { ok: true, buf };
}

View file

@ -0,0 +1,65 @@
/**
* OpenAI Whisper transcription client.
*
* Sends audio data to the OpenAI `whisper-1` model via a manually-constructed
* multipart/form-data request (no FormData dependency needed).
* @module
*/
import { config } from '../lib/config.js';
import { OPENAI_WHISPER_URL } from '../lib/constants.js';
/** Successful transcription result. */
export interface WhisperResult {
  ok: true;
  text: string;
}
/** Failed transcription: HTTP-ish status plus the upstream error body. */
export interface WhisperError {
  ok: false;
  status: number;
  message: string;
}
/**
 * Transcribe audio via OpenAI Whisper API.
 * Accepts raw file data and builds a multipart request by hand (no FormData).
 *
 * @param fileData Raw audio bytes.
 * @param filename Original filename (sanitized before embedding in the header).
 * @param mimeType Audio MIME type; defaults to audio/webm.
 */
export async function transcribe(
  fileData: Buffer,
  filename: string,
  mimeType: string = 'audio/webm',
): Promise<WhisperResult | WhisperError> {
  if (!config.openaiApiKey) {
    return { ok: false, status: 500, message: 'OpenAI API key not configured' };
  }
  // Sanitize filename to prevent header injection via quotes/newlines
  const safeFilename = filename.replace(/[\r\n"]/g, '_');
  // FIX: the boundary must not occur in the payload. A bare Date.now() is
  // predictable and can collide with attacker-chosen audio bytes, so append a
  // random suffix (still well within RFC 2046's 70-character boundary limit).
  const boundary = `----FormBoundary${Date.now().toString(36)}${Math.random().toString(36).slice(2)}`;
  const header = Buffer.from(
    `--${boundary}\r\nContent-Disposition: form-data; name="file"; filename="${safeFilename}"\r\nContent-Type: ${mimeType}\r\n\r\n`,
  );
  const footer = Buffer.from(
    `\r\n--${boundary}\r\nContent-Disposition: form-data; name="model"\r\n\r\nwhisper-1\r\n--${boundary}--\r\n`,
  );
  const payload = Buffer.concat([header, fileData, footer]);
  const resp = await fetch(OPENAI_WHISPER_URL, {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${config.openaiApiKey}`,
      'Content-Type': `multipart/form-data; boundary=${boundary}`,
    },
    body: payload,
  });
  if (!resp.ok) {
    const errText = await resp.text();
    console.error('[transcribe] API error:', resp.status, errText);
    return { ok: false, status: resp.status, message: errText };
  }
  const result = (await resp.json()) as { text: string };
  return { ok: true, text: result.text };
}

View file

@ -0,0 +1,232 @@
/**
* Replicate TTS generic provider for Replicate-hosted TTS models.
*
* Calls the Replicate predictions API directly no Python, no temp files.
* Uses `Prefer: wait` to get synchronous results when possible, with polling
* fallback for cold starts. WAV output is piped through ffmpeg for MP3 conversion.
*
* Supported models:
* - qwen-tts (default): Qwen3-TTS, returns WAV converted to MP3 via ffmpeg
*
* To add a new Replicate model, add an entry to `REPLICATE_MODELS` below.
* @module
*/
import { config } from '../lib/config.js';
import { getTTSConfig } from '../lib/tts-config.js';
import { REPLICATE_QWEN_TTS_URL } from '../lib/constants.js';
/** Successful synthesis: audio bytes plus their MIME type (WAV for qwen-tts). */
export interface ReplicateTTSResult {
  ok: true;
  buf: Buffer;
  contentType: string;
}
/** Failed synthesis: HTTP-ish status plus a short client-safe message. */
export interface ReplicateTTSError {
  ok: false;
  status: number;
  message: string;
}
/** Registry of supported Replicate TTS models. */
interface ReplicateModelDef {
  // Model-specific predictions endpoint (no version hash needed).
  url: string;
  // Builds the prediction `input` payload; `voice` is part of the interface
  // but a model may ignore it and read voice settings from config instead.
  buildInput: (text: string, voice?: string) => Record<string, string>;
}
/**
 * Build Qwen TTS input from tts-config.json settings.
 * Supports voice_design (description-based) and custom_voice (preset speaker) modes.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function buildQwenInput(text: string, voice?: string): Record<string, string> {
  const qwen = getTTSConfig().qwen;
  const input: Record<string, string> = {
    text,
    language: qwen.language,
    mode: qwen.mode,
  };
  if (qwen.mode === 'voice_design' && qwen.voiceDescription) {
    input.voice_description = qwen.voiceDescription;
  }
  if (qwen.mode === 'custom_voice') {
    input.speaker = qwen.speaker;
  }
  // Both modes accept an optional style instruction.
  const styled = qwen.mode === 'voice_design' || qwen.mode === 'custom_voice';
  if (styled && qwen.styleInstruction) {
    input.style_instruction = qwen.styleInstruction;
  }
  return input;
}
// Model id → endpoint + input builder. Add entries here to support more
// Replicate-hosted TTS models.
const REPLICATE_MODELS: Record<string, ReplicateModelDef> = {
  'qwen-tts': {
    url: REPLICATE_QWEN_TTS_URL,
    buildInput: buildQwenInput,
  },
};
const DEFAULT_MODEL = 'qwen-tts';
// Poll cadence/budget once Prefer: wait returns a still-pending prediction.
const POLL_INTERVAL_MS = 1_000;
const MAX_POLL_MS = 120_000;
/** Per-fetch timeout for polling / download requests. */
const FETCH_TIMEOUT_MS = 30_000;
/** Timeout for the initial create request with Prefer: wait (Replicate holds connection). */
const CREATE_TIMEOUT_MS = 90_000;
/**
 * Generate speech via a Replicate-hosted TTS model.
 * Creates a prediction, waits/polls it to a terminal state, then downloads
 * the resulting audio. Returns the audio buffer on success, or an error object.
 *
 * NOTE(review): the client-facing error messages below say "Qwen TTS" even
 * though this path is model-generic — worth parameterising on modelId if
 * additional models are registered.
 */
export async function synthesizeReplicate(
  text: string,
  options?: { model?: string; voice?: string },
): Promise<ReplicateTTSResult | ReplicateTTSError> {
  if (!config.replicateApiToken) {
    return {
      ok: false,
      status: 500,
      message: 'No TTS provider configured (need REPLICATE_API_TOKEN)',
    };
  }
  const modelId = options?.model || DEFAULT_MODEL;
  const modelDef = REPLICATE_MODELS[modelId];
  if (!modelDef) {
    return {
      ok: false,
      status: 400,
      message: `Unknown Replicate TTS model: ${modelId}. Available: ${Object.keys(REPLICATE_MODELS).join(', ')}`,
    };
  }
  const authHeader = `Bearer ${config.replicateApiToken}`;
  const input = modelDef.buildInput(text, options?.voice);
  try {
    // Create prediction via model-specific endpoint (no version hash needed)
    // Use longer timeout for Prefer: wait — Replicate holds the connection until
    // the prediction completes (up to ~60s), and cold starts can be slow.
    const createResp = await fetchWithTimeout(modelDef.url, {
      method: 'POST',
      headers: {
        Authorization: authHeader,
        'Content-Type': 'application/json',
        Prefer: 'wait',
      },
      body: JSON.stringify({ input }),
    }, CREATE_TIMEOUT_MS);
    if (!createResp.ok) {
      const errBody = await createResp.text();
      console.error('[tts:replicate] Replicate create error:', createResp.status, errBody);
      return { ok: false, status: 502, message: 'Qwen TTS: upstream error' };
    }
    let prediction = (await createResp.json()) as ReplicatePrediction;
    // Poll until terminal state (Prefer: wait may resolve immediately)
    const deadline = Date.now() + MAX_POLL_MS;
    while (!isTerminal(prediction.status)) {
      if (Date.now() > deadline) {
        return { ok: false, status: 504, message: 'Qwen TTS timed out' };
      }
      await sleep(POLL_INTERVAL_MS);
      const pollUrl = prediction.urls?.get;
      if (!pollUrl) {
        return { ok: false, status: 502, message: 'Qwen TTS: no poll URL returned' };
      }
      const pollResp = await fetchWithTimeout(pollUrl, {
        headers: { Authorization: authHeader },
      });
      if (!pollResp.ok) {
        console.error('[tts:replicate] Replicate poll error:', pollResp.status, await pollResp.text());
        return { ok: false, status: 502, message: 'Qwen TTS: upstream poll error' };
      }
      prediction = (await pollResp.json()) as ReplicatePrediction;
    }
    // Terminal but not succeeded means failed or canceled.
    if (prediction.status !== 'succeeded') {
      console.error('[tts:replicate] prediction failed/canceled:', prediction.error);
      return { ok: false, status: 502, message: 'Qwen TTS prediction failed' };
    }
    // Extract the output URL — could be string, array of strings, or unexpected
    const outputUrl = extractOutputUrl(prediction.output);
    if (!outputUrl) {
      console.error('[tts:replicate] unexpected output format:', JSON.stringify(prediction.output));
      return { ok: false, status: 502, message: 'Qwen TTS: unexpected output format' };
    }
    // Download the audio
    const audioResp = await fetchWithTimeout(outputUrl);
    if (!audioResp.ok) {
      console.error(`[tts:replicate:${modelId}] audio download failed:`, audioResp.status);
      return { ok: false, status: 502, message: 'Failed to download TTS audio' };
    }
    const rawBuf = Buffer.from(await audioResp.arrayBuffer());
    // Serve WAV directly — all modern browsers support it natively.
    // Eliminates ffmpeg dependency and ~200ms conversion overhead.
    return { ok: true, buf: rawBuf, contentType: 'audio/wav' };
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    console.error('[tts:replicate] error:', msg);
    return { ok: false, status: 500, message: 'Qwen TTS error' };
  }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/** Minimal shape of a Replicate prediction object as returned by the REST API. */
interface ReplicatePrediction {
  /** Prediction ID assigned by Replicate. */
  id: string;
  /** Lifecycle status; terminal values are 'succeeded' | 'failed' | 'canceled' (see TERMINAL_STATUSES). */
  status: string;
  /** Model output — shape varies by model (string | string[] | file-like object). */
  output?: unknown;
  /** Error message, present when the prediction failed. */
  error?: string;
  /** API URLs for this prediction; `get` is the polling endpoint. */
  urls?: { get?: string };
}
/** Prediction statuses after which the polling loop must stop. */
const TERMINAL_STATUSES: readonly string[] = ['succeeded', 'failed', 'canceled'];

/** True once a prediction has reached a state that will not change again. */
function isTerminal(status: string): boolean {
  return TERMINAL_STATUSES.includes(status);
}
/**
 * Extract a URL string from Replicate's output field.
 *
 * Handles: a bare URL string, an array of URL strings, a FileOutput-like
 * object with a string `url` property, and (new) an array of such objects —
 * some Replicate models return `[{ url: ... }]`.
 *
 * @param output - The raw `output` field of a prediction (shape varies by model).
 * @returns The first usable URL, or null if none could be extracted.
 */
function extractOutputUrl(output: unknown): string | null {
  if (typeof output === 'string' && output.startsWith('http')) return output;
  if (Array.isArray(output)) {
    const first: unknown = output[0];
    if (typeof first === 'string' && first.startsWith('http')) return first;
    // Array of FileOutput-like objects: [{ url: '...' }]
    if (first && typeof first === 'object' && 'url' in first) {
      const url = (first as { url: unknown }).url;
      if (typeof url === 'string') return url;
    }
  }
  // FileOutput or object with .url
  if (output && typeof output === 'object' && 'url' in output) {
    const url = (output as { url: unknown }).url;
    if (typeof url === 'string') return url;
  }
  // Last-resort stringify — only for primitives: String(object) is
  // "[object Object]" and can never be a URL, so don't bother.
  if (output != null && typeof output !== 'object') {
    const str = String(output);
    if (str.startsWith('http')) return str;
  }
  return null;
}
/**
 * Fetch with an AbortController timeout.
 * The abort timer is always cleared once the request settles.
 */
async function fetchWithTimeout(url: string, init?: RequestInit, timeoutMs = FETCH_TIMEOUT_MS): Promise<Response> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    return await fetch(url, { ...init, signal: controller.signal });
  } finally {
    clearTimeout(timer);
  }
}
/** Convert a WAV buffer to MP3 via ffmpeg (piped, no temp files).
 * Currently unused — WAV is served directly to the browser. */
// function wavToMp3(wav: Buffer): Promise<Buffer> { ... }
/** Resolve after `ms` milliseconds. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}

View file

@ -0,0 +1,114 @@
/** Tests for the LRU TTS cache (TTL expiry, eviction, memory budget). */
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';

// Mock config before importing the module — the cache reads these limits on
// every call, so the mocked values (5 s TTL, max 3 entries) govern every test.
vi.mock('../lib/config.js', () => ({
  config: {
    ttsCacheTtlMs: 5000, // 5 seconds for testing
    ttsCacheMax: 3,
  },
}));

// We need to reset module state between tests since tts-cache uses module-level state.
// Typed via `typeof import(...)` so these stay in sync with the real exports.
let getTtsCache: typeof import('./tts-cache.js').getTtsCache;
let setTtsCache: typeof import('./tts-cache.js').setTtsCache;

describe('tts-cache', () => {
  beforeEach(async () => {
    // Fake timers let TTL expiry be driven deterministically via advanceTimersByTime.
    vi.useFakeTimers();
    // Re-import module to reset state (cache Map and totalBytes)
    vi.resetModules();
    const mod = await import('./tts-cache.js');
    getTtsCache = mod.getTtsCache;
    setTtsCache = mod.setTtsCache;
  });
  afterEach(() => {
    vi.useRealTimers();
  });
  it('should store and retrieve a cached buffer', () => {
    const buf = Buffer.from('audio-data');
    setTtsCache('hash1', buf);
    const result = getTtsCache('hash1');
    expect(result).not.toBeNull();
    expect(result!.toString()).toBe('audio-data');
  });
  it('should return null for cache miss', () => {
    expect(getTtsCache('nonexistent')).toBeNull();
  });
  it('should expire entries after TTL', () => {
    const buf = Buffer.from('audio-data');
    setTtsCache('hash1', buf);
    // Still valid — one tick under the 5000 ms TTL
    vi.advanceTimersByTime(4999);
    expect(getTtsCache('hash1')).not.toBeNull();
    // Expired
    vi.advanceTimersByTime(2);
    expect(getTtsCache('hash1')).toBeNull();
  });
  it('should enforce max entry count (LRU eviction)', () => {
    // Config max is 3
    setTtsCache('a', Buffer.from('1'));
    setTtsCache('b', Buffer.from('2'));
    setTtsCache('c', Buffer.from('3'));
    // All three should exist (these reads also refresh LRU order to a, b, c)
    expect(getTtsCache('a')).not.toBeNull();
    expect(getTtsCache('b')).not.toBeNull();
    expect(getTtsCache('c')).not.toBeNull();
    // Adding a 4th should evict the oldest (a)
    setTtsCache('d', Buffer.from('4'));
    expect(getTtsCache('a')).toBeNull();
    expect(getTtsCache('d')).not.toBeNull();
  });
  it('should promote entries on access (LRU)', () => {
    setTtsCache('a', Buffer.from('1'));
    setTtsCache('b', Buffer.from('2'));
    setTtsCache('c', Buffer.from('3'));
    // Access 'a' to promote it (moves to end of Map iteration order)
    getTtsCache('a');
    // Add a 4th — should evict 'b' (now the oldest unreferenced) instead of 'a'
    setTtsCache('d', Buffer.from('4'));
    expect(getTtsCache('a')).not.toBeNull();
    expect(getTtsCache('b')).toBeNull();
  });
  it('should enforce memory budget (100 MB)', () => {
    // Create buffers that total over 100MB
    const big = Buffer.alloc(40 * 1024 * 1024); // 40 MB each
    setTtsCache('a', big);
    setTtsCache('b', big);
    // a + b = 80MB, still under 100MB
    expect(getTtsCache('a')).not.toBeNull();
    expect(getTtsCache('b')).not.toBeNull();
    // Adding a third 40MB buffer → 120MB total, should evict oldest to get under budget
    setTtsCache('c', big);
    expect(getTtsCache('a')).toBeNull(); // evicted
    expect(getTtsCache('c')).not.toBeNull();
  });
  it('should prune expired entries on set', () => {
    setTtsCache('old', Buffer.from('old-data'));
    // Advance past TTL
    vi.advanceTimersByTime(6000);
    // Setting a new entry should prune the expired one
    setTtsCache('new', Buffer.from('new-data'));
    expect(getTtsCache('old')).toBeNull();
    expect(getTtsCache('new')).not.toBeNull();
  });
});

View file

@ -0,0 +1,75 @@
/**
 * LRU TTS cache with TTL expiry.
 *
 * In-memory cache for synthesised audio buffers keyed by content hash.
 * Entries are evicted by TTL, max count, or a 100 MB memory budget —
 * whichever limit is hit first. Access promotes entries (LRU).
 * @module
 */
import type { TtsCacheEntry } from '../types.js';
import { config } from '../lib/config.js';
// Map insertion order doubles as LRU order: hits delete + re-insert at the tail.
const cache = new Map<string, TtsCacheEntry>();
// Running total of cached buffer bytes; updated on every insert and eviction.
let totalBytes = 0;
const MAX_CACHE_BYTES = 100 * 1024 * 1024; // 100 MB memory budget
/**
 * Retrieve a cached TTS buffer by hash. Returns null on miss or expiry.
 * Promotes the entry (LRU) on hit.
 */
export function getTtsCache(hash: string): Buffer | null {
  const hit = cache.get(hash);
  if (hit === undefined) return null;
  const expired = Date.now() - hit.createdAt > config.ttsCacheTtlMs;
  if (expired) {
    totalBytes -= hit.buf.length;
    cache.delete(hash);
    return null;
  }
  // LRU promotion: re-insert at the tail of the Map so iteration order
  // reflects recency (oldest entry is always first).
  cache.delete(hash);
  hit.lastAccess = Date.now();
  cache.set(hash, hit);
  return hit.buf;
}
/**
 * Store a TTS buffer in the cache and prune if necessary.
 *
 * After insertion, three limits are enforced in order:
 *  1. TTL — entries older than config.ttsCacheTtlMs are dropped.
 *  2. Count — at most config.ttsCacheMax entries.
 *  3. Memory — total buffered bytes must not exceed MAX_CACHE_BYTES
 *     (this may evict the just-inserted entry if it is huge).
 * Eviction removes the least-recently-used entry first (Map head).
 */
export function setTtsCache(hash: string, buf: Buffer): void {
  const now = Date.now();
  // Replacing an existing entry: subtract its bytes before overwriting.
  const existing = cache.get(hash);
  if (existing) {
    totalBytes -= existing.buf.length;
  }
  cache.set(hash, { buf, createdAt: now, lastAccess: now });
  totalBytes += buf.length;
  // Evict the LRU entry (Map iteration order = insertion order). Shared by
  // both limit loops below — previously duplicated, and the old `!oldest`
  // falsy check would have stopped eviction dead on an empty-string key.
  const evictOldest = (): void => {
    const oldest = cache.keys().next().value;
    if (oldest === undefined) return;
    const entry = cache.get(oldest);
    if (entry) totalBytes -= entry.buf.length;
    cache.delete(oldest);
  };
  // 1. Prune expired entries (deleting during Map iteration is safe in JS).
  for (const [key, e] of cache) {
    if (now - e.createdAt > config.ttsCacheTtlMs) {
      totalBytes -= e.buf.length;
      cache.delete(key);
    }
  }
  // 2. Enforce max entry count.
  while (cache.size > config.ttsCacheMax) {
    evictOldest();
  }
  // 3. Enforce memory budget.
  while (totalBytes > MAX_CACHE_BYTES && cache.size > 0) {
    evictOldest();
  }
}

View file

@ -0,0 +1,361 @@
/**
* Local Whisper transcription via @fugood/whisper.node.
*
* Keeps a singleton WhisperContext alive for the server's lifetime.
* Converts incoming audio to 16kHz mono WAV via ffmpeg, then transcribes.
* @module
*/
import { execFileSync, execSync } from 'node:child_process';
import { writeFileSync, unlinkSync, accessSync, mkdirSync, createWriteStream } from 'node:fs';
import { join } from 'node:path';
import { tmpdir, cpus } from 'node:os';
import { randomUUID } from 'node:crypto';
import { initWhisper } from '@fugood/whisper.node';
import type { WhisperContext } from '@fugood/whisper.node';
import { config } from '../lib/config.js';
import { WHISPER_MODEL_FILES, WHISPER_MODELS_BASE_URL } from '../lib/constants.js';
// ── Types ────────────────────────────────────────────────────────────────────
/** Successful local transcription result. */
export interface TranscribeLocalResult {
  ok: true;
  /** The transcribed text (trimmed; never empty on success). */
  text: string;
}
/** Failed transcription, mapped to an HTTP-style status for the route layer. */
export interface TranscribeLocalError {
  ok: false;
  /** HTTP-style status code to return to the client. */
  status: number;
  /** Human-readable error description. */
  message: string;
}
// ── Singleton context ────────────────────────────────────────────────────────
/** Loaded Whisper context; null until the first transcription initialises it. */
let whisperContext: WhisperContext | null = null;
/** In-flight init promise — guards against concurrent initialisation. */
let contextInitializing: Promise<WhisperContext> | null = null;
/** Name of the model the context is (or will be) loaded with. */
let activeModel: string = config.whisperModel;
/**
 * Resolve the absolute path to a Whisper model file.
 * Falls back to the active model when none is given.
 * @throws if the model name is not in WHISPER_MODEL_FILES.
 */
function modelPath(model?: string): string {
  const name = model || activeModel;
  const filename = WHISPER_MODEL_FILES[name];
  if (!filename) {
    throw new Error(`Unknown Whisper model: ${name}`);
  }
  return join(config.whisperModelDir, filename);
}
/** Get the currently active model name (the one the singleton context uses or will load). */
export function getActiveModel(): string {
  return activeModel;
}
/** Check whether a Whisper model file exists on disk (defaults to the active model). */
export function isModelAvailable(model?: string): boolean {
  try {
    accessSync(modelPath(model));
  } catch {
    // accessSync throws when the file is missing (or the model name is unknown).
    return false;
  }
  return true;
}
/**
 * Get or initialize the singleton WhisperContext.
 * The context loads the model into memory once and reuses it for all requests.
 * This avoids ~2-3 second model loading time on every transcription.
 */
async function getContext(): Promise<WhisperContext> {
  // Fast path: already initialised.
  if (whisperContext) return whisperContext;
  // Prevent concurrent initialization (multiple requests hitting at same time)
  if (contextInitializing) return contextInitializing;
  contextInitializing = initWhisper({
    filePath: modelPath(),
    useGpu: true, // auto-detects Metal on macOS; CPU fallback elsewhere
  }).then((ctx) => {
    whisperContext = ctx;
    contextInitializing = null;
    console.log(`[whisper-local] Model loaded: ${activeModel}`);
    return ctx;
  }).catch((err) => {
    // Clear the guard so a later request can retry initialisation.
    contextInitializing = null;
    throw err;
  });
  return contextInitializing;
}
/** Release the Whisper context and its model memory (call on server shutdown). */
export async function releaseWhisperContext(): Promise<void> {
  if (!whisperContext) return;
  await whisperContext.release();
  whisperContext = null;
  console.log('[whisper-local] Context released');
}
// ── Model download ───────────────────────────────────────────────────────────
/** Progress snapshot for an in-flight model download (polled by the UI). */
interface DownloadProgress {
  /** Model name being downloaded. */
  model: string;
  /** True while the transfer is active. */
  downloading: boolean;
  bytesDownloaded: number;
  /** Total size from Content-Length; 0 when the server did not report one. */
  totalBytes: number;
  /** 0-100; stays 0 when totalBytes is unknown. */
  percent: number;
  /** Set when the download failed. */
  error?: string;
}
// Single in-flight download; downloads are not started concurrently (see setWhisperModel).
let currentDownload: DownloadProgress | null = null;
/** Get current download progress (null if no download active). */
export function getDownloadProgress(): DownloadProgress | null {
  return currentDownload;
}
/**
 * Download a Whisper model from HuggingFace.
 * Streams to `<dest>.downloading`, then renames into place on success.
 * Tracks progress in `currentDownload` for the UI to poll.
 *
 * @param model - Model name; must be a key of WHISPER_MODEL_FILES.
 * @returns ok/message result; never rejects (failures are caught and reported).
 */
async function downloadModel(model: string): Promise<{ ok: boolean; message: string }> {
  const filename = WHISPER_MODEL_FILES[model];
  if (!filename) return { ok: false, message: `Unknown model: ${model}` };
  const destPath = modelPath(model);
  const tmpPath = destPath + '.downloading';
  // Ensure directory exists
  mkdirSync(config.whisperModelDir, { recursive: true });
  // FIX: the URL previously interpolated a broken placeholder instead of the
  // model filename, so every download request would 404.
  const url = `${WHISPER_MODELS_BASE_URL}/${filename}`;
  console.log(`[whisper-local] Downloading model: ${model} from ${url}`);
  currentDownload = { model, downloading: true, bytesDownloaded: 0, totalBytes: 0, percent: 0 };
  try {
    const response = await fetch(url, { redirect: 'follow' });
    if (!response.ok || !response.body) {
      currentDownload = { ...currentDownload, downloading: false, error: `HTTP ${response.status}` };
      return { ok: false, message: `Download failed: HTTP ${response.status}` };
    }
    const totalBytes = Number(response.headers.get('content-length') || 0);
    currentDownload.totalBytes = totalBytes;
    const writer = createWriteStream(tmpPath);
    let bytesDownloaded = 0;
    const reader = response.body.getReader();
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      // NOTE(review): write() backpressure is not awaited — acceptable for
      // model-sized files on local disks; consider awaiting 'drain' if memory
      // usage becomes a concern.
      writer.write(Buffer.from(value));
      bytesDownloaded += value.byteLength;
      currentDownload.bytesDownloaded = bytesDownloaded;
      currentDownload.percent = totalBytes > 0 ? Math.round((bytesDownloaded / totalBytes) * 100) : 0;
    }
    writer.end();
    // Wait for file to finish writing
    await new Promise<void>((resolve, reject) => {
      writer.on('finish', resolve);
      writer.on('error', reject);
    });
    // Rename tmp → final (atomic-ish)
    const { renameSync } = await import('node:fs');
    renameSync(tmpPath, destPath);
    currentDownload = { ...currentDownload, downloading: false, percent: 100 };
    console.log(`[whisper-local] Model downloaded: ${model} (${(bytesDownloaded / 1024 / 1024).toFixed(0)}MB)`);
    // Clear download state after a moment so UI can see completion
    setTimeout(() => { currentDownload = null; }, 3000);
    return { ok: true, message: `Downloaded ${model}` };
  } catch (err) {
    const msg = (err as Error).message;
    console.error(`[whisper-local] Download failed:`, msg);
    currentDownload = { ...currentDownload, downloading: false, error: msg };
    // Clean up partial file
    try { unlinkSync(tmpPath); } catch { /* ignore */ }
    // Clear error state after a moment
    setTimeout(() => { currentDownload = null; }, 5000);
    return { ok: false, message: `Download failed: ${msg}` };
  }
}
/**
 * Switch to a different Whisper model at runtime.
 * If the model isn't downloaded, triggers a background download and
 * auto-switches once it completes.
 *
 * @param model - Model name; must be a key of WHISPER_MODEL_FILES.
 * @returns ok/message, plus `downloading: true` when a download was started or is in progress.
 */
export async function setWhisperModel(model: string): Promise<{ ok: boolean; message: string; downloading?: boolean }> {
  if (!WHISPER_MODEL_FILES[model]) {
    return { ok: false, message: `Unknown model: ${model}. Available: ${Object.keys(WHISPER_MODEL_FILES).join(', ')}` };
  }
  // If model not available, start downloading
  if (!isModelAvailable(model)) {
    if (currentDownload?.downloading) {
      return { ok: true, message: `Already downloading ${currentDownload.model}`, downloading: true };
    }
    // Fire and forget — download runs in background. FIX: the chain now has a
    // .catch; previously a rejection from releaseWhisperContext() would have
    // surfaced as an unhandled promise rejection.
    void downloadModel(model)
      .then(async (result) => {
        if (result.ok) {
          // Auto-switch after successful download
          await releaseWhisperContext();
          activeModel = model;
          console.log(`[whisper-local] Switched to downloaded model: ${model}`);
        }
      })
      .catch((err) => {
        console.error('[whisper-local] Background model switch failed:', (err as Error).message);
      });
    return { ok: true, message: `Downloading ${model}...`, downloading: true };
  }
  if (model === activeModel && whisperContext) {
    return { ok: true, message: `Already using ${model}` };
  }
  // Release current context — next request will init with new model
  await releaseWhisperContext();
  activeModel = model;
  console.log(`[whisper-local] Switched to model: ${model}`);
  return { ok: true, message: `Switched to ${model}` };
}
// ── System info ──────────────────────────────────────────────────────────────
/** Memoised GPU-detection result; null until the first probe runs. */
let gpuDetected: boolean | null = null;
/** Check if a GPU is available by looking at Vulkan/Metal/CUDA device presence. */
function detectGpu(): boolean {
  if (gpuDetected !== null) return gpuDetected;
  // Each probe either returns a verdict or throws (command missing / non-zero exit).
  const probes: Array<() => boolean> = [
    () => { execSync('nvidia-smi', { stdio: 'pipe', timeout: 3000 }); return true; }, // CUDA
    () => { execSync('vulkaninfo --summary', { stdio: 'pipe', timeout: 3000 }); return true; }, // Vulkan
    // macOS Metal is always available on Apple Silicon
    () => process.platform === 'darwin' && process.arch === 'arm64',
  ];
  for (const probe of probes) {
    try {
      if (probe()) {
        gpuDetected = true;
        return true;
      }
    } catch {
      /* probe unavailable — try the next one */
    }
  }
  gpuDetected = false;
  return false;
}
/** Return system info relevant to STT performance (GPU presence, CPU count). */
export function getSystemInfo(): { hasGpu: boolean; cpuCount: number } {
  const hasGpu = detectGpu();
  const cpuCount = cpus().length;
  return { hasGpu, cpuCount };
}
// ── ffmpeg ────────────────────────────────────────────────────────────────────
/** Memoised ffmpeg availability; null until first checked. */
let ffmpegAvailable: boolean | null = null;
/** Check if ffmpeg is available (cached after first call). */
function hasFfmpeg(): boolean {
  if (ffmpegAvailable === null) {
    try {
      execFileSync('ffmpeg', ['-version'], { stdio: 'pipe', timeout: 5000 });
      ffmpegAvailable = true;
    } catch {
      ffmpegAvailable = false;
    }
  }
  return ffmpegAvailable;
}
/**
 * Convert audio to 16kHz mono WAV using ffmpeg.
 * Whisper requires 16 kHz mono 16-bit PCM input.
 * @throws if ffmpeg exits non-zero or exceeds the 30 s timeout.
 */
function convertToWav(inputPath: string, outputPath: string): void {
  execFileSync('ffmpeg', [
    '-i', inputPath,
    '-ar', '16000', // 16kHz sample rate (required by Whisper)
    '-ac', '1', // mono
    '-sample_fmt', 's16', // 16-bit signed PCM
    '-f', 'wav',
    '-y', // overwrite output
    outputPath,
  ], { stdio: 'pipe', timeout: 30_000 });
}
// ── Main transcription function ──────────────────────────────────────────────
/**
 * Transcribe audio using the local Whisper model.
 *
 * Flow:
 * 1. Save uploaded buffer to temp file
 * 2. ffmpeg — convert to 16kHz mono WAV
 * 3. whisper context.transcribeFile() → TranscribeResult
 * 4. Extract result.result (full text string)
 * 5. Clean up temp files
 *
 * @param fileData - Raw uploaded audio bytes (any container ffmpeg understands).
 * @param filename - Original upload filename; its extension helps ffmpeg sniff the format.
 */
export async function transcribeLocal(
  fileData: Buffer,
  filename: string,
): Promise<TranscribeLocalResult | TranscribeLocalError> {
  // Pre-flight checks
  if (!isModelAvailable()) {
    return {
      ok: false,
      status: 500,
      message: `Speech model not found at ${modelPath()}. Re-run the installer or set STT_PROVIDER=openai with an OPENAI_API_KEY.`,
    };
  }
  if (!hasFfmpeg()) {
    return {
      ok: false,
      status: 500,
      message: 'ffmpeg not found. Install it: apt install ffmpeg (Linux) or brew install ffmpeg (macOS)',
    };
  }
  const id = randomUUID().slice(0, 8);
  // FIX: the temp name previously interpolated a broken placeholder instead of
  // the upload filename. Sanitise it so path separators or shell-odd characters
  // in a user-supplied name can't escape the tmp directory.
  const safeName = filename.replace(/[^\w.-]/g, '_');
  const inputTmp = join(tmpdir(), `nerve-stt-in-${id}-${safeName}`);
  const wavTmp = join(tmpdir(), `nerve-stt-${id}.wav`);
  try {
    // 1. Save uploaded audio to temp file
    writeFileSync(inputTmp, fileData);
    // 2. Convert to 16kHz mono WAV
    try {
      convertToWav(inputTmp, wavTmp);
    } catch (err) {
      console.error('[whisper-local] ffmpeg conversion failed:', (err as Error).message);
      return { ok: false, status: 500, message: 'Audio format conversion failed' };
    }
    // 3. Get/init the singleton context and transcribe
    const ctx = await getContext();
    const { promise } = ctx.transcribeFile(wavTmp, {
      language: 'en',
      temperature: 0.0,
    });
    const result = await promise;
    // 4. Extract text
    if (result.isAborted) {
      return { ok: false, status: 500, message: 'Transcription was aborted' };
    }
    const text = result.result?.trim() || '';
    if (!text) {
      return { ok: false, status: 500, message: 'Transcription returned empty result' };
    }
    return { ok: true, text };
  } catch (err) {
    console.error('[whisper-local] Transcription failed:', (err as Error).message);
    return { ok: false, status: 500, message: 'Local transcription failed: ' + (err as Error).message };
  } finally {
    // 5. Always clean up temp files
    try { unlinkSync(inputTmp); } catch { /* ignore */ }
    try { unlinkSync(wavTmp); } catch { /* ignore */ }
  }
}

40
server/types.ts Normal file
View file

@ -0,0 +1,40 @@
/**
* Shared types for the Nerve server.
*
* Centralised type definitions used across routes, services, and lib modules.
* @module
*/
/** A single entry in the in-memory TTS audio cache. */
export interface TtsCacheEntry {
  /** Raw audio buffer (MP3 or WAV, depending on the TTS provider). */
  buf: Buffer;
  /** Epoch ms when the entry was first cached. */
  createdAt: number;
  /** Epoch ms of the most recent cache hit (used for LRU eviction). */
  lastAccess: number;
}
/** A single entry in the agent activity log (persisted to agent-log.json). */
export interface AgentLogEntry {
  /** Epoch ms timestamp of the log entry. */
  ts: number;
  /** Arbitrary additional fields — shape varies by log event type. */
  [key: string]: unknown;
}
/**
 * Parsed memory item returned by GET /api/memories.
 *
 * - `section` — a `## Heading` from MEMORY.md
 * - `item` — a bullet point under a section in MEMORY.md
 * - `daily` — a `## Heading` from a daily file (memory/YYYY-MM-DD.md)
 */
export interface MemoryItem {
  /** Discriminator for the memory source. */
  type: 'section' | 'item' | 'daily';
  /** The text content (section title or bullet text, markdown stripped). */
  text: string;
  /** Date string (YYYY-MM-DD) — present only for `daily` items. */
  date?: string;
}

445
src/App.tsx Normal file
View file

@ -0,0 +1,445 @@
/**
* App.tsx - Main application layout component
*
* This component focuses on layout and composition.
* Connection management is handled by useConnectionManager.
* Dashboard data fetching is handled by useDashboardData.
*/
import { useState, useEffect, useRef, useCallback, useMemo, lazy, Suspense } from 'react';
import { useGateway, loadConfig } from '@/contexts/GatewayContext';
import { useSessionContext } from '@/contexts/SessionContext';
import { useChat } from '@/contexts/ChatContext';
import { useSettings } from '@/contexts/SettingsContext';
import { getSessionKey } from '@/types';
import { useConnectionManager } from '@/hooks/useConnectionManager';
import { useDashboardData } from '@/hooks/useDashboardData';
import { ConnectDialog } from '@/features/connect/ConnectDialog';
import { TopBar } from '@/components/TopBar';
import { StatusBar } from '@/components/StatusBar';
import { ConfirmDialog } from '@/components/ConfirmDialog';
import { ChatPanel, type ChatPanelHandle } from '@/features/chat/ChatPanel';
import type { TTSProvider } from '@/features/tts/useTTS';
import { ResizablePanels } from '@/components/ResizablePanels';
import { getContextLimit, DEFAULT_GATEWAY_WS } from '@/lib/constants';
import { useKeyboardShortcuts } from '@/hooks/useKeyboardShortcuts';
import { createCommands } from '@/features/command-palette/commands';
import { PanelErrorBoundary } from '@/components/PanelErrorBoundary';
import { SpawnAgentDialog } from '@/features/sessions/SpawnAgentDialog';
import { FileTreePanel, TabbedContentArea, useOpenFiles } from '@/features/file-browser';
// Lazy-loaded features (not needed in initial bundle).
// React.lazy requires a default export, so named exports are re-wrapped via .then().
const SettingsDrawer = lazy(() => import('@/features/settings/SettingsDrawer').then(m => ({ default: m.SettingsDrawer })));
const CommandPalette = lazy(() => import('@/features/command-palette/CommandPalette').then(m => ({ default: m.CommandPalette })));
// Lazy-loaded side panels
const SessionList = lazy(() => import('@/features/sessions/SessionList').then(m => ({ default: m.SessionList })));
const WorkspacePanel = lazy(() => import('@/features/workspace/WorkspacePanel').then(m => ({ default: m.WorkspacePanel })));
export default function App() {
// Gateway state
const {
connectionState, connectError, reconnectAttempt, model, sparkline,
} = useGateway();
// Session state
const {
sessions, sessionsLoading, currentSession, setCurrentSession,
busyState, agentStatus, unreadSessions, refreshSessions, deleteSession, abortSession, spawnAgent, renameSession,
agentLogEntries, eventEntries,
agentName,
} = useSessionContext();
// Chat state
const {
messages, isGenerating, streamingHtml, processingStage,
lastEventTimestamp, activityLog, currentToolDescription,
handleSend, handleAbort, handleReset, loadHistory,
loadMore, hasMore,
showResetConfirm, confirmReset, cancelReset,
} = useChat();
// Settings state
const {
soundEnabled, toggleSound,
ttsProvider, ttsModel, setTtsProvider, setTtsModel,
sttProvider, setSttProvider, sttModel, setSttModel,
wakeWordEnabled, handleToggleWakeWord, handleWakeWordState,
panelRatio, setPanelRatio,
eventsVisible,
toggleEvents, toggleTelemetry,
setTheme, setFont,
} = useSettings();
// Connection management (extracted hook)
const {
dialogOpen,
editableUrl, setEditableUrl,
editableToken, setEditableToken,
handleConnect, handleReconnect,
} = useConnectionManager();
// Track last changed file path for tree refresh
const [lastChangedPath, setLastChangedPath] = useState<string | null>(null);
// File browser state
const {
openFiles, activeTab, setActiveTab,
openFile, closeFile, updateContent, saveFile, reloadFile, initializeFiles,
handleFileChanged,
} = useOpenFiles();
// Save with conflict toast
const [saveToast, setSaveToast] = useState<{ path: string; type: 'conflict' | 'error' } | null>(null);
const handleSaveFile = useCallback(async (filePath: string) => {
const result = await saveFile(filePath);
if (!result.ok) {
if (result.conflict) {
setSaveToast({ path: filePath, type: 'conflict' });
// Auto-dismiss after 5s
setTimeout(() => setSaveToast(null), 5000);
}
} else {
setSaveToast(null);
}
}, [saveFile]);
// Single file.changed handler — feeds both open files and tree refresh
const onFileChanged = useCallback((path: string) => {
handleFileChanged(path);
setLastChangedPath(path);
}, [handleFileChanged]);
// Dashboard data (extracted hook) — single SSE connection handles all events
const { memories, memoriesLoading, tokenData, refreshMemories } = useDashboardData({ onFileChanged });
// UI state
const [settingsOpen, setSettingsOpen] = useState(false);
const [booted, setBooted] = useState(false);
const [logGlow, setLogGlow] = useState(false);
const prevLogCount = useRef(0);
const chatPanelRef = useRef<ChatPanelHandle>(null);
// Command palette state
const [paletteOpen, setPaletteOpen] = useState(false);
const [searchOpen, setSearchOpen] = useState(false);
const [spawnDialogOpen, setSpawnDialogOpen] = useState(false);
// Build command list with stable references
const openSettings = useCallback(() => setSettingsOpen(true), []);
const openSearch = useCallback(() => setSearchOpen(true), []);
const closeSettings = useCallback(() => setSettingsOpen(false), []);
const closeSearch = useCallback(() => setSearchOpen(false), []);
const closePalette = useCallback(() => setPaletteOpen(false), []);
const openSpawnDialog = useCallback(() => setSpawnDialogOpen(true), []);
const commands = useMemo(() => createCommands({
onNewSession: openSpawnDialog,
onResetSession: handleReset,
onToggleSound: toggleSound,
onSettings: openSettings,
onSearch: openSearch,
onAbort: handleAbort,
onSetTheme: setTheme,
onSetFont: setFont,
onTtsProviderChange: setTtsProvider,
onToggleWakeWord: handleToggleWakeWord,
onToggleEvents: toggleEvents,
onToggleTelemetry: toggleTelemetry,
onOpenSettings: openSettings,
onRefreshSessions: refreshSessions,
onRefreshMemory: refreshMemories,
}), [openSpawnDialog, handleReset, toggleSound, handleAbort, openSettings, openSearch,
setTheme, setFont, setTtsProvider, handleToggleWakeWord, toggleEvents, toggleTelemetry,
refreshSessions, refreshMemories]);
// Keyboard shortcut handlers with useCallback
const handleOpenPalette = useCallback(() => setPaletteOpen(true), []);
const handleCtrlC = useCallback(() => {
if (isGenerating) {
handleAbort();
}
}, [isGenerating, handleAbort]);
const toggleSearch = useCallback(() => setSearchOpen(prev => !prev), []);
const handleEscape = useCallback(() => {
if (paletteOpen) {
setPaletteOpen(false);
} else if (searchOpen) {
setSearchOpen(false);
} else if (isGenerating) {
handleAbort();
}
}, [paletteOpen, searchOpen, isGenerating, handleAbort]);
// Global keyboard shortcuts
useKeyboardShortcuts([
{ key: 'k', meta: true, handler: handleOpenPalette },
{ key: 'f', meta: true, handler: toggleSearch, skipInEditor: true }, // Cmd+F → chat search (yields to CodeMirror search in editor)
{ key: 'c', ctrl: true, handler: handleCtrlC, preventDefault: false }, // Ctrl+C → abort (when generating), allow copy to still work
{ key: 'Escape', handler: handleEscape, skipInEditor: true },
]);
// Get current session's context usage for StatusBar
const currentSessionData = useMemo(() => {
return sessions.find(s => getSessionKey(s) === currentSession);
}, [sessions, currentSession]);
// Get display name for current session (agent name for main, label for subagents)
const currentSessionDisplayName = useMemo(() => {
if (currentSession === 'agent:main:main') return agentName;
return currentSessionData?.label || agentName;
}, [currentSession, currentSessionData, agentName]);
const contextTokens = currentSessionData?.totalTokens ?? 0;
const contextLimit = currentSessionData?.contextTokens || getContextLimit(model);
// Restore previously open file tabs
useEffect(() => {
if (connectionState === 'connected') {
initializeFiles();
}
}, [connectionState, initializeFiles]);
// Boot sequence: fade in panels when connected
useEffect(() => {
if (connectionState === 'connected' && !booted) {
const timer = setTimeout(() => setBooted(true), 50);
return () => clearTimeout(timer);
}
}, [connectionState, booted]);
// Log header glow when new entries arrive
// This effect legitimately needs to set state in response to prop changes
// (visual feedback for new log entries)
useEffect(() => {
const currentCount = agentLogEntries.length;
if (currentCount > prevLogCount.current) {
// eslint-disable-next-line react-hooks/set-state-in-effect -- valid: UI feedback for external change
setLogGlow(true);
const timer = setTimeout(() => setLogGlow(false), 500);
prevLogCount.current = currentCount;
return () => clearTimeout(timer);
}
prevLogCount.current = currentCount;
}, [agentLogEntries.length]);
// Handler for session changes
const handleSessionChange = useCallback(async (key: string) => {
setCurrentSession(key);
await loadHistory(key);
}, [setCurrentSession, loadHistory]);
// Handlers for TTS provider/model changes
const handleTtsProviderChange = useCallback((provider: TTSProvider) => {
setTtsProvider(provider);
}, [setTtsProvider]);
const handleTtsModelChange = useCallback((model: string) => {
setTtsModel(model);
}, [setTtsModel]);
const handleSttProviderChange = useCallback((provider: 'local' | 'openai') => {
setSttProvider(provider);
}, [setSttProvider]);
const handleSttModelChange = useCallback((model: string) => {
setSttModel(model);
}, [setSttModel]);
const savedConfig = useMemo(() => loadConfig(), []);
const defaultUrl = savedConfig.url || DEFAULT_GATEWAY_WS;
return (
<div className="h-screen flex flex-col overflow-hidden scan-lines" data-booted={booted}>
{/* Skip to main content link for keyboard navigation */}
<a
href="#main-chat"
className="sr-only focus:not-sr-only focus:absolute focus:top-2 focus:left-2 focus:z-[100] focus:px-4 focus:py-2 focus:bg-primary focus:text-primary-foreground focus:font-bold focus:text-sm"
>
Skip to chat
</a>
<ConnectDialog
open={dialogOpen && connectionState !== 'connected' && connectionState !== 'reconnecting'}
onConnect={handleConnect}
error={connectError}
defaultUrl={defaultUrl}
defaultToken={editableToken}
/>
{/* Reconnecting banner — mission control style */}
{connectionState === 'reconnecting' && (
<div className="fixed top-12 left-1/2 -translate-x-1/2 z-50 bg-gradient-to-r from-red-900/90 to-orange-900/90 text-red-200 px-5 py-2 rounded-sm text-[11px] font-mono flex items-center gap-2 shadow-lg border border-red-700/60 uppercase tracking-wider">
<span className="text-red-400"></span>
<span>SIGNAL LOST</span>
<span className="text-red-600">·</span>
<span>RECONNECTING{reconnectAttempt > 1 ? ` (ATTEMPT ${reconnectAttempt})` : ''}</span>
<span className="w-2 h-2 bg-red-400 rounded-full animate-pulse" />
</div>
)}
<TopBar
onSettings={openSettings}
agentLogEntries={agentLogEntries}
tokenData={tokenData}
logGlow={logGlow}
eventEntries={eventEntries}
eventsVisible={eventsVisible}
/>
<PanelErrorBoundary name="Settings">
<Suspense fallback={null}>
<SettingsDrawer
open={settingsOpen}
onClose={closeSettings}
gatewayUrl={editableUrl}
gatewayToken={editableToken}
onUrlChange={setEditableUrl}
onTokenChange={setEditableToken}
onReconnect={handleReconnect}
connectionState={connectionState}
soundEnabled={soundEnabled}
onToggleSound={toggleSound}
ttsProvider={ttsProvider}
ttsModel={ttsModel}
onTtsProviderChange={handleTtsProviderChange}
onTtsModelChange={handleTtsModelChange}
sttProvider={sttProvider}
sttModel={sttModel}
onSttProviderChange={handleSttProviderChange}
onSttModelChange={handleSttModelChange}
wakeWordEnabled={wakeWordEnabled}
onToggleWakeWord={handleToggleWakeWord}
agentName={agentName}
/>
</Suspense>
</PanelErrorBoundary>
<div className="flex-1 flex overflow-hidden min-h-0">
{/* File tree — far left, collapsible */}
<PanelErrorBoundary name="File Explorer">
<FileTreePanel onOpenFile={openFile} lastChangedPath={lastChangedPath} />
</PanelErrorBoundary>
{/* Main resizable area */}
<ResizablePanels
leftPercent={panelRatio}
onResize={setPanelRatio}
minLeftPercent={30}
maxLeftPercent={75}
leftClassName="boot-panel"
rightClassName="boot-panel flex flex-col gap-px bg-border"
left={
<TabbedContentArea
activeTab={activeTab}
openFiles={openFiles}
onSelectTab={setActiveTab}
onCloseTab={closeFile}
onContentChange={updateContent}
onSaveFile={handleSaveFile}
saveToast={saveToast}
onDismissToast={() => setSaveToast(null)}
onReloadFile={reloadFile}
onRetryFile={reloadFile}
chatPanel={
<PanelErrorBoundary name="Chat">
<ChatPanel
ref={chatPanelRef}
id="main-chat"
messages={messages}
onSend={handleSend}
onAbort={handleAbort}
isGenerating={isGenerating}
streamingHtml={streamingHtml}
processingStage={processingStage}
lastEventTimestamp={lastEventTimestamp}
currentToolDescription={currentToolDescription}
activityLog={activityLog}
onWakeWordState={handleWakeWordState}
onReset={handleReset}
searchOpen={searchOpen}
onSearchClose={closeSearch}
agentName={currentSessionDisplayName}
loadMore={loadMore}
hasMore={hasMore}
/>
</PanelErrorBoundary>
}
/>
}
right={
<Suspense fallback={<div className="flex-1 flex items-center justify-center text-muted-foreground text-xs bg-background">Loading</div>}>
{/* Sessions + Memory stacked vertically */}
<div className="flex-1 flex flex-col gap-px min-h-0">
<div className="flex-1 flex flex-col min-h-0 overflow-hidden bg-background">
<PanelErrorBoundary name="Sessions">
<SessionList
sessions={sessions}
currentSession={currentSession}
busyState={busyState}
agentStatus={agentStatus}
unreadSessions={unreadSessions}
onSelect={handleSessionChange}
onRefresh={refreshSessions}
onDelete={deleteSession}
onSpawn={spawnAgent}
onRename={renameSession}
onAbort={abortSession}
isLoading={sessionsLoading}
agentName={agentName}
/>
</PanelErrorBoundary>
</div>
<div className="flex-1 flex flex-col min-h-0 overflow-hidden bg-background">
<PanelErrorBoundary name="Workspace">
<WorkspacePanel memories={memories} onRefreshMemories={refreshMemories} memoriesLoading={memoriesLoading} />
</PanelErrorBoundary>
</div>
</div>
</Suspense>
}
/>
</div>
{/* Status Bar */}
<div className="boot-panel" style={{ transitionDelay: '200ms' }}>
<StatusBar
connectionState={connectionState}
sessionCount={sessions.length}
sparkline={sparkline}
contextTokens={contextTokens}
contextLimit={contextLimit}
/>
</div>
{/* Command Palette */}
<PanelErrorBoundary name="Command Palette">
<Suspense fallback={null}>
<CommandPalette
open={paletteOpen}
onClose={closePalette}
commands={commands}
/>
</Suspense>
</PanelErrorBoundary>
{/* Reset Session Confirmation */}
<ConfirmDialog
open={showResetConfirm}
title="Reset Session"
message="This will start fresh and clear all context."
confirmLabel="Reset"
cancelLabel="Cancel"
onConfirm={confirmReset}
onCancel={cancelReset}
variant="danger"
/>
{/* Spawn Agent Dialog (from command palette) */}
<SpawnAgentDialog
open={spawnDialogOpen}
onOpenChange={setSpawnDialogOpen}
onSpawn={spawnAgent}
/>
</div>
);
}

View file

@ -0,0 +1,118 @@
import { useEffect, useCallback, useRef } from 'react';
import { AlertTriangle } from 'lucide-react';
/** Props for {@link ConfirmDialog}. */
interface ConfirmDialogProps {
  /** Whether the dialog is visible; when false the component renders nothing. */
  open: boolean;
  /** Dialog heading text. */
  title: string;
  /** Descriptive body text shown below the title. */
  message: string;
  /** Label for the confirm button. @default "Confirm" */
  confirmLabel?: string;
  /** Label for the cancel button. @default "Cancel" */
  cancelLabel?: string;
  /** Called when the user confirms the action. */
  onConfirm: () => void;
  /** Called when the user cancels (button click, Escape, or backdrop click). */
  onCancel: () => void;
  /**
   * Visual style — `danger` shows a red confirm button and red warning icon;
   * `warning` shows an orange warning icon with the default confirm button.
   * @default "default"
   */
  variant?: 'danger' | 'warning' | 'default';
}
/**
 * Modal confirmation dialog with keyboard support (Enter to confirm, Escape to
 * cancel) and a focus-trapped overlay. Used for destructive or important
 * actions in the Nerve UI.
 *
 * Focus moves to the Cancel button on open and is restored to the previously
 * focused element on close. Enter confirms only when focus is NOT on a button;
 * a focused button keeps its native Enter activation, so pressing Enter while
 * the Cancel button is focused cancels rather than confirms.
 */
export function ConfirmDialog({
  open,
  title,
  message,
  confirmLabel = 'Confirm',
  cancelLabel = 'Cancel',
  onConfirm,
  onCancel,
  variant = 'default',
}: ConfirmDialogProps) {
  const dialogRef = useRef<HTMLDivElement>(null);
  const cancelButtonRef = useRef<HTMLButtonElement>(null);
  // Element that had focus before the dialog opened; restored on close.
  const previousFocusRef = useRef<HTMLElement | null>(null);

  const handleKeyDown = useCallback((e: KeyboardEvent) => {
    if (e.key === 'Escape') {
      onCancel();
      return;
    }
    if (e.key !== 'Enter') return;
    const active = document.activeElement;
    // Only react to Enter while focus is inside the dialog.
    if (!dialogRef.current?.contains(active)) return;
    // BUGFIX: let a focused button fire its own click on Enter. Previously
    // preventDefault() + onConfirm() ran unconditionally, so pressing Enter
    // while the Cancel button was focused (the initial focus target) would
    // confirm the — possibly destructive — action instead of cancelling it.
    if (active instanceof HTMLButtonElement) return;
    e.preventDefault();
    onConfirm();
  }, [onConfirm, onCancel]);

  // Rudimentary focus trap: keep Tab / Shift+Tab cycling within the dialog.
  const handleTabKey = useCallback((e: KeyboardEvent) => {
    if (e.key !== 'Tab' || !dialogRef.current) return;
    const focusableElements = dialogRef.current.querySelectorAll<HTMLElement>(
      'button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])'
    );
    const firstElement = focusableElements[0];
    const lastElement = focusableElements[focusableElements.length - 1];
    if (e.shiftKey && document.activeElement === firstElement) {
      e.preventDefault();
      lastElement?.focus();
    } else if (!e.shiftKey && document.activeElement === lastElement) {
      e.preventDefault();
      firstElement?.focus();
    }
  }, []);

  // Wire document-level key handling while open; focus Cancel on open and
  // restore the previously focused element when the dialog closes/unmounts.
  useEffect(() => {
    if (open) {
      previousFocusRef.current = document.activeElement as HTMLElement;
      document.addEventListener('keydown', handleKeyDown);
      document.addEventListener('keydown', handleTabKey);
      cancelButtonRef.current?.focus();
      return () => {
        document.removeEventListener('keydown', handleKeyDown);
        document.removeEventListener('keydown', handleTabKey);
        previousFocusRef.current?.focus();
      };
    }
  }, [open, handleKeyDown, handleTabKey]);

  if (!open) return null;

  const confirmClass = variant === 'danger'
    ? 'bg-red text-white hover:bg-red/90'
    : 'bg-primary text-primary-foreground hover:opacity-90';

  return (
    <>
      <div className="fixed inset-0 bg-black/60 z-50" role="presentation" onClick={onCancel} />
      <div
        ref={dialogRef}
        role="alertdialog"
        aria-modal="true"
        aria-labelledby="confirm-dialog-title"
        aria-describedby="confirm-dialog-message"
        className="fixed top-1/2 left-1/2 -translate-x-1/2 -translate-y-1/2 bg-card border border-border p-6 z-50 min-w-[300px] max-w-md"
      >
        <div className="flex items-start gap-3 mb-4">
          {variant !== 'default' && <AlertTriangle className={variant === 'danger' ? 'text-red' : 'text-orange'} size={20} aria-hidden="true" />}
          <div>
            <h3 id="confirm-dialog-title" className="text-sm font-bold text-foreground mb-1">{title}</h3>
            <p id="confirm-dialog-message" className="text-xs text-muted-foreground">{message}</p>
          </div>
        </div>
        <div className="flex justify-end gap-2">
          <button ref={cancelButtonRef} onClick={onCancel} className="px-3 py-1.5 text-xs text-muted-foreground hover:text-foreground border border-border hover:border-muted-foreground">
            {cancelLabel}
          </button>
          <button onClick={onConfirm} className={`px-3 py-1.5 text-xs font-bold ${confirmClass}`}>
            {confirmLabel}
          </button>
        </div>
      </div>
    </>
  );
}

View file

@ -0,0 +1,112 @@
import { useRef, useEffect, useState } from 'react';
import { AlertTriangle } from 'lucide-react';
import { fmtK } from '@/lib/formatting';
import { AnimatedNumber } from '@/components/ui/AnimatedNumber';
import { CONTEXT_WARNING_THRESHOLD, CONTEXT_CRITICAL_THRESHOLD } from '@/lib/constants';
import { PROGRESS_BAR_TRANSITION } from '@/lib/progress-colors';
// Pre-defined color configs to avoid object creation during render.
// Each entry holds Tailwind classes for the bar/text plus rgba glow colors:
// `glow` is the steady box-shadow, `growGlow` the brighter one shown while
// usage is actively increasing.
const COLOR_CRITICAL = {
  bar: 'bg-red',
  glow: 'rgba(231, 76, 60, 0.4)',
  growGlow: 'rgba(231, 76, 60, 0.6)',
  text: 'text-red',
} as const;
const COLOR_WARNING = {
  bar: 'bg-orange',
  glow: 'rgba(232, 168, 56, 0.4)',
  growGlow: 'rgba(232, 168, 56, 0.6)',
  text: 'text-orange',
} as const;
const COLOR_NORMAL = {
  bar: 'bg-green',
  glow: 'rgba(76, 175, 80, 0.3)',
  growGlow: 'rgba(76, 175, 80, 0.5)',
  text: 'text-muted-foreground',
} as const;
/** Props for {@link ContextMeter}. */
interface ContextMeterProps {
  /** Number of context tokens consumed so far. */
  used: number;
  /** Maximum context window size in tokens (expected to be > 0). */
  limit: number;
}
/**
 * Compact progress bar showing context-window token usage.
 *
 * Transitions through green → orange → red as usage crosses the warning and
 * critical thresholds, and includes an animated token counter and glow
 * effects. Displayed in the {@link StatusBar}.
 */
export function ContextMeter({ used, limit }: ContextMeterProps) {
  // Guard against a zero/negative limit (e.g. before session metadata loads):
  // `used / 0` would yield Infinity/NaN and render `width: NaN%` plus a
  // "NaN%" tooltip. Treat an unusable limit as 0% usage.
  const percent = limit > 0 ? Math.min(100, (used / limit) * 100) : 0;
  const [isGrowing, setIsGrowing] = useState(false);
  const prevPercentRef = useRef(percent);

  // Track whether usage increased since the previous value so the bar can
  // glow brighter while growing; the flag persists until `percent` next
  // changes (there is no timed reset).
  useEffect(() => {
    setIsGrowing(percent > prevPercentRef.current);
    prevPercentRef.current = percent;
  }, [percent]);

  const isWarning = percent >= CONTEXT_WARNING_THRESHOLD;
  const isCritical = percent >= CONTEXT_CRITICAL_THRESHOLD;
  const colors = isCritical ? COLOR_CRITICAL : isWarning ? COLOR_WARNING : COLOR_NORMAL;

  // Enhanced glow when growing
  const boxShadow = isGrowing
    ? `0 0 8px ${colors.growGlow}, 0 0 4px ${colors.glow}`
    : `0 0 4px ${colors.glow}`;

  const tooltipText = `Context: ${fmtK(used)} / ${fmtK(limit)} tokens (${percent.toFixed(0)}%)${
    isCritical
      ? ' — CRITICAL: Consider starting a new session'
      : isWarning
        ? ' — Warning: Approaching context limit'
        : ''
  }`;

  return (
    <div
      className="flex items-center gap-1.5 cursor-default"
      title={tooltipText}
    >
      {/* Warning icon - only show when warning/critical */}
      {(isWarning || isCritical) && (
        <AlertTriangle
          size={10}
          className={`${colors.text} ${isCritical ? 'animate-pulse' : ''}`}
        />
      )}
      {/* Progress bar with smooth width and color transitions */}
      <div className="w-12 h-1.5 bg-background border border-border/60 overflow-hidden">
        <div
          className={`h-full ${colors.bar}`}
          style={{
            width: `${percent}%`,
            boxShadow,
            transition: PROGRESS_BAR_TRANSITION,
          }}
        />
      </div>
      {/* Animated token count */}
      <AnimatedNumber
        value={used}
        format={fmtK}
        className={`text-[9px] ${colors.text}`}
        duration={700}
      />
      {/* Label - changes based on state */}
      <span className={`text-[8px] uppercase tracking-wider ${colors.text}`}>
        CTX
      </span>
    </div>
  );
}

Some files were not shown because too many files have changed in this diff Show more