Mirror of https://github.com/daggerhashimoto/openclaw-nerve (synced 2026-04-21 18:47:16 +00:00)
Compare commits
135 commits
295 changed files with 38292 additions and 3956 deletions
@@ -23,6 +23,7 @@ AGENT_NAME=Agent
GATEWAY_TOKEN=
# OPENCLAW_GATEWAY_TOKEN= # Alternative name for GATEWAY_TOKEN (checked as fallback)
GATEWAY_URL=http://127.0.0.1:18789
# NERVE_PUBLIC_ORIGIN=https://your-nerve.example.com # Explicit browser origin for remote-workspace gateway RPC fallback

# ─── Authentication ──────────────────────────────────────────────────────────
# Enable to require a password for all API/WebSocket access.

@@ -42,10 +43,12 @@ GATEWAY_URL=http://127.0.0.1:18789
# MEMORY_DIR=~/.openclaw/workspace/memory
# SESSIONS_DIR=~/.openclaw/agents/main/sessions
# USAGE_FILE=~/.openclaw/token-usage.json
# NERVE_WATCH_WORKSPACE_RECURSIVE=false # Disable full-workspace live file watching (default: enabled)

# ─── API Keys (optional — Edge TTS is always available as a free fallback) ───
OPENAI_API_KEY=
REPLICATE_API_TOKEN=
MIMO_API_KEY= # Xiaomi MiMo TTS, used only when Xiaomi is selected explicitly

# ─── API Base URLs (optional — override for proxies or self-hosted) ──────────
# OPENAI_BASE_URL=https://api.openai.com/v1
4 .gitignore (vendored)

@@ -45,7 +45,9 @@ src/test/*.js
# Local review artifacts
wave-reviews/

# Kanban runtime data
# Kanban runtime data (legacy in-repo paths)
server/data/kanban/tasks.json
server/data/kanban/audit.log
server-dist/data/kanban/tasks.json
server-dist/data/kanban/audit.log
node_modules
92 CHANGELOG.md

@@ -6,6 +6,98 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

## [Unreleased]

## [1.5.2] - 2026-03-30

### Highlights

**Kanban execution now matches the real session tree.** Assigned tasks launch as real child sessions beneath the selected assignee root, task completion and failures report back to the parent root, and background root notifications no longer misfire while those updates land (PR #198).

**Remote and hybrid installs are less brittle.** Nerve now supports remote-gateway installation up front via `--gateway-url`, resolves gateway RPC origins from public config for remote workspace access, and explains missing cron capability with a clear remediation path instead of a dead-end warning (PR #181, PR #197, PR #200).

**Session and agent state are less misleading.** The model picker now reflects the active OpenClaw config, duplicate root-agent creation correctly registers suffixed agents in `openclaw.json`, direct-message sessions nest under the correct agent root, and the main root label stays canonical (PR #174, PR #185, PR #192, PR #196).

**Docs and setup guidance caught back up to reality.** AI setup docs landed, setup now prints the right deployment guide links, and stale operator docs were refreshed to match the current runtime and installer behavior (PR #179, PR #182, PR #191).

### Added
- Installer support for `--gateway-url` so Nerve can target a remote gateway from first boot (PR #181)
- AI agent setup docs and a raw install contract for agent-driven installs (PR #182)
- A dedicated `GET /api/kanban/tasks/:id` endpoint for direct Kanban task lookup by id (PR #176)
- An assignee picker for Kanban task forms so users no longer need to enter raw assignee values manually (PR #203)
- Support for custom board column keys via board config (PR #173)
- Shebang-based syntax highlighting for extensionless executable files (PR #190)

### Changed
- Setup now prints deployment guide links after configuration so operators can jump straight to the right topology docs (PR #179)
- Setup now ensures `sessions_spawn` is allowlisted alongside the other required gateway tools for Kanban execution on current OpenClaw builds (PR #159)
- Model selection now comes from the active OpenClaw config instead of Nerve-side fallback lists (PR #174)
- Chat input helper text now points users at the command palette more clearly (PR #175)

### Fixed
- Skills API parsing now falls back to structured stderr JSON output when tools emit machine-readable results there (PR #161)
- Sidebar session tree cleanup: only real roots are shown, direct-message sessions nest under their owning agent root, and `agent:main:main` always renders with a canonical label (PR #177, PR #185, PR #196)
- Session selection click targets are more forgiving thanks to a small hover delay that reduces accidental steals while moving through the tree (PR #187)
- Duplicate root-agent creation now registers the correct suffixed agent in `openclaw.json` so config, workspace, and session roots stay aligned (PR #192)
- Assigned Kanban tasks now launch as real child sessions, clean up orphaned child sessions on partial launch failures, and report completion back to the parent root that owns the work (PR #198)
- Background top-level root updates now set unread state correctly and only ping on terminal events (PR #198)
- Remote-workspace gateway RPC now derives its request origin from public config instead of hardcoded loopback values, fixing hybrid/cloud `origin not allowed` failures (PR #200)

### Documentation
- Added AI setup docs and refreshed stale repo docs so installation, deployment, configuration, and troubleshooting guidance line up with the current runtime (PR #182, PR #191)

## [1.5.1] - 2026-03-25

### Fixed
- Restored the browser websocket auth identity to `webchat-ui` so remote deployments do not trip the gateway's stricter Control UI device-identity requirement on non-secure page origins. This fixes the 1.5.0 login failure reported by users connecting to remote gateway endpoints from plain remote HTTP Nerve pages.

## [1.5.0] - 2026-03-25

### Highlights

**Workspace context now follows the owning top-level agent.** File browser state, Memory, Config, and Skills now switch with the selected top-level agent instead of leaking across agents, and dirty editor tabs now block cross-agent switches with an explicit save / discard / cancel choice (PR #123).

**Agent runtime flows got tighter.** Subagents can now choose whether they stay visible after one-shot runs, subagent deletion is more reliable, the model catalog waits longer on cold starts so configured Codex and other models are more likely to appear in the spawn dialog, and remote or sandboxed workspace access now falls back cleanly through the gateway when local filesystem access is unavailable (PR #119, PR #120, PR #124, PR #145).

**Voice and readability both moved forward.** Xiaomi MiMo joins as a first-class TTS provider, the new global font size control now reaches more of the UI, and small-screen inputs keep a fixed 16px size to avoid mobile auto-zoom regressions (PR #128, PR #129, PR #130).

**Installer, setup, and execution hardening all moved up a notch.** Tailscale setup now supports distinct IP and Serve flows, wake word is disabled on mobile web, setup defaults are stricter around device approval and can infer the agent name from local metadata, and kanban reruns now keep stable identifiers without stale completion state leaking across runs (PR #116, PR #118, PR #122, PR #141, PR #143, PR #151).

**Workspace navigation got smoother.** Markdown and chat workspace path references can now resolve and reveal files safely in the file browser, with follow-up fixes for missing-path semantics and refreshed open handlers (PR #148, PR #149).

### Added
- Tailscale IP and Tailscale Serve setup flows in the installer, with matching installer-step documentation (PR #116)
- An **After run** selector for one-shot subagents, with **Keep** and **Delete** cleanup options (PR #120)
- **Font size setting** in Appearance settings, adjustable from 10px to 24px via dropdown, stored in `localStorage`, and applied instantly via a CSS custom property (PR #128)
- **Xiaomi MiMo** as a first-class TTS provider, including API key plumbing, server-side synthesis support, and Audio settings controls for model, voice, and style (PR #129)
- **Gateway RPC fallback for remote and sandboxed workspace access**, including a sandboxed-workspace notice in the Memory panel when local filesystem access is unavailable (PR #145)
- **Safe workspace path resolve and reveal** from markdown and chat references into the file browser (PR #148)

### Changed
- Workspace scope is now derived from the owning top-level agent, including when viewing subagent sessions (PR #123)
- File browser tabs, selection state, drafts, Memory, Config, and Skills now persist per top-level agent instead of globally (PR #123)
- Cross-agent workspace switches now show **Save and switch**, **Discard and switch**, or **Cancel** when dirty editor tabs exist (PR #123)
- Model catalog fetches now allow a longer cold-start timeout before giving up, so configured Codex and other models appear more reliably in the spawn dialog (PR #124)
- Mobile web now disables wake word and points users to manual mic activation instead (PR #118)
- Right sidebar resizing now allows a narrower minimum width (PR #122)
- Cron list and dialog typography now fully follows the global font size system, with the remaining fixed pixel sizes converted to `rem` units (PR #130)
- Setup defaults now infer `AGENT_NAME` from local identity metadata when the value is not already explicitly set (PR #151)

### Fixed
- Subagent session deletion no longer fails on the Nerve side when the gateway closes a proxied WebSocket normally during delete flows (PR #119)
- Agent-scoped workspace switching no longer leaks same-path editor state, save toasts, watcher refreshes, or async file reads across top-level agents (PR #123)
- Tailscale origin handling is more robust during setup and follow-up gateway patching (PR #116)
- Small-screen text inputs now stay at 16px so mobile browsers do not auto-zoom the composer and settings controls after font size changes (PR #130)
- Older top-level agent chats stay visible in the sidebar instead of disappearing once they fall outside the recent-activity query window (PR #134)
- Kanban runtime data now lives under `${NERVE_DATA_DIR:-~/.nerve}/kanban`, and legacy installs automatically migrate data from old `server-dist/data/kanban` or `server/data/kanban` locations on first run (PR #135)
- Setup no longer attempts to approve malformed pending device request IDs, and gateway auth validation now uses a working token probe during defaults and check flows (PR #141)
- Kanban run completion now accepts stable child identifiers, ignores stale client `run` patches, stops stale pollers after reruns, and normalizes spawn session aliases consistently (PR #143)
- Remote and sandboxed workspace gateway fallback now authenticates correctly with device identity in real OpenShell-style deployments (PR #145)
- Workspace path resolve now returns `404` for safe missing targets, and markdown file-link handlers refresh when workspace path callbacks change (PR #149)

### Documentation
- Added a dedicated Tailscale guide for existing installs, linked from the docs index and configuration docs (PR #117)
- Refreshed the API, architecture, configuration, troubleshooting, and changelog docs to match agent-scoped workspace behavior and newer gateway and file APIs (PR #126)
- Rewrote the README around current positioning, capabilities, install flow, and embedded demo video, with follow-up formatting and video asset fixes (PR #136)

---

## [1.4.9] - 2026-03-18
115 CONTRIBUTING.md

@@ -45,61 +45,78 @@ Thanks for wanting to help! This guide covers everything you need to start contr
```bash
# Terminal 1 — Vite frontend with HMR
npm run dev

# Terminal 2 — Backend with file watching
npm run dev:server
# Terminal 2 — Backend with file watching on a separate port
PORT=3081 npm run dev:server
```

5. Open **http://localhost:3080**. The frontend proxies API requests to the backend on `:3081`.
5. Open **http://localhost:3080**. In this split setup, Vite proxies API and WebSocket traffic to the backend on `:3081`.

`npm run dev:server` does not default to `:3081` on its own. Without `PORT=3081`, the backend uses its normal default of `:3080`.

## Project Structure

```
nerve/
openclaw-nerve/
├── src/ # Frontend (React + TypeScript)
│ ├── features/ # Feature modules (co-located)
│ │ ├── auth/ # Login page, auth gate, session hook
│ │ ├── chat/ # Chat panel, messages, input, search
│ │ ├── voice/ # Push-to-talk, wake word, audio feedback
│ │ ├── tts/ # Text-to-speech playback
│ │ ├── sessions/ # Session list, tree, spawn dialog
│ │ ├── workspace/ # Tabbed panel: memory, crons, skills, config
│ │ ├── file-browser/ # Workspace file browser with tabbed editor
│ │ ├── settings/ # Settings drawer (appearance, audio, connection)
│ ├── features/ # Product surfaces and feature-local helpers
│ │ ├── activity/ # Agent log and event log panels
│ │ ├── auth/ # Login gate and auth flows
│ │ ├── charts/ # Inline chart extraction and renderers
│ │ ├── chat/ # Chat UI, message loading, streaming operations
│ │ ├── command-palette/ # ⌘K command palette
│ │ ├── markdown/ # Markdown renderer, code block actions
│ │ ├── charts/ # Inline chart extraction and rendering
│ │ ├── memory/ # Memory editor, add/delete dialogs
│ │ ├── activity/ # Agent log, event log
│ │ ├── dashboard/ # Token usage, memory list, limits
│ │ └── connect/ # Connect dialog (gateway setup)
│ ├── components/ # Shared UI components
│ │ ├── ui/ # Primitives (button, input, dialog, etc.)
│ │ └── skeletons/ # Loading skeletons
│ ├── contexts/ # React contexts (Chat, Session, Gateway, Settings)
│ ├── hooks/ # Shared hooks (WebSocket, SSE, keyboard, etc.)
│ ├── lib/ # Utilities (formatting, themes, sanitize, etc.)
│ ├── types.ts # Shared type definitions
│ └── test/ # Test setup
│ │ ├── connect/ # Gateway connect dialog
│ │ ├── dashboard/ # Token usage and memory list views
│ │ ├── file-browser/ # Workspace tree, tabs, editors
│ │ ├── kanban/ # Task board, proposals, execution views
│ │ ├── markdown/ # Markdown and tool output rendering
│ │ ├── memory/ # Memory editing dialogs and hooks
│ │ ├── sessions/ # Session list, tree helpers, spawn flows
│ │ ├── settings/ # Settings drawer and audio controls
│ │ ├── tts/ # Text-to-speech playback/config
│ │ ├── voice/ # Push-to-talk, wake word, audio feedback
│ │ └── workspace/ # Workspace-scoped panels and state
│ ├── components/ # Shared UI building blocks
│ ├── contexts/ # Gateway, session, chat, and settings contexts
│ ├── hooks/ # Cross-cutting hooks used across features
│ ├── lib/ # Shared frontend utilities
│ ├── App.tsx # Main layout and panel composition
│ └── main.tsx # Frontend entry point
├── server/ # Backend (Hono + TypeScript)
│ ├── routes/ # API route handlers
│ ├── services/ # TTS engines, Whisper, usage tracking
│ ├── lib/ # Utilities (config, WS proxy, file watcher, etc.)
│ ├── middleware/ # Auth, rate limiting, security headers, caching
│ └── app.ts # Hono app assembly
├── config/ # TypeScript configs for server build
│ ├── routes/ # API routes, mounted from server/app.ts
│ │ ├── auth.ts
│ │ ├── gateway.ts
│ │ ├── sessions.ts
│ │ ├── workspace.ts
│ │ ├── files.ts
│ │ ├── file-browser.ts
│ │ ├── kanban.ts
│ │ ├── crons.ts
│ │ ├── memories.ts
│ │ ├── tts.ts
│ │ ├── transcribe.ts
│ │ └── ...plus route tests beside many handlers
│ ├── services/ # Whisper, TTS, and related backend services
│ ├── lib/ # Config, gateway helpers, cache, file watchers, mutexes
│ ├── middleware/ # Auth, security headers, cache, limits
│ ├── app.ts # Hono app assembly
│ └── index.ts # HTTP/HTTPS server startup
├── bin/ # CLI/update entrypoints
├── config/ # TypeScript build configs
├── docs/ # User and operator docs
├── public/ # Static assets
├── scripts/ # Setup wizard and utilities
├── docs/ # Documentation
├── vitest.config.ts # Test configuration
├── eslint.config.js # Lint configuration
└── vite.config.ts # Vite build configuration
├── vite.config.ts # Vite config
├── vitest.config.ts # Vitest config
└── eslint.config.js # ESLint flat config
```

### Key conventions

- **Feature modules** live in `src/features/<name>/`. Each feature owns its components, hooks, types, and tests.
- **`@/` import alias** maps to `src/` — use it for cross-feature imports.
- **Tests are co-located** with source files: `foo.ts` → `foo.test.ts`.
- **Server routes** are thin handlers that delegate to `services/` and `lib/`.
- **Feature modules** usually live in `src/features/<name>/`. Keep new UI work inside the closest existing feature instead of inventing a parallel structure.
- **`@/` import alias** maps to `src/`.
- **Tests are usually nearby** the code they cover, especially for hooks, routes, and utilities.
- **Cross-feature imports exist**, but keep them narrow and intentional. Reuse small helpers, avoid circular dependencies, and do not spread one-off shortcuts across the app.
- **Server routes** live in `server/routes/` and are mounted in `server/app.ts`. Shared logic belongs in `server/lib/`, `server/services/`, or `server/middleware/`.

## Adding a Feature

@@ -181,19 +198,21 @@ test(voice): add wake-word persistence tests
## Pull Request Process

1. **Open an issue first** for non-trivial changes. Discuss the approach before writing code.
2. **Branch from `master`**: `git checkout -b feat/my-feature`.
3. **Keep PRs focused** — one feature or fix per PR.
4. **Ensure all checks pass** before requesting review:
2. **Create a branch from `master`**: `git checkout -b feat/my-feature`, then open your PR back into `master`.
3. **Use branches even if you have GitHub Write access**. `master` is protected, so direct pushes there are not the normal workflow.
4. **Keep PRs focused** — one feature or fix per PR.
5. **Ensure all checks pass** before requesting review:
   ```bash
   npm run lint
   npm run build
   npm run build:server
   npm test -- --run
   ```
5. **Fill out the PR template** — describe what, why, and how.
6. **Include tests** for new features. Bug fixes should include a regression test when feasible.
7. **Screenshots welcome** for UI changes.
8. A maintainer will review, possibly request changes, and merge.
6. **Fill out the PR template** — describe what, why, and how.
7. **Include tests** for new features. Bug fixes should include a regression test when feasible.
8. **Screenshots welcome** for UI changes.
9. A maintainer may push small fixes to your PR branch when GitHub allows it, but fork permissions can vary.
10. A maintainer will review, possibly request changes, and merge.

## License
208 README.md

@@ -4,78 +4,94 @@

# Nerve

**The cockpit for your [OpenClaw](https://github.com/openclaw/openclaw) agents.**
**The cockpit OpenClaw deserves.**

[License](LICENSE)
[Discord](https://discord.gg/Sh9ZGtctva)
*OpenClaw is powerful. Nerve is the interface that makes people say "oh, now I get it."*

[GitHub](https://github.com/daggerhashimoto/openclaw-nerve)
[License](LICENSE)
[Discord](https://discord.gg/Sh9ZGtctva)

</div>

```bash
curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh | bash
```
> *Run the installer, live in 60 seconds*

## What is Nerve?

You can already chat with your OpenClaw agent through webchat, Telegram, WhatsApp, Discord. Nerve is what you open when chatting isn't enough.

Nerve is a self-hosted web UI for [OpenClaw](https://github.com/openclaw/openclaw) AI agents. Voice conversations, live workspace editing, inline charts, cron scheduling, and full token-level visibility. One install script. Running in 30 seconds.

## Why Nerve?

Messaging channels are great for chatting. But you can't watch charts render in real time, edit your agent's workspace mid-conversation, browse its files, manage tasks on a kanban board, or monitor token spend from a Telegram window. Nerve is the dashboard that gives you the full picture.

<div align="center">

<https://github.com/user-attachments/assets/25d65a85-1d42-45bc-baae-5e6fca531705>

</div>

## What makes it different
<table align="center">
<p align="center">
<strong>Mobile Screenshots</strong>
</p>
<tr>
<td align="center"><img src="https://y7qrgowhkj.ufs.sh/f/WxqUPjL9oRPTRBre6go2FRlk6Tbtc5WnrMZ9AUKdBC3vsNi0" alt="Nerve screenshot 1" width="200" /></td>
<td align="center"><img src="https://y7qrgowhkj.ufs.sh/f/WxqUPjL9oRPTW4tYWWL9oRPTJEIWbY7sVfKzjyrSCXqMdGv6" alt="Nerve screenshot 2" width="200" /></td>
<td align="center"><img src="https://y7qrgowhkj.ufs.sh/f/WxqUPjL9oRPTSe31HqKJOvEcMqm9djw1Y5srz3GQTIUlFg84" alt="Nerve screenshot 3" width="200" /></td>
</tr>
</table>

### Voice that actually works
Talk to your agent in 12 languages. Explicit language selection (no flaky auto-detect), wake-word activation, per-language stop/cancel phrases, and on-device Whisper transcription with model selection (tiny, base, small) plus GPU detection. No API key needed. Multi-provider TTS with Edge, OpenAI, and Replicate.
## Why Nerve exists

### Full workspace visibility
Your sub-agent sessions, workspace files, memory, config, tools. All visible, all editable, all live. No file hunting, no guessing what it remembers.
Chat is great for talking to agents.
It is not enough for operating them.

### Kanban task board
Drag-and-drop task management with agent execution, review workflow, and proposal inbox. Create tasks, assign them to your agent, and watch the work happen — all from a visual board.
The moment you care about visibility, control and coordination over your agents, the thread gets too small. You want the workspace, sessions, taskboard, editor, usage, and agent context in one place.

### Responsive by default
Nerve adapts cleanly across desktop, tablet, and mobile, with touch-friendly controls on smaller screens and no core workflow loss.
*Nerve is that place.*

### Live charts from a chat message
Your agent can drop interactive TradingView charts, candlestick plots, and data visualizations directly into the conversation. Say "show me gold this year" and get a real chart, not a code block.
## Why it feels different

### Cron and scheduling from the UI
Create recurring jobs and one-shot reminders. Every scheduled run shows up as its own session in the sidebar. You can watch it execute live, read the full transcript, and see exactly what it did.
### ✨ Fleet control, not just chat
Run multiple agents from one place. Each agent can have its own workspace, subagents, memory, identity, soul, and skills, while Nerve gives you a single control plane to switch context, inspect state, and operate the whole fleet.

## Everything else
### ✨ Voice that feels built in
Push-to-talk, wake word flows, explicit language selection, local Whisper transcription, multilingual stop and cancel phrases, and multiple TTS providers. Voice is part of the product, not an afterthought.

| | |
### ✨ Full agent operating context
Each agent can have its own workspace, memory, identity, soul, and skills. Nerve lets you inspect, edit, and manage that context live, without guessing what an agent knows, where it works, or how it is configured.

### ✨ A real operating layer
Crons, session trees, kanban workflows, review loops, proposal inboxes, and model overrides. Nerve gives agent work an operating surface instead of leaving it trapped inside chat history.

### ✨ Rich live output
Charts, diffs, previews, syntax-highlighted code, structured tool rendering, and streaming UI that makes agent responses easier to inspect.

> <details>
>
> <summary>What you can do with it</summary>
>
> - **Talk to your agent by voice** and hear it answer back naturally
> - **Browse and edit the workspace live** while the conversation is still happening
> - **Watch cron runs as separate sessions** instead of treating automation like a black box
> - **Delegate work onto a kanban board** and review what came back
> - **Ask for a chart** and get a real chart, not a code block pretending to be one
> - **Track token usage, costs, and context pressure** while long tasks run
> - **Inspect subagent activity** without losing the main thread
> - **Switch between per-agent workspaces and memory** without losing context
> - **Inspect each agent’s identity, soul, and skills** from the UI
> - **Delegate subagent work inside a larger agent fleet** instead of treating everything as one thread

</details>

## Capability snapshot

| Area | Highlights |
|---|---|
| **Voice I/O** | Push-to-talk + wake word, live transcription preview, language-aware stop/cancel phrases, local Whisper model picker, TTS providers (Edge/OpenAI/Replicate) |
| **Streaming chat** | Markdown, syntax highlighting, diff views, image paste, file previews. All rendering as it streams |
| **File browser** | Browse your workspace, rename, move, trash, and restore files. Open files in tabs. Support for custom workspace roots via `FILE_BROWSER_ROOT` |
| **Built-in editor** | CodeMirror editor with syntax highlighting, conflict-safe saves, and automatic lock protection during concurrent agent edits |
| **Multi-session** | Session tree with sub-agents, per-session model overrides, unread indicators |
| **Kanban board** | Drag-and-drop task management with agent execution, review workflow, and proposal inbox |
| **Responsive UI** | Fully responsive layout across desktop, tablet, and mobile with touch-friendly controls |
| **Sub-agents** | Spawn background workers with custom models and reasoning levels |
| **Monitoring** | Token usage, context window meter, cost tracking, activity logs |
| **Command palette** | Cmd+K to search, switch sessions, change models. Keyboard-first |
| **Search** | Full-text search across all messages in the current session |
| **Images** | Paste from clipboard, drag & drop files, full-screen lightbox with download |
| **Skills browser** | Browse installed skills, check status and requirements from the workspace panel |
| **Local STT** | On-device Whisper — tiny, base, or small models with multilingual support, explicit language selection, GPU detection, and auto-download. No API key needed |
| **Code actions** | Copy or save-to-file buttons on every code block |
| **API key management** | Add provider keys from settings — writes to .env and hot-reloads, no restart needed |
| **14 themes** | Dark, light, and everything in between. Resizable panels, custom fonts |
| **Auto-updater** | Built-in updater with automatic rollback. One command to update, verify, and restart |

## Get Started
| **Agent fleet** | Run multiple agents from one control plane, each with its own workspace, subagents, memory, identity, soul, and skills |
| **Interaction** | Streaming chat, markdown, syntax highlighting, diff views, image paste, file previews, voice input, TTS, live transcription preview |
| **Workspace** | Per-agent file browser, tabbed editor, memory editing, config editing, skills browser |
| **Operations** | Session tree, subagents, cron scheduling, kanban task board, review flow, proposal inbox, model overrides |
| **Observability** | Token usage, cost tracking, context meter, agent logs, event logs |
| **Polish** | Command palette, responsive UI, 14 themes, font family and 10px to 24px font size controls, mobile-safe input sizing, hot-reloadable settings, updater with rollback |
## Get started

### One command
@@ -83,84 +99,98 @@ Create recurring jobs and one-shot reminders. Every scheduled run shows up as it
```bash
curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh | bash
```

The installer handles dependencies, cloning, building, and launching. It runs a setup wizard that auto-detects your gateway token and walks you through configuration.
> *The installer handles dependencies, clone, build, and then usually hands off straight into the setup wizard. Guided access modes include localhost, LAN, Tailscale tailnet IP, and Tailscale Serve.*

### Pick your setup

- **[Run everything on one machine](docs/DEPLOYMENT-A.md)**
- **[Use a cloud Gateway with Nerve on your laptop](docs/DEPLOYMENT-B.md)**
- **[Run both Nerve and Gateway in the cloud](docs/DEPLOYMENT-C.md)**
- **[Local](docs/DEPLOYMENT-A.md)** — Run Nerve and Gateway on one machine. *Recommended default setup for reliability and simplicity.*
- **[Hybrid](docs/DEPLOYMENT-B.md)** — Keep Nerve local, run Gateway in the cloud
- **[Cloud](docs/DEPLOYMENT-C.md)** — Run Nerve and Gateway in the cloud

### Manual install
<details><summary><strong>Manual install</strong></summary>

```bash
git clone https://github.com/daggerhashimoto/openclaw-nerve.git
cd openclaw-nerve
npm install
npm run setup # interactive wizard — configures .env
npm run prod # builds and starts the server
npm run setup
npm run prod
```

### Updating
</details>

<details><summary><strong>Updating</strong></summary>

```bash
npm run update -- --yes
```

Fetches the latest release, rebuilds, restarts, and verifies health. Auto-rolls back on failure. See [docs/UPDATING.md](docs/UPDATING.md) for flags and details.
Fetches the latest release, rebuilds, restarts, verifies health, and rolls back automatically on failure.

### Development
</details>

<details><summary><strong>Development</strong></summary>

```bash
npm run dev # frontend — Vite HMR on :3080
npm run dev:server # backend — watch mode on :3081
npm run dev # frontend — Vite on :3080 by default
PORT=3081 npm run dev:server # backend — explicit split-port dev setup
```

**Requires:** Node.js 22+ and an [OpenClaw](https://github.com/openclaw/openclaw) gateway.
`npm run dev:server` uses the normal server `PORT` setting. If you do not override it, the backend also defaults to `:3080` and will collide with Vite.

## How it works
**Requires:** Node.js 22+ and an OpenClaw gateway.
</details>

## How it fits into OpenClaw

Nerve sits in front of the gateway and gives you a richer operating surface in the browser.

```text
Browser ─── Nerve (:3080) ─── OpenClaw Gateway (:18789)
│ │
├─ WS ──────┤ proxied to gateway
├─ SSE ─────┤ file watchers, real-time sync
└─ REST ────┘ files, memories, TTS, models
```

Nerve proxies WebSocket traffic to your gateway and adds its own REST layer.
OpenClaw remains the engine. Nerve gives it a cockpit.

**Frontend:** React 19 · Tailwind CSS 4 · shadcn/ui · Vite 7
**Backend:** Hono 4 on Node.js

## Security

Nerve binds to `127.0.0.1` (localhost) by default — only you can access it. When you expose it to the network (`HOST=0.0.0.0`), built-in password authentication protects all endpoints. The setup wizard auto-prompts for a password when network access is configured.
Nerve binds to `127.0.0.1` by default, so it stays local unless you choose to expose it.

- **Session cookies** — `HttpOnly`, `SameSite=Strict`, HMAC-SHA256 signed
- **Password storage** — scrypt with 32-byte salt
- **WebSocket auth** — cookie verified on upgrade
- **Gateway token injection** — Automatically injected server-side for trusted connections
When you bind it to the network (`HOST=0.0.0.0`), built-in password authentication protects the UI and its endpoints. Sessions use signed cookies, passwords are stored as hashes, WebSocket upgrades are authenticated, and trusted connections can use server-side gateway token injection.

See [Security](docs/SECURITY.md) for the full threat model.
For the full threat model and hardening details, see **[docs/SECURITY.md](docs/SECURITY.md)**.

## Docs
## Documentation

| | |
|---|---|
| **[Architecture](docs/ARCHITECTURE.md)** | How the codebase is organized |
| **[Configuration](docs/CONFIGURATION.md)** | Every `.env` variable explained |
| **[Deployment Guides](docs/README.md#deployment-guides)** | Practical guides for local, remote Gateway, and cloud setups |
| **[Agent Markers](docs/AGENT-MARKERS.md)** | TTS markers, inline charts, and how agents render rich UI |
| **[Security](docs/SECURITY.md)** | What's locked down and how |
| **[API](docs/API.md)** | REST and WebSocket endpoints |
| **[Contributing](CONTRIBUTING.md)** | Dev setup, code style, PRs |
| **[Troubleshooting](docs/TROUBLESHOOTING.md)** | Common issues and fixes |
| **[Changelog](CHANGELOG.md)** | Release notes and shipped changes |
- **[Architecture](docs/ARCHITECTURE.md)** — codebase structure and system design
- **[Configuration](docs/CONFIGURATION.md)** — `.env` variables and setup behavior
- **[Deployment Guides](docs/README.md)** — local, hybrid, and cloud setups
- **[Agent Markers](docs/AGENT-MARKERS.md)** — TTS, charts, kanban markers, and rich UI output
- **[Troubleshooting](docs/TROUBLESHOOTING.md)** — common issues and fixes
- **[Tailscale Guide](docs/TAILSCALE.md)** — private remote access via tailnet IP or Tailscale Serve
- **[Contributing](CONTRIBUTING.md)** — development workflow and pull requests
- **[Changelog](CHANGELOG.md)** — release notes and shipped changes

## Community

Join the [Nerve Discord](https://discord.gg/Sh9ZGtctva) — get help, share your setup, report bugs, and follow development.
If this is the kind of interface you want around your OpenClaw setup, give the repo a star, contribute and keep an eye on it.

Join the **[Nerve Discord](https://discord.gg/Sh9ZGtctva)** to get help, discuss, share your setup, and follow development.

### People building Nerve

[Contributors](https://github.com/daggerhashimoto/openclaw-nerve/graphs/contributors)

## License
@@ -131,9 +131,9 @@ The marker must contain valid JSON inside `[chart:{...}]`. The parser uses brack

### How Agents Learn About Charts

Unlike TTS markers (which use runtime prompt injection), chart markers are taught to agents via the **`TOOLS.md` workspace file**. Nerve's installer can inject chart documentation into `TOOLS.md` automatically (see PR #218).
Unlike TTS markers, chart markers are **not** injected by Nerve at runtime.

Agents that have the chart syntax in their `TOOLS.md` will naturally include `[chart:{...}]` markers when data visualization is appropriate.
Agents only use `[chart:{...}]` markers when that syntax is already present in their own instructions or workspace context, for example in `TOOLS.md`, `AGENTS.md`, or another prompt source you manage.

### Implementation
198 docs/AI_SETUP.md (new file)

@@ -0,0 +1,198 @@
# AI Agent Setup

Nerve supports agent-driven installation.

If you want an AI agent to install, configure, and validate Nerve, the canonical raw instruction file is:

- [`INSTALL.md`](./INSTALL.md)
- raw URL: `https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/docs/INSTALL.md`

Use that file as the operational contract.
Use this page for the human-readable explanation, boundaries, and topology map.

## Copy-paste prompt

```text
Install, configure, and validate Nerve on this machine.

Fetch and follow instructions from:
https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/docs/INSTALL.md
```

## Default agent behavior

Unless you say otherwise, agents should use these defaults:
- local-first
- localhost-only by default
- `~/nerve` as the install path
- installer-first (`install.sh`)
- setup wizard first
- validate before reporting completion

That means the common happy path is:
1. detect an existing OpenClaw gateway
2. install Nerve
3. run setup
4. start or restart Nerve
5. validate the UI and gateway connection

## What agents may do automatically

For the default local path, agents may automatically:
- inspect the current machine and existing install state
- detect whether `~/nerve` already exists
- detect whether OpenClaw is installed and reachable
- detect the gateway token and gateway URL
- install missing core prerequisites for the local path
- run the Nerve installer
- run the setup wizard
- use defaults mode for local-only setup when no TTY is available
- write a minimal local `.env` if the wizard cannot be used
- restart local services or processes
- run validation and smoke checks

Agents may also apply minimal localhost-safe OpenClaw changes needed for the default path, such as local origin allowlisting, local pairing fixes, or required local tool allow entries.

## What requires confirmation

Agents should ask before they do anything that changes exposure, trust, or install ownership.

That includes:
- installing OpenClaw
- exposing Nerve beyond localhost
- LAN access
- public internet exposure
- reverse proxy setup
- Tailscale setup or topology changes
- destructive reinstall, replacement, or deletion
- remote gateway auth or allowlist changes
- cloud-hosted assumptions that materially change the security posture

Short version: local repair and local setup can be automated. Public or destructive changes need consent.

## Dependency behavior

Nerve is not a standalone app. It depends on an OpenClaw gateway.

Expected agent behavior:
1. look for an existing reachable gateway first
2. prefer using that gateway
3. if none is found, explain the dependency clearly
4. ask before installing OpenClaw

For the default same-machine flow, agents may make the smallest safe local OpenClaw fixes required for Nerve to connect. For remote or public gateway changes, they should stop and ask.

## Topology map

Choose the guide that matches the intended setup:

- **Local / same machine:** [DEPLOYMENT-A.md](./DEPLOYMENT-A.md)
- **Hybrid / local Nerve + remote gateway:** [DEPLOYMENT-B.md](./DEPLOYMENT-B.md)
- **Cloud / remote Nerve:** [DEPLOYMENT-C.md](./DEPLOYMENT-C.md)
- **Add Tailscale to an existing install:** [TAILSCALE.md](./TAILSCALE.md)

Recommended default: start with the local setup unless you already know you need something else.

## Done criteria

An agent should only report success when all of these are true:
- Nerve is installed at the intended path
- Nerve starts successfully
- it points at the intended OpenClaw gateway
- access and auth match the requested mode
- the smoke test passes

A script finishing is not enough. A valid install must actually respond and connect.

## Smoke test expectations

Keep validation minimal and real:

1. confirm the Nerve service or process is running
2. confirm the expected URL responds
3. confirm the intended gateway responds
4. confirm `.env` matches the intended gateway
5. if auth is enabled, confirm the login surface or protected access path is present

Typical local checks:

```bash
openclaw gateway status
curl -fsS http://127.0.0.1:18789/health
curl -fsS http://127.0.0.1:3080/health
```

For remote or custom setups, agents should adjust the URLs to match the requested topology.

## Failure and recovery behavior

Agents should fail clearly, not vaguely.

### No OpenClaw gateway found

Explain that Nerve depends on OpenClaw and ask before installing it.

### Installer or setup wizard unavailable

Use the smallest correct fallback:
- clone the repo if needed
- `npm install`
- `npm run setup` when available
- if local-only and no TTY is available, `npm run setup -- --defaults`
- if needed, write a minimal `.env`, then build and start manually (see the sketch below)
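A minimal sketch of that fallback path, assuming the default `~/nerve` install location and a localhost-only, no-TTY environment; adjust paths and flags to the actual machine before relying on it:

```bash
# Sketch of the manual fallback: clone, install, non-interactive setup, then build and start.
# Assumes the default ~/nerve install path and a localhost-only topology.
git clone https://github.com/daggerhashimoto/openclaw-nerve.git ~/nerve
cd ~/nerve
npm install
npm run setup -- --defaults   # local-only defaults when no TTY is available
npm run prod                  # builds and starts the server
```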
### Existing install already present

Inspect it first. Prefer restart, repair, or reconfigure. Ask before replacing or deleting anything.

### Port, auth, or access mismatch

Adjust the config, restart, and revalidate. Do not declare success just because the process exists.

### Remote or public topology requested without details

Ask for the missing details instead of guessing. This especially matters for Tailscale, reverse proxies, public domains, and remote gateway allowlists.

## Example prompts

### Default local install

```text
Install, configure, and validate Nerve on this machine.
Use the safest local-first path and keep it localhost-only unless I approve otherwise.

Fetch and follow instructions from:
https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/docs/INSTALL.md
```

### Local Nerve + existing remote gateway

```text
Install Nerve on this machine and connect it to my existing remote OpenClaw gateway.
Do not expose Nerve beyond localhost unless I approve it.
If remote gateway config needs changing, tell me before you do it.

Fetch and follow instructions from:
https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/docs/INSTALL.md
```

### Tailscale setup

```text
Install Nerve and make it reachable over Tailscale.
Ask before making any exposure or gateway allowlist changes.
Use the repo's Tailscale guidance instead of inventing a new flow.

Fetch and follow instructions from:
https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/docs/INSTALL.md
```

### Existing install repair

```text
Inspect my existing Nerve install, repair it if needed, and validate it.
Do not reinstall or delete anything unless you ask first.

Fetch and follow instructions from:
https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/docs/INSTALL.md
```
465 docs/API.md

@@ -21,7 +21,7 @@ Nerve exposes a REST + SSE API served by [Hono](https://hono.dev/) on the config
- [Memories](#memories)
- [Agent Log](#agent-log)
- [Gateway](#gateway)
- [Git Info](#git-info)
- [Sessions](#sessions)
- [Workspace Files](#workspace-files)
- [Cron Jobs](#cron-jobs)
- [Skills](#skills)

@@ -151,7 +151,7 @@ Returns server time, gateway process uptime, timezone, and agent name.
| Field | Type | Description |
|-------|------|-------------|
| `serverTime` | `number` | Current epoch milliseconds |
| `gatewayStartedAt` | `number \| null` | Gateway process start time (epoch ms). Linux only; `null` elsewhere |
| `gatewayStartedAt` | `number \| null` | Gateway process start time (epoch ms), when the host OS can resolve it (Linux via `/proc`, macOS via `ps`) |
| `timezone` | `string` | IANA timezone of the server |
| `agentName` | `string` | Configured agent display name |

@@ -246,8 +246,8 @@ data: {"event":"memory.changed","data":{"source":"api","action":"create","sectio
|-------|---------|-------------|
| `connected` | On initial connection | `{ ts }` |
| `ping` | Every 30 seconds (keep-alive) | `{ ts }` |
| `memory.changed` | Memory file modified via API | `{ source, action, section?, file? }` |
| `file.changed` | Workspace file modified (by agent or externally) | `{ path, mtime }` |
| `memory.changed` | Memory file modified via API or file watcher | `{ source, action?, section?, file?, agentId? }` |
| `file.changed` | Workspace file modified by the file watcher | `{ path, agentId }` |
| `tokens.updated` | Token usage changed | varies |
| `status.changed` | Gateway status changed | varies |

@@ -270,7 +270,7 @@

### `POST /api/tts`

Synthesizes speech from text. Returns raw `audio/mpeg` binary.
Synthesizes speech from text. Returns raw audio binary.

**Rate Limit:** TTS (10/min)
**Body Size Limit:** 64 KB

@@ -289,17 +289,19 @@ Synthesizes speech from text. Returns raw `audio/mpeg` binary.
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `text` | `string` | Yes | Text to synthesize (1–5000 chars, non-empty after trim) |
| `provider` | `"openai" \| "replicate" \| "edge" \| "qwen"` | No | TTS provider. `"qwen"` is an alias for `"replicate"` + model `"qwen-tts"` |
| `provider` | `"openai" \| "replicate" \| "edge" \| "qwen" \| "xiaomi"` | No | TTS provider. `"qwen"` is a legacy alias for `"replicate"` + model `"qwen-tts"` |
| `voice` | `string` | No | Provider-specific voice name |
| `model` | `string` | No | Provider-specific model ID |

**Provider Selection (when `provider` is omitted):**

1. OpenAI — if `OPENAI_API_KEY` is set
2. Replicate — if `REPLICATE_API_TOKEN` is set
3. Edge TTS — always available (free, no API key)
1. OpenAI, if `OPENAI_API_KEY` is set
2. Replicate, if `REPLICATE_API_TOKEN` is set
3. Edge TTS, always available (free, no API key)

**Response:** `audio/mpeg` binary (200)
Xiaomi MiMo is available when `provider: "xiaomi"` is requested and `MIMO_API_KEY` is configured. It is not part of the automatic fallback chain.

**Response:** `audio/mpeg` binary for OpenAI, Replicate, and Edge, or `audio/wav` for Xiaomi MiMo (200)
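A minimal request sketch, assuming a default local install on `:3080` with no password protection (add your auth cookie or header if authentication is enabled):

```bash
# Sketch: synthesize a short phrase with the free Edge provider and save the audio.
# Host and port assume the default local setup; authentication is omitted here.
curl -fsS http://127.0.0.1:3080/api/tts \
  -H 'Content-Type: application/json' \
  -d '{"text": "Hello from Nerve", "provider": "edge"}' \
  -o hello.mp3
```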
**Errors:**
@@ -319,9 +321,20 @@ Returns the current TTS voice configuration.

```json
{
  "qwen": { "mode": "preset", "speaker": "Chelsie" },
  "openai": { "model": "tts-1", "voice": "alloy" },
  "edge": { "voice": "en-US-AriaNeural" }
  "qwen": {
    "mode": "voice_design",
    "language": "English",
    "speaker": "Serena",
    "voiceDescription": "",
    "styleInstruction": ""
  },
  "openai": {
    "model": "gpt-4o-mini-tts",
    "voice": "nova",
    "instructions": "Speak naturally and conversationally, like a real person. Warm, friendly tone with a slight British accent. Keep it casual and relaxed, not robotic or overly formal."
  },
  "edge": { "voice": "en-US-AriaNeural" },
  "xiaomi": { "model": "mimo-v2-tts", "voice": "mimo_default", "style": "" }
}
```

@@ -334,7 +347,8 @@ Partially updates the TTS voice configuration. Only known keys are accepted.
```json
{
  "openai": { "voice": "nova", "instructions": "Speak cheerfully" },
  "edge": { "voice": "en-GB-SoniaNeural" }
  "edge": { "voice": "en-GB-SoniaNeural" },
  "xiaomi": { "voice": "default_en", "style": "Happy" }
}
```

@@ -345,9 +359,52 @@ Partially updates the TTS voice configuration. Only known keys are accepted.
| `qwen` | `mode`, `language`, `speaker`, `voiceDescription`, `styleInstruction` |
| `openai` | `model`, `voice`, `instructions` |
| `edge` | `voice` |
| `xiaomi` | `model`, `voice`, `style` |

All values must be strings, max 2000 characters each.

### `GET /api/keys`

Returns whether optional provider keys are configured. Key values are never returned.

**Response:**

```json
{
  "openaiKeySet": true,
  "replicateKeySet": false,
  "xiaomiKeySet": true
}
```

### `PUT /api/keys`

Writes optional provider keys to `.env` and hot-reloads the in-memory config.

**Request Body (partial update):**

```json
{
  "openaiKey": "sk-...",
  "replicateToken": "r8_...",
  "mimoApiKey": "sk-mimo-..."
}
```

Any subset of fields may be provided. Sending an empty string clears that key.
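The same partial update over `curl` might look like this (sketch; the key values are placeholders and the port assumes a default local install):

```bash
# Sketch: save an OpenAI key and clear the Replicate token in one request.
# Placeholder values only; never commit real keys to docs or scripts.
curl -fsS -X PUT http://127.0.0.1:3080/api/keys \
  -H 'Content-Type: application/json' \
  -d '{"openaiKey": "sk-...", "replicateToken": ""}'
```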
**Response:**
|
||||
|
||||
```json
|
||||
{
|
||||
"ok": true,
|
||||
"message": "OPENAI_API_KEY saved, MIMO_API_KEY saved",
|
||||
"openaiKeySet": true,
|
||||
"replicateKeySet": false,
|
||||
"xiaomiKeySet": true
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Transcription
|
||||
|
|
@ -370,8 +427,8 @@ Transcribes audio using the configured STT provider.
|
|||
|
||||
| Model | Size | Speed | Quality |
|
||||
|-------|------|-------|---------|
|
||||
| `tiny` (default) | 75 MB | Fastest | Good baseline, multilingual |
|
||||
| `base` | 142 MB | Fast | Better conversational accuracy, multilingual |
|
||||
| `tiny` | 75 MB | Fastest | Good baseline, multilingual |
|
||||
| `base` (default) | 142 MB | Fast | Better conversational accuracy, multilingual |
|
||||
| `small` | 466 MB | Moderate | Best accuracy (CPU-intensive), multilingual |
|
||||
| `tiny.en` | 75 MB | Fastest | English-only variant |
|
||||
| `base.en` | 142 MB | Fast | English-only variant |
|
||||
|
|
@ -414,7 +471,7 @@ Returns current STT runtime config + local model readiness/download state.
|
|||
```json
|
||||
{
|
||||
"provider": "local",
|
||||
"model": "tiny",
|
||||
"model": "base",
|
||||
"language": "en",
|
||||
"modelReady": true,
|
||||
"openaiKeySet": false,
|
||||
|
|
@ -422,7 +479,7 @@ Returns current STT runtime config + local model readiness/download state.
|
|||
"hasGpu": false,
|
||||
"availableModels": {
|
||||
"tiny": { "size": "75MB", "ready": true, "multilingual": true },
|
||||
"base": { "size": "142MB", "ready": false, "multilingual": true },
|
||||
"base": { "size": "142MB", "ready": true, "multilingual": true },
|
||||
"tiny.en": { "size": "75MB", "ready": true, "multilingual": false }
|
||||
},
|
||||
"download": null
|
||||
|
|
@ -523,7 +580,7 @@ Returns full provider × language support matrix and current local model state.
|
|||
"tts": { "edge": true, "qwen3": true, "openai": true }
|
||||
}
|
||||
],
|
||||
"currentModel": "tiny",
|
||||
"currentModel": "base",
|
||||
"isMultilingual": true
|
||||
}
|
||||
```
|
||||
|
|
@ -641,12 +698,20 @@ Session data is cached for 60 seconds to avoid repeated filesystem scans.
|
|||
|
||||
## Memories
|
||||
|
||||
All memory routes accept an optional `agentId` scope. If omitted, Nerve uses `main`. The UI normally derives this from the owning top-level agent when you switch sessions.
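
A client can pin the scope explicitly with the query parameter. The endpoint and parameter are documented below; the helper and the `research` agent id are only illustrative.

```ts
// Fetch memories for a specific workspace agent; omit agentId to use "main".
async function fetchMemories(agentId?: string) {
  const query = agentId ? `?agentId=${encodeURIComponent(agentId)}` : "";
  const res = await fetch(`/api/memories${query}`);
  if (!res.ok) throw new Error(`memories request failed: ${res.status}`);
  return res.json();
}

const data = await fetchMemories("research");
```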
|
||||
|
||||
### `GET /api/memories`
|
||||
|
||||
Returns parsed memory data from `MEMORY.md` (sections + bullet items) and the 7 most recent daily files (section headers only).
|
||||
Returns parsed memory data from `MEMORY.md` (sections + bullet items) and the 7 most recent daily files (section headers only) for the selected workspace agent.
|
||||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
|
|
@ -667,6 +732,7 @@ Returns the raw markdown content of a specific section.
|
|||
|-------|------|----------|-------------|
|
||||
| `title` | `string` | Yes | Section title (exact match) |
|
||||
| `date` | `string` | No | `YYYY-MM-DD` for daily files; omit for MEMORY.md |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
|
||||
|
|
@ -702,6 +768,7 @@ Creates a new memory entry. Writes a bullet point to `MEMORY.md` under the speci
|
|||
| `section` | `string` | No | Section heading (default: "General", max 200 chars) |
|
||||
| `category` | `"preference" \| "fact" \| "decision" \| "entity" \| "other"` | No | Category for vector store |
|
||||
| `importance` | `number` | No | 0–1 importance score (default: 0.7) |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
|
||||
|
|
@ -709,7 +776,7 @@ Creates a new memory entry. Writes a bullet point to `MEMORY.md` under the speci
|
|||
{ "ok": true, "result": { "written": true, "section": "Preferences" } }
|
||||
```
|
||||
|
||||
Broadcasts `memory.changed` SSE event on success.
|
||||
Broadcasts `memory.changed` SSE event on success, tagged with the affected `agentId`.
|
||||
|
||||
### `PUT /api/memories/section`
|
||||
|
||||
|
|
@ -730,6 +797,7 @@ Replaces the content of an existing section.
|
|||
| `title` | `string` | Yes | Section title (1–200 chars) |
|
||||
| `content` | `string` | Yes | New markdown content (max 50000 chars) |
|
||||
| `date` | `string` | No | `YYYY-MM-DD` for daily files; omit for MEMORY.md |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
### `DELETE /api/memories`
|
||||
|
||||
|
|
@ -749,6 +817,7 @@ Deletes a memory entry from the file.
|
|||
|-------|------|----------|-------------|
|
||||
| `query` | `string` | Yes | Text to find (exact match for items, section title for sections) |
|
||||
| `type` | `"section" \| "item" \| "daily"` | No | What to delete. `section`/`daily` removes header + all content. Default: item |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
| `date` | `string` | No | `YYYY-MM-DD` — required when `type` is `"daily"` |
|
||||
|
||||
**Response:**
|
||||
|
|
@ -798,7 +867,7 @@ All fields are optional. A `ts` (epoch ms) is automatically set on write. The lo
|
|||
|
||||
### `GET /api/gateway/models`
|
||||
|
||||
Returns available AI models from the OpenClaw gateway. Models are fetched via `openclaw models list` CLI and cached for 5 minutes.
|
||||
Returns the models defined in the active OpenClaw config. This endpoint is config-backed; it is no longer CLI-discovered or cache-backed.
|
||||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
|
|
@ -807,15 +876,26 @@ Returns available AI models from the OpenClaw gateway. Models are fetched via `o
|
|||
```json
|
||||
{
|
||||
"models": [
|
||||
{ "id": "anthropic/claude-sonnet-4-20250514", "label": "claude-sonnet-4-20250514", "provider": "anthropic" },
|
||||
{ "id": "openai/gpt-4o", "label": "gpt-4o", "provider": "openai" }
|
||||
]
|
||||
{
|
||||
"id": "anthropic/claude-sonnet-4-20250514",
|
||||
"label": "claude-sonnet-4-20250514",
|
||||
"provider": "anthropic",
|
||||
"configured": true,
|
||||
"role": "primary"
|
||||
}
|
||||
],
|
||||
"error": null,
|
||||
"source": "config"
|
||||
}
|
||||
```
|
||||
|
||||
**Selection logic:**
|
||||
1. Configured/allowlisted models (from `agents.defaults.models` in OpenClaw config) — all included regardless of `available` flag
|
||||
2. If ≤0 results: falls back to all available models from the gateway
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `models` | `array` | Configured models from `agents.defaults.model` and `agents.defaults.models` in the active OpenClaw config |
|
||||
| `error` | `string \| null` | Read error or configuration problem, for example config unreadable or no configured models |
|
||||
| `source` | `"config"` | Identifies the backing source |
|
||||
|
||||
Model roles are `primary`, `fallback`, or `allowed`. If the config cannot be read, or no models are configured, the endpoint returns an empty `models` array with an explanatory `error` string.
|
||||
|
||||
### `GET /api/gateway/session-info`
|
||||
|
||||
|
|
@ -840,7 +920,7 @@ Resolution order: per-session data from `sessions_list` → global `gateway_stat
|
|||
|
||||
### `POST /api/gateway/session-patch`
|
||||
|
||||
Changes the model and/or thinking level for a session. HTTP fallback when WebSocket RPC fails.
|
||||
HTTP fallback for **model changes** when the frontend cannot apply `sessions.patch` over WebSocket. Thinking-only changes are not supported here.
|
||||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
|
|
@ -854,69 +934,127 @@ Changes the model and/or thinking level for a session. HTTP fallback when WebSoc
|
|||
}
|
||||
```
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `sessionKey` | `string` | No | Target session. If omitted, Nerve tries to pick a preferred active root session |
|
||||
| `model` | `string` | No | Model to apply via the gateway's `session_status` tool |
|
||||
| `thinkingLevel` | `string \| null` | No | Accepted by the schema, but not applied by this HTTP fallback |
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
{ "ok": true, "model": "anthropic/claude-sonnet-4-20250514", "thinking": "high" }
|
||||
{ "ok": true, "model": "anthropic/claude-sonnet-4-20250514" }
|
||||
```
|
||||
|
||||
**Errors:** 400 (invalid JSON), 502 (gateway tool invocation failed)
|
||||
**Behavior notes:**
|
||||
- Thinking changes belong on WebSocket RPC `sessions.patch`, alongside other session metadata/settings updates.
|
||||
- A request that only changes `thinkingLevel` returns **501**.
|
||||
- If no active root session can be found and `sessionKey` is omitted, the endpoint returns **409**.
|
||||
- If both `model` and `thinkingLevel` are sent, the model change is applied and the thinking change is ignored.
|
||||
|
||||
**Errors:**
|
||||
|
||||
| Status | Condition |
|
||||
|--------|-----------|
|
||||
| 400 | Invalid JSON or validation error |
|
||||
| 409 | No active root session available |
|
||||
| 501 | Thinking-only changes are not supported over HTTP |
|
||||
| 502 | Model change failed |
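
A minimal sketch of using this HTTP fallback from a browser client once the WebSocket `sessions.patch` path has failed. The status handling mirrors the table above; the wrapper itself is illustrative.

```ts
// Apply a model change for a session via the HTTP fallback.
async function patchSessionModel(sessionKey: string, model: string) {
  const res = await fetch("/api/gateway/session-patch", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ sessionKey, model }),
  });
  if (res.status === 501) throw new Error("thinking-only changes must go over WebSocket");
  if (res.status === 409) throw new Error("no active root session available");
  if (!res.ok) throw new Error(`model change failed: ${res.status}`);
  return res.json(); // { ok: true, model: "..." }
}
```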
|
||||
|
||||
### `POST /api/gateway/restart`
|
||||
|
||||
Restarts the OpenClaw gateway service, then waits for the service to report healthy status and for the gateway port to become reachable.
|
||||
|
||||
**Rate Limit:** Restart (3/min)
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
{ "ok": true, "output": "Gateway restarted successfully" }
|
||||
```
|
||||
|
||||
**Errors:** 500 if restart or post-restart verification fails.
|
||||
|
||||
---
|
||||
|
||||
## Git Info
|
||||
## Sessions
|
||||
|
||||
### `GET /api/git-info`
|
||||
### `GET /api/sessions/hidden`
|
||||
|
||||
Returns the current git branch and dirty status.
|
||||
Returns hidden cron-like sessions from `sessions.json`, sorted by recent activity. Used to surface session metadata that is not part of the normal active session tree.
|
||||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Description |
|
||||
|-------|-------------|
|
||||
| `sessionKey` | Use a registered session-specific working directory |
|
||||
| Param | Default | Description |
|
||||
|-------|---------|-------------|
|
||||
| `activeMinutes` | `1440` | Include sessions updated within the last N minutes |
|
||||
| `limit` | `200` | Maximum results. Clamped to `2000` |
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
{ "branch": "main", "dirty": true }
|
||||
{
|
||||
"ok": true,
|
||||
"sessions": [
|
||||
{
|
||||
"key": "agent:main:cron:daily:run:abc",
|
||||
"sessionKey": "agent:main:cron:daily:run:abc",
|
||||
"id": "123e4567-e89b-12d3-a456-426614174000",
|
||||
"label": "daily summary",
|
||||
"displayName": "daily summary",
|
||||
"updatedAt": 1708100000000,
|
||||
"model": "openai/gpt-5",
|
||||
"thinking": "medium",
|
||||
"thinkingLevel": "medium",
|
||||
"totalTokens": 1234,
|
||||
"contextTokens": 456,
|
||||
"parentId": "agent:main:cron:daily"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Returns `{ "branch": null, "dirty": false }` if not in a git repo.
|
||||
If the backing `sessions.json` file is unavailable, the endpoint returns `{ "ok": true, "sessions": [] }`. In remote-workspace cases it may also include `remoteWorkspace: true`.
|
||||
|
||||
### `POST /api/git-info/workdir`
|
||||
### `GET /api/sessions/:id/model`
|
||||
|
||||
Registers a working directory for a session, so `GET /api/git-info?sessionKey=...` resolves to the correct repo.
|
||||
Reads the actual model used by a session from its transcript. This is mainly for cron-run sessions where gateway session listings may only expose the parent agent's default model.
|
||||
|
||||
**Request Body:**
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
**Path Parameters:**
|
||||
|
||||
| Param | Description |
|
||||
|-------|-------------|
|
||||
| `id` | Session UUID |
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
{ "sessionKey": "agent:main:subagent:abc123", "workdir": "/home/user/project" }
|
||||
{ "ok": true, "model": "openai/gpt-5", "missing": false }
|
||||
```
|
||||
|
||||
The workdir must be within the allowed base directory (derived from `WORKSPACE_ROOT` env var, git worktree list, or the parent of `process.cwd()`). Returns 403 if the path is outside the allowed base.
|
||||
If the transcript cannot be found, the endpoint returns `{ "ok": true, "model": null, "missing": true }`.
|
||||
|
||||
Session workdir entries expire after 1 hour. Max 100 entries.
|
||||
|
||||
### `DELETE /api/git-info/workdir`
|
||||
|
||||
Unregisters a session's working directory.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
{ "sessionKey": "agent:main:subagent:abc123" }
|
||||
```
|
||||
**Errors:** 400 if `id` is not a valid UUID.
|
||||
|
||||
---
|
||||
|
||||
## Workspace Files
|
||||
|
||||
Workspace file routes accept an optional `agentId` scope. If omitted, Nerve uses the `main` workspace.
|
||||
|
||||
### `GET /api/workspace`
|
||||
|
||||
Lists available workspace file keys and their existence status.
|
||||
Lists available workspace file keys and their existence status for the selected workspace agent.
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
|
||||
|
|
@ -940,6 +1078,12 @@ Reads a workspace file by key.
|
|||
|
||||
**Valid keys:** `soul`, `tools`, `identity`, `user`, `agents`, `heartbeat`
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
|
|
@ -955,9 +1099,14 @@ Writes content to a workspace file.
|
|||
**Request Body:**
|
||||
|
||||
```json
|
||||
{ "content": "# Updated content\n\nNew text here." }
|
||||
{ "content": "# Updated content\n\nNew text here.", "agentId": "research" }
|
||||
```
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `content` | `string` | Yes | New file contents |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
Content must be a string, max 100 KB.
|
||||
|
||||
---
|
||||
|
|
@ -1028,10 +1177,18 @@ Returns the last 10 run history entries for a cron job.
|
|||
|
||||
### `GET /api/skills`
|
||||
|
||||
Lists all OpenClaw skills via `openclaw skills list --json`.
|
||||
Lists all OpenClaw skills via `openclaw skills list --json` for the selected workspace agent.
|
||||
|
||||
Nerve scopes this by creating a temporary OpenClaw config whose `agents.defaults.workspace` points at the selected agent workspace, then runs the CLI against that workspace.
|
||||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
|
|
@ -1057,43 +1214,65 @@ Lists all OpenClaw skills via `openclaw skills list --json`.
|
|||
|
||||
## File Browser
|
||||
|
||||
Browse, read, and edit workspace files. All paths are restricted to the workspace directory with traversal protection.
|
||||
Browse, read, and edit workspace files. All paths are restricted to the selected workspace directory with traversal protection.
|
||||
|
||||
All file-browser routes accept an optional `agentId` scope. If omitted, Nerve uses `main`. If `FILE_BROWSER_ROOT` is set, file-browser agent scoping is bypassed and all sessions browse the same custom root.
|
||||
|
||||
### `GET /api/files/tree`
|
||||
|
||||
Returns the workspace directory tree. Excludes `node_modules`, `.git`, `dist`, `server-dist`, and other build artifacts.
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
[
|
||||
{
|
||||
"name": "MEMORY.md",
|
||||
"path": "MEMORY.md",
|
||||
"type": "file"
|
||||
},
|
||||
{
|
||||
"name": "memory",
|
||||
"path": "memory",
|
||||
"type": "directory",
|
||||
"children": [...]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### `GET /api/files/read`
|
||||
|
||||
Read a file's contents with its modification time (for conflict detection on save).
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Description |
|
||||
|-------|-------------|
|
||||
| `path` | Relative path within the workspace |
|
||||
| Param | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `path` | `string` | No | Relative directory path to expand. Defaults to root |
|
||||
| `depth` | `number` | No | Expansion depth, clamped to 1–5 |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"ok": true,
|
||||
"root": ".",
|
||||
"entries": [
|
||||
{
|
||||
"name": "MEMORY.md",
|
||||
"path": "MEMORY.md",
|
||||
"type": "file",
|
||||
"mtime": 1771355007542
|
||||
},
|
||||
{
|
||||
"name": "memory",
|
||||
"path": "memory",
|
||||
"type": "directory",
|
||||
"children": null
|
||||
}
|
||||
],
|
||||
"workspaceInfo": {
|
||||
"isCustomWorkspace": false,
|
||||
"rootPath": "/home/user/.openclaw/workspace"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### `GET /api/files/read`
|
||||
|
||||
Read a text file's contents with its modification time (for conflict detection on save).
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `path` | `string` | Yes | Relative path within the selected workspace |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"ok": true,
|
||||
"content": "# MEMORY.md\n...",
|
||||
"size": 128,
|
||||
"mtime": 1771355007542
|
||||
}
|
||||
```
|
||||
|
|
@ -1102,22 +1281,33 @@ Read a file's contents with its modification time (for conflict detection on sav
|
|||
|
||||
| Status | Condition |
|
||||
|--------|-----------|
|
||||
| 400 | Missing `path`, path traversal detected, or binary file |
|
||||
| 400 | Missing `path`, not a file, or other invalid request |
|
||||
| 403 | Path traversal or excluded path |
|
||||
| 404 | File not found |
|
||||
| 413 | File too large |
|
||||
| 415 | Binary file |
|
||||
|
||||
### `POST /api/files/write`
|
||||
### `PUT /api/files/write`
|
||||
|
||||
Write file contents with optimistic concurrency via mtime comparison. If the file was modified since it was last read, returns 409 Conflict.
|
||||
Write text file contents with optimistic concurrency via mtime comparison. If the file was modified since it was last read, returns 409 Conflict.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{
|
||||
"path": "MEMORY.md",
|
||||
"content": "# Updated content\n...",
|
||||
"mtime": 1771355007542
|
||||
"expectedMtime": 1771355007542,
|
||||
"agentId": "research"
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `path` | `string` | Yes | Relative file path within the selected workspace |
|
||||
| `content` | `string` | Yes | UTF-8 file contents |
|
||||
| `expectedMtime` | `number` | No | Expected current mtime for optimistic concurrency |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
**Response (success):**
|
||||
```json
|
||||
{
|
||||
|
|
@ -1130,8 +1320,60 @@ Write file contents with optimistic concurrency via mtime comparison. If the fil
|
|||
|
||||
| Status | Condition |
|
||||
|--------|-----------|
|
||||
| 400 | Missing fields or path traversal |
|
||||
| 409 | File modified since last read (mtime mismatch) |
|
||||
| 400 | Missing fields or invalid request |
|
||||
| 403 | Path traversal or excluded path |
|
||||
| 409 | File modified since last read (`currentMtime` returned) |
|
||||
| 413 | Content too large |
|
||||
| 415 | Binary file write attempted |
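
A sketch of the read, edit, write loop with the optimistic-concurrency check. Endpoints and field names are as documented above; the single-retry policy is illustrative.

```ts
// Read a file, transform it, and write it back.
// A 409 means someone else wrote the file after our read; re-read and retry once.
async function updateWorkspaceFile(
  path: string,
  edit: (text: string) => string,
  agentId = "main",
) {
  for (let attempt = 0; attempt < 2; attempt++) {
    const readRes = await fetch(
      `/api/files/read?path=${encodeURIComponent(path)}&agentId=${encodeURIComponent(agentId)}`,
    );
    if (!readRes.ok) throw new Error(`read failed: ${readRes.status}`);
    const { content, mtime } = await readRes.json();

    const writeRes = await fetch("/api/files/write", {
      method: "PUT",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ path, content: edit(content), expectedMtime: mtime, agentId }),
    });
    if (writeRes.status !== 409) {
      if (!writeRes.ok) throw new Error(`write failed: ${writeRes.status}`);
      return writeRes.json();
    }
    // 409: conflict detected via expectedMtime; loop re-reads and retries.
  }
  throw new Error("conflict persisted after retry");
}
```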
|
||||
|
||||
### `POST /api/files/rename`
|
||||
|
||||
Rename a file or directory within the selected workspace.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{ "path": "notes/today.md", "newName": "tomorrow.md", "agentId": "research" }
|
||||
```
|
||||
|
||||
### `POST /api/files/move`
|
||||
|
||||
Move a file or directory into another directory within the selected workspace.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{ "sourcePath": "notes/today.md", "targetDirPath": "archive", "agentId": "research" }
|
||||
```
|
||||
|
||||
### `POST /api/files/trash`
|
||||
|
||||
Move a file or directory into `.trash` when using the default workspace root. If `FILE_BROWSER_ROOT` is set, deletion is permanent instead.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{ "path": "notes/today.md", "agentId": "research" }
|
||||
```
|
||||
|
||||
### `POST /api/files/restore`
|
||||
|
||||
Restore an item from `.trash` back to its original location.
|
||||
|
||||
**Request Body:**
|
||||
```json
|
||||
{ "path": ".trash/2026-03-20-notes-today.md", "agentId": "research" }
|
||||
```
|
||||
|
||||
### `GET /api/files/raw`
|
||||
|
||||
Serves supported image files from the selected workspace for previews.
|
||||
|
||||
**Query Parameters:**
|
||||
|
||||
| Param | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `path` | `string` | Yes | Relative image path |
|
||||
| `agentId` | `string` | No | Workspace agent id. Defaults to `main` |
|
||||
|
||||
Supported image types: `png`, `jpg`, `jpeg`, `gif`, `webp`, `avif`, `svg`, `ico`.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -1323,6 +1565,20 @@ List tasks with optional filters and pagination.
|
|||
|
||||
Tasks are sorted by status order → column order → most recently updated.
|
||||
|
||||
### `GET /api/kanban/tasks/:id`
|
||||
|
||||
Fetch a single task by ID.
|
||||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
**Response:** The matching `KanbanTask` object.
|
||||
|
||||
**Errors:**
|
||||
|
||||
| Status | Body | Condition |
|
||||
|--------|------|-----------|
|
||||
| 404 | `{ "error": "not_found" }` | Task not found |
|
||||
|
||||
### `POST /api/kanban/tasks`
|
||||
|
||||
Create a new task.
|
||||
|
|
@ -1460,7 +1716,7 @@ Move a task to a different position within its column or to another column. CAS-
|
|||
|
||||
### `POST /api/kanban/tasks/:id/execute`
|
||||
|
||||
Execute a task by spawning an agent session. The task must be in `todo` or `backlog` status. Moves the task to `in-progress` and starts polling the agent session for completion.
|
||||
Execute a task and move it to `in-progress`. The launch path depends on the task's assignee and platform. The task must be in `todo` or `backlog` status; on launch, Nerve moves it to `in-progress` and starts polling the agent session for completion.
|
||||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
|
|
@ -1475,22 +1731,31 @@ Execute a task by spawning an agent session. The task must be in `todo` or `back
|
|||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `model` | `string` | No | Model override (max 200 chars). Falls back to task's model → board `defaultModel` → `anthropic/claude-sonnet-4-5` |
|
||||
| `thinking` | `string` | No | Thinking level: `off`, `low`, `medium`, `high` |
|
||||
| `model` | `string` | No | Execution model override (max 200 chars). Cascade: execute request → task `model` → board `defaultModel` → OpenClaw configured default |
|
||||
| `thinking` | `string` | No | Thinking override: `off`, `low`, `medium`, `high` |
|
||||
|
||||
**Response:** The updated `KanbanTask` object with `status: "in-progress"` and a `run` object.
|
||||
|
||||
**Execution paths:**
|
||||
- **Assigned tasks** create a real child session beneath the assignee's live root. Nerve verifies that the parent root exists, creates the child with `sessions.create(parentSessionKey=...)`, then sends the task into that child with `sessions.send`.
|
||||
- **Unassigned or `operator` tasks** use the normal `sessions_spawn` path.
|
||||
- **macOS fallback rule:** unassigned or `operator` tasks are rejected. Assign the task to a live worker root first.
|
||||
|
||||
**Errors:**
|
||||
|
||||
| Status | Body | Condition |
|
||||
|--------|------|-----------|
|
||||
| 404 | `{ "error": "not_found" }` | Task not found |
|
||||
| 409 | `{ "error": "duplicate_execution" }` | Task is already running |
|
||||
| 409 | `{ "error": "invalid_execution_target" }` | Required parent root is missing, or macOS requires an assigned live worker root |
|
||||
| 409 | `{ "error": "invalid_transition", "from": "done", "to": "in-progress" }` | Task not in `todo` or `backlog` status |
|
||||
|
||||
**Notes:**
|
||||
- If the task is already `in-progress` with an active run, returns the task as-is (idempotent).
|
||||
- The spawned agent receives the task title and description as its prompt.
|
||||
- The backend polls the gateway every 5 seconds for up to 30 minutes. On completion, the task moves to `review`. On error, it moves back to `todo`.
|
||||
- The spawned worker receives the task title and description as its prompt.
|
||||
- Assigned-task runs keep both a deterministic run correlation key and the real `childSessionKey`.
|
||||
- When an assigned child session finishes or fails, Nerve sends a completion report back to the parent root session.
|
||||
- Backend pollers run every 5 seconds for up to **720 attempts / 60 minutes**.
|
||||
- On success the task moves to `review`. On error it moves back to `todo`.
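
Putting the endpoint together, a minimal client-side sketch of launching a task with an execution-time model override. The body fields and error strings are as documented above; the wrapper itself is illustrative.

```ts
// Execute a kanban task, optionally overriding the model for this run only.
async function executeKanbanTask(taskId: string, model?: string) {
  const res = await fetch(`/api/kanban/tasks/${encodeURIComponent(taskId)}/execute`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(model ? { model } : {}),
  });
  if (res.status === 409) {
    const body = await res.json();
    // duplicate_execution, invalid_execution_target, or invalid_transition
    throw new Error(`cannot execute: ${body.error}`);
  }
  if (!res.ok) throw new Error(`execute failed: ${res.status}`);
  return res.json(); // updated KanbanTask with status "in-progress" and a run object
}
```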
|
||||
|
||||
### `POST /api/kanban/tasks/:id/complete`
|
||||
|
||||
|
|
@ -1498,10 +1763,11 @@ Complete a running task. Called by the backend poller automatically, but can als
|
|||
|
||||
**Rate Limit:** General (60/min)
|
||||
|
||||
**Request Body (optional):**
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
{
|
||||
"sessionKey": "kb-auth-refactor-123-v4-1708100000000",
|
||||
"result": "Refactored auth module. Extracted SessionService class...",
|
||||
"error": "Agent session timed out"
|
||||
}
|
||||
|
|
@ -1509,6 +1775,7 @@ Complete a running task. Called by the backend poller automatically, but can als
|
|||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `sessionKey` | `string` | Yes | Active run session key used to match the task run |
|
||||
| `result` | `string` | No | Agent output text (max 50000 chars). Kanban markers are parsed and stripped automatically |
|
||||
| `error` | `string` | No | Error message (max 5000 chars). If set, task moves to `todo` instead of `review` |
|
||||
|
||||
|
|
@ -1518,8 +1785,9 @@ Complete a running task. Called by the backend poller automatically, but can als
|
|||
|
||||
| Status | Condition |
|
||||
|--------|-----------|
|
||||
| 400 | Invalid body or missing `sessionKey` |
|
||||
| 404 | Task not found |
|
||||
| 409 | No active run to complete |
|
||||
| 409 | No active matching run to complete |
|
||||
|
||||
### `POST /api/kanban/tasks/:id/approve`
|
||||
|
||||
|
|
@ -1811,7 +2079,8 @@ All `/api/*` routes have rate limiting applied. Limits are per-client-IP per-pat
|
|||
|--------|--------|-------|
|
||||
| **TTS** | `POST /api/tts` | 10 requests / 60 seconds |
|
||||
| **Transcribe** | `POST /api/transcribe` | 30 requests / 60 seconds |
|
||||
| **General** | All other `/api/*` routes | 60 requests / 60 seconds |
|
||||
| **General** | Most `/api/*` routes | 60 requests / 60 seconds |
|
||||
| **Restart** | `POST /api/gateway/restart` | 3 requests / 60 seconds |
|
||||
|
||||
**Rate limit headers** are included on every response:
|
||||
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ All global state flows through four React contexts, nested in dependency order:
|
|||
| Context | File | Responsibilities |
|
||||
|---------|------|-----------------|
|
||||
| **GatewayContext** | `src/contexts/GatewayContext.tsx` | WebSocket connection lifecycle, RPC method calls, event fan-out via pub/sub pattern, model/thinking status polling, activity sparkline |
|
||||
| **SettingsContext** | `src/contexts/SettingsContext.tsx` | Sound, TTS provider/model, wake word, panel ratio, theme, font, telemetry/events visibility. Persists to `localStorage` |
|
||||
| **SettingsContext** | `src/contexts/SettingsContext.tsx` | Sound, TTS provider/model, wake word, panel ratio, theme, font, font size, telemetry/events visibility. Persists to `localStorage` |
|
||||
| **SessionContext** | `src/contexts/SessionContext.tsx` | Session list (via gateway RPC), granular agent status tracking (IDLE/THINKING/STREAMING/DONE/ERROR), busy state derivation, unread session tracking, agent log, event log, session CRUD (delete, spawn, rename, abort) |
|
||||
| **ChatContext** | `src/contexts/ChatContext.tsx` | Thin orchestrator composing 4 hooks: `useChatMessages` (CRUD, history, scroll), `useChatStreaming` (deltas, processing stage, activity log), `useChatRecovery` (reconnect, retry, gap detection), `useChatTTS` (playback, voice fallback, sound feedback) |
|
||||
|
||||
|
|
@ -114,7 +114,7 @@ Session management sidebar.
|
|||
| `SessionList.tsx` | Hierarchical session tree with parent-child relationships |
|
||||
| `SessionNode.tsx` | Individual session row with status indicator, context menu |
|
||||
| `SessionInfoPanel.tsx` | Session detail panel (model, tokens, thinking level) |
|
||||
| `SpawnAgentDialog.tsx` | Dialog for spawning sub-agents with task/model/thinking config |
|
||||
| `SpawnAgentDialog.tsx` | Dialog for spawning top-level agents and subagents with task, model, thinking, and subagent **After run** cleanup config |
|
||||
| `sessionTree.ts` | Builds tree structure from flat session list using `parentId` |
|
||||
| `statusUtils.ts` | Maps agent status to icons and labels |
|
||||
|
||||
|
|
@ -139,18 +139,30 @@ Full workspace file browser with tabbed CodeMirror editor.
|
|||
#### `features/workspace/`
|
||||
Workspace file editor and management tabs.
|
||||
|
||||
The workspace scope is derived from the **owning top-level agent**. Memory, Config, Skills, file-browser state, and persisted drafts follow that top-level agent. Crons and Kanban stay global.
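
A sketch of the scope derivation, assuming session keys follow the `agent:<id>:...` shape shown elsewhere in these docs. The repo's `workspaceScope.ts` holds the real logic; this helper is only illustrative.

```ts
// Derive the owning top-level agent id from a session key.
// Root, subagent, and cron-run sessions all map back to the same top-level agent.
function owningAgentId(sessionKey: string): string {
  const parts = sessionKey.split(":");
  return parts[0] === "agent" && parts[1] ? parts[1] : "main";
}

owningAgentId("agent:research:main");           // "research"
owningAgentId("agent:main:cron:daily:run:abc"); // "main"
```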
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `WorkspacePanel.tsx` | Container for workspace tabs |
|
||||
| `WorkspacePanel.tsx` | Container for workspace tabs, scoped by the current top-level workspace agent |
|
||||
| `WorkspaceTabs.tsx` | Tab switcher (Memory, Config, Crons, Skills) |
|
||||
| `tabs/MemoryTab.tsx` | View/edit MEMORY.md and daily files |
|
||||
| `tabs/ConfigTab.tsx` | Edit workspace files (SOUL.md, TOOLS.md, USER.md, etc.) |
|
||||
| `tabs/CronsTab.tsx` | Cron job management (list, create, toggle, run) |
|
||||
| `tabs/MemoryTab.tsx` | View/edit the selected top-level agent's `MEMORY.md` and daily files |
|
||||
| `tabs/ConfigTab.tsx` | Edit scoped workspace files (`SOUL.md`, `TOOLS.md`, `USER.md`, etc.) for the selected top-level agent |
|
||||
| `tabs/CronsTab.tsx` | Cron job management (list, create, toggle, run). Remains global |
|
||||
| `tabs/CronDialog.tsx` | Cron creation/edit dialog |
|
||||
| `tabs/SkillsTab.tsx` | View installed skills with eligibility status |
|
||||
| `hooks/useWorkspaceFile.ts` | Fetch/save workspace files via REST API |
|
||||
| `tabs/SkillsTab.tsx` | View installed skills for the selected top-level agent workspace |
|
||||
| `hooks/useWorkspaceFile.ts` | Fetch/save scoped workspace files via REST API + `agentId` |
|
||||
| `hooks/useCrons.ts` | Cron CRUD operations via REST API |
|
||||
| `hooks/useSkills.ts` | Fetch skills list |
|
||||
| `hooks/useSkills.ts` | Fetch scoped skills list via `agentId` |
|
||||
| `workspaceScope.ts` | Derives workspace scope and localStorage keys from the owning top-level agent |
|
||||
| `workspaceSwitchGuard.ts` | Pure guard logic for dirty-file prompts when switching between top-level agents |
|
||||
|
||||
### Workspace scope rules
|
||||
|
||||
- Root sessions use their own top-level agent as workspace scope
|
||||
- Subagent and cron-run views inherit the owning top-level agent workspace
|
||||
- The child session itself does **not** create a separate workspace scope
|
||||
- Cross-agent dirty file prompts only fire when the owning top-level agent changes
|
||||
- Crons and Kanban stay global even while Memory, Config, Skills, and file-browser state switch per agent
|
||||
|
||||
#### `features/settings/`
|
||||
Settings drawer with tabbed sections.
|
||||
|
|
@ -160,15 +172,15 @@ Settings drawer with tabbed sections.
|
|||
| `SettingsDrawer.tsx` | Slide-out drawer container. Includes logout button when auth is enabled |
|
||||
| `ConnectionSettings.tsx` | Gateway URL/token, reconnect |
|
||||
| `AudioSettings.tsx` | TTS provider, model, voice, wake word |
|
||||
| `AppearanceSettings.tsx` | Theme, font selection |
|
||||
| `AppearanceSettings.tsx` | Theme, font family, font size selection |
|
||||
|
||||
#### `features/tts/`
|
||||
Text-to-speech integration.
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `useTTS.ts` | Core TTS hook — speaks text via server `/api/tts` endpoint. Supports OpenAI, Replicate, Edge (default) providers |
|
||||
| `useTTSConfig.ts` | Server-side TTS voice configuration management |
|
||||
| `useTTS.ts` | Core TTS hook that speaks text via the server `/api/tts` endpoint. Supports OpenAI, Replicate, Edge (default), and Xiaomi MiMo providers |
|
||||
| `useTTSConfig.ts` | Server-side TTS voice configuration management for Qwen, OpenAI, Edge, and Xiaomi MiMo settings |
|
||||
|
||||
#### `features/voice/`
|
||||
Voice input and audio feedback.
|
||||
|
|
@ -332,28 +344,39 @@ Applied in order in `app.ts`:
|
|||
| `/api/voice-phrases/:lang` | `routes/voice-phrases.ts` | GET, PUT | Read/save language-specific stop/cancel/wake phrase overrides |
|
||||
| `/api/agentlog` | `routes/agent-log.ts` | GET, POST | Agent activity log persistence. Zod-validated entries. Mutex-protected file I/O |
|
||||
| `/api/tokens` | `routes/tokens.ts` | GET | Token usage statistics — scans session transcripts, persists high water mark |
|
||||
| `/api/memories` | `routes/memories.ts` | GET, POST, DELETE | Memory management — reads MEMORY.md + daily files, stores/deletes via gateway tool invocation |
|
||||
| `/api/memories/section` | `routes/memories.ts` | GET, PUT | Read/replace a specific memory section by title |
|
||||
| `/api/gateway/models` | `routes/gateway.ts` | GET | Available models via `openclaw models list`. Allowlist support |
|
||||
| `/api/memories` | `routes/memories.ts` | GET, POST, DELETE | Agent-scoped memory management — reads `MEMORY.md` + daily files, stores/deletes via gateway tool invocation |
|
||||
| `/api/memories/section` | `routes/memories.ts` | GET, PUT | Read/replace a specific memory section by title, scoped via `agentId` |
|
||||
| `/api/gateway/models` | `routes/gateway.ts` | GET | Config-backed model catalog from the active OpenClaw config. Returns `{ models, error, source: "config" }` |
|
||||
| `/api/gateway/session-info` | `routes/gateway.ts` | GET | Current session model/thinking level |
|
||||
| `/api/gateway/session-patch` | `routes/gateway.ts` | POST | Change model/effort for a session |
|
||||
| `/api/gateway/session-patch` | `routes/gateway.ts` | POST | HTTP fallback for model changes. Thinking changes belong on WS `sessions.patch` |
|
||||
| `/api/server-info` | `routes/server-info.ts` | GET | Server time, gateway uptime, agent name |
|
||||
| `/api/version` | `routes/version.ts` | GET | Package version from `package.json` |
|
||||
| `/api/git-info` | `routes/git-info.ts` | GET, POST, DELETE | Git branch/status. Session workdir registration |
|
||||
| `/api/workspace/:key` | `routes/workspace.ts` | GET, PUT | Read/write workspace files (strict key→file allowlist: soul, tools, identity, user, agents, heartbeat) |
|
||||
| `/api/version/check` | `routes/version-check.ts` | GET | Check whether a newer published version is available |
|
||||
| `/api/channels` | `routes/channels.ts` | GET | List configured messaging channels from OpenClaw config |
|
||||
| `/api/gateway/restart` | `routes/gateway.ts` | POST | Restart the OpenClaw gateway service and verify readiness |
|
||||
| `/api/sessions/hidden` | `routes/sessions.ts` | GET | List hidden cron-like sessions from stored session metadata |
|
||||
| `/api/sessions/:id/model` | `routes/sessions.ts` | GET | Read the actual model used by a session from its transcript |
|
||||
| `/api/workspace` | `routes/workspace.ts` | GET | List allowlisted workspace files for the selected agent workspace |
|
||||
| `/api/workspace/:key` | `routes/workspace.ts` | GET, PUT | Read/write allowlisted workspace files (`soul`, `tools`, `identity`, `user`, `agents`, `heartbeat`) via `agentId` |
|
||||
| `/api/crons` | `routes/crons.ts` | GET, POST, PATCH, DELETE | Cron job CRUD via gateway tool invocation |
|
||||
| `/api/crons/:id/toggle` | `routes/crons.ts` | POST | Toggle cron enabled/disabled |
|
||||
| `/api/crons/:id/run` | `routes/crons.ts` | POST | Run cron job immediately |
|
||||
| `/api/crons/:id/runs` | `routes/crons.ts` | GET | Cron run history |
|
||||
| `/api/skills` | `routes/skills.ts` | GET | List skills via `openclaw skills list --json` |
|
||||
| `/api/skills` | `routes/skills.ts` | GET | List skills for the selected agent workspace via a scoped OpenClaw config |
|
||||
| `/api/keys` | `routes/api-keys.ts` | GET, PUT | Read API-key presence and persist updated key values to `.env` |
|
||||
| `/api/files` | `routes/files.ts` | GET | Serve local image files (MIME-type restricted, directory traversal blocked) |
|
||||
| `/api/files/tree` | `routes/file-browser.ts` | GET | Workspace directory tree (excludes node_modules, .git, etc.) |
|
||||
| `/api/files/read` | `routes/file-browser.ts` | GET | Read file contents with mtime for conflict detection |
|
||||
| `/api/files/write` | `routes/file-browser.ts` | POST | Write file with mtime-based optimistic concurrency (409 on conflict) |
|
||||
| `/api/files/tree` | `routes/file-browser.ts` | GET | Agent-scoped workspace directory tree (excludes node_modules, .git, etc.) |
|
||||
| `/api/files/read` | `routes/file-browser.ts` | GET | Read scoped file contents with mtime for conflict detection |
|
||||
| `/api/files/write` | `routes/file-browser.ts` | PUT | Write scoped file contents with optimistic concurrency (409 on conflict) |
|
||||
| `/api/files/rename` | `routes/file-browser.ts` | POST | Rename a file or directory within the selected workspace |
|
||||
| `/api/files/move` | `routes/file-browser.ts` | POST | Move a file or directory within the selected workspace |
|
||||
| `/api/files/trash` | `routes/file-browser.ts` | POST | Trash a file or directory, or permanently delete when using `FILE_BROWSER_ROOT` |
|
||||
| `/api/files/restore` | `routes/file-browser.ts` | POST | Restore a trashed file or directory |
|
||||
| `/api/files/raw` | `routes/file-browser.ts` | GET | Serve scoped image previews from the selected workspace |
|
||||
| `/api/claude-code-limits` | `routes/claude-code-limits.ts` | GET | Claude Code rate limits via PTY + CLI parsing |
|
||||
| `/api/codex-limits` | `routes/codex-limits.ts` | GET | Codex rate limits via OpenAI API with local file fallback |
|
||||
| `/api/kanban/tasks` | `routes/kanban.ts` | GET, POST | Task CRUD -- list (with filters/pagination) and create |
|
||||
| `/api/kanban/tasks/:id` | `routes/kanban.ts` | PATCH, DELETE | Update (CAS-versioned) and delete tasks |
|
||||
| `/api/kanban/tasks/:id` | `routes/kanban.ts` | GET, PATCH, DELETE | Get, update (CAS-versioned), and delete tasks |
|
||||
| `/api/kanban/tasks/:id/reorder` | `routes/kanban.ts` | POST | Reorder/move tasks across columns |
|
||||
| `/api/kanban/tasks/:id/execute` | `routes/kanban.ts` | POST | Spawn agent session for task |
|
||||
| `/api/kanban/tasks/:id/complete` | `routes/kanban.ts` | POST | Complete a running task (auto-called by poller) |
|
||||
|
|
@ -374,7 +397,7 @@ Applied in order in `app.ts`:
|
|||
| `lib/ws-proxy.ts` | WebSocket proxy — client→gateway with session cookie auth on upgrade and Ed25519 device identity injection |
|
||||
| `lib/device-identity.ts` | Ed25519 keypair generation/persistence (`~/.nerve/device-identity.json`). Builds signed connect blocks for gateway auth |
|
||||
| `lib/gateway-client.ts` | HTTP client for gateway tool invocation API (`/tools/invoke`) |
|
||||
| `lib/file-watcher.ts` | Watches MEMORY.md, `memory/`, and workspace directory (recursive). Broadcasts `file.changed` SSE events for real-time sync |
|
||||
| `lib/file-watcher.ts` | Discovers agent workspaces, watches each `MEMORY.md` and `memory/` directory, and optionally watches full workspaces recursively. Broadcasts agent-tagged `memory.changed` / `file.changed` SSE events |
|
||||
| `lib/file-utils.ts` | File browser utilities — path validation, directory exclusions, binary file detection |
|
||||
| `lib/files.ts` | Async file helpers (`readJSON`, `writeJSON`, `readText`) |
|
||||
| `lib/mutex.ts` | Async mutex for serializing file read-modify-write. Includes keyed mutex variant |
|
||||
|
|
@ -444,7 +467,8 @@ Browser → GET /api/events → SSE stream (text/event-stream)
|
|||
```
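
A browser-side sketch of consuming this stream, assuming the server emits named SSE events matching the list below with JSON payloads. The event names are documented; the listener wiring and payload shape are assumptions.

```ts
// Subscribe to server-sent events and react to agent-scoped changes.
const events = new EventSource("/api/events");

events.addEventListener("memory.changed", (e) => {
  const payload = JSON.parse((e as MessageEvent).data); // tagged with agentId
  console.log("memory changed for agent", payload.agentId);
});

events.addEventListener("file.changed", (e) => {
  const payload = JSON.parse((e as MessageEvent).data);
  console.log("workspace file changed for agent", payload.agentId);
});

// "ping" keep-alives arrive every 30 seconds and need no handler.
```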
|
||||
|
||||
Events pushed by the server:
|
||||
- `memory.changed` — File watcher detects MEMORY.md or daily file changes
|
||||
- `memory.changed` — File watcher or memory API detects `MEMORY.md` / daily file changes, tagged with `agentId`
|
||||
- `file.changed` — File watcher detects a workspace file change, tagged with `agentId`
|
||||
- `tokens.updated` — Token usage data changed
|
||||
- `status.changed` — Gateway status changed
|
||||
- `ping` — Keep-alive every 30 seconds
|
||||
|
|
@ -467,7 +491,7 @@ The frontend calls gateway methods via `GatewayContext.rpc()`:
|
|||
| `sessions.list` | List active sessions |
|
||||
| `sessions.delete` | Delete a session |
|
||||
| `sessions.reset` | Clear session context |
|
||||
| `sessions.patch` | Rename a session |
|
||||
| `sessions.patch` | Patch session metadata and settings, including rename/model/thinking flows the gateway supports |
|
||||
| `chat.send` | Send a message (with idempotency key) |
|
||||
| `chat.history` | Load message history |
|
||||
| `chat.abort` | Abort current generation |
|
||||
|
|
@ -494,11 +518,11 @@ The kanban board provides task management with agent execution, drag-and-drop re
|
|||
### Store Design
|
||||
|
||||
```
|
||||
server/data/kanban/tasks.json -- single JSON file (tasks + proposals + config)
|
||||
server/data/kanban/audit.log -- append-only audit log (JSONL)
|
||||
${NERVE_DATA_DIR:-~/.nerve}/kanban/tasks.json -- single JSON file (tasks + proposals + config)
|
||||
${NERVE_DATA_DIR:-~/.nerve}/kanban/audit.log -- append-only audit log (JSONL)
|
||||
```
|
||||
|
||||
All data lives in one JSON file (`StoreData`). Every mutation acquires an async mutex, reads the file, applies the change, and writes back atomically via temp-file rename. This guarantees consistency under concurrent requests without a database.
|
||||
All data lives in one JSON file (`StoreData`). Every mutation acquires an async mutex, reads the file, applies the change, and writes back atomically via temp-file rename. This guarantees consistency under concurrent requests without a database. On first startup, the store migrates legacy data from `server-dist/data/kanban/` or `server/data/kanban/` into the canonical runtime directory if needed.
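
A condensed sketch of that mutation pattern. The promise-chain lock and the `StoreData` shape here are illustrative stand-ins for the real mutex helper and store types.

```ts
import { readFile, writeFile, rename } from "node:fs/promises";

type StoreData = Record<string, unknown>; // illustrative; the real StoreData holds tasks, proposals, config

// Minimal in-process mutex: serialize mutations on a promise chain.
let chain: Promise<unknown> = Promise.resolve();
function withStoreLock<T>(fn: () => Promise<T>): Promise<T> {
  const next = chain.then(fn, fn);
  chain = next.catch(() => undefined);
  return next;
}

// Read-modify-write with an atomic temp-file rename, mirroring the pattern above.
export function mutateStore(storePath: string, mutate: (data: StoreData) => void) {
  return withStoreLock(async () => {
    const data = JSON.parse(await readFile(storePath, "utf8")) as StoreData;
    mutate(data); // apply the change in memory
    const tmp = `${storePath}.tmp`;
    await writeFile(tmp, JSON.stringify(data, null, 2));
    await rename(tmp, storePath); // atomic replace on the same filesystem
  });
}
```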
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
|
|
@ -545,21 +569,45 @@ This prevents stale overwrites from concurrent editors (drag-and-drop, API clien
|
|||
|
||||
```
|
||||
1. POST /api/kanban/tasks/:id/execute
|
||||
+-- store.executeTask() -> status = in-progress, run.status = running
|
||||
+-- invokeGatewayTool('sessions_spawn', { label: 'kanban-<id>', ... })
|
||||
+-- fire-and-forget
|
||||
+-- withMutex(`kanban-execute:${id}`) prevents double-launch races
|
||||
+-- if task already in-progress: return 409 duplicate_execution
|
||||
+-- if task has an assignee root:
|
||||
| +-- resolve assignee root -> agent:<assignee>:main
|
||||
| +-- gatewayRpcCall('sessions.list', ...) confirms the parent root exists
|
||||
| +-- store.executeTask(..., { sessionKey }) -> status = in-progress, run.status = running
|
||||
| +-- launchKanbanFallbackSubagentViaRpc({ label, task, parentSessionKey, model?, thinking? })
|
||||
| +-- gatewayRpcCall('sessions.create', { key: childSessionKey, parentSessionKey, label, model? })
|
||||
| +-- gatewayRpcCall('sessions.send', { key: childSessionKey, message: task, thinking?, idempotencyKey })
|
||||
| +-- if send fails after create: best-effort gatewayRpcCall('sessions.delete', { key: childSessionKey, deleteTranscript: true })
|
||||
| +-- return correlationKey + childSessionKey + runId?
|
||||
| +-- attach childSessionKey / runId immediately when available
|
||||
| +-- start pollFallbackSessionCompletion(taskId, { correlationKey, parentSessionKey, childSessionKey?, expectedChildLabel, knownSessionKeysBefore, runId? })
|
||||
|
|
||||
+-- else if task is unassigned / operator:
|
||||
| +-- on macOS: return 409 invalid_execution_target
|
||||
| +-- otherwise use invokeGatewayTool('sessions_spawn', { task, mode:'run', label: runSessionKey, model?, thinking? })
|
||||
| +-- attach childSessionKey / runId when available
|
||||
| +-- start pollSessionCompletion(taskId, { correlationKey: runSessionKey, childSessionKey?, runId? })
|
||||
|
|
||||
+-- if an assigned parent root is missing: return 409 invalid_execution_target
|
||||
+-- on launch failure: store.completeRun(taskId, sessionKey, undefined, 'Spawn failed: ...')
|
||||
|
||||
2. pollSessionCompletion() -> polls gateway subagents every 5s (max 360 attempts / 30 min)
|
||||
+-- invokeGatewayTool('subagents', { action: 'list' })
|
||||
+-- match by label
|
||||
+-- if status=done:
|
||||
| fetch session history (last 3 messages)
|
||||
2. pollSessionCompletion() / pollFallbackSessionCompletion()
|
||||
+-- sessions_spawn path polls gateway subagents by correlation key / childSessionKey / runId
|
||||
+-- assignee-root path polls gateway RPC sessions.list every 5s (max 720 attempts / 60 min)
|
||||
+-- assignee-root path prefers the known childSessionKey; otherwise it discovers the new child beneath the parent root and attaches it
|
||||
+-- if the child completes successfully:
|
||||
| fetch child history via sessions.get / sessions_history
|
||||
| parseKanbanMarkers(resultText) -> create proposals
|
||||
| stripKanbanMarkers(resultText) -> clean result
|
||||
+-- store.completeRun(taskId, cleanResult)
|
||||
| store.completeRun(taskId, sessionKey, cleanResult)
|
||||
+-- gatewayRpcCall('sessions.send', { key: parentSessionKey, message: completionReport })
|
||||
+-- if status=error/failed:
|
||||
+-- store.completeRun(taskId, undefined, errorMsg)
|
||||
+-- if status=running:
|
||||
| store.completeRun(taskId, sessionKey, undefined, errorMsg)
|
||||
+-- gatewayRpcCall('sessions.send', { key: parentSessionKey, message: failureReport })
|
||||
+-- if task/run no longer matches the active session key:
|
||||
+-- stop polling as stale
|
||||
+-- otherwise:
|
||||
+-- schedule next poll
|
||||
|
||||
3. store.completeRun()
|
||||
|
|
@ -567,7 +615,7 @@ This prevents stale overwrites from concurrent editors (drag-and-drop, API clien
|
|||
+-- error -> run.status = error, task.status = todo
|
||||
```
|
||||
|
||||
The model cascade is: task's `model` -> board config `defaultModel` -> `anthropic/claude-sonnet-4-5`.
|
||||
Assigned-root execution now uses real session primitives instead of synthetic marker-message spawn conventions. The model cascade is: execute request `model` -> task `model` -> board config `defaultModel` -> OpenClaw's configured default model. Thinking follows the same pattern with `defaultThinking`.
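
The cascade reads as a simple first-defined-wins resolution. This is a sketch; the parameter names are illustrative.

```ts
// Resolve the model for a kanban run:
// execute request override -> task model -> board defaultModel -> gateway default.
function resolveExecutionModel(opts: {
  requestModel?: string;      // body of POST /api/kanban/tasks/:id/execute
  taskModel?: string;         // task.model
  boardDefaultModel?: string; // board config defaultModel
}): string | undefined {
  // Returning undefined lets the gateway fall back to OpenClaw's configured default model.
  return opts.requestModel ?? opts.taskModel ?? opts.boardDefaultModel;
}
```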
|
||||
|
||||
### Marker Parsing
|
||||
|
||||
|
|
@ -601,7 +649,7 @@ The `proposalPolicy` config controls behavior:
|
|||
| Proposals | Frontend | 5s | Show new proposals in inbox |
|
||||
| Gateway subagents | Backend | 5s | Detect when agent runs complete |
|
||||
|
||||
Backend polling for each running task is independent -- each `executeTask` call starts its own poll loop (capped at 360 attempts = 30 minutes). Stale runs are reconciled by `reconcileStaleRuns()`.
|
||||
Backend polling for each running task is independent -- each `executeTask` call starts its own poll loop (capped at 720 attempts = 60 minutes). Stale runs are reconciled by `reconcileStaleRuns()`.
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -1,302 +1,113 @@
|
|||
# Code Review Guide
|
||||
|
||||
Standards, patterns, and review checklist for the Nerve codebase.
|
||||
Review Nerve as it exists today, not as an imaginary perfect architecture.
|
||||
|
||||
---
|
||||
## First principle
|
||||
|
||||
## Coding Standards
|
||||
Prefer consistency with the surrounding subsystem over introducing a brand-new pattern just because it looks cleaner in isolation. Nerve has strong structure, but it is still a living codebase with some mixed styles and a few rough edges. Good review reduces drift. It does not create more of it.
|
||||
|
||||
## Repo reality, right now
|
||||
|
||||
### TypeScript
|
||||
|
||||
- **Strict mode** enabled across all tsconfig project references
|
||||
- **Explicit types** on all public interfaces, context values, and hook returns
|
||||
- **Discriminated unions** for message types (`GatewayEvent | GatewayRequest | GatewayResponse` via `type` field)
|
||||
- **Typed event payloads** — `AgentEventPayload`, `ChatEventPayload`, `CronEventPayload` instead of `any`
|
||||
- **Zod validation** on all API request bodies (server-side)
|
||||
- **No `any`** — use `unknown` with type narrowing
|
||||
- The repo runs TypeScript in strict mode.
|
||||
- Most application code is strongly typed.
|
||||
- A few internal utilities and tests still use `any`. Do not spread that pattern. If you can tighten a touched area safely, do it.
|
||||
- `@ts-ignore` should be rare and justified inline.
|
||||
|
||||
### React
|
||||
### Frontend structure
|
||||
|
||||
- **Functional components only** — no class components
|
||||
- **`useCallback` / `useMemo`** on all callbacks and derived values passed to children or used in dependency arrays
|
||||
- **`React.memo`** is not used broadly; instead, stable references via `useMemo`/`useCallback` prevent unnecessary re-renders
|
||||
- **Ref-based state access** in callbacks that shouldn't trigger re-registration (e.g., `currentSessionRef`, `isGeneratingRef`, `soundEnabledRef`)
|
||||
- **ESLint annotations** when intentionally breaking rules: `// eslint-disable-next-line react-hooks/set-state-in-effect -- valid: <reason>`
|
||||
- The frontend is organized mostly by feature under `src/features/`.
|
||||
- Shared layers live in `src/components/`, `src/contexts/`, `src/hooks/`, and `src/lib/`.
|
||||
- Large UI surfaces are often lazy-loaded, especially settings, sessions, workspace, kanban, charts, and file editing.
|
||||
- Cross-feature imports do exist. Keep them narrow, stable, and free of circular dependencies.
|
||||
|
||||
### Naming
|
||||
### React patterns worth preserving
|
||||
|
||||
- **Files:** PascalCase for components (`ChatPanel.tsx`), camelCase for hooks/utils (`useWebSocket.ts`, `helpers.ts`)
|
||||
- **Contexts:** `<Name>Context` with `<Name>Provider` and `use<Name>` hook co-located in same file
|
||||
- **Feature directories:** kebab-case (`command-palette/`)
|
||||
- **Types:** PascalCase interfaces/types, `I` prefix NOT used
|
||||
- Functional components and hooks only.
|
||||
- Stable callbacks and memoized derived values matter in hot paths like chat, sessions, file-browser, workspace switching, and kanban.
|
||||
- Ref-synchronized state is used in a few places where callbacks need fresh values without constantly re-registering listeners.
|
||||
- Optional or heavy panels are usually wrapped in `Suspense` and `PanelErrorBoundary`.
|
||||
|
||||
---
|
||||
### Backend structure
|
||||
|
||||
## Architectural Patterns
|
||||
- Backend routes live in `server/routes/` and are mounted from `server/app.ts`.
|
||||
- Shared behavior lives in `server/lib/`, `server/services/`, and `server/middleware/`.
|
||||
- Some write endpoints use `zValidator` and Zod today, but not all of them. New write endpoints should validate input (a minimal sketch follows this list). When you touch an older route, tightening validation is a good upgrade if it stays low-risk.
|
||||
- File and state mutations that can race are often protected with a mutex.
|
||||
- `/api/events` is an SSE endpoint and must not be buffered or compressed.
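
A minimal sketch of the validated-write shape this guide asks for, using Hono and `zValidator` as nearby routes do. The route path and schema here are invented for illustration only.

```ts
import { Hono } from "hono";
import { z } from "zod";
import { zValidator } from "@hono/zod-validator";

const app = new Hono();

// Hypothetical write endpoint: narrow the body before touching any state.
const noteSchema = z.object({
  title: z.string().min(1).max(200),
  body: z.string().max(50_000),
});

app.post("/api/example-notes", zValidator("json", noteSchema), async (c) => {
  const note = c.req.valid("json"); // fully typed and validated
  // ...persist the note...
  return c.json({ ok: true, title: note.title });
});

export default app;
```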
|
||||
|
||||
### 1. Feature-Based Directory Structure
|
||||
### Security and config baseline
|
||||
|
||||
```
|
||||
src/features/
|
||||
chat/
|
||||
ChatPanel.tsx # Main component
|
||||
components/ # Sub-components
|
||||
operations/ # Pure business logic (no React)
|
||||
types.ts # Feature-specific types
|
||||
utils.ts # Feature utilities
|
||||
sessions/
|
||||
workspace/
|
||||
settings/
|
||||
tts/
|
||||
voice/
|
||||
...
|
||||
```
|
||||
- Auth, origin handling, body limits, and WebSocket allowlists are part of the product surface, not optional polish.
|
||||
- `HOST=0.0.0.0` without auth is intentionally blocked unless the explicit insecure override is set.
|
||||
- New env vars should land in `.env.example` and `docs/CONFIGURATION.md` in the same PR.
|
||||
|
||||
Each feature is self-contained. Cross-feature imports go through context providers, not direct imports.
|
||||
## Review priorities
|
||||
|
||||
### 2. Context Provider Pattern
|
||||
1. Correctness and regressions
|
||||
2. Security and data exposure
|
||||
3. Consistency with nearby patterns
|
||||
4. Operability, tests, docs, and maintainability
|
||||
5. Style polish
|
||||
|
||||
Every context follows the same structure:
|
||||
## Review checklist
|
||||
|
||||
```tsx
|
||||
const MyContext = createContext<MyContextValue | null>(null);
|
||||
### General
|
||||
|
||||
export function MyProvider({ children }: { children: ReactNode }) {
|
||||
// State, effects, callbacks
|
||||
const value = useMemo<MyContextValue>(() => ({
|
||||
// All exposed values
|
||||
}), [/* dependencies */]);
|
||||
|
||||
return <MyContext.Provider value={value}>{children}</MyContext.Provider>;
|
||||
}
|
||||
- [ ] The behavior change is intentional, and the PR description matches the diff.
|
||||
- [ ] Commands, ports, env vars, and docs match the current repo behavior.
|
||||
- [ ] New files live in the right area instead of creating a parallel structure.
|
||||
- [ ] Naming follows nearby code more than abstract preference.
|
||||
- [ ] Dead branches, debug noise, and commented-out code are not slipping in.
|
||||
|
||||
export function useMyContext() {
|
||||
const ctx = useContext(MyContext);
|
||||
if (!ctx) throw new Error('useMyContext must be used within MyProvider');
|
||||
return ctx;
|
||||
}
|
||||
```
|
||||
### Frontend
|
||||
|
||||
Key characteristics:
|
||||
- Context value is always `useMemo`-wrapped with explicit type annotation
|
||||
- `null` default with runtime check in the hook
|
||||
- Provider, context, and hook co-located in one file (ESLint `react-refresh/only-export-components` disabled with reason)
|
||||
- [ ] Changes fit the current feature, context, and hook split used in that part of the app.
|
||||
- [ ] Chat, sessions, file-browser, workspace, and kanban changes avoid obvious rerender or subscription churn.
|
||||
- [ ] Timers, listeners, sockets, observers, and intervals clean up correctly.
|
||||
- [ ] Heavy or optional UI stays lazy-loaded unless there is a clear reason to change that.
|
||||
- [ ] Error states, loading states, and mobile behavior still make sense.
|
||||
- [ ] Keyboard navigation and focus behavior are preserved for dialogs, drawers, and menus.
|
||||
|
||||
### 3. Ref-Synchronized State
|
||||
### Backend
|
||||
|
||||
For callbacks that need current state but shouldn't re-register:
|
||||
- [ ] New route files are mounted in `server/app.ts`.
|
||||
- [ ] Auth, CORS, body limits, and rate limiting are preserved or improved.
|
||||
- [ ] Request bodies are validated or parsed narrowly, especially on write endpoints.
|
||||
- [ ] Shared gateway helpers are reused instead of duplicating request logic.
|
||||
- [ ] File writes remain atomic where concurrent access is possible.
|
||||
- [ ] SSE and WebSocket behavior are not broken by buffering, compression, or auth changes.
|
||||
|
||||
```tsx
|
||||
const currentSessionRef = useRef(currentSession);
|
||||
useEffect(() => {
|
||||
currentSessionRef.current = currentSession;
|
||||
}, [currentSession]);
|
||||
### Tests
|
||||
|
||||
// In callbacks: use currentSessionRef.current instead of currentSession
|
||||
const handleSend = useCallback(async (text: string) => {
|
||||
await sendChatMessage({ sessionKey: currentSessionRef.current, ... });
|
||||
}, [rpc]); // Note: currentSession NOT in deps
|
||||
```
|
||||
- [ ] New parsing, state, routing, or persistence logic has tests where the repo already tests similar code.
|
||||
- [ ] Existing tests were updated when behavior changed.
|
||||
- [ ] Assertions were not weakened just to get green.
|
||||
|
||||
This pattern is used extensively in `ChatContext`, `SessionContext`, and `GatewayContext`.
|
||||
### Docs and operations
|
||||
|
||||
### 4. Lazy Loading
|
||||
- [ ] User-facing changes update README or docs when needed.
|
||||
- [ ] New config or migration work updates `.env.example`, setup docs, and upgrade notes.
|
||||
- [ ] Deployment, updater, or gateway-integration changes keep the docs honest.
|
||||
|
||||
Heavy components are code-split via `React.lazy`:
|
||||
## High-signal review comments
|
||||
|
||||
```tsx
|
||||
const SettingsDrawer = lazy(() => import('@/features/settings/SettingsDrawer')
|
||||
.then(m => ({ default: m.SettingsDrawer })));
|
||||
const CommandPalette = lazy(() => import('@/features/command-palette/CommandPalette')
|
||||
.then(m => ({ default: m.CommandPalette })));
|
||||
const SessionList = lazy(() => import('@/features/sessions/SessionList')
|
||||
.then(m => ({ default: m.SessionList })));
|
||||
const WorkspacePanel = lazy(() => import('@/features/workspace/WorkspacePanel')
|
||||
.then(m => ({ default: m.WorkspacePanel })));
|
||||
```
|
||||
Good review comments in this repo are concrete:
|
||||
|
||||
Each wrapped in `<Suspense>` and `<PanelErrorBoundary>` for graceful degradation.
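As a sketch of what that wrapping can look like (the `PanelErrorBoundary` import path and props here are assumptions, not the exact component API):

```tsx
import { lazy, Suspense } from 'react';
import { PanelErrorBoundary } from '@/components/PanelErrorBoundary'; // path is illustrative

const SettingsDrawer = lazy(() => import('@/features/settings/SettingsDrawer')
  .then(m => ({ default: m.SettingsDrawer })));

export function SettingsPanel({ open }: { open: boolean }) {
  // Returning early keeps the lazy chunk out of the initial render entirely.
  if (!open) return null;
  return (
    <PanelErrorBoundary>
      <Suspense fallback={<div>Loading settings…</div>}>
        <SettingsDrawer />
      </Suspense>
    </PanelErrorBoundary>
  );
}
```

If the chunk fails to load or the drawer throws, only this panel degrades instead of the whole app.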
|
||||
- point to the exact mismatch
|
||||
- explain the user or operator impact
|
||||
- suggest the smallest fix that matches local patterns
|
||||
|
||||
### 5. Operations Layer (Pure Logic Extraction)
|
||||
Examples:
|
||||
|
||||
`ChatContext` delegates to pure functions in `features/chat/operations/`:
|
||||
- "This route writes state but skips input validation, while nearby write routes parse JSON explicitly. Can we add a schema or a narrow parser here?"
|
||||
- "This panel is now imported eagerly, which pulls file editor code into the initial bundle. Was that intentional?"
|
||||
- "The doc says `npm run dev:server` uses `:3081`, but the script only does that when `PORT=3081` is set."
|
||||
|
||||
```
|
||||
operations/
|
||||
index.ts # Re-exports all operations
|
||||
loadHistory.ts # loadChatHistory()
|
||||
sendMessage.ts # buildUserMessage(), sendChatMessage()
|
||||
streamEventHandler.ts # classifyStreamEvent(), extractStreamDelta(), etc.
|
||||
```
|
||||
## Avoid this
|
||||
|
||||
This separates React state management from business logic, making operations testable without rendering.
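As a rough illustration of the shape this takes: `buildUserMessage` is named in the listing above, but the signature and types below are simplified assumptions.

```ts
// features/chat/operations/sendMessage.ts — pure logic, no React imports
export interface OutgoingMessage {
  role: 'user';
  text: string;
  createdAt: number;
}

// Injecting the clock keeps the function deterministic in tests.
export function buildUserMessage(text: string, now: () => number = Date.now): OutgoingMessage {
  return { role: 'user', text: text.trim(), createdAt: now() };
}
```

Because nothing here touches hooks or context, a unit test can call `buildUserMessage('hi', () => 0)` and assert on the returned object without rendering anything.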
|
||||
|
||||
### 6. Event Fan-Out (Pub/Sub)
|
||||
|
||||
`GatewayContext` implements a subscriber pattern:
|
||||
|
||||
```tsx
|
||||
const subscribersRef = useRef<Set<EventHandler>>(new Set());
|
||||
|
||||
const subscribe = useCallback((handler: EventHandler) => {
|
||||
subscribersRef.current.add(handler);
|
||||
return () => { subscribersRef.current.delete(handler); };
|
||||
}, []);
|
||||
|
||||
// In onEvent:
|
||||
for (const handler of subscribersRef.current) {
|
||||
try { handler(msg); } catch (e) { console.error(e); }
|
||||
}
|
||||
```
|
||||
|
||||
Consumers (`SessionContext`, `ChatContext`) subscribe in `useEffect` and receive all gateway events.
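A consumer-side sketch, assuming the context exposes `subscribe` through a hook (the hook name and event shape here are illustrative):

```tsx
import { useEffect } from 'react';
import { useGateway } from '@/features/gateway/GatewayContext'; // hypothetical hook name

type GatewayEvent = { type: string; sessionKey?: string };

function useSessionEvents(onSessionChanged: (sessionKey: string) => void) {
  const { subscribe } = useGateway();

  useEffect(() => {
    // subscribe() returns the unsubscribe function, so returning it from the
    // effect removes this handler on unmount or when dependencies change.
    return subscribe((msg: GatewayEvent) => {
      if (msg.type === 'session.changed' && msg.sessionKey) {
        onSessionChanged(msg.sessionKey);
      }
    });
  }, [subscribe, onSessionChanged]);
}
```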
|
||||
|
||||
### 7. Smart Session Diffing
|
||||
|
||||
`SessionContext.refreshSessions()` preserves object references for unchanged sessions:
|
||||
|
||||
```tsx
|
||||
setSessions(prev => {
|
||||
const prevMap = new Map(prev.map(s => [getSessionKey(s), s]));
|
||||
let hasChanges = false;
|
||||
const merged = newSessions.map(newSession => {
|
||||
const existing = prevMap.get(getSessionKey(newSession));
|
||||
if (!existing) { hasChanges = true; return newSession; }
|
||||
const changed = existing.state !== newSession.state || ...;
|
||||
if (changed) { hasChanges = true; return newSession; }
|
||||
return existing; // Preserve reference
|
||||
});
|
||||
return hasChanges ? merged : prev;
|
||||
});
|
||||
```
|
||||
|
||||
### 8. Server Route Pattern (Hono)
|
||||
|
||||
Each route file exports a Hono sub-app:
|
||||
|
||||
```tsx
|
||||
const app = new Hono();
|
||||
app.get('/api/something', rateLimitGeneral, async (c) => { ... });
|
||||
export default app;
|
||||
```
|
||||
|
||||
Routes are mounted in `app.ts` via `app.route('/', route)`.
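Putting the pieces together, a new write route might look roughly like this; the route path, schema fields, and middleware import path are illustrative, not existing code:

```ts
// server/routes/notes.ts — illustrative example, not an existing route
import { Hono } from 'hono';
import { z } from 'zod';
import { rateLimitGeneral } from '../middleware/rate-limit.js';

const noteSchema = z.object({
  title: z.string().min(1).max(200),
  body: z.string().max(50_000),
});

const app = new Hono();

app.post('/api/notes', rateLimitGeneral, async (c) => {
  const parsed = noteSchema.safeParse(await c.req.json().catch(() => null));
  if (!parsed.success) {
    return c.text(parsed.error.issues[0]?.message ?? 'invalid body', 400);
  }
  // ...persist parsed.data here...
  return c.json({ ok: true });
});

export default app;
```

In `app.ts` it would then be mounted like the existing routes, e.g. `app.route('/', notes)`.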
|
||||
|
||||
### 9. Gateway Tool Invocation
|
||||
|
||||
Server routes that need gateway interaction use the shared client:
|
||||
|
||||
```tsx
|
||||
import { invokeGatewayTool } from '../lib/gateway-client.js';
|
||||
|
||||
const result = await invokeGatewayTool('cron', { action: 'list' });
|
||||
```
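A slightly fuller sketch of calling it from a route helper, with the error handling made explicit (the result shape is an assumption):

```ts
import { invokeGatewayTool } from '../lib/gateway-client.js';

export async function listCronJobs(): Promise<unknown[]> {
  try {
    const result = await invokeGatewayTool('cron', { action: 'list' });
    return Array.isArray(result) ? result : [];
  } catch (err) {
    // Let the route translate this into an HTTP error instead of hiding it.
    console.error('[cron] gateway invocation failed:', err);
    throw err;
  }
}
```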
|
||||
|
||||
### 10. Mutex-Protected File I/O
|
||||
|
||||
File operations that need atomicity use the mutex:
|
||||
|
||||
```tsx
|
||||
import { createMutex } from '../lib/mutex.js';
|
||||
const withLock = createMutex();
|
||||
|
||||
await withLock(async () => {
|
||||
const data = await readJSON(file, []);
|
||||
data.push(entry);
|
||||
await writeJSON(file, data);
|
||||
});
|
||||
```
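Conceptually the mutex is just a promise chain; a minimal sketch of the idea (not necessarily the exact `lib/mutex.js` implementation):

```ts
export function createMutex() {
  let tail: Promise<unknown> = Promise.resolve();

  return function withLock<T>(task: () => Promise<T>): Promise<T> {
    // Each task is chained onto the previous one, so read-modify-write
    // sequences never interleave even when callers fire concurrently.
    const run = tail.then(task, task);
    tail = run.catch(() => undefined); // keep the chain alive after failures
    return run;
  };
}
```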
|
||||
|
||||
### 11. Cached Fetch with Deduplication
|
||||
|
||||
Expensive operations use `createCachedFetch` which deduplicates in-flight requests:
|
||||
|
||||
```tsx
|
||||
const fetchLimits = createCachedFetch(
|
||||
() => expensiveApiCall(),
|
||||
5 * 60 * 1000, // 5 min TTL
|
||||
{ isValid: (result) => result.available }
|
||||
);
|
||||
```
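The helper boils down to a TTL cache plus in-flight request sharing; a simplified sketch (the real helper also supports the `isValid` option shown above):

```ts
export function createCachedFetch<T>(fetcher: () => Promise<T>, ttlMs: number) {
  let cached: { value: T; expires: number } | null = null;
  let inFlight: Promise<T> | null = null;

  return async function cachedFetch(): Promise<T> {
    if (cached && Date.now() < cached.expires) return cached.value;
    if (inFlight) return inFlight; // concurrent callers share one request

    inFlight = fetcher()
      .then((value) => {
        cached = { value, expires: Date.now() + ttlMs };
        return value;
      })
      .finally(() => {
        inFlight = null;
      });

    return inFlight;
  };
}
```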
|
||||
|
||||
---
|
||||
|
||||
## Server-Side Patterns
|
||||
|
||||
### Security
|
||||
|
||||
- **Authentication:** Session-cookie auth via `middleware/auth.ts`. When enabled, all `/api/*` routes (except auth/health) require a valid HMAC-SHA256 signed cookie. WebSocket upgrades checked in `ws-proxy.ts`
|
||||
- **Session tokens:** Stateless signed cookies (`HttpOnly`, `SameSite=Strict`). Password hashing via scrypt. Gateway token accepted as fallback password
|
||||
- **CORS:** Strict origin allowlist — only localhost variants and explicitly configured origins
|
||||
- **Token exposure:** Managed gateway auth uses server-side token injection. `/api/connect-defaults` returns `token: null` and trust metadata instead of the raw gateway token
|
||||
- **Device identity:** Ed25519 keypair for gateway WS auth (`~/.nerve/device-identity.json`). Required for operator scopes on OpenClaw 2026.2.19+
|
||||
- **File serving:** MIME-type allowlist + directory traversal prevention + allowed prefix check
|
||||
- **Body limits:** Configurable per-route (general API vs transcribe uploads)
|
||||
- **Rate limiting:** Per-IP sliding window with separate limits for expensive operations
|
||||
- **Credentials:** Browser connection config persists in `localStorage` as `oc-config`. Official managed gateway flows can keep the token empty; custom manual tokens may persist until cleared
|
||||
- **Input validation:** Zod schemas on all POST/PUT request bodies
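The stateless signed-cookie idea from this list can be pictured roughly like this (a sketch only; the real payload format and secret handling live in `middleware/auth.ts`):

```ts
import { createHmac, timingSafeEqual } from 'node:crypto';

export function signSession(payload: string, secret: string): string {
  const mac = createHmac('sha256', secret).update(payload).digest('base64url');
  return `${Buffer.from(payload).toString('base64url')}.${mac}`;
}

export function verifySession(cookie: string, secret: string): string | null {
  const [encoded, mac] = cookie.split('.');
  if (!encoded || !mac) return null;
  const payload = Buffer.from(encoded, 'base64url').toString();
  const expected = createHmac('sha256', secret).update(payload).digest('base64url');
  const a = Buffer.from(mac);
  const b = Buffer.from(expected);
  // timingSafeEqual avoids leaking how much of the MAC matched.
  return a.length === b.length && timingSafeEqual(a, b) ? payload : null;
}
```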
|
||||
|
||||
### Graceful Shutdown
|
||||
|
||||
`server/index.ts` handles SIGTERM/SIGINT:
|
||||
1. Stop file watchers
|
||||
2. Close all WebSocket connections
|
||||
3. Close HTTP + HTTPS servers
|
||||
4. Force exit after 5s drain timeout
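A sketch of that sequence (the real handler in `server/index.ts` owns the actual watcher, socket, and server references):

```ts
function registerShutdown(opts: {
  stopWatchers: () => void;
  closeSockets: () => void;
  closeServers: () => Promise<void>;
}) {
  const shutdown = async (signal: string) => {
    console.log(`[server] ${signal} received, shutting down`);
    opts.stopWatchers();   // 1. stop file watchers
    opts.closeSockets();   // 2. close WebSocket connections
    const force = setTimeout(() => process.exit(1), 5_000); // 4. force exit after 5s
    force.unref();
    await opts.closeServers(); // 3. close HTTP + HTTPS servers
    process.exit(0);
  };

  process.on('SIGTERM', () => void shutdown('SIGTERM'));
  process.on('SIGINT', () => void shutdown('SIGINT'));
}
```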
|
||||
|
||||
### Dual HTTP/HTTPS
|
||||
|
||||
Server runs on both HTTP (port 3080) and HTTPS (port 3443). HTTPS auto-enables if `certs/cert.pem` + `certs/key.pem` exist. HTTPS is required for:
|
||||
- Microphone access (secure context)
|
||||
- WSS proxy (encrypted WebSocket)
|
||||
|
||||
The HTTPS server manually converts Node.js `req`/`res` to `fetch` `Request`/`Response` for Hono compatibility, with special handling for SSE streaming.
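In outline, that conversion looks something like this (deliberately simplified: request bodies and SSE streaming are left out):

```ts
import type { IncomingMessage, ServerResponse } from 'node:http';

async function handleWithHono(
  app: { fetch: (req: Request) => Promise<Response> },
  req: IncomingMessage,
  res: ServerResponse,
) {
  const headers = new Headers();
  for (const [name, value] of Object.entries(req.headers)) {
    if (typeof value === 'string') headers.set(name, value);
    else if (Array.isArray(value)) headers.set(name, value.join(', '));
  }

  const request = new Request(`https://${req.headers.host}${req.url}`, {
    method: req.method,
    headers,
  });

  const response = await app.fetch(request);
  res.writeHead(response.status, Object.fromEntries(response.headers));
  // Buffering like this is fine for normal responses; SSE needs streaming instead.
  res.end(Buffer.from(await response.arrayBuffer()));
}
```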
|
||||
|
||||
---
|
||||
|
||||
## Review Checklist
|
||||
|
||||
### All PRs
|
||||
|
||||
- [ ] TypeScript strict — no `any`, no `@ts-ignore`
|
||||
- [ ] All new API endpoints have rate limiting middleware
|
||||
- [ ] All POST/PUT bodies validated with Zod
|
||||
- [ ] New state in contexts is `useMemo`/`useCallback`-wrapped
|
||||
- [ ] No secrets in client-side code or localStorage
|
||||
- [ ] Error boundaries around lazy-loaded or side-panel components
|
||||
- [ ] Tests for new utilities/hooks (at minimum)
|
||||
|
||||
### Frontend PRs
|
||||
|
||||
- [ ] New components follow feature directory structure
|
||||
- [ ] Heavy components are lazy-loaded if not needed at initial render
|
||||
- [ ] Callbacks use `useCallback` if passed as props or in dependency arrays
|
||||
- [ ] State-setting in effects has ESLint annotation with justification
|
||||
- [ ] No direct cross-feature imports (use contexts)
|
||||
- [ ] Cleanup functions in `useEffect` for subscriptions/timers/RAF
|
||||
- [ ] Keyboard shortcuts registered via `useKeyboardShortcuts`
|
||||
|
||||
### Backend PRs
|
||||
|
||||
- [ ] Routes export a Hono sub-app, mounted in `app.ts`
|
||||
- [ ] File I/O wrapped in mutex when read-modify-write
|
||||
- [ ] Gateway calls use `invokeGatewayTool()` from shared client
|
||||
- [ ] Expensive fetches wrapped in `createCachedFetch`
|
||||
- [ ] SSE-aware: don't break compression exclusion for `/api/events`
|
||||
- [ ] CORS: new endpoints automatically covered by global middleware
|
||||
- [ ] Security: file serving paths validated against allowlist
|
||||
|
||||
### Performance
|
||||
|
||||
- [ ] No unnecessary re-renders (check with React DevTools Profiler)
|
||||
- [ ] Session list uses smart diffing (preserves references)
|
||||
- [ ] Streaming updates use `requestAnimationFrame` batching
|
||||
- [ ] Large data (history) uses infinite scroll, not full render
|
||||
- [ ] Activity sparkline and polling respect `document.visibilityState`
|
||||
|
||||
### Accessibility
|
||||
|
||||
- [ ] Skip-to-content link present (`<a href="#main-chat" class="sr-only">`)
|
||||
- [ ] Dialogs have proper focus management
|
||||
- [ ] Keyboard navigation works for all interactive elements
|
||||
- [ ] Color contrast meets WCAG AA (themes should preserve this)
|
||||
- Enforcing absolutes the repo does not actually follow
|
||||
- Requesting wide refactors in a focused bugfix PR
|
||||
- Rejecting a change for not matching an architecture that is not present in the codebase
|
||||
- Treating review as style theater while missing correctness or security issues
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ Nerve is configured via a `.env` file in the project root. All variables have se
|
|||
The interactive setup wizard is the recommended way to configure Nerve:
|
||||
|
||||
```bash
|
||||
npm run setup # Interactive setup (5 steps)
|
||||
npm run setup # Interactive setup (6 steps)
|
||||
npm run setup -- --check # Validate existing config & test gateway
|
||||
npm run setup -- --defaults # Non-interactive with auto-detected values
|
||||
npm run setup -- --help # Show help
|
||||
|
|
@ -17,7 +17,7 @@ npm run setup -- --help # Show help
|
|||
|
||||
### Wizard Steps
|
||||
|
||||
The wizard walks through **5 sections**:
|
||||
The wizard walks through **6 sections**:
|
||||
|
||||
#### 1. Gateway Connection
|
||||
|
||||
|
|
@ -26,10 +26,11 @@ Connects Nerve to your OpenClaw gateway. The wizard auto-detects the gateway tok
|
|||
2. Environment variable `OPENCLAW_GATEWAY_TOKEN`
|
||||
3. `~/.openclaw/openclaw.json` (auto-detected)
|
||||
|
||||
Tests the connection before proceeding. If the gateway is unreachable, you can continue anyway. On OpenClaw 2026.2.19+, the wizard also:
|
||||
Tests the connection before proceeding. If the gateway is unreachable, setup stops so you can fix the gateway or token first. On current OpenClaw builds, the wizard also:
|
||||
- Reads the real gateway token from the systemd service file (works around a known bug where `openclaw onboard` writes different tokens to systemd and `openclaw.json`)
|
||||
- Bootstraps `paired.json` and `device-auth.json` with full operator scopes if they don't exist yet
|
||||
- Pre-registers Nerve's device identity so it can connect without manual `openclaw devices approve`
|
||||
- Pre-pairs Nerve's device identity in the normal setup path so it can connect without manual approval (`openclaw devices approve`)
|
||||
- Adds `cron`, `gateway`, and `sessions_spawn` to `gateway.tools.allow` when they are missing
|
||||
- Restarts the gateway to apply changes
|
||||
|
||||
#### 2. Agent Identity
|
||||
|
|
@ -43,21 +44,30 @@ Determines how you'll access Nerve. The wizard auto-configures `HOST`, `ALLOWED_
|
|||
| Mode | Bind | Description |
|
||||
|------|------|-------------|
|
||||
| **Localhost** | `127.0.0.1` | Only accessible from this machine. Safest option. |
|
||||
| **Tailscale** | `0.0.0.0` | Accessible from your Tailscale network. Auto-detected if Tailscale is running. Sets CORS + CSP for your Tailscale IP. |
|
||||
| **Tailscale IP** | `0.0.0.0` | Accessible from your Tailscale network over the machine's tailnet IP. Sets CORS + CSP for that IP. |
|
||||
| **Tailscale Serve** | `127.0.0.1` | Keeps Nerve loopback-only and exposes it through a Tailscale Serve HTTPS hostname when available. |
|
||||
| **Network (LAN)** | `0.0.0.0` | Accessible from your local network. Prompts for your LAN IP. Sets CORS + CSP for that IP. |
|
||||
| **Custom** | Manual | Full manual control: custom port, bind address, HTTPS certificate generation, CORS. |
|
||||
|
||||
**HTTPS (Custom mode only):** The wizard can generate self-signed certificates via `openssl` and configure `SSL_PORT`.
|
||||
**HTTPS (Network and Custom modes):** The wizard can offer self-signed certificate generation via `openssl` and configure `SSL_PORT` for non-localhost access.
|
||||
|
||||
#### 4. TTS Configuration (Optional)
|
||||
#### 4. Authentication
|
||||
|
||||
If you choose a network-exposed mode, the wizard prompts you to enable auth and either:
|
||||
- set a password, or
|
||||
- reuse the gateway token as the password fallback
|
||||
|
||||
For localhost-only installs, auth can stay off.
|
||||
|
||||
#### 5. TTS Configuration (Optional)
|
||||
|
||||
Prompts for optional API keys:
|
||||
- `OPENAI_API_KEY` — enables OpenAI TTS + Whisper transcription
|
||||
- `REPLICATE_API_TOKEN` — enables Qwen TTS via Replicate (warns if `ffmpeg` is missing)
|
||||
- `OPENAI_API_KEY`, enables OpenAI TTS + Whisper transcription
|
||||
- `REPLICATE_API_TOKEN`, enables Qwen TTS via Replicate (warns if `ffmpeg` is missing)
|
||||
|
||||
Edge TTS always works without any keys.
|
||||
Edge TTS always works without any keys. Xiaomi MiMo can be enabled later by setting `MIMO_API_KEY` manually or saving it from Settings, Audio.
|
||||
|
||||
#### 5. Advanced Settings (Optional)
|
||||
#### 6. Advanced Settings (Optional)
|
||||
|
||||
Custom file paths for `MEMORY_PATH`, `MEMORY_DIR`, `SESSIONS_DIR`. Most users skip this.
|
||||
|
||||
|
|
@ -68,8 +78,10 @@ Custom file paths for `MEMORY_PATH`, `MEMORY_DIR`, `SESSIONS_DIR`. Most users sk
|
|||
| *(none)* | Full interactive wizard. If `.env` exists, asks whether to update or start fresh. |
|
||||
| `--check` | Validates all config values, tests gateway connectivity, and exits. Non-destructive. |
|
||||
| `--defaults` | Auto-detects gateway token, applies defaults for everything else, writes `.env`. No prompts. |
|
||||
| `--defaults --access-mode tailscale-ip` | Non-interactive setup for direct tailnet IP access. |
|
||||
| `--defaults --access-mode tailscale-serve` | Non-interactive setup for loopback + Tailscale Serve HTTPS access. |
|
||||
|
||||
The wizard backs up existing `.env` files (e.g. `.env.bak.1708100000000`) before overwriting and applies `chmod 600` to both `.env` and backup files.
|
||||
The wizard backs up existing `.env` files as `.env.backup` or `.env.backup.YYYY-MM-DD` before overwriting and applies `chmod 600` to both `.env` and backup files.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -97,18 +109,31 @@ HOST=127.0.0.1
|
|||
|----------|---------|----------|-------------|
|
||||
| `GATEWAY_TOKEN` | — | **Yes** | Authentication token for the OpenClaw gateway. The setup wizard auto-detects this. See note below |
|
||||
| `GATEWAY_URL` | `http://127.0.0.1:18789` | No | Gateway HTTP endpoint URL |
|
||||
| `NERVE_PUBLIC_ORIGIN` | *(empty)* | No | Explicit browser-facing Nerve origin used when server-side gateway RPC fallback must open its own WebSocket to OpenClaw. Useful for reverse-proxy, cloud, and hybrid deployments. |
|
||||
|
||||
```bash
|
||||
GATEWAY_TOKEN=your-token-here
|
||||
GATEWAY_URL=http://127.0.0.1:18789
|
||||
|
||||
# Optional for reverse-proxy / cloud / hybrid installs
|
||||
NERVE_PUBLIC_ORIGIN=https://nerve.example.com
|
||||
```
|
||||
|
||||
For non-interactive installs that should talk to a remote gateway, pass the URL directly to the installer:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh \
|
||||
| bash -s -- --gateway-url https://gw.example.com --gateway-token <token> --skip-setup
|
||||
```
|
||||
|
||||
If remote workspace panels (Files, Memory, Config, Skills) fail with `origin not allowed` while chat still works, set `NERVE_PUBLIC_ORIGIN` to the exact browser origin and add that same origin to `gateway.controlUi.allowedOrigins` on the gateway.
|
||||
|
||||
### Token Injection
|
||||
|
||||
Nerve performs **server-side token injection**. When a connection is established through the WebSocket proxy, Nerve automatically injects the configured `GATEWAY_TOKEN` into the connection request if the client is considered **trusted**.
|
||||
|
||||
**Trust is granted if:**
|
||||
1. The connection is from a **local loopback address** (`127.0.0.1` or `::1`), accounting for `X-Forwarded-For` and `X-Real-IP` when behind a trusted proxy (see `TRUSTED_PROXIES`).
|
||||
1. The connection is from a **local loopback address** (`127.0.0.1` or `::1`). When Nerve is behind a trusted reverse proxy, proxy-aware client IP handling can preserve that loopback detection (see `TRUSTED_PROXIES`).
|
||||
2. OR, the connection has a valid **authenticated session** (`NERVE_AUTH=true`).
|
||||
|
||||
This allows the browser UI to connect without having to manually enter or store the gateway token in the browser's persistent storage. If a connection is not trusted (e.g., remote access without authentication), the token field in the UI must be filled manually.
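The trust decision can be pictured roughly like this (a sketch; the real proxy code also accounts for `TRUSTED_PROXIES` and forwarded headers):

```ts
function isTrustedConnection(opts: {
  remoteAddress: string;
  authEnabled: boolean;
  hasValidSessionCookie: boolean;
}): boolean {
  const loopback = opts.remoteAddress === '127.0.0.1' || opts.remoteAddress === '::1';
  const authenticated = opts.authEnabled && opts.hasValidSessionCookie;
  // Only trusted connections get GATEWAY_TOKEN injected server-side.
  return loopback || authenticated;
}
```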
|
||||
|
|
@ -133,10 +158,12 @@ AGENT_NAME=Friday
|
|||
|----------|-------------|
|
||||
| `OPENAI_API_KEY` | Enables OpenAI TTS (multiple voices) and Whisper audio transcription |
|
||||
| `REPLICATE_API_TOKEN` | Enables Replicate-hosted TTS models (e.g. Qwen TTS). Requires `ffmpeg` for WAV→MP3 |
|
||||
| `MIMO_API_KEY` | Enables Xiaomi MiMo TTS when the Xiaomi provider is selected in Settings, Audio |
|
||||
|
||||
```bash
|
||||
OPENAI_API_KEY=sk-...
|
||||
REPLICATE_API_TOKEN=r8_...
|
||||
MIMO_API_KEY=sk-mimo-...
|
||||
```
|
||||
|
||||
TTS provider fallback chain (when no explicit provider is requested):
|
||||
|
|
@ -144,12 +171,14 @@ TTS provider fallback chain (when no explicit provider is requested):
|
|||
2. **Replicate** — if `REPLICATE_API_TOKEN` is set
|
||||
3. **Edge TTS** — always available, no API key needed (default for new installs)
|
||||
|
||||
Xiaomi MiMo is available as an explicit provider option when `MIMO_API_KEY` is set. It is not part of the automatic fallback chain.
|
||||
|
||||
### Speech-to-Text (STT)
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `STT_PROVIDER` | `local` | STT provider: `local` (whisper.cpp, no API key needed) or `openai` (requires `OPENAI_API_KEY`) |
|
||||
| `WHISPER_MODEL` | `tiny` | Local whisper model: `tiny` (75 MB), `base` (142 MB), or `small` (466 MB) — multilingual variants. English-only variants (`tiny.en`, `base.en`, `small.en`) are also available. |
|
||||
| `WHISPER_MODEL` | `base` | Local whisper model: `tiny` (75 MB), `base` (142 MB), or `small` (466 MB) — multilingual variants. English-only variants (`tiny.en`, `base.en`, `small.en`) are also available. |
|
||||
| `WHISPER_MODEL_DIR` | `~/.nerve/models` | Directory for downloaded whisper model files |
|
||||
| `NERVE_LANGUAGE` | `en` | Preferred voice language (ISO 639-1). Legacy `LANGUAGE` is still accepted but deprecated |
|
||||
| `EDGE_VOICE_GENDER` | `female` | Edge TTS voice gender: `female` or `male` |
|
||||
|
|
@ -157,7 +186,7 @@ TTS provider fallback chain (when no explicit provider is requested):
|
|||
```bash
|
||||
# Use local speech-to-text (no API key needed)
|
||||
STT_PROVIDER=local
|
||||
WHISPER_MODEL=tiny
|
||||
WHISPER_MODEL=base
|
||||
NERVE_LANGUAGE=en
|
||||
```
|
||||
|
||||
|
|
@ -188,6 +217,8 @@ WS_ALLOWED_HOSTS=100.64.0.5
|
|||
TRUSTED_PROXIES=127.0.0.1,::1,10.0.0.1
|
||||
```
|
||||
|
||||
If you are retrofitting Tailscale onto an existing install, see [Add Tailscale to an Existing Nerve Install](TAILSCALE.md).
|
||||
|
||||
### Authentication
|
||||
|
||||
Nerve includes a built-in authentication layer that protects all API endpoints, WebSocket connections, and SSE streams with a session cookie. Auth is opt-in for localhost users and auto-prompted during setup when binding to a network interface.
|
||||
|
|
@ -254,14 +285,13 @@ REPLICATE_BASE_URL=https://api.replicate.com/v1
|
|||
|
||||
| Variable | Default | Description |
|
||||
|---------|---------|-------------|
|
||||
| `FILE_BROWSER_ROOT` | `""` (disabled) | If set, overrides OpenClaw workspace as the root directory for the workspace directory tree. In this mode, default exclusion rules are disabled and delete operations are permanent (no `.trash` recovery). |
|
||||
| `MEMORY_PATH` | `~/.openclaw/workspace/MEMORY.md` | Path to the agent's long-term memory file |
|
||||
| `MEMORY_DIR` | `~/.openclaw/workspace/memory/` | Directory for daily memory files (`YYYY-MM-DD.md`) |
|
||||
| `FILE_BROWSER_ROOT` | `""` (disabled) | If set, overrides the file browser root directory for all sessions. In this mode, file-browser `agentId` scoping is bypassed, default exclusion rules are disabled, and delete operations are permanent (no `.trash` recovery). |
|
||||
| `MEMORY_PATH` | `~/.openclaw/workspace/MEMORY.md` | Path to the main agent's long-term memory file |
|
||||
| `MEMORY_DIR` | `~/.openclaw/workspace/memory/` | Directory for the main agent's daily memory files (`YYYY-MM-DD.md`) |
|
||||
| `SESSIONS_DIR` | `~/.openclaw/agents/main/sessions/` | Session transcript directory (scanned for token usage) |
|
||||
| `USAGE_FILE` | `~/.openclaw/token-usage.json` | Persistent cumulative token usage data |
|
||||
| `NERVE_VOICE_PHRASES_PATH` | `~/.nerve/voice-phrases.json` | Override location for per-language voice phrase overrides |
|
||||
| `NERVE_WATCH_WORKSPACE_RECURSIVE` | `false` | Enables recursive `fs.watch` for the entire workspace (legacy behavior). Disabled by default to prevent Linux inotify `ENOSPC` watcher exhaustion. |
|
||||
| `WORKSPACE_ROOT` | *(auto-detected)* | Allowed base directory for git workdir registration. Auto-derived from `git worktree list` or parent of `process.cwd()` |
|
||||
| `NERVE_WATCH_WORKSPACE_RECURSIVE` | `true` | Enables recursive `fs.watch` for full workspace `file.changed` SSE events outside `MEMORY.md` and `memory/`. Set this to `false` to disable full-workspace watching if you hit Linux inotify `ENOSPC` watcher exhaustion. Memory watchers stay enabled for discovered agent workspaces even when this is `false`. |
|
||||
|
||||
```bash
|
||||
FILE_BROWSER_ROOT=/home/user
|
||||
|
|
@ -305,7 +335,7 @@ The updater stores state in `~/.nerve/updater/`. These are not configurable via
|
|||
|
||||
## Kanban
|
||||
|
||||
Kanban board configuration is stored in the data file (`server/data/kanban/tasks.json`), not in `.env`. Manage it via the REST API:
|
||||
Kanban board configuration is stored in the runtime data file (`${NERVE_DATA_DIR:-~/.nerve}/kanban/tasks.json`), not in `.env`. Manage it via the REST API:
|
||||
|
||||
```bash
|
||||
# Read current config
|
||||
|
|
@ -328,7 +358,7 @@ curl -X PUT http://localhost:3080/api/kanban/config \
|
|||
| `allowDoneDragBypass` | `boolean` | `false` | Allow dragging tasks directly to done (skipping review) |
|
||||
| `quickViewLimit` | `number` | `5` | Max tasks shown in workspace quick view (1–50) |
|
||||
| `proposalPolicy` | `string` | `"confirm"` | How agent proposals are handled: `"confirm"` (manual review) or `"auto"` (apply immediately) |
|
||||
| `defaultModel` | `string` | *(none)* | Default model for agent execution (max 100 chars). Falls back to `anthropic/claude-sonnet-4-5` |
|
||||
| `defaultModel` | `string` | *(none)* | Default model for agent execution (max 100 chars). If unset, execution falls back to OpenClaw's configured default model |
|
||||
|
||||
### Column Schema
|
||||
|
||||
|
|
@ -410,10 +440,11 @@ NERVE_SESSION_TTL=2592000000
|
|||
# API Keys
|
||||
OPENAI_API_KEY=sk-...
|
||||
REPLICATE_API_TOKEN=r8_...
|
||||
MIMO_API_KEY=sk-mimo-...
|
||||
|
||||
# Speech / Language
|
||||
STT_PROVIDER=local
|
||||
WHISPER_MODEL=tiny
|
||||
WHISPER_MODEL=base
|
||||
NERVE_LANGUAGE=en
|
||||
EDGE_VOICE_GENDER=female
|
||||
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ On the cloud host config:
|
|||
```json
|
||||
"gateway": {
|
||||
"tools": {
|
||||
"allow": ["cron", "gateway"]
|
||||
"allow": ["cron", "gateway", "sessions_spawn"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
|
|
|||
|
|
@ -72,6 +72,13 @@ Nerve auto-detects certificates at `certs/cert.pem` and `certs/key.pem`.
|
|||
|
||||
Follow the same-host steps for Nerve, then add:
|
||||
|
||||
### Install with remote gateway settings up front
|
||||
|
||||
```bash
|
||||
curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh \
|
||||
| bash -s -- --gateway-url https://gw.example.com --gateway-token <token> --skip-setup
|
||||
```
|
||||
|
||||
### Point Nerve to remote gateway
|
||||
|
||||
In `.env`:
|
||||
|
|
@ -79,6 +86,7 @@ In `.env`:
|
|||
```bash
|
||||
GATEWAY_URL=<remote-gateway-url>
|
||||
WS_ALLOWED_HOSTS=<remote-gateway-hostname-or-ip>
|
||||
NERVE_PUBLIC_ORIGIN=https://nerve.example.com
|
||||
```
|
||||
|
||||
### Patch remote gateway allowed origins
|
||||
|
|
@ -94,7 +102,7 @@ https://nerve.example.com
|
|||
```json
|
||||
"gateway": {
|
||||
"tools": {
|
||||
"allow": ["cron", "gateway"]
|
||||
"allow": ["cron", "gateway", "sessions_spawn"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
|
|
|||
233
docs/INSTALL.md
Normal file
@ -0,0 +1,233 @@
|
|||
# Nerve Agent Install Contract
|
||||
|
||||
Use this file as the operational contract when you are installing, configuring, or validating Nerve on a machine.
|
||||
|
||||
## Hard gate
|
||||
|
||||
You must be able to:
|
||||
- read files
|
||||
- run shell commands
|
||||
|
||||
Supported operating systems:
|
||||
- macOS
|
||||
- Linux
|
||||
|
||||
Stop and say so if any of the following are true:
|
||||
- you cannot read files
|
||||
- you cannot run shell commands
|
||||
- the machine is not macOS or Linux
|
||||
- the request would require you to invent a Windows flow
|
||||
|
||||
Do not claim success without validation.
|
||||
|
||||
## Default behavior
|
||||
|
||||
Unless the user clearly asks for something else, use these defaults:
|
||||
- install path: `~/nerve`
|
||||
- preferred install method: this repo's `install.sh`
|
||||
- preferred initial topology: local / same machine
|
||||
- preferred access mode: localhost-only
|
||||
- preferred config path: setup wizard first
|
||||
- if the setup wizard is unavailable, skipped, or fails: fall back to explicit manual config
|
||||
- install, configure, and validate before reporting completion
|
||||
|
||||
## Existing install handling
|
||||
|
||||
If `~/nerve` already exists:
|
||||
1. inspect it first
|
||||
2. determine whether it is an existing Nerve install
|
||||
3. prefer safe validation, restart, repair, or non-destructive reconfigure work
|
||||
4. ask before reinstalling, replacing, deleting, or hard-resetting anything
|
||||
|
||||
## OpenClaw dependency handling
|
||||
|
||||
Nerve depends on an OpenClaw gateway.
|
||||
|
||||
You must:
|
||||
1. detect whether a usable gateway already exists
|
||||
2. prefer using an existing reachable gateway
|
||||
3. explain the dependency if no usable gateway is found
|
||||
4. ask before installing OpenClaw if it is missing
|
||||
|
||||
You may apply minimal localhost-safe OpenClaw changes automatically when needed for the default local path. Examples:
|
||||
- adding missing local control UI origins
|
||||
- adding required gateway tool allow entries such as `cron`, `gateway`, and `sessions_spawn`
|
||||
- fixing local device pairing or scopes needed for Nerve to connect
|
||||
|
||||
Ask first before any OpenClaw change that is remote, public, security-sensitive, destructive, or changes network exposure.
|
||||
|
||||
## Prerequisite handling
|
||||
|
||||
For supported local installs, you may automatically install missing core prerequisites required by the installer or build flow, including package-manager-installed dependencies such as:
|
||||
- Node.js 22+
|
||||
- npm
|
||||
- git
|
||||
- build tools required for native modules
|
||||
|
||||
Ask before optional or network-affecting extras such as:
|
||||
- Tailscale setup
|
||||
- reverse proxy setup
|
||||
- public DNS, TLS, or internet exposure changes
|
||||
|
||||
If prerequisite installation is impossible, fail clearly and say what is missing.
|
||||
|
||||
## Consent boundaries
|
||||
|
||||
Ask for confirmation before any of the following:
|
||||
- installing OpenClaw
|
||||
- exposing Nerve beyond localhost
|
||||
- LAN exposure
|
||||
- public internet exposure
|
||||
- reverse proxy setup
|
||||
- Tailscale changes
|
||||
- cloud-hosted assumptions that change exposure or security posture
|
||||
- destructive reinstall, replacement, deletion, or reset actions
|
||||
- remote gateway allowlist or auth changes
|
||||
|
||||
## Auto-allowed actions
|
||||
|
||||
For the common local path, you may automatically:
|
||||
- inspect current install state
|
||||
- inspect local gateway state
|
||||
- detect the gateway token and gateway URL
|
||||
- run the installer
|
||||
- run the setup wizard
|
||||
- use defaults mode for local-only setup when no TTY is available
|
||||
- write minimal manual config if the wizard cannot be used
|
||||
- restart local services or processes
|
||||
- run validation and smoke checks
|
||||
|
||||
## Common-path install flow
|
||||
|
||||
Follow this order unless the user explicitly asks for a different topology.
|
||||
|
||||
### 1. Inspect before changing
|
||||
|
||||
Check:
|
||||
- whether `~/nerve` already exists
|
||||
- whether `openclaw` is installed
|
||||
- whether the OpenClaw gateway is reachable
|
||||
- whether a gateway token can be detected
|
||||
|
||||
### 2. Prefer the installer
|
||||
|
||||
If you are inside a local checkout of this repo, prefer:
|
||||
|
||||
```bash
|
||||
./install.sh
|
||||
```
|
||||
|
||||
If you are operating from GitHub or a raw-doc context without a local checkout, prefer:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://raw.githubusercontent.com/daggerhashimoto/openclaw-nerve/master/install.sh | bash
|
||||
```
|
||||
|
||||
### 3. Prefer the setup wizard
|
||||
|
||||
If the installer finishes and the setup wizard is available, use it.
|
||||
|
||||
If no TTY is available and the target remains the safe local path, you may use:
|
||||
|
||||
```bash
|
||||
cd ~/nerve
|
||||
npm run setup -- --defaults
|
||||
```
|
||||
|
||||
If the requested topology is LAN, Tailscale, remote gateway, cloud, or any other non-localhost path, do not guess. Ask first unless the user already specified that topology clearly.
|
||||
|
||||
### 4. Manual fallback when needed
|
||||
|
||||
If the installer or wizard cannot be used, do the smallest correct manual fallback.
|
||||
|
||||
If the repo is not present locally:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/daggerhashimoto/openclaw-nerve.git ~/nerve
|
||||
cd ~/nerve
|
||||
npm install
|
||||
```
|
||||
|
||||
If you need explicit minimal local config, write `.env` with at least:
|
||||
|
||||
```bash
|
||||
PORT=3080
|
||||
HOST=127.0.0.1
|
||||
GATEWAY_URL=http://127.0.0.1:18789
|
||||
GATEWAY_TOKEN=<detected-token>
|
||||
```
|
||||
|
||||
Then handle runtime like this:
|
||||
|
||||
1. if the installer already configured a service manager, use that instead of starting a duplicate foreground process
|
||||
2. on Linux, check for `systemd` service management via `nerve.service`
|
||||
3. on macOS, check for `launchd` management via `~/Library/LaunchAgents/com.nerve.server.plist`
|
||||
4. if no service manager is configured, run Nerve directly with the production entrypoint
|
||||
|
||||
Typical commands:
|
||||
|
||||
```bash
|
||||
# build artifacts
|
||||
npm run build
|
||||
|
||||
# Linux, service managed
|
||||
sudo systemctl restart nerve.service
|
||||
|
||||
# macOS, service managed
|
||||
launchctl stop com.nerve.server || true
|
||||
launchctl start com.nerve.server
|
||||
|
||||
# no service manager present
|
||||
npm run prod
|
||||
```
|
||||
|
||||
## Topology branching
|
||||
|
||||
Stay self-contained for the common path, then branch by user intent:
|
||||
- Local / same machine: [DEPLOYMENT-A.md](./DEPLOYMENT-A.md)
|
||||
- Hybrid / remote gateway + local Nerve: [DEPLOYMENT-B.md](./DEPLOYMENT-B.md)
|
||||
- Cloud / remote Nerve: [DEPLOYMENT-C.md](./DEPLOYMENT-C.md)
|
||||
- Tailscale retrofit: [TAILSCALE.md](./TAILSCALE.md)
|
||||
|
||||
Choose the branch based on the user's intended topology, not on low-level subsystem details.
|
||||
|
||||
## Done criteria
|
||||
|
||||
Only report success when all of the following are true:
|
||||
- Nerve is installed at the intended path
|
||||
- Nerve starts successfully
|
||||
- it is configured against the intended OpenClaw gateway
|
||||
- access and auth behavior match the chosen mode
|
||||
- a minimal smoke test passes
|
||||
|
||||
## Smoke test
|
||||
|
||||
Keep the smoke test small and explicit.
|
||||
|
||||
1. Confirm the Nerve process or service is running.
|
||||
2. Confirm the expected Nerve URL responds.
|
||||
- local default: `http://127.0.0.1:3080/health`
|
||||
3. Confirm the intended OpenClaw gateway is reachable.
|
||||
4. Confirm `.env` points to that gateway.
|
||||
5. If auth is enabled or network access was requested, confirm the login surface or expected protected access behavior is present.
|
||||
|
||||
Useful checks:
|
||||
|
||||
```bash
|
||||
openclaw gateway status
|
||||
curl -fsS http://127.0.0.1:18789/health
|
||||
curl -fsS http://127.0.0.1:3080/health
|
||||
```
|
||||
|
||||
Adjust host, port, and URL to match the chosen topology.
|
||||
|
||||
## Failure handling
|
||||
|
||||
If any step fails, report:
|
||||
- the exact failed step
|
||||
- what you checked
|
||||
- what you changed
|
||||
- what worked
|
||||
- what still needs user input or approval
|
||||
|
||||
Do not use vague completion text. Do not loop blindly.
|
||||
|
|
@ -28,6 +28,7 @@ It initializes defaults such as:
|
|||
- `VERSION` (optional pinned release version)
|
||||
- `BRANCH_EXPLICIT` (tracks whether `--branch` was set)
|
||||
- `GATEWAY_TOKEN` (optional CLI override)
|
||||
- `ACCESS_MODE` (optional explicit setup mode such as `tailscale-ip` or `tailscale-serve`)
|
||||
- `ENV_MISSING` (tracks partial installs)
|
||||
|
||||
### 0.3 OS family detection
|
||||
|
|
@ -54,8 +55,20 @@ Supported flags:
|
|||
- `--skip-setup`
|
||||
- `--dry-run`
|
||||
- `--gateway-token <token>`
|
||||
- `--gateway-url <url>`
|
||||
- `--access-mode <mode>`
|
||||
- `--help`
|
||||
|
||||
Supported `--access-mode` values:
|
||||
- `local`
|
||||
- `network`
|
||||
- `custom`
|
||||
- `tailscale-ip`
|
||||
- `tailscale-serve`
|
||||
|
||||
Backward compatibility:
|
||||
- legacy `tailscale` is normalized to `tailscale-ip`
|
||||
|
||||
`--version` and `--branch` are mutually exclusive.
|
||||
|
||||
Unknown or malformed args exit with error.
|
||||
|
|
@ -183,9 +196,11 @@ This stage controls `.env` provisioning and setup wizard behavior.
|
|||
When called (and `.env` doesn’t already exist), it:
|
||||
|
||||
1. Reads gateway token (`--gateway-token` first, then auto-detect)
|
||||
2. Reads gateway port from `openclaw.json` (fallback 18789)
|
||||
2. Resolves gateway URL:
|
||||
- `--gateway-url <url>` first (validated as absolute `http://` or `https://` URL)
|
||||
- otherwise local gateway from `openclaw.json` port (fallback `http://127.0.0.1:18789`)
|
||||
3. Writes minimal `.env`:
|
||||
- `GATEWAY_URL=http://127.0.0.1:<port>`
|
||||
- `GATEWAY_URL=<resolved-url>`
|
||||
- `GATEWAY_TOKEN=<token>`
|
||||
- `PORT=3080`
|
||||
|
||||
|
|
@ -206,6 +221,7 @@ If token exists but port `3080` is already occupied:
|
|||
#### `--skip-setup`
|
||||
- If `.env` exists: keep it.
|
||||
- If no `.env`: auto-generate from gateway config.
|
||||
- When combined with `--gateway-url <url>`, the generated `.env` uses that URL instead of the local default.
|
||||
|
||||
#### Interactive mode (no `--skip-setup`)
|
||||
- If `.env` exists:
|
||||
|
|
@ -216,9 +232,46 @@ If token exists but port `3080` is already occupied:
|
|||
- run setup wizard
|
||||
- if wizard fails, fallback to auto-generate `.env`
|
||||
|
||||
Inside the interactive setup wizard, access mode now splits Tailscale into two explicit choices:
|
||||
- `tailnet IP`
|
||||
- `Tailscale Serve`
|
||||
|
||||
Behavior by interactive profile:
|
||||
- `tailnet IP`
|
||||
- configures direct tailnet-IP access
|
||||
- keeps Nerve network-reachable
|
||||
- patches gateway allowed origins using the tailnet IP origin
|
||||
- `Tailscale Serve`
|
||||
- keeps Nerve on `127.0.0.1`
|
||||
- asks whether to run `tailscale serve --bg http://127.0.0.1:<PORT>`
|
||||
- detects the resulting `https://<node>.tail<id>.ts.net` origin
|
||||
- patches both Nerve and the gateway for that `*.ts.net` origin
|
||||
- if Serve cannot be confirmed, asks whether to fall back to `tailnet IP` or stop
|
||||
|
||||
If Tailscale is installed but not logged in:
|
||||
- setup guides the operator to run the browser URL login flow with `tailscale up`
|
||||
- setup can wait and re-check, or exit and let the user rerun later
|
||||
|
||||
If Tailscale is missing:
|
||||
- setup explains that clearly
|
||||
- prints the install/login next steps
|
||||
- exits instead of pretending setup succeeded
|
||||
|
||||
#### Non-interactive mode (no `--skip-setup`)
|
||||
- If `.env` exists: keep it.
|
||||
- If no `.env`: auto-generate from gateway.
|
||||
- If no `.env` and no explicit `--access-mode`: auto-generate from gateway.
|
||||
- If `--access-mode` is provided:
|
||||
- route through `npm run setup -- --defaults --access-mode <mode>`
|
||||
- do not bypass setup with raw `.env` generation
|
||||
|
||||
Non-interactive Tailscale behavior:
|
||||
- `--access-mode tailscale-ip`
|
||||
- attempts direct tailnet-IP setup if Tailscale state is usable
|
||||
- otherwise keeps the safest supported config and prints exact follow-up steps
|
||||
- `--access-mode tailscale-serve`
|
||||
- never hangs waiting for login or Serve activation
|
||||
- if a usable `*.ts.net` origin is not confirmed, falls back to `tailscale-ip`
|
||||
- if even `tailscale-ip` is not ready, keeps localhost-only config and prints exact follow-up steps
|
||||
|
||||
### 4.3 Gateway config patching (inside setup wizard)
|
||||
|
||||
|
|
@ -227,8 +280,10 @@ After `.env` is written, the setup wizard detects and applies pending OpenClaw g
|
|||
#### Possible changes detected:
|
||||
1. **Device scopes** — bootstraps `~/.openclaw/devices/paired.json` with full operator scopes if missing or incomplete
|
||||
2. **Pre-pair Nerve device** — registers Nerve's Ed25519 identity in `paired.json` so it can connect without manual `openclaw devices approve`
|
||||
3. **Tools allow** — adds `"cron"` and `"gateway"` to `gateway.tools.allow` in `~/.openclaw/openclaw.json` (required for OpenClaw ≥2026.2.23 which denies these tools on `/tools/invoke` by default)
|
||||
4. **Allowed origins** — adds Nerve's HTTP/HTTPS origins to `gateway.controlUi.allowedOrigins` (network access modes only)
|
||||
3. **Tools allow** — adds `"cron"`, `"gateway"`, and `"sessions_spawn"` to `gateway.tools.allow` in `~/.openclaw/openclaw.json` (required for OpenClaw ≥2026.2.23, which denies these tools on `/tools/invoke` by default; `sessions_spawn` is required for Kanban task execution)
|
||||
4. **Allowed origins** — adds all required Nerve browser origins to `gateway.controlUi.allowedOrigins`
|
||||
- LAN or tailnet-IP mode: `http://<ip>:<port>`
|
||||
- Tailscale Serve mode: `https://<node>.tail<id>.ts.net`
|
||||
|
||||
#### Interactive mode:
|
||||
- Shows a numbered list of all pending changes
|
||||
|
|
@ -238,8 +293,10 @@ After `.env` is written, the setup wizard detects and applies pending OpenClaw g
|
|||
|
||||
#### `--defaults` mode:
|
||||
- All changes applied silently (implicit consent)
|
||||
- Origins computed automatically from `HOST` and `PORT`
|
||||
- Treats any non-loopback HOST as network mode
|
||||
- Allowed origins come from the computed setup access plan, not just `HOST` and `PORT`
|
||||
- `--access-mode tailscale-ip` and `--access-mode tailscale-serve` are supported explicitly
|
||||
- legacy `--access-mode tailscale` maps to `tailscale-ip`
|
||||
- if `tailscale-serve` cannot confirm a usable `*.ts.net` origin, defaults mode falls back to the safest supported path and prints follow-up steps
|
||||
|
||||
#### Post-apply:
|
||||
- Single gateway restart after all patches
|
||||
|
|
|
|||
|
|
@ -1,25 +1,30 @@
|
|||
# Nerve Documentation
|
||||
# Nerve Docs
|
||||
|
||||
Use this folder as the docs hub for Nerve.
|
||||
Start here when you need setup, operations, or contributor guidance.
|
||||
|
||||
## Core Docs
|
||||
## Top-level entry points
|
||||
|
||||
- [Architecture](./ARCHITECTURE.md)
|
||||
- [Configuration](./CONFIGURATION.md)
|
||||
- [API](./API.md)
|
||||
- [Security](./SECURITY.md)
|
||||
- [Troubleshooting](./TROUBLESHOOTING.md)
|
||||
- [Updating](./UPDATING.md)
|
||||
- [Installer Steps](./INSTALLER-STEPS.md)
|
||||
- [Agent Markers](./AGENT-MARKERS.md)
|
||||
- [Code Review](./CODE_REVIEW.md)
|
||||
- [Project README](../README.md), product overview and quick start
|
||||
- [Contributing](../CONTRIBUTING.md), local development, tests, and PR expectations
|
||||
- [Changelog](../CHANGELOG.md), release notes
|
||||
|
||||
## Deployment Guides
|
||||
## Core docs
|
||||
|
||||
- [Architecture](./ARCHITECTURE.md), codebase structure and system design
|
||||
- [Configuration](./CONFIGURATION.md), `.env`, auth, access modes, TTS providers, and UI settings
|
||||
- [API](./API.md), backend endpoints and behavior
|
||||
- [Security](./SECURITY.md), threat model and hardening notes
|
||||
- [Troubleshooting](./TROUBLESHOOTING.md), common failures and fixes
|
||||
- [Updating](./UPDATING.md), built-in updater flow and rollback
|
||||
- [Installer Steps](./INSTALLER-STEPS.md), what the installer does
|
||||
- [Agent Markers](./AGENT-MARKERS.md), TTS, charts, and kanban markers
|
||||
- [Code Review](./CODE_REVIEW.md), review guidance for the current codebase
|
||||
|
||||
## Setup and deployment
|
||||
|
||||
- [AI Agent Setup](./AI_SETUP.md)
|
||||
- [Nerve Agent Install Contract](./INSTALL.md)
|
||||
- [Run everything on one machine](./DEPLOYMENT-A.md)
|
||||
- [Use a cloud Gateway with Nerve on your laptop](./DEPLOYMENT-B.md)
|
||||
- [Run both Nerve and Gateway in the cloud](./DEPLOYMENT-C.md)
|
||||
|
||||
## Release Notes
|
||||
|
||||
- [Changelog](../CHANGELOG.md)
|
||||
- [Add Tailscale to an existing Nerve install](./TAILSCALE.md)
|
||||
|
|
|
|||
|
|
@ -227,7 +227,6 @@ All POST/PUT endpoints validate request bodies with [Zod](https://zod.dev/) sche
|
|||
| `PUT /api/memories/section` | `title` (1–200), `content` (≤50000), `date` (YYYY-MM-DD regex) |
|
||||
| `DELETE /api/memories` | `query` (1–1000), `type` (enum), `date` (YYYY-MM-DD regex) |
|
||||
| `PUT /api/workspace/:key` | `content` (string, ≤100 KB), `key` checked against strict allowlist |
|
||||
| `POST /api/git-info/workdir` | `sessionKey` (non-empty), `workdir` (non-empty, validated against allowed base) |
|
||||
|
||||
Validation errors return **HTTP 400** with the first Zod issue message as plain text or JSON.
|
||||
|
||||
|
|
@ -295,7 +294,7 @@ This prevents the proxy from being used to connect to arbitrary external hosts.
|
|||
|
||||
### Token Injection
|
||||
|
||||
Nerve performs **server-side token injection** to provide a zero-config connection experience for local and authenticated users without exposing the `GATEWAY_TOKEN` to the browser storage.
|
||||
Nerve performs **server-side token injection** to provide a zero-config connection experience for local and authenticated users without exposing the `GATEWAY_TOKEN` to the browser storage. Trusted-proxy configuration only affects how Nerve interprets forwarded client IPs, for example for loopback detection and rate limiting. It does not grant authentication by itself.
|
||||
|
||||
**Injection Logic:**
|
||||
1. `GET /api/connect-defaults` returns the official gateway WebSocket URL, `token: null`, and a `serverSideAuth` flag.
|
||||
|
|
@ -314,14 +313,15 @@ OpenClaw 2026.2.19+ requires a signed device identity (Ed25519 keypair) for WebS
|
|||
|
||||
Nerve generates a persistent device identity on first start (stored at `~/.nerve/device-identity.json`) and injects it into the connect handshake. The gateway always stays on loopback (`127.0.0.1`) — Nerve proxies all external connections through its WS proxy.
|
||||
|
||||
**First-time pairing (required once):**
|
||||
**Normal setup path:** the setup wizard now pre-pairs Nerve's device identity while it is configuring the gateway, so a fresh install usually does **not** require a manual `openclaw devices approve` step.
|
||||
|
||||
**Manual approval is fallback / recovery guidance:**
|
||||
|
||||
1. Start Nerve and open the UI in a browser
|
||||
2. The first connection creates a pending pairing request on the gateway
|
||||
3. Approve it: `openclaw devices list` → `openclaw devices approve <requestId>`
|
||||
4. All subsequent connections are automatically authenticated
|
||||
2. If the device is still pending, list requests: `openclaw devices list`
|
||||
3. Approve the Nerve device: `openclaw devices approve <requestId>`
|
||||
|
||||
If the device is rejected (e.g. after a gateway reset), the proxy falls back to token-only auth. The connection succeeds but with reduced scopes — chat and tool calls may fail with "missing scope" errors. Re-approve the device to restore full functionality.
|
||||
If the device is rejected (for example after a gateway reset), the proxy falls back to token-only auth. The connection succeeds but with reduced scopes, and chat or tool calls may fail with "missing scope" errors until the device is approved again.
|
||||
|
||||
**Architecture:** `Browser (remote) → Nerve (0.0.0.0:3080) → WS proxy → Gateway (127.0.0.1:18789)`. The gateway never needs to bind to LAN or be directly network-accessible.
|
||||
|
||||
|
|
@ -353,7 +353,6 @@ Multiple layers prevent directory traversal attacks:
|
|||
| `/api/files` | `path.resolve()` + prefix allowlist + symlink resolution + re-check |
|
||||
| `/api/memories` (date params) | Regex validation: `/^\d{4}-\d{2}-\d{2}$/` — prevents injection in file paths |
|
||||
| `/api/workspace/:key` | Strict key→filename allowlist (`soul`→`SOUL.md`, etc.) — no user-controlled paths |
|
||||
| `/api/git-info/workdir` | Resolved path checked against allowed base directory (derived from git worktrees or `WORKSPACE_ROOT`). Exact match or child-path check with separator guard |
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -403,7 +402,7 @@ The setup wizard:
|
|||
1. Writes `.env` atomically (via temp file + rename)
|
||||
2. Applies `chmod 600` to `.env` and backup files
|
||||
3. Cleans up `.env.tmp` on interruption (Ctrl+C handler)
|
||||
4. Backs up existing `.env` before overwriting (timestamped `.env.bak.*`)
|
||||
4. Backs up existing `.env` before overwriting (`.env.backup` or `.env.backup.YYYY-MM-DD`)
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
299
docs/TAILSCALE.md
Normal file
|
|
@ -0,0 +1,299 @@
|
|||
# Add Tailscale to an Existing Nerve Install
|
||||
|
||||
This guide is for the case where **Nerve is already installed and working**, and you want to add private remote access afterward.
|
||||
|
||||
Use one of these two paths:
|
||||
|
||||
- **Tailnet IP**: quickest path, Nerve listens on the Tailscale IP and you open `http://100.x.y.z:3080`
|
||||
- **Tailscale Serve**: better default for phones and voice input, Nerve stays on `127.0.0.1` and Tailscale exposes `https://<node>.tail<id>.ts.net`
|
||||
|
||||
If you are starting from scratch, use the normal installer/setup flow first, then come back here only if you need to retrofit Tailscale onto an existing machine.
|
||||
|
||||
## Before you change anything
|
||||
|
||||
Make sure all of this is already true:
|
||||
|
||||
- Nerve starts locally and `curl http://127.0.0.1:3080/health` works
|
||||
- OpenClaw gateway is healthy and `openclaw gateway status` works
|
||||
- Tailscale is installed on the Nerve machine
|
||||
- Tailscale is logged in on the Nerve machine and on the client device you want to use
|
||||
- You know where your Nerve install lives, default is usually `~/nerve`
|
||||
|
||||
Back up your current config first:
|
||||
|
||||
```bash
|
||||
cd ~/nerve
|
||||
cp .env .env.before-tailscale.bak
|
||||
cp ~/.openclaw/openclaw.json ~/.openclaw/openclaw.json.before-tailscale.bak
|
||||
```
|
||||
|
||||
## Which mode should you use?
|
||||
|
||||
Choose **Tailnet IP** if:
|
||||
- you want the simplest possible setup
|
||||
- plain HTTP on the tailnet is fine
|
||||
- you are okay with Nerve binding to `0.0.0.0`
|
||||
|
||||
Choose **Tailscale Serve** if:
|
||||
- you want Nerve to stay private on localhost
|
||||
- you want an HTTPS URL for phone access
|
||||
- you want the least surprising path for microphone access on mobile browsers
|
||||
|
||||
## Option A: Tailnet IP
|
||||
|
||||
This exposes Nerve on the machine's Tailscale IP and patches both Nerve and the gateway to allow that origin.
|
||||
|
||||
### 1. Get the Tailscale IPv4 address
|
||||
|
||||
```bash
|
||||
tailscale ip -4
|
||||
```
|
||||
|
||||
Example output:
|
||||
|
||||
```bash
|
||||
100.64.0.42
|
||||
```
|
||||
|
||||
Save that value, this guide calls it `<tailscale-ip>` below.
|
||||
|
||||
### 2. Update Nerve `.env`
|
||||
|
||||
Open `~/nerve/.env` and make sure these values are set:
|
||||
|
||||
```bash
|
||||
HOST=0.0.0.0
|
||||
ALLOWED_ORIGINS=http://<tailscale-ip>:3080
|
||||
CSP_CONNECT_EXTRA=http://<tailscale-ip>:3080 ws://<tailscale-ip>:3080
|
||||
WS_ALLOWED_HOSTS=<tailscale-ip>
|
||||
NERVE_AUTH=true
|
||||
```
|
||||
|
||||
Notes:
|
||||
- `HOST=0.0.0.0` is required for direct tailnet-IP access
|
||||
- `NERVE_AUTH=true` is strongly recommended whenever Nerve is reachable over the network, including Tailscale
|
||||
- if you do not already have a password hash configured, Nerve accepts the `GATEWAY_TOKEN` as a fallback password
|
||||
- if `ALLOWED_ORIGINS` or `CSP_CONNECT_EXTRA` already contains other values you still need, append instead of replacing
|
||||
|
||||
### 3. Patch the gateway allowlist
|
||||
|
||||
Add the same origin to `~/.openclaw/openclaw.json`:
|
||||
|
||||
```bash
|
||||
ORIGIN="http://<tailscale-ip>:3080" node - <<'NODE'
|
||||
const fs = require('fs');
|
||||
const path = `${process.env.HOME}/.openclaw/openclaw.json`;
|
||||
const origin = process.env.ORIGIN;
|
||||
const cfg = JSON.parse(fs.readFileSync(path, 'utf8'));
|
||||
|
||||
cfg.gateway ??= {};
|
||||
cfg.gateway.controlUi ??= {};
|
||||
const existing = cfg.gateway.controlUi.allowedOrigins || [];
|
||||
cfg.gateway.controlUi.allowedOrigins = [...new Set([...existing, origin])];
|
||||
|
||||
fs.writeFileSync(path, `${JSON.stringify(cfg, null, 2)}\n`);
|
||||
console.log(`Added ${origin} to ${path}`);
|
||||
NODE
|
||||
```
|
||||
|
||||
### 4. Restart Nerve and the gateway
|
||||
|
||||
```bash
|
||||
sudo systemctl restart nerve.service
|
||||
openclaw gateway restart
|
||||
```
|
||||
|
||||
### 5. Validate
|
||||
|
||||
On the Nerve machine:
|
||||
|
||||
```bash
|
||||
curl -fsS http://127.0.0.1:3080/health
|
||||
openclaw gateway status
|
||||
```
|
||||
|
||||
From another Tailscale-connected device, open:
|
||||
|
||||
```text
|
||||
http://<tailscale-ip>:3080
|
||||
```
|
||||
|
||||
Expected result:
|
||||
- the page loads
|
||||
- login works
|
||||
- sessions load
|
||||
- chat connects without origin errors
|
||||
|
||||
## Option B: Tailscale Serve
|
||||
|
||||
This keeps Nerve on localhost and lets Tailscale publish a private HTTPS URL.
|
||||
|
||||
### 1. Enable Tailscale Serve
|
||||
|
||||
On the Nerve machine:
|
||||
|
||||
```bash
|
||||
tailscale serve --bg http://127.0.0.1:3080
|
||||
```
|
||||
|
||||
### 2. Find the Serve URL
|
||||
|
||||
```bash
SERVE_JSON="$(tailscale serve status --json)" node - <<'NODE'
const data = JSON.parse(process.env.SERVE_JSON || '{}');
const key = Object.keys(data.Web || {})[0];
if (!key) {
  console.error('No Tailscale Serve web origin found');
  process.exit(1);
}
const host = key.replace(/:\d+$/, '');
console.log(`https://${host}`);
NODE
```
|
||||
|
||||
Example output:
|
||||
|
||||
```text
|
||||
https://example-node.tail0000.ts.net
|
||||
```
|
||||
|
||||
Save that value, this guide calls it `<serve-origin>` below.
|
||||
|
||||
### 3. Update Nerve `.env`
|
||||
|
||||
Open `~/nerve/.env` and make sure these values are set:
|
||||
|
||||
```bash
|
||||
HOST=127.0.0.1
|
||||
ALLOWED_ORIGINS=<serve-origin>
|
||||
CSP_CONNECT_EXTRA=<serve-origin> wss://<serve-host>
|
||||
NERVE_AUTH=true
|
||||
```
|
||||
|
||||
Where `<serve-host>` is the hostname without `https://`.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
HOST=127.0.0.1
|
||||
ALLOWED_ORIGINS=https://example-node.tail0000.ts.net
|
||||
CSP_CONNECT_EXTRA=https://example-node.tail0000.ts.net wss://example-node.tail0000.ts.net
|
||||
NERVE_AUTH=true
|
||||
```
|
||||
|
||||
Notes:
|
||||
- if `HOST` is missing entirely, Nerve defaults to localhost, which is also fine
|
||||
- remove stale `WS_ALLOWED_HOSTS` if you previously used tailnet-IP mode and are switching to Serve-only access
|
||||
- `NERVE_AUTH=true` is still recommended, even though Serve is private by default
|
||||
|
||||
### 4. Patch the gateway allowlist
|
||||
|
||||
Add the same Serve origin to `~/.openclaw/openclaw.json`:
|
||||
|
||||
```bash
|
||||
ORIGIN="<serve-origin>" node - <<'NODE'
|
||||
const fs = require('fs');
|
||||
const path = `${process.env.HOME}/.openclaw/openclaw.json`;
|
||||
const origin = process.env.ORIGIN;
|
||||
const cfg = JSON.parse(fs.readFileSync(path, 'utf8'));
|
||||
|
||||
cfg.gateway ??= {};
|
||||
cfg.gateway.controlUi ??= {};
|
||||
const existing = cfg.gateway.controlUi.allowedOrigins || [];
|
||||
cfg.gateway.controlUi.allowedOrigins = [...new Set([...existing, origin])];
|
||||
|
||||
fs.writeFileSync(path, `${JSON.stringify(cfg, null, 2)}\n`);
|
||||
console.log(`Added ${origin} to ${path}`);
|
||||
NODE
|
||||
```
|
||||
|
||||
### 5. Restart Nerve and the gateway
|
||||
|
||||
```bash
|
||||
sudo systemctl restart nerve.service
|
||||
openclaw gateway restart
|
||||
```
|
||||
|
||||
### 6. Validate
|
||||
|
||||
On the Nerve machine:
|
||||
|
||||
```bash
|
||||
curl -fsS http://127.0.0.1:3080/health
|
||||
openclaw gateway status
|
||||
tailscale serve status
|
||||
```
|
||||
|
||||
From another Tailscale-connected device, open:
|
||||
|
||||
```text
|
||||
<serve-origin>
|
||||
```
|
||||
|
||||
Expected result:
|
||||
- the page loads over HTTPS
|
||||
- login works
|
||||
- chat connects without `origin not allowed`
|
||||
- phone access works without exposing Nerve directly on `0.0.0.0`
|
||||
|
||||
## Switching from one mode to the other
|
||||
|
||||
If you switch modes later, update both layers:
|
||||
|
||||
- Nerve `.env`
|
||||
- OpenClaw `gateway.controlUi.allowedOrigins`
|
||||
|
||||
Common cleanup when switching to **Serve**:
|
||||
- change `HOST` back to `127.0.0.1`
|
||||
- replace IP-based `ALLOWED_ORIGINS`
|
||||
- replace IP-based `CSP_CONNECT_EXTRA`
|
||||
- remove `WS_ALLOWED_HOSTS` if you no longer need direct IP access
|
||||
|
||||
Common cleanup when switching to **Tailnet IP**:
|
||||
- set `HOST=0.0.0.0`
|
||||
- replace `ALLOWED_ORIGINS` with the IP origin
|
||||
- replace `CSP_CONNECT_EXTRA` with the IP origin + `ws://...`
|
||||
- set `WS_ALLOWED_HOSTS=<tailscale-ip>`
|
||||
|
||||
## Common failures
|
||||
|
||||
### `Auth failed: origin not allowed`
|
||||
|
||||
Cause:
|
||||
- the Serve or tailnet origin is missing from `gateway.controlUi.allowedOrigins`
|
||||
|
||||
Fix:
|
||||
- patch `~/.openclaw/openclaw.json`
|
||||
- restart the gateway
|
||||
|
||||
### WebSocket upgrade fails or chat never connects
|
||||
|
||||
Cause:
|
||||
- the browser origin is missing from `ALLOWED_ORIGINS`
|
||||
- or you kept stale `WS_ALLOWED_HOSTS` / `HOST` values from the other mode
|
||||
|
||||
Fix:
|
||||
- clean up `.env` so it matches the mode you actually want
|
||||
- restart Nerve
|
||||
|
||||
### Microphone access is flaky on phone
|
||||
|
||||
Use **Tailscale Serve**, not plain `http://<tailscale-ip>:3080`.
|
||||
|
||||
Mobile browsers are much happier with HTTPS for microphone access.
|
||||
|
||||
## Security notes
|
||||
|
||||
- Do **not** expose OpenClaw gateway port `18789` publicly just because Nerve is on Tailscale
|
||||
- Keep `NERVE_AUTH=true` for any non-localhost access
|
||||
- If you shared gateway tokens while debugging, rotate them afterward
|
||||
|
||||
## Recommendation
|
||||
|
||||
If you only need one answer:
|
||||
- use **Tailnet IP** for the fastest manual retrofit
|
||||
- use **Tailscale Serve** for the cleanest long-term remote setup, especially on phone
|
||||
|
|
@ -129,7 +129,7 @@ The server detects `EADDRINUSE` and exits with a clear error (see `server/index.

**Symptom:** Red reconnecting banner appears periodically.

**Cause:** WebSocket connection to gateway dropped. Nerve auto-reconnects with exponential backoff (1s base, 30s max, up to 50 attempts).
**Cause:** WebSocket connection to gateway dropped. Nerve auto-reconnects with exponential backoff (1s base, 30s max).

**Diagnosis:**
```bash

@ -192,6 +192,20 @@ rm ~/.nerve/device-identity.json
WS_ALLOWED_HOSTS=mygateway.local npm start
```

### Workspace panels fail with `origin not allowed`

**Symptom:** Chat connects, but remote workspace panels like Files, Memory, Config, or Skills fail with gateway `origin not allowed` errors.

**Cause:** Those panels can use the server-side gateway RPC fallback, which opens its own WebSocket to the gateway. If Nerve does not know its real browser-facing origin, that fallback can present the wrong origin even though the browser chat path is already working.

**Fix:** Set the exact browser origin in `.env` and allow the same origin on the gateway:

```bash
NERVE_PUBLIC_ORIGIN=https://nerve.example.com
```

Also add `https://nerve.example.com` to `gateway.controlUi.allowedOrigins`, then restart both Nerve and the gateway.

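For a same-box systemd install, the restart step is just the two commands already used in the Tailscale guide above; adjust to your own service manager if yours differs:

```bash
# Restart Nerve so the new NERVE_PUBLIC_ORIGIN is picked up,
# then restart the gateway so the updated allowedOrigins list applies.
sudo systemctl restart nerve.service
openclaw gateway restart
```
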
### "device token mismatch" on WebSocket connect
|
||||
|
||||
**Symptom:** Server logs show `[ws-proxy] Gateway closed: code=1008, reason=unauthorized: device token mismatch`.
|
||||
|
|
@ -238,6 +252,19 @@ openclaw devices approve <requestId>
|
|||
|
||||
After approval, reconnect from the browser (refresh the page or click reconnect).
|
||||
|
||||
### Cron tab says "Tool not available: cron"
|
||||
|
||||
**Symptom:** Settings → Cron fails with `Tool not available: cron` or shows a cron access warning.
|
||||
|
||||
**Cause:** Fresh setup patches this, but older installs, upgrade paths that never re-ran setup, or remote gateway setups can still be missing the required HTTP tool allowlist.
|
||||
|
||||
**Fix:** Add these tools to `gateway.tools.allow` on the gateway, then restart it:
|
||||
```json
|
||||
["cron", "gateway", "sessions_spawn"]
|
||||
```
|
||||
|
||||
If Nerve and the gateway are on the same machine, re-running `npm run setup` can patch it for you.
|
||||
|
||||
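If you prefer to patch the config file directly, a sketch in the same style as the allowed-origins snippet from the Tailscale guide (assuming the gateway config lives at `~/.openclaw/openclaw.json`) could look like this:

```bash
node - <<'NODE'
// Sketch only: merge the three tools Nerve needs into gateway.tools.allow.
const fs = require('fs');
const path = `${process.env.HOME}/.openclaw/openclaw.json`;
const cfg = JSON.parse(fs.readFileSync(path, 'utf8'));

cfg.gateway ??= {};
cfg.gateway.tools ??= {};
const existing = cfg.gateway.tools.allow || [];
cfg.gateway.tools.allow = [...new Set([...existing, 'cron', 'gateway', 'sessions_spawn'])];

fs.writeFileSync(path, `${JSON.stringify(cfg, null, 2)}\n`);
console.log(`Updated gateway.tools.allow in ${path}`);
NODE
```

Restart the gateway afterwards so the new allowlist takes effect.
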
### Messages buffered indefinitely

**Symptom:** Messages sent immediately after connecting are lost.

@ -256,8 +283,9 @@ After approval, reconnect from the browser (refresh the page or click reconnect)

1. **Sound enabled?** Check Settings → Audio → Sound toggle is on
2. **TTS provider configured?** Check Settings → Audio → TTS Provider
3. **API key present?**
- OpenAI: requires `OPENAI_API_KEY` env var
- Replicate: requires `REPLICATE_API_TOKEN` env var
- OpenAI: requires `OPENAI_API_KEY`
- Replicate: requires `REPLICATE_API_TOKEN`
- Xiaomi MiMo: requires `MIMO_API_KEY`
- Edge: no key needed (free)
4. **Server-side check:**
```bash

@ -265,9 +293,9 @@ After approval, reconnect from the browser (refresh the page or click reconnect)

-H "Content-Type: application/json" \
-d '{"text": "hello", "provider": "edge"}'
```
Should return audio/mpeg binary.
Should return playable audio binary.

**Provider auto-fallback:** If no explicit provider is selected, the server tries: OpenAI (if key) → Replicate (if key) → Edge (always available).
**Provider auto-fallback:** If no explicit provider is selected, the server tries: OpenAI (if key) → Replicate (if key) → Edge (always available). Xiaomi MiMo is explicit-only and is used only when you select the Xiaomi provider.

### TTS plays old/wrong responses

@ -320,7 +348,7 @@ After approval, reconnect from the browser (refresh the page or click reconnect)

**Fix (local STT):**
- Models auto-download on first use. Check server logs for download progress or errors
- Ensure `ffmpeg` is installed (the installer handles this): `ffmpeg -version`
- Check model file exists: `ls ~/.nerve/models/ggml-tiny.bin`
- Check model file exists: `ls ~/.nerve/models/ggml-base.bin`

**Fix (OpenAI STT):**
- Set `STT_PROVIDER=openai` and `OPENAI_API_KEY` in `.env`

@ -335,7 +363,7 @@ After approval, reconnect from the browser (refresh the page or click reconnect)

**Causes:**
- Language is set incorrectly
- Local model is `tiny` (fast, but less accurate for conversational non-English)
- Local model is set to a smaller low-accuracy model
- English-only model (`*.en`) selected for non-English speech

**Fix:**

@ -407,25 +435,50 @@ MEMORY_PATH=/path/to/.openclaw/workspace/MEMORY.md

### Sessions don't appear in sidebar

**Symptom:** Session list is empty or only shows one recent root session.
**Symptom:** Session list is empty, obviously incomplete, or older top-level chats are missing.

**Cause:** Sessions are fetched via gateway RPC `sessions.list` with `activeMinutes: 120` filter.
**Expected behavior in 1.5.0+:** Older top-level chats should remain visible in the sidebar. They are no longer supposed to disappear just because they fell outside a recent-activity window.

**Likely causes:**
- Gateway connectivity or auth problems prevented the session list from refreshing
- The browser is showing stale client state after a reconnect or upgrade
- A session fetch failed and the sidebar did not recover yet

**Fix:**
- Sessions inactive for >2 hours won't appear — this is by design
- Check gateway connectivity (sessions come from the gateway, not local state)
- Force refresh: click refresh button or Cmd+K → "Refresh Sessions"
- Check gateway connectivity first; sessions come from the gateway, not local browser state
- Force refresh with the refresh button or Cmd+K → "Refresh Sessions"
- Reload the page after upgrades or reconnect storms
- If the problem persists, inspect browser console and server logs for session-list or WebSocket errors

### Sub-agent spawn times out

**Symptom:** "Timed out waiting for subagent to spawn" error.

**Cause:** Spawning uses a polling approach — sends a `[spawn-subagent]` chat message to the selected root session, then polls `sessions.list` every 2s for up to 30s waiting for a new subagent session to appear.
**Cause:** Nerve requested a child session, but the gateway never surfaced a matching worker session before the timeout. Depending on the path, that usually means the selected root session could not launch the child, the normal `sessions_spawn` path failed, or the child session metadata never became visible to Nerve's poller.

**Fix:**
- The selected root agent must be running and able to process the spawn request
- Check that the selected root session isn't busy with another task
- Check gateway logs for spawn errors
- Make sure the selected root agent exists and is healthy
- Check whether that root is already busy with another task
- Inspect gateway logs for `sessions.create`, `sessions.send`, or `sessions_spawn` failures
- Refresh sessions and retry after gateway reconnects if the session tree looks stale

### Kanban task execution cannot attach to a worker

**Symptom:** A Kanban task enters `in-progress`, but no worker session links up cleanly, the task never reaches `review`, or the parent root never gets the completion update.

**Cause:** Kanban has two execution paths now:
- **Assigned tasks** create a real child session beneath the assignee's live root via `sessions.create(parentSessionKey=...)`, then send the task with `sessions.send`.
- **Unassigned or `operator` tasks** use the normal `sessions_spawn` path.

That means failures can come from a missing assignee root, a child-session create/send failure, the normal `sessions_spawn` path, or a stalled completion poller. On macOS, unassigned or `operator` tasks are rejected outright and must be assigned to a live worker root first.

**Fix:**
- If the task is assigned, make sure that assignee's root session exists and is healthy
- If the task is unassigned, verify the normal `sessions_spawn` path is working
- On macOS, assign the task to a live worker root before executing it
- Check gateway RPC/session logs for the assignee-root path, and HTTP tool logs for the normal spawn path
- If the child session finishes but the parent root never updates, inspect gateway RPC logs and recent session events for the parent-report step
- If the worker never appears in the session list, inspect gateway connectivity and recent session events first

### Session status stuck on "THINKING"

@ -446,23 +499,28 @@ MEMORY_PATH=/path/to/.openclaw/workspace/MEMORY.md

**Symptom:** Model selector is empty or shows only the current model.

**Cause:** Models are fetched via `GET /api/gateway/models`, which runs `openclaw models list --json`.
**Cause:** Models are fetched via `GET /api/gateway/models`, which reads the active OpenClaw config. If that config is unreadable, or it has no configured models, the dropdown can stay empty or sparse.

**Fix:**
- Ensure the `openclaw` binary is in PATH (the server searches multiple locations — see `lib/openclaw-bin.ts`)
- Set `OPENCLAW_BIN` env var to the explicit path
- Check server logs for model list errors
- An allowlist can restrict visible models (configured server-side)
- Verify the expected models are configured in OpenClaw (`agents.defaults.model` / `agents.defaults.models`)
- Check that Nerve can read the active OpenClaw config file
- Check server logs for `gateway/models` read errors
- After fixing config, reopen the spawn dialog or refresh the page

### Model change doesn't take effect

**Symptom:** Switched model in UI but responses still come from the old model.

**Cause:** Model/thinking changes go through `POST /api/gateway/session-patch`, which invokes the gateway's session patch API.
**Cause:** There are two different paths now:
- **Model changes** can fall back to `POST /api/gateway/session-patch`
- **Thinking changes** must go through WebSocket RPC `sessions.patch`

If you call the HTTP fallback with only `thinkingLevel`, it returns **501**. If Nerve cannot find an active root session and you omitted `sessionKey`, it can return **409**.

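As a rough sketch of exercising that HTTP fallback by hand (the exact request body shape is an assumption; only the endpoint and the `sessionKey`/`thinkingLevel`/status-code notes above come from this section):

```bash
# Hypothetical example: patch only the model for one session via the HTTP fallback.
# Per the note above, sending only thinkingLevel would be expected to return 501.
curl -X POST http://127.0.0.1:3080/api/gateway/session-patch \
  -H "Content-Type: application/json" \
  -d '{"sessionKey": "<session-key>", "model": "<model-id>"}'
```
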
**Fix:**
- The change applies per-session — switching sessions will show that session's model
- Verify the patch succeeded: check for `{ ok: true }` response
- For model changes, verify the HTTP fallback returned `{ ok: true }`
- For thinking changes, retry after the WebSocket reconnects and use the normal `sessions.patch` flow
- If you see a 409 from the HTTP fallback, pass `sessionKey` explicitly or make sure an active root session exists
- Some models may not be available for the current session type

---

@ -2,6 +2,20 @@

Nerve ships a built-in updater that pulls the latest published release from GitHub, rebuilds, restarts the service, and verifies health — all in one command.

## Prerequisites

Before using the updater, make sure this checkout has an HTTPS GitHub `origin`, for example:

```bash
git remote -v
```

`origin` should point to `https://github.com/<owner>/<repo>.git`. If it does not, fix it first:

```bash
git remote set-url origin https://github.com/<owner>/<repo>.git
```

## Quick start

```bash

@ -9,7 +23,7 @@ npm run update -- --yes
```

This will:
1. Check prerequisites (git, Node.js, npm)
1. Check prerequisites (git, Node.js, npm, and an HTTPS GitHub `origin` remote)
2. Resolve the latest published GitHub release (fallback: latest semver tag)
3. Snapshot the current state for rollback
4. `git fetch --tags && git checkout <tag>`

@ -119,6 +133,22 @@ If no service manager is found, the updater skips restart and prints manual star

| `~/.nerve/updater/snapshots/<timestamp>/.env` | Backed-up `.env` files |
| `~/.nerve/updater/nerve-update.lock` | PID lock file |

## Upgrade notes

### 1.5.0, Kanban data path migration

On first start after upgrading to 1.5.0, Nerve automatically migrates legacy Kanban runtime data from:
- `server-dist/data/kanban`
- `server/data/kanban`

into the canonical runtime location:
- `${NERVE_DATA_DIR:-~/.nerve}/kanban`

What to do:
- Let the first post-upgrade start complete before judging the migration
- Update backup scripts to follow `${NERVE_DATA_DIR:-~/.nerve}/kanban`
- Do not keep writing to the old `server-dist` or `server` data paths after upgrade

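One quick way to sanity-check the migration after that first start is to list the canonical location (assuming the default `~/.nerve` data dir unless you set `NERVE_DATA_DIR`):

```bash
# Sketch: confirm the migrated Kanban data now lives in the canonical runtime path.
ls -la "${NERVE_DATA_DIR:-$HOME/.nerve}/kanban"
```
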
## Troubleshooting

### "Could not fetch release or semver tags"

@ -127,8 +157,9 @@ The updater resolves versions from GitHub Releases first. If release lookup fail

**Fix:** Verify remote/release access and tags:
```bash
git remote -v # Verify origin points to the right repo
git fetch --tags origin # Pull any missing tags
git remote -v
git remote set-url origin https://github.com/<owner>/<repo>.git
git fetch --tags origin
curl -sSf https://api.github.com/repos/<owner>/<repo>/releases/latest | jq .tag_name
```

BIN docs/font-size-dropdown.png (Normal file, 457 KiB) - binary file not shown
BIN docs/font-size-setting.png (Normal file, 423 KiB) - binary file not shown

@ -6,7 +6,7 @@ import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  globalIgnores(['dist', 'server-dist']),
  globalIgnores(['dist', 'server-dist', '.worktrees']),
  {
    files: ['**/*.{ts,tsx}'],
    extends: [
150 install.sh
|
|
@ -8,6 +8,7 @@
|
|||
# Or with options:
|
||||
# curl -fsSL ... | bash -s -- --dir ~/nerve --version v1.4.4
|
||||
# curl -fsSL ... | bash -s -- --dir ~/nerve --branch main
|
||||
# curl -fsSL ... | bash -s -- --gateway-url https://gw.example.com --gateway-token <token> --skip-setup
|
||||
# ──────────────────────────────────────────────────────────────────────
|
||||
set -euo pipefail
|
||||
|
||||
|
|
@ -37,6 +38,8 @@ NODE_MIN=22
|
|||
SKIP_SETUP=false
|
||||
DRY_RUN=false
|
||||
GATEWAY_TOKEN=""
|
||||
GATEWAY_URL_OVERRIDE=""
|
||||
ACCESS_MODE=""
|
||||
ENV_MISSING=false
|
||||
|
||||
# ── Colors ────────────────────────────────────────────────────────────
|
||||
|
|
@ -68,6 +71,41 @@ elif command -v dnf &>/dev/null || command -v yum &>/dev/null; then IS_FEDORA=tr
|
|||
hint() { echo -e " ${RAIL}"; echo -e " ${RAIL} ${BOLD}$1${NC}"; echo -e " ${RAIL}"; }
|
||||
cmd() { echo -e " ${RAIL} ${CYAN}\$ $1${NC}"; }
|
||||
|
||||
print_deployment_guides() {
|
||||
local guides_file="${INSTALL_DIR}/scripts/lib/deployment-guides.json"
|
||||
local rendered_guides
|
||||
|
||||
[[ -r "$guides_file" ]] || return 1
|
||||
|
||||
rendered_guides="$(node - "$guides_file" <<'EOF'
|
||||
const fs = require('node:fs');
|
||||
|
||||
const guidesPath = process.argv[2];
|
||||
|
||||
try {
|
||||
const guides = JSON.parse(fs.readFileSync(guidesPath, 'utf8'));
|
||||
if (!Array.isArray(guides)) process.exit(0);
|
||||
|
||||
const rendered = [];
|
||||
for (const guide of guides) {
|
||||
if (guide && typeof guide.title === 'string' && typeof guide.url === 'string') {
|
||||
rendered.push(` ${guide.title}: ${guide.url}`);
|
||||
}
|
||||
}
|
||||
|
||||
process.stdout.write(rendered.join('\n'));
|
||||
} catch {
|
||||
process.exit(0);
|
||||
}
|
||||
EOF
|
||||
)" || return 1
|
||||
|
||||
[[ -n "$rendered_guides" ]] || return 1
|
||||
|
||||
echo " Deployment guides:"
|
||||
printf '%s\n' "$rendered_guides"
|
||||
}
|
||||
|
||||
# Check if a port is already in use. Returns 0 if port is free, 1 if occupied.
|
||||
check_port() {
|
||||
local port="$1"
|
||||
|
|
@ -227,18 +265,22 @@ while [[ $# -gt 0 ]]; do
|
|||
--skip-setup) SKIP_SETUP=true; shift ;;
|
||||
--dry-run) DRY_RUN=true; shift ;;
|
||||
--gateway-token) [[ $# -ge 2 ]] || { echo "Missing value for --gateway-token"; exit 1; }; GATEWAY_TOKEN="$2"; shift 2 ;;
|
||||
--gateway-url) [[ $# -ge 2 ]] || { echo "Missing value for --gateway-url"; exit 1; }; GATEWAY_URL_OVERRIDE="$2"; shift 2 ;;
|
||||
--access-mode) [[ $# -ge 2 ]] || { echo "Missing value for --access-mode"; exit 1; }; ACCESS_MODE="$2"; shift 2 ;;
|
||||
--help|-h)
|
||||
echo "Nerve Installer"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --dir <path> Install directory (default: ~/nerve)"
|
||||
echo " --version <vX.Y.Z> Install a specific release version"
|
||||
echo " --branch <name> Install from a branch (dev override; bypasses release mode)"
|
||||
echo " --repo <url> Git repo URL"
|
||||
echo " --skip-setup Skip the interactive setup wizard"
|
||||
echo " --gateway-token <t> Gateway token (for non-interactive installs)"
|
||||
echo " --dry-run Simulate the install without changing anything"
|
||||
echo " --help Show this help"
|
||||
echo " --dir <path> Install directory (default: ~/nerve)"
|
||||
echo " --version <vX.Y.Z> Install a specific release version"
|
||||
echo " --branch <name> Install from a branch (dev override; bypasses release mode)"
|
||||
echo " --repo <url> Git repo URL"
|
||||
echo " --skip-setup Skip the interactive setup wizard"
|
||||
echo " --gateway-token <t> Gateway token (for non-interactive installs)"
|
||||
echo " --gateway-url <url> Gateway URL (for remote/non-interactive installs)"
|
||||
echo " --access-mode <m> Setup access mode: local|network|custom|tailscale-ip|tailscale-serve"
|
||||
echo " --dry-run Simulate the install without changing anything"
|
||||
echo " --help Show this help"
|
||||
exit 0
|
||||
;;
|
||||
*) echo "Unknown option: $1"; exit 1 ;;
|
||||
|
|
@ -250,6 +292,42 @@ if [[ -n "$VERSION" && "$BRANCH_EXPLICIT" == "true" ]]; then
|
|||
exit 1
|
||||
fi
|
||||
|
||||
normalize_access_mode() {
|
||||
case "$1" in
|
||||
"") echo "" ;;
|
||||
tailscale) echo "tailscale-ip" ;;
|
||||
local|network|custom|tailscale-ip|tailscale-serve) echo "$1" ;;
|
||||
*)
|
||||
fail "Invalid --access-mode: $1"
|
||||
echo " Supported values: local, network, custom, tailscale-ip, tailscale-serve"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
normalize_gateway_url() {
|
||||
local url="$1"
|
||||
|
||||
if command -v node &>/dev/null; then
|
||||
node -e 'const input=process.argv[1];try{const u=new URL(input);if(!["http:","https:"].includes(u.protocol))throw new Error("protocol");if(u.search||u.hash)throw new Error("query-or-fragment");process.stdout.write(u.toString().replace(/\/+$/,""));}catch{process.exit(1)}' "$url" 2>/dev/null || return 1
|
||||
else
|
||||
[[ "$url" =~ ^https?://[^[:space:]?#]+$ ]] || return 1
|
||||
printf '%s' "${url%/}"
|
||||
fi
|
||||
}
|
||||
|
||||
ACCESS_MODE=$(normalize_access_mode "$ACCESS_MODE")
|
||||
|
||||
if [[ -n "$GATEWAY_URL_OVERRIDE" ]]; then
|
||||
normalized_gateway_url=$(normalize_gateway_url "$GATEWAY_URL_OVERRIDE" || true)
|
||||
if [[ -z "$normalized_gateway_url" ]]; then
|
||||
fail "Invalid --gateway-url: $GATEWAY_URL_OVERRIDE"
|
||||
echo " Expected an absolute http:// or https:// URL without query or fragment"
|
||||
exit 1
|
||||
fi
|
||||
GATEWAY_URL_OVERRIDE="$normalized_gateway_url"
|
||||
fi
|
||||
|
||||
# ── Detect interactive mode ───────────────────────────────────────────
|
||||
# When piped via curl | bash, stdin is the pipe — but /dev/tty still
|
||||
# provides access to the controlling terminal for interactive prompts.
|
||||
|
|
@ -481,11 +559,11 @@ check_build_tools() {
|
|||
|
||||
# ── Check: Gateway reachable ──────────────────────────────────────────
|
||||
check_gateway() {
|
||||
local gw_url="http://127.0.0.1:18789"
|
||||
local gw_url="${GATEWAY_URL_OVERRIDE:-http://127.0.0.1:18789}"
|
||||
|
||||
# Try to read from openclaw.json
|
||||
# Try to read from openclaw.json when no explicit gateway URL was provided
|
||||
local config_file="${HOME}/.openclaw/openclaw.json"
|
||||
if [[ -f "$config_file" ]]; then
|
||||
if [[ -z "$GATEWAY_URL_OVERRIDE" && -f "$config_file" ]]; then
|
||||
local port
|
||||
port=$(node -e "try{const c=JSON.parse(require('fs').readFileSync('$config_file','utf8'));console.log(c.gateway?.port??18789)}catch{console.log(18789)}" 2>/dev/null || echo "18789")
|
||||
gw_url="http://127.0.0.1:${port}"
|
||||
|
|
@ -786,6 +864,7 @@ generate_env_from_gateway() {
|
|||
fi
|
||||
|
||||
local gw_token="${GATEWAY_TOKEN:-}"
|
||||
local gw_url="${GATEWAY_URL_OVERRIDE:-}"
|
||||
local gw_port="18789"
|
||||
local config_file="${HOME}/.openclaw/openclaw.json"
|
||||
|
||||
|
|
@ -793,8 +872,12 @@ generate_env_from_gateway() {
|
|||
if [[ -z "$gw_token" ]]; then
|
||||
gw_token=$(detect_gateway_token)
|
||||
fi
|
||||
if [[ -f "$config_file" ]]; then
|
||||
if [[ -z "$gw_url" && -f "$config_file" ]]; then
|
||||
gw_port=$(node -e "try{const c=JSON.parse(require('fs').readFileSync('$config_file','utf8'));console.log(c.gateway?.port??18789)}catch{console.log(18789)}" 2>/dev/null || echo "18789")
|
||||
gw_url="http://127.0.0.1:${gw_port}"
|
||||
fi
|
||||
if [[ -z "$gw_url" ]]; then
|
||||
gw_url="http://127.0.0.1:${gw_port}"
|
||||
fi
|
||||
|
||||
if [[ -n "$gw_token" ]]; then
|
||||
|
|
@ -823,7 +906,7 @@ generate_env_from_gateway() {
|
|||
fi
|
||||
fi
|
||||
cat > .env <<ENVEOF
|
||||
GATEWAY_URL=http://127.0.0.1:${gw_port}
|
||||
GATEWAY_URL=${gw_url}
|
||||
GATEWAY_TOKEN=${gw_token}
|
||||
PORT=${nerve_port}
|
||||
ENVEOF
|
||||
|
|
@ -841,10 +924,15 @@ stage "Configure"
|
|||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
if [[ "$SKIP_SETUP" == "true" ]]; then
|
||||
dry "Would skip setup wizard (--skip-setup)"
|
||||
elif [[ -n "$ACCESS_MODE" ]]; then
|
||||
dry "Would run non-interactive setup wizard (--defaults --access-mode ${ACCESS_MODE})"
|
||||
else
|
||||
dry "Would launch interactive setup wizard"
|
||||
dry "Would prompt for: gateway token, port, TTS config"
|
||||
fi
|
||||
if [[ -n "$GATEWAY_URL_OVERRIDE" ]]; then
|
||||
dry "Would write GATEWAY_URL=${GATEWAY_URL_OVERRIDE}"
|
||||
fi
|
||||
else
|
||||
if [[ "$SKIP_SETUP" == "true" ]]; then
|
||||
if [[ -f .env ]]; then
|
||||
|
|
@ -854,8 +942,8 @@ else
|
|||
generate_env_from_gateway
|
||||
fi
|
||||
else
|
||||
if [[ "$INTERACTIVE" == "true" ]]; then
|
||||
if [[ -f .env ]]; then
|
||||
if [[ -f .env ]]; then
|
||||
if [[ "$INTERACTIVE" == "true" && -z "$ACCESS_MODE" ]]; then
|
||||
ok "Existing .env found"
|
||||
printf " ${RAIL} ${YELLOW}?${NC} Run setup wizard anyway? (y/N) "
|
||||
if read -r answer < /dev/tty 2>/dev/null; then
|
||||
|
|
@ -871,18 +959,22 @@ else
|
|||
warn "Cannot read input — run ${CYAN}npm run setup${NC} manually to reconfigure"
|
||||
fi
|
||||
else
|
||||
NERVE_INSTALLER=1 npm run setup < /dev/tty 2>/dev/null || {
|
||||
warn "Setup wizard failed — attempting auto-config from gateway..."
|
||||
generate_env_from_gateway
|
||||
}
|
||||
fi
|
||||
else
|
||||
if [[ -f .env ]]; then
|
||||
ok "Existing .env found — keeping current configuration"
|
||||
else
|
||||
info "Non-interactive mode — generating .env from gateway config..."
|
||||
generate_env_from_gateway
|
||||
fi
|
||||
elif [[ -n "$ACCESS_MODE" ]]; then
|
||||
info "Explicit access mode requested — running non-interactive setup wizard..."
|
||||
NERVE_INSTALLER=1 npm run setup -- --defaults --access-mode "$ACCESS_MODE" || {
|
||||
fail "Setup failed for --access-mode ${ACCESS_MODE}"
|
||||
exit 1
|
||||
}
|
||||
elif [[ "$INTERACTIVE" == "true" ]]; then
|
||||
NERVE_INSTALLER=1 npm run setup < /dev/tty 2>/dev/null || {
|
||||
warn "Setup wizard failed — attempting auto-config from gateway..."
|
||||
generate_env_from_gateway
|
||||
}
|
||||
else
|
||||
info "Non-interactive mode — generating .env from gateway config..."
|
||||
generate_env_from_gateway
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
|
@ -1099,7 +1191,7 @@ if [[ "$(uname -s)" == "Darwin" ]]; then
|
|||
if [[ "$(echo "$answer" | tr "[:upper:]" "[:lower:]")" != "n" ]]; then
|
||||
setup_launchd
|
||||
else
|
||||
ok "Skipped — start manually with: npm start"
|
||||
ok "Skipped — start manually with: cd ${INSTALL_DIR} && npm run prod"
|
||||
fi
|
||||
else
|
||||
info "Cannot read input — installing launchd service by default"
|
||||
|
|
@ -1137,7 +1229,7 @@ elif command -v systemctl &>/dev/null; then
|
|||
if [[ "$(echo "$answer" | tr "[:upper:]" "[:lower:]")" != "n" ]]; then
|
||||
setup_systemd
|
||||
else
|
||||
ok "Skipped — start manually with: npm start"
|
||||
ok "Skipped — start manually with: cd ${INSTALL_DIR} && npm run prod"
|
||||
fi
|
||||
else
|
||||
info "Cannot read input — installing systemd service by default"
|
||||
|
|
@ -1199,6 +1291,8 @@ else
|
|||
echo -e " ${ORANGE}│${NC}$(printf ' %.0s' $(seq 1 $box_inner))${ORANGE}│${NC}"
|
||||
echo -e " ${ORANGE}╰$(printf '─%.0s' $(seq 1 $box_inner))╯${NC}"
|
||||
echo ""
|
||||
print_deployment_guides || true
|
||||
echo ""
|
||||
echo -e " ${DIM}Directory: cd ${INSTALL_DIR}${NC}"
|
||||
if $IS_MAC; then
|
||||
echo -e " ${DIM}Restart: launchctl stop com.nerve.server && launchctl start com.nerve.server${NC}"
|
||||
|
|
@ -1207,7 +1301,7 @@ else
|
|||
echo -e " ${DIM}Restart: sudo systemctl restart nerve.service${NC}"
|
||||
echo -e " ${DIM}Logs: sudo journalctl -u nerve.service -f${NC}"
|
||||
else
|
||||
echo -e " ${DIM}Start: cd ${INSTALL_DIR} && npm start${NC}"
|
||||
echo -e " ${DIM}Start: cd ${INSTALL_DIR} && npm run prod${NC}"
|
||||
fi
|
||||
fi
|
||||
echo ""
|
||||
|
|
|
|||
8 package-lock.json (generated)
|
|
@ -1,12 +1,12 @@
|
|||
{
|
||||
"name": "openclaw-nerve",
|
||||
"version": "1.4.9",
|
||||
"version": "1.5.2",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "openclaw-nerve",
|
||||
"version": "1.4.9",
|
||||
"version": "1.5.2",
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
|
|
@ -36,6 +36,7 @@
|
|||
"dotenv": "^17.2.4",
|
||||
"highlight.js": "^11.11.1",
|
||||
"hono": "^4.11.7",
|
||||
"json5": "^2.2.3",
|
||||
"lightweight-charts": "^5.1.0",
|
||||
"lucide-react": "^0.563.0",
|
||||
"node-pty": "^1.1.0",
|
||||
|
|
@ -9314,7 +9315,8 @@
|
|||
},
|
||||
"node_modules/json5": {
|
||||
"version": "2.2.3",
|
||||
"dev": true,
|
||||
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
|
||||
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
|
||||
"license": "MIT",
|
||||
"bin": {
|
||||
"json5": "lib/cli.js"
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "openclaw-nerve",
|
||||
"version": "1.4.9",
|
||||
"version": "1.5.2",
|
||||
"description": "Web interface for OpenClaw — chat, voice input, TTS, and agent monitoring in the browser",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
|
|
@ -65,6 +65,7 @@
|
|||
"dotenv": "^17.2.4",
|
||||
"highlight.js": "^11.11.1",
|
||||
"hono": "^4.11.7",
|
||||
"json5": "^2.2.3",
|
||||
"lightweight-charts": "^5.1.0",
|
||||
"lucide-react": "^0.563.0",
|
||||
"node-pty": "^1.1.0",
|
||||
|
|
|
|||
73 scripts/lib/access-plan.test.ts (Normal file)
|
|
@ -0,0 +1,73 @@
|
|||
import { describe, it, expect } from 'vitest';
|
||||
import { buildAccessPlan, applyAccessPlanToConfig } from './access-plan.js';
|
||||
|
||||
const EXAMPLE_TS_DNS = 'example-node.tail0000.ts.net';
|
||||
const EXAMPLE_TS_IPV4 = '100.64.0.42';
|
||||
|
||||
const connectedTailscale = {
|
||||
installed: true,
|
||||
authenticated: true,
|
||||
ipv4: EXAMPLE_TS_IPV4,
|
||||
dnsName: EXAMPLE_TS_DNS,
|
||||
serveOrigins: [`https://${EXAMPLE_TS_DNS}`],
|
||||
};
|
||||
|
||||
describe('buildAccessPlan', () => {
|
||||
it('builds a tailscale-ip plan with network bind and IP origin', () => {
|
||||
expect(buildAccessPlan({
|
||||
profile: 'tailscale-ip',
|
||||
port: '3080',
|
||||
tailscale: connectedTailscale,
|
||||
})).toMatchObject({
|
||||
bindHost: '0.0.0.0',
|
||||
browserOrigins: [`http://${EXAMPLE_TS_IPV4}:3080`],
|
||||
gatewayAllowedOrigins: [`http://${EXAMPLE_TS_IPV4}:3080`],
|
||||
wsAllowedHosts: [EXAMPLE_TS_IPV4],
|
||||
});
|
||||
});
|
||||
|
||||
it('builds a tailscale-serve plan with loopback bind and ts.net origin', () => {
|
||||
expect(buildAccessPlan({
|
||||
profile: 'tailscale-serve',
|
||||
port: '3080',
|
||||
tailscale: connectedTailscale,
|
||||
})).toMatchObject({
|
||||
bindHost: '127.0.0.1',
|
||||
browserOrigins: [`https://${EXAMPLE_TS_DNS}`],
|
||||
gatewayAllowedOrigins: [`https://${EXAMPLE_TS_DNS}`],
|
||||
wsAllowedHosts: [],
|
||||
});
|
||||
});
|
||||
|
||||
it('adds follow-up steps when tailscale-serve is selected without a confirmed ts.net origin', () => {
|
||||
const plan = buildAccessPlan({
|
||||
profile: 'tailscale-serve',
|
||||
port: '3080',
|
||||
tailscale: {
|
||||
installed: true,
|
||||
authenticated: true,
|
||||
ipv4: EXAMPLE_TS_IPV4,
|
||||
dnsName: null,
|
||||
serveOrigins: [],
|
||||
},
|
||||
});
|
||||
expect(plan.followUpSteps.length).toBeGreaterThan(0);
|
||||
expect(plan.followUpSteps[0]).toContain('tailscale serve --bg http://127.0.0.1:3080');
|
||||
expect(plan.followUpSteps[0]).not.toContain('--bg 443');
|
||||
});
|
||||
});
|
||||
|
||||
describe('applyAccessPlanToConfig', () => {
|
||||
it('maps the access plan back onto env config fields', () => {
|
||||
expect(applyAccessPlanToConfig({ PORT: '3080' }, buildAccessPlan({
|
||||
profile: 'tailscale-ip',
|
||||
port: '3080',
|
||||
tailscale: connectedTailscale,
|
||||
}))).toMatchObject({
|
||||
HOST: '0.0.0.0',
|
||||
ALLOWED_ORIGINS: `http://${EXAMPLE_TS_IPV4}:3080`,
|
||||
CSP_CONNECT_EXTRA: `http://${EXAMPLE_TS_IPV4}:3080 ws://${EXAMPLE_TS_IPV4}:3080`,
|
||||
WS_ALLOWED_HOSTS: EXAMPLE_TS_IPV4,
|
||||
});
|
||||
});
|
||||
});
|
||||
154 scripts/lib/access-plan.ts (Normal file)
|
|
@ -0,0 +1,154 @@
|
|||
import type { EnvConfig } from './env-writer.js';
|
||||
import type { TailscaleState } from './tailscale.js';
|
||||
|
||||
export type InstallerAccessProfile =
|
||||
| 'local'
|
||||
| 'network'
|
||||
| 'custom'
|
||||
| 'tailscale-ip'
|
||||
| 'tailscale-serve';
|
||||
|
||||
export interface AccessPlan {
|
||||
profile: InstallerAccessProfile;
|
||||
bindHost: string;
|
||||
browserOrigins: string[];
|
||||
gatewayAllowedOrigins: string[];
|
||||
cspConnectExtra: string[];
|
||||
wsAllowedHosts: string[];
|
||||
followUpSteps: string[];
|
||||
}
|
||||
|
||||
export interface BuildAccessPlanInput {
|
||||
profile: InstallerAccessProfile;
|
||||
port: string;
|
||||
sslPort?: string;
|
||||
remoteHost?: string | null;
|
||||
tailscale?: TailscaleState;
|
||||
}
|
||||
|
||||
function dedupe(values: Array<string | null | undefined>): string[] {
|
||||
return [...new Set(values.map(value => value?.trim()).filter((value): value is string => Boolean(value)))];
|
||||
}
|
||||
|
||||
function isLoopback(host: string | null | undefined): boolean {
|
||||
return !host || host === '127.0.0.1' || host === 'localhost' || host === '::1';
|
||||
}
|
||||
|
||||
function httpOrigin(host: string, port: string): string {
|
||||
return `http://${host}:${port}`;
|
||||
}
|
||||
|
||||
function httpsOrigin(host: string, port: string): string {
|
||||
return `https://${host}:${port}`;
|
||||
}
|
||||
|
||||
function websocketOrigin(origin: string): string {
|
||||
if (origin.startsWith('https://')) return origin.replace(/^https:\/\//, 'wss://');
|
||||
if (origin.startsWith('http://')) return origin.replace(/^http:\/\//, 'ws://');
|
||||
return origin;
|
||||
}
|
||||
|
||||
function emptyPlan(profile: InstallerAccessProfile, bindHost: string): AccessPlan {
|
||||
return {
|
||||
profile,
|
||||
bindHost,
|
||||
browserOrigins: [],
|
||||
gatewayAllowedOrigins: [],
|
||||
cspConnectExtra: [],
|
||||
wsAllowedHosts: [],
|
||||
followUpSteps: [],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildAccessPlan(input: BuildAccessPlanInput): AccessPlan {
|
||||
const port = input.port;
|
||||
const tailscale = input.tailscale;
|
||||
|
||||
switch (input.profile) {
|
||||
case 'local':
|
||||
return emptyPlan('local', '127.0.0.1');
|
||||
|
||||
case 'network': {
|
||||
const host = input.remoteHost?.trim() || '';
|
||||
const plan = emptyPlan('network', '0.0.0.0');
|
||||
if (!host) {
|
||||
plan.followUpSteps.push('Provide a reachable LAN IP address for network mode.');
|
||||
return plan;
|
||||
}
|
||||
const origin = httpOrigin(host, port);
|
||||
plan.browserOrigins = [origin];
|
||||
plan.gatewayAllowedOrigins = [origin];
|
||||
plan.cspConnectExtra = [origin, websocketOrigin(origin)];
|
||||
plan.wsAllowedHosts = isLoopback(host) ? [] : [host];
|
||||
return plan;
|
||||
}
|
||||
|
||||
case 'custom': {
|
||||
const host = input.remoteHost?.trim() || '127.0.0.1';
|
||||
const plan = emptyPlan('custom', host);
|
||||
if (!isLoopback(host)) {
|
||||
const origin = httpOrigin(host, port);
|
||||
plan.browserOrigins = [origin];
|
||||
plan.gatewayAllowedOrigins = [origin];
|
||||
plan.cspConnectExtra = [origin, websocketOrigin(origin)];
|
||||
plan.wsAllowedHosts = [host];
|
||||
if (input.sslPort) {
|
||||
const secureOrigin = httpsOrigin(host, input.sslPort);
|
||||
plan.browserOrigins = dedupe([...plan.browserOrigins, secureOrigin]);
|
||||
plan.gatewayAllowedOrigins = dedupe([...plan.gatewayAllowedOrigins, secureOrigin]);
|
||||
plan.cspConnectExtra = dedupe([...plan.cspConnectExtra, secureOrigin, websocketOrigin(secureOrigin)]);
|
||||
}
|
||||
}
|
||||
return plan;
|
||||
}
|
||||
|
||||
case 'tailscale-ip': {
|
||||
const plan = emptyPlan('tailscale-ip', '0.0.0.0');
|
||||
const ip = tailscale?.ipv4;
|
||||
if (!ip) {
|
||||
plan.followUpSteps.push('Connect Tailscale and obtain a tailnet IPv4 address, then re-run setup.');
|
||||
return plan;
|
||||
}
|
||||
const origin = httpOrigin(ip, port);
|
||||
plan.browserOrigins = [origin];
|
||||
plan.gatewayAllowedOrigins = [origin];
|
||||
plan.cspConnectExtra = [origin, websocketOrigin(origin)];
|
||||
plan.wsAllowedHosts = [ip];
|
||||
return plan;
|
||||
}
|
||||
|
||||
case 'tailscale-serve': {
|
||||
const plan = emptyPlan('tailscale-serve', '127.0.0.1');
|
||||
const origin = tailscale?.serveOrigins?.[0] || null;
|
||||
if (!origin) {
|
||||
plan.followUpSteps = dedupe([
|
||||
`Run: tailscale serve --bg http://127.0.0.1:${port}`,
|
||||
'Confirm Tailscale Serve exposes a usable https://<node>.tail<id>.ts.net origin, then re-run setup.',
|
||||
]);
|
||||
return plan;
|
||||
}
|
||||
plan.browserOrigins = [origin];
|
||||
plan.gatewayAllowedOrigins = [origin];
|
||||
plan.cspConnectExtra = [origin, websocketOrigin(origin)];
|
||||
return plan;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function applyAccessPlanToConfig(config: EnvConfig, plan: AccessPlan): EnvConfig {
|
||||
const next: EnvConfig = {
|
||||
...config,
|
||||
HOST: plan.bindHost,
|
||||
};
|
||||
|
||||
if (plan.browserOrigins.length > 0) next.ALLOWED_ORIGINS = dedupe(plan.browserOrigins).join(',');
|
||||
else delete next.ALLOWED_ORIGINS;
|
||||
|
||||
if (plan.cspConnectExtra.length > 0) next.CSP_CONNECT_EXTRA = dedupe(plan.cspConnectExtra).join(' ');
|
||||
else delete next.CSP_CONNECT_EXTRA;
|
||||
|
||||
if (plan.wsAllowedHosts.length > 0) next.WS_ALLOWED_HOSTS = dedupe(plan.wsAllowedHosts).join(',');
|
||||
else delete next.WS_ALLOWED_HOSTS;
|
||||
|
||||
return next;
|
||||
}
|
||||
42 scripts/lib/agent-name-default.test.ts (Normal file)
|
|
@ -0,0 +1,42 @@
|
|||
import { mkdtempSync, mkdirSync, writeFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { describe, expect, it } from 'vitest';
|
||||
|
||||
import { detectAgentDisplayNameDefault } from './agent-name-default.js';
|
||||
|
||||
describe('detectAgentDisplayNameDefault', () => {
|
||||
it('prefers existing AGENT_NAME when present', () => {
|
||||
const result = detectAgentDisplayNameDefault(' Existing Agent ', 'Agent', []);
|
||||
expect(result).toBe('Existing Agent');
|
||||
});
|
||||
|
||||
it('detects Name from IDENTITY.md metadata', () => {
|
||||
const tempDir = mkdtempSync(join(tmpdir(), 'nerve-agent-name-'));
|
||||
const identityPath = join(tempDir, 'IDENTITY.md');
|
||||
writeFileSync(identityPath, '# IDENTITY\n- **Name:** Chip\n', 'utf8');
|
||||
|
||||
const result = detectAgentDisplayNameDefault(undefined, 'Agent', [identityPath]);
|
||||
expect(result).toBe('Chip');
|
||||
});
|
||||
|
||||
it('falls back to second identity candidate if first is missing/invalid', () => {
|
||||
const tempDir = mkdtempSync(join(tmpdir(), 'nerve-agent-name-'));
|
||||
const firstPath = join(tempDir, 'missing.md');
|
||||
const secondPath = join(tempDir, 'nested', 'IDENTITY.md');
|
||||
mkdirSync(join(tempDir, 'nested'), { recursive: true });
|
||||
writeFileSync(secondPath, 'Notes\n- **Name:** Cookie\n', 'utf8');
|
||||
|
||||
const result = detectAgentDisplayNameDefault(undefined, 'Agent', [firstPath, secondPath]);
|
||||
expect(result).toBe('Cookie');
|
||||
});
|
||||
|
||||
it('falls back to literal default when metadata is unavailable or malformed', () => {
|
||||
const tempDir = mkdtempSync(join(tmpdir(), 'nerve-agent-name-'));
|
||||
const identityPath = join(tempDir, 'IDENTITY.md');
|
||||
writeFileSync(identityPath, '# IDENTITY\nNo name field here\n', 'utf8');
|
||||
|
||||
const result = detectAgentDisplayNameDefault(undefined, 'Agent', [identityPath]);
|
||||
expect(result).toBe('Agent');
|
||||
});
|
||||
});
|
||||
34 scripts/lib/agent-name-default.ts (Normal file)
|
|
@ -0,0 +1,34 @@
|
|||
import { existsSync, readFileSync } from 'node:fs';
|
||||
import { homedir } from 'node:os';
|
||||
import { resolve } from 'node:path';
|
||||
|
||||
function defaultIdentityPaths(): string[] {
|
||||
const home = homedir();
|
||||
return [
|
||||
resolve(home, '.openclaw', 'workspace', 'IDENTITY.md'),
|
||||
resolve(home, '.openclaw', 'workspace', 'projects', 'openclaw-agent', 'IDENTITY.md'),
|
||||
];
|
||||
}
|
||||
|
||||
export function detectAgentDisplayNameDefault(
|
||||
existingName: string | undefined,
|
||||
fallbackName: string,
|
||||
identityPaths: string[] = defaultIdentityPaths(),
|
||||
): string {
|
||||
if (existingName?.trim()) return existingName.trim();
|
||||
|
||||
for (const identityPath of identityPaths) {
|
||||
if (!existsSync(identityPath)) continue;
|
||||
|
||||
try {
|
||||
const raw = readFileSync(identityPath, 'utf-8');
|
||||
const match = raw.match(/^-[ \t]*\*\*Name:\*\*[ \t]*(.+)$/m);
|
||||
const detected = match?.[1]?.trim();
|
||||
if (detected) return detected;
|
||||
} catch {
|
||||
// Non-fatal — keep falling back.
|
||||
}
|
||||
}
|
||||
|
||||
return fallbackName;
|
||||
}
|
||||
14 scripts/lib/deployment-guides.json (Normal file)
|
|
@ -0,0 +1,14 @@
|
|||
[
|
||||
{
|
||||
"title": "Run everything on one machine",
|
||||
"url": "https://docs.nerve.zone/guide/deployment-local"
|
||||
},
|
||||
{
|
||||
"title": "Use a cloud Gateway with Nerve on your laptop",
|
||||
"url": "https://docs.nerve.zone/guide/deployment-remote-gateway"
|
||||
},
|
||||
{
|
||||
"title": "Run both Nerve and Gateway in the cloud",
|
||||
"url": "https://docs.nerve.zone/guide/deployment-cloud"
|
||||
}
|
||||
]
|
||||
43 scripts/lib/deployment-guides.test.ts (Normal file)
|
|
@ -0,0 +1,43 @@
|
|||
import { describe, expect, it, vi } from 'vitest';
|
||||
import guides from './deployment-guides.json';
|
||||
import { printDeploymentGuides, shouldPrintDeploymentGuides } from './deployment-guides.js';
|
||||
|
||||
describe('deployment guide metadata', () => {
|
||||
it('prints in standalone setup flows', () => {
|
||||
expect(shouldPrintDeploymentGuides({ invokedFromInstaller: false, defaultsMode: false })).toBe(true);
|
||||
expect(shouldPrintDeploymentGuides({ invokedFromInstaller: false, defaultsMode: true })).toBe(true);
|
||||
});
|
||||
|
||||
it('skips setup-side printing for installer defaults flow', () => {
|
||||
expect(shouldPrintDeploymentGuides({ invokedFromInstaller: true, defaultsMode: true })).toBe(false);
|
||||
});
|
||||
|
||||
it('contains the expected public docs links and human-readable titles', () => {
|
||||
expect(guides).toEqual([
|
||||
{
|
||||
title: 'Run everything on one machine',
|
||||
url: 'https://docs.nerve.zone/guide/deployment-local',
|
||||
},
|
||||
{
|
||||
title: 'Use a cloud Gateway with Nerve on your laptop',
|
||||
url: 'https://docs.nerve.zone/guide/deployment-remote-gateway',
|
||||
},
|
||||
{
|
||||
title: 'Run both Nerve and Gateway in the cloud',
|
||||
url: 'https://docs.nerve.zone/guide/deployment-cloud',
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it('prints the rendered deployment guide block', () => {
|
||||
const log = vi.fn();
|
||||
|
||||
printDeploymentGuides(log);
|
||||
|
||||
expect(log.mock.calls.map(([line]) => line)).toEqual([
|
||||
' Deployment guides:',
|
||||
...guides.map((guide) => ` ${guide.title}: \x1b[36m${guide.url}\x1b[0m`),
|
||||
'',
|
||||
]);
|
||||
});
|
||||
});
|
||||
16 scripts/lib/deployment-guides.ts (Normal file)
|
|
@ -0,0 +1,16 @@
|
|||
import guides from './deployment-guides.json';
|
||||
|
||||
export function shouldPrintDeploymentGuides(params: {
|
||||
invokedFromInstaller: boolean;
|
||||
defaultsMode: boolean;
|
||||
}): boolean {
|
||||
return !(params.invokedFromInstaller && params.defaultsMode);
|
||||
}
|
||||
|
||||
export function printDeploymentGuides(log: (line: string) => void = console.log): void {
|
||||
log(' Deployment guides:');
|
||||
for (const guide of guides) {
|
||||
log(` ${guide.title}: \x1b[36m${guide.url}\x1b[0m`);
|
||||
}
|
||||
log('');
|
||||
}
|
||||
613 scripts/lib/gateway-detect.test.ts (Normal file)
|
|
@ -0,0 +1,613 @@
|
|||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import { mkdtempSync, mkdirSync, readFileSync, rmSync, writeFileSync } from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
|
||||
const EXAMPLE_TS_DNS = 'example-node.tail0000.ts.net';
|
||||
const EXAMPLE_TS_IPV4 = '100.64.0.42';
|
||||
|
||||
const FULL_OPERATOR_SCOPES = [
|
||||
'operator.admin',
|
||||
'operator.read',
|
||||
'operator.write',
|
||||
'operator.approvals',
|
||||
'operator.pairing',
|
||||
];
|
||||
|
||||
async function importGatewayDetect(execSyncImpl = vi.fn()): Promise<{
|
||||
execSyncMock: ReturnType<typeof vi.fn>;
|
||||
mod: typeof import('./gateway-detect.js');
|
||||
}> {
|
||||
vi.doUnmock('node:child_process');
|
||||
vi.resetModules();
|
||||
vi.doMock('node:child_process', async () => {
|
||||
const actual = await vi.importActual<typeof import('node:child_process')>('node:child_process');
|
||||
return {
|
||||
...actual,
|
||||
default: actual,
|
||||
execSync: execSyncImpl,
|
||||
};
|
||||
});
|
||||
const mod = await import('./gateway-detect.js');
|
||||
return { execSyncMock: execSyncImpl, mod };
|
||||
}
|
||||
|
||||
describe('gateway detection and repair', () => {
|
||||
const originalEnv = { ...process.env };
|
||||
let tempHome = '';
|
||||
|
||||
beforeEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
vi.resetModules();
|
||||
tempHome = mkdtempSync(path.join(os.tmpdir(), 'nerve-gateway-detect-'));
|
||||
process.env.HOME = tempHome;
|
||||
process.env.NERVE_DATA_DIR = path.join(tempHome, '.nerve');
|
||||
delete process.env.OPENCLAW_GATEWAY_TOKEN;
|
||||
|
||||
mkdirSync(path.join(tempHome, '.openclaw', 'devices'), { recursive: true });
|
||||
mkdirSync(path.join(tempHome, '.openclaw', 'identity'), { recursive: true });
|
||||
mkdirSync(path.join(tempHome, '.openclaw'), { recursive: true });
|
||||
mkdirSync(path.join(tempHome, '.nerve'), { recursive: true });
|
||||
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'openclaw.json'), JSON.stringify({
|
||||
gateway: {
|
||||
port: 18789,
|
||||
auth: { token: 'test-token' },
|
||||
tools: { allow: ['cron', 'gateway'] },
|
||||
controlUi: {
|
||||
allowedOrigins: ['http://localhost:3080'],
|
||||
},
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
writeFileSync(path.join(tempHome, '.nerve', 'device-identity.json'), JSON.stringify({
|
||||
deviceId: 'nerve-device',
|
||||
publicKeyB64url: 'nerve-public-key',
|
||||
}, null, 2));
|
||||
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), JSON.stringify({
|
||||
'gateway-device': {
|
||||
deviceId: 'gateway-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
tokens: {
|
||||
operator: {
|
||||
token: 'gateway-token',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
},
|
||||
},
|
||||
},
|
||||
'nerve-device': {
|
||||
deviceId: 'nerve-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
displayName: 'Nerve UI',
|
||||
platform: 'web',
|
||||
clientId: 'webchat-ui',
|
||||
clientMode: 'webchat',
|
||||
tokens: {
|
||||
operator: {
|
||||
token: 'test-token',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'identity', 'device.json'), JSON.stringify({
|
||||
deviceId: 'gateway-device',
|
||||
publicKeyPem: '-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEA2sI3DpP2u80EIk1BddY5hAzvY4xXHzkwmo7aX6ixkm0=\n-----END PUBLIC KEY-----\n',
|
||||
}, null, 2));
|
||||
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'identity', 'device-auth.json'), JSON.stringify({
|
||||
version: 1,
|
||||
deviceId: 'gateway-device',
|
||||
tokens: {
|
||||
operator: {
|
||||
token: 'gateway-token',
|
||||
scopes: ['operator.read'],
|
||||
},
|
||||
},
|
||||
}, null, 2));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.doUnmock('node:child_process');
|
||||
process.env = { ...originalEnv };
|
||||
if (tempHome) rmSync(tempHome, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('emits one change per missing origin and patches both when applied', async () => {
|
||||
const { mod } = await importGatewayDetect();
|
||||
|
||||
const changes = mod.detectNeededConfigChanges({
|
||||
gatewayToken: 'test-token',
|
||||
allowedOrigins: [
|
||||
` http://${EXAMPLE_TS_IPV4}:3080 `,
|
||||
`https://${EXAMPLE_TS_DNS}`,
|
||||
],
|
||||
});
|
||||
|
||||
expect(changes.some(change => change.description.includes(`${EXAMPLE_TS_IPV4}:3080`))).toBe(true);
|
||||
expect(changes.some(change => change.description.includes(EXAMPLE_TS_DNS))).toBe(true);
|
||||
|
||||
for (const change of changes.filter(change => change.description.includes('allowed origins'))) {
|
||||
const result = change.apply();
|
||||
expect(result.ok).toBe(true);
|
||||
}
|
||||
|
||||
const updated = JSON.parse(readFileSync(path.join(tempHome, '.openclaw', 'openclaw.json'), 'utf8'));
|
||||
expect(updated.gateway.controlUi.allowedOrigins).toEqual(expect.arrayContaining([
|
||||
'http://localhost:3080',
|
||||
`http://${EXAMPLE_TS_IPV4}:3080`,
|
||||
`https://${EXAMPLE_TS_DNS}`,
|
||||
]));
|
||||
expect(updated.gateway.controlUi.allowedOrigins).not.toContain(` http://${EXAMPLE_TS_IPV4}:3080 `);
|
||||
});
|
||||
|
||||
it('detects missing sessions_spawn in gateway.tools.allow and patches it for kanban execution', async () => {
|
||||
const { mod } = await importGatewayDetect();
|
||||
|
||||
const changes = mod.detectNeededConfigChanges({
|
||||
gatewayToken: 'test-token',
|
||||
});
|
||||
const toolsAllowChange = changes.find((change) => change.id === 'tools-allow');
|
||||
|
||||
expect(toolsAllowChange).toBeDefined();
|
||||
expect(toolsAllowChange?.description).toContain('sessions_spawn');
|
||||
|
||||
const result = toolsAllowChange!.apply();
|
||||
expect(result.ok).toBe(true);
|
||||
|
||||
const updated = JSON.parse(readFileSync(path.join(tempHome, '.openclaw', 'openclaw.json'), 'utf8'));
|
||||
expect(updated.gateway.tools.allow).toEqual(expect.arrayContaining([
|
||||
'cron',
|
||||
'gateway',
|
||||
'sessions_spawn',
|
||||
]));
|
||||
});
|
||||
|
||||
it('prefers a detected config token over a stale shell env token during setup', async () => {
|
||||
process.env.OPENCLAW_GATEWAY_TOKEN = 'stale-shell-token';
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const detected = mod.detectGatewayConfig();
|
||||
|
||||
expect(detected.token).toBe('test-token');
|
||||
expect(mod.chooseSetupGatewayToken({
|
||||
envToken: mod.getEnvGatewayToken(),
|
||||
detectedToken: detected.token,
|
||||
})).toEqual({
|
||||
token: 'test-token',
|
||||
source: 'detected',
|
||||
});
|
||||
});
|
||||
|
||||
it('prefers the systemd runtime token over a stale shell env token during setup', async () => {
|
||||
process.env.OPENCLAW_GATEWAY_TOKEN = 'stale-shell-token';
|
||||
mkdirSync(path.join(tempHome, '.config', 'systemd', 'user'), { recursive: true });
|
||||
writeFileSync(
|
||||
path.join(tempHome, '.config', 'systemd', 'user', 'openclaw-gateway.service'),
|
||||
'[Service]\nEnvironment=OPENCLAW_GATEWAY_TOKEN=real-systemd-token\n',
|
||||
);
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const detected = mod.detectGatewayConfig();
|
||||
|
||||
expect(detected.token).toBe('real-systemd-token');
|
||||
expect(mod.chooseSetupGatewayToken({
|
||||
envToken: mod.getEnvGatewayToken(),
|
||||
detectedToken: detected.token,
|
||||
})).toEqual({
|
||||
token: 'real-systemd-token',
|
||||
source: 'detected',
|
||||
});
|
||||
});
|
||||
|
||||
it('detects a systemd-only runtime token even when openclaw.json is missing', async () => {
|
||||
process.env.OPENCLAW_GATEWAY_TOKEN = 'stale-shell-token';
|
||||
rmSync(path.join(tempHome, '.openclaw', 'openclaw.json'));
|
||||
mkdirSync(path.join(tempHome, '.config', 'systemd', 'user'), { recursive: true });
|
||||
writeFileSync(
|
||||
path.join(tempHome, '.config', 'systemd', 'user', 'openclaw-gateway.service'),
|
||||
'[Service]\nEnvironment=OPENCLAW_GATEWAY_TOKEN=real-systemd-token\n',
|
||||
);
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const detected = mod.detectGatewayConfig();
|
||||
|
||||
expect(detected.token).toBe('real-systemd-token');
|
||||
expect(mod.chooseSetupGatewayToken({
|
||||
envToken: mod.getEnvGatewayToken(),
|
||||
detectedToken: detected.token,
|
||||
})).toEqual({
|
||||
token: 'real-systemd-token',
|
||||
source: 'detected',
|
||||
});
|
||||
});
|
||||
|
||||
it('approves only the pending request that matches Nerve and leaves unrelated requests untouched', async () => {
|
||||
const execSyncMock = vi.fn((command: string) => {
|
||||
if (command.includes('devices list --json')) {
|
||||
return Buffer.from(JSON.stringify({
|
||||
pending: [
|
||||
{
|
||||
requestId: 'req-nerve',
|
||||
deviceId: 'nerve-device',
|
||||
publicKey: 'nerve-public-key',
|
||||
displayName: 'Nerve UI',
|
||||
},
|
||||
{
|
||||
requestId: 'req-other',
|
||||
deviceId: 'other-device',
|
||||
publicKey: 'other-public-key',
|
||||
displayName: 'Other Device',
|
||||
},
|
||||
],
|
||||
}));
|
||||
}
|
||||
|
||||
if (command === 'openclaw devices approve req-nerve') {
|
||||
return Buffer.from('approved');
|
||||
}
|
||||
|
||||
throw new Error(`Unexpected command: ${command}`);
|
||||
});
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const result = mod.approvePendingNerveDevice({
|
||||
exec: execSyncMock,
|
||||
});
|
||||
|
||||
expect(result).toMatchObject({
|
||||
ok: true,
|
||||
approved: 1,
|
||||
});
|
||||
expect(execSyncMock).toHaveBeenCalledWith(
|
||||
'openclaw devices approve req-nerve',
|
||||
expect.objectContaining({ timeout: 10000, stdio: 'pipe' }),
|
||||
);
|
||||
expect(execSyncMock).not.toHaveBeenCalledWith(
|
||||
'openclaw devices approve req-other',
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
|
||||
it('does not approve a pending request with an invalid requestId', async () => {
|
||||
const execSyncMock = vi.fn((command: string) => {
|
||||
if (command.includes('devices list --json')) {
|
||||
return Buffer.from(JSON.stringify({
|
||||
pending: [
|
||||
{
|
||||
requestId: 'req-nerve; rm -rf /',
|
||||
deviceId: 'nerve-device',
|
||||
publicKey: 'nerve-public-key',
|
||||
displayName: 'Nerve UI',
|
||||
},
|
||||
],
|
||||
}));
|
||||
}
|
||||
|
||||
throw new Error(`Unexpected command: ${command}`);
|
||||
});
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const result = mod.approvePendingNerveDevice({
|
||||
exec: execSyncMock,
|
||||
});
|
||||
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.approved).toBe(0);
|
||||
expect(result.message.toLowerCase()).toContain('manual');
|
||||
expect(execSyncMock).not.toHaveBeenCalledWith(
|
||||
expect.stringContaining('openclaw devices approve'),
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
|
||||
it('does not approve any pending request when Nerve cannot be identified safely', async () => {
|
||||
const execSyncMock = vi.fn((command: string) => {
|
||||
if (command.includes('devices list --json')) {
|
||||
return Buffer.from(JSON.stringify({
|
||||
pending: [
|
||||
{
|
||||
requestId: 'req-a',
|
||||
displayName: 'Nerve UI',
|
||||
},
|
||||
{
|
||||
requestId: 'req-b',
|
||||
displayName: 'Nerve UI',
|
||||
},
|
||||
],
|
||||
}));
|
||||
}
|
||||
|
||||
throw new Error(`Unexpected command: ${command}`);
|
||||
});
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const result = mod.approvePendingNerveDevice({
|
||||
exec: execSyncMock,
|
||||
});
|
||||
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.approved).toBe(0);
|
||||
expect(result.message.toLowerCase()).toContain('manual');
|
||||
expect(execSyncMock).not.toHaveBeenCalledWith(
|
||||
expect.stringContaining('openclaw devices approve'),
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
|
||||
it('fails closed when devices list returns parseable JSON with an unusable pending shape', async () => {
|
||||
const execSyncMock = vi.fn((command: string) => {
|
||||
if (command.includes('devices list --json')) {
|
||||
return Buffer.from(JSON.stringify({
|
||||
pending: {
|
||||
requestId: 'req-nerve',
|
||||
deviceId: 'nerve-device',
|
||||
publicKey: 'nerve-public-key',
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
||||
throw new Error(`Unexpected command: ${command}`);
|
||||
});
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const result = mod.approvePendingNerveDevice({
|
||||
exec: execSyncMock,
|
||||
});
|
||||
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.approved).toBe(0);
|
||||
expect(result.message.toLowerCase()).toContain('manual');
|
||||
expect(execSyncMock).not.toHaveBeenCalledWith(
|
||||
expect.stringContaining('openclaw devices approve'),
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
|
||||
it('fails closed when a pending request matches only one of Nerve\'s known identifiers', async () => {
|
||||
const execSyncMock = vi.fn((command: string) => {
|
||||
if (command.includes('devices list --json')) {
|
||||
return Buffer.from(JSON.stringify({
|
||||
pending: [
|
||||
{
|
||||
requestId: 'req-partial',
|
||||
deviceId: 'nerve-device',
|
||||
publicKey: 'wrong-public-key',
|
||||
displayName: 'Nerve UI',
|
||||
},
|
||||
{
|
||||
requestId: 'req-other',
|
||||
deviceId: 'other-device',
|
||||
publicKey: 'other-public-key',
|
||||
displayName: 'Other Device',
|
||||
},
|
||||
],
|
||||
}));
|
||||
}
|
||||
|
||||
throw new Error(`Unexpected command: ${command}`);
|
||||
});
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const result = mod.approvePendingNerveDevice({
|
||||
exec: execSyncMock,
|
||||
});
|
||||
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.approved).toBe(0);
|
||||
expect(result.message.toLowerCase()).toContain('manual');
|
||||
expect(execSyncMock).not.toHaveBeenCalledWith(
|
||||
expect.stringContaining('openclaw devices approve'),
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
|
||||
  it('fails closed when pending-request inspection cannot run safely', async () => {
    const execSyncMock = vi.fn((command: string) => {
      if (command.includes('devices list --json')) {
        throw new Error('openclaw devices list failed');
      }

      throw new Error(`Unexpected command: ${command}`);
    });

    const { mod } = await importGatewayDetect();
    const result = mod.approvePendingNerveDevice({
      exec: execSyncMock,
    });

    expect(result.ok).toBe(false);
    expect(result.approved).toBe(0);
    expect(result.message.toLowerCase()).toContain('manual');
    expect(execSyncMock).not.toHaveBeenCalledWith(
      expect.stringContaining('openclaw devices approve'),
      expect.anything(),
    );
  });
|
||||
it('repairs only the Nerve paired device record and preserves unrelated devices', async () => {
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), JSON.stringify({
|
||||
'gateway-device': {
|
||||
deviceId: 'gateway-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
tokens: { operator: { token: 'gateway-token', scopes: FULL_OPERATOR_SCOPES } },
|
||||
},
|
||||
'nerve-device': {
|
||||
deviceId: 'nerve-device',
|
||||
scopes: ['operator.read'],
|
||||
displayName: 'Nerve UI',
|
||||
platform: 'web',
|
||||
clientId: 'webchat-ui',
|
||||
clientMode: 'webchat',
|
||||
tokens: { operator: { token: 'old-token', scopes: ['operator.read'] } },
|
||||
},
|
||||
'other-device': {
|
||||
deviceId: 'other-device',
|
||||
scopes: ['operator.read'],
|
||||
displayName: 'Other Device',
|
||||
platform: 'cli',
|
||||
clientId: 'other-cli',
|
||||
clientMode: 'terminal',
|
||||
tokens: { operator: { token: 'other-token', scopes: ['operator.read'] } },
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const result = mod.prePairNerveDevice('test-token');
|
||||
const paired = JSON.parse(readFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), 'utf8'));
|
||||
|
||||
expect(result.ok).toBe(true);
|
||||
expect(paired['nerve-device'].scopes).toEqual(FULL_OPERATOR_SCOPES);
|
||||
expect(paired['nerve-device'].tokens.operator.scopes).toEqual(FULL_OPERATOR_SCOPES);
|
||||
expect(paired['nerve-device'].tokens.operator.token).toBe('test-token');
|
||||
expect(paired['other-device'].scopes).toEqual(['operator.read']);
|
||||
expect(paired['other-device'].tokens.operator.scopes).toEqual(['operator.read']);
|
||||
});
|
||||
|
||||
it('repairs only the explicitly targeted identity and does not broaden every paired device', async () => {
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), JSON.stringify({
|
||||
'gateway-device': {
|
||||
deviceId: 'gateway-device',
|
||||
scopes: ['operator.read'],
|
||||
tokens: { operator: { token: 'gateway-token', scopes: ['operator.read'] } },
|
||||
},
|
||||
'other-device': {
|
||||
deviceId: 'other-device',
|
||||
scopes: ['operator.read'],
|
||||
tokens: { operator: { token: 'other-token', scopes: ['operator.read'] } },
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const result = mod.fixGatewayDeviceScopes({ targetDeviceId: 'gateway-device' });
|
||||
const paired = JSON.parse(readFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), 'utf8'));
|
||||
const deviceAuth = JSON.parse(readFileSync(path.join(tempHome, '.openclaw', 'identity', 'device-auth.json'), 'utf8'));
|
||||
|
||||
expect(result.ok).toBe(true);
|
||||
expect(paired['gateway-device'].scopes).toEqual(FULL_OPERATOR_SCOPES);
|
||||
expect(paired['gateway-device'].tokens.operator.scopes).toEqual(FULL_OPERATOR_SCOPES);
|
||||
expect(paired['other-device'].scopes).toEqual(['operator.read']);
|
||||
expect(paired['other-device'].tokens.operator.scopes).toEqual(['operator.read']);
|
||||
expect(deviceAuth.tokens.operator.scopes).toEqual(FULL_OPERATOR_SCOPES);
|
||||
});
|
||||
|
||||
it('requests a gateway scope repair when the targeted paired operator token scopes are stale', async () => {
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), JSON.stringify({
|
||||
'gateway-device': {
|
||||
deviceId: 'gateway-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
tokens: { operator: { token: 'gateway-token', scopes: ['operator.read'] } },
|
||||
},
|
||||
'nerve-device': {
|
||||
deviceId: 'nerve-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
displayName: 'Nerve UI',
|
||||
platform: 'web',
|
||||
clientId: 'webchat-ui',
|
||||
clientMode: 'webchat',
|
||||
tokens: { operator: { token: 'test-token', scopes: FULL_OPERATOR_SCOPES } },
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'identity', 'device-auth.json'), JSON.stringify({
|
||||
version: 1,
|
||||
deviceId: 'gateway-device',
|
||||
tokens: {
|
||||
operator: {
|
||||
token: 'gateway-token',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
},
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const changes = mod.detectNeededConfigChanges({ gatewayToken: 'test-token' });
|
||||
|
||||
expect(changes.map(change => change.id)).toContain('device-scopes');
|
||||
expect(changes.map(change => change.id)).not.toContain('pre-pair');
|
||||
});
|
||||
|
||||
it('requests a gateway scope repair when the local targeted identity token scopes are stale', async () => {
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), JSON.stringify({
|
||||
'gateway-device': {
|
||||
deviceId: 'gateway-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
tokens: { operator: { token: 'gateway-token', scopes: FULL_OPERATOR_SCOPES } },
|
||||
},
|
||||
'nerve-device': {
|
||||
deviceId: 'nerve-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
displayName: 'Nerve UI',
|
||||
platform: 'web',
|
||||
clientId: 'webchat-ui',
|
||||
clientMode: 'webchat',
|
||||
tokens: { operator: { token: 'test-token', scopes: FULL_OPERATOR_SCOPES } },
|
||||
},
|
||||
'other-device': {
|
||||
deviceId: 'other-device',
|
||||
scopes: ['operator.read'],
|
||||
tokens: { operator: { token: 'other-token', scopes: ['operator.read'] } },
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'identity', 'device-auth.json'), JSON.stringify({
|
||||
version: 1,
|
||||
deviceId: 'gateway-device',
|
||||
tokens: {
|
||||
operator: {
|
||||
token: 'gateway-token',
|
||||
scopes: ['operator.read'],
|
||||
},
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const changes = mod.detectNeededConfigChanges({ gatewayToken: 'test-token' });
|
||||
|
||||
expect(changes.map(change => change.id)).toContain('device-scopes');
|
||||
expect(changes.map(change => change.id)).not.toContain('pre-pair');
|
||||
});
|
||||
|
||||
it('does not request a blanket scope repair just because an unrelated paired device is under-scoped', async () => {
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'devices', 'paired.json'), JSON.stringify({
|
||||
'gateway-device': {
|
||||
deviceId: 'gateway-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
tokens: { operator: { token: 'gateway-token', scopes: FULL_OPERATOR_SCOPES } },
|
||||
},
|
||||
'nerve-device': {
|
||||
deviceId: 'nerve-device',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
displayName: 'Nerve UI',
|
||||
platform: 'web',
|
||||
clientId: 'webchat-ui',
|
||||
clientMode: 'webchat',
|
||||
tokens: { operator: { token: 'test-token', scopes: FULL_OPERATOR_SCOPES } },
|
||||
},
|
||||
'other-device': {
|
||||
deviceId: 'other-device',
|
||||
scopes: ['operator.read'],
|
||||
tokens: { operator: { token: 'other-token', scopes: ['operator.read'] } },
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
writeFileSync(path.join(tempHome, '.openclaw', 'identity', 'device-auth.json'), JSON.stringify({
|
||||
version: 1,
|
||||
deviceId: 'gateway-device',
|
||||
tokens: {
|
||||
operator: {
|
||||
token: 'gateway-token',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
},
|
||||
},
|
||||
}, null, 2));
|
||||
|
||||
const { mod } = await importGatewayDetect();
|
||||
const changes = mod.detectNeededConfigChanges({ gatewayToken: 'test-token' });
|
||||
|
||||
expect(changes.map(change => change.id)).not.toContain('device-scopes');
|
||||
expect(changes.map(change => change.id)).not.toContain('pre-pair');
|
||||
});
|
||||
});
|
||||
|
|
@@ -7,6 +7,7 @@
|
||||
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'node:fs';
|
||||
import { execSync } from 'node:child_process';
|
||||
import type { ExecSyncOptions } from 'node:child_process';
|
||||
import { join } from 'node:path';
|
||||
import crypto from 'node:crypto';
|
||||
import os from 'node:os';
|
||||
|
|
@@ -37,6 +38,13 @@ export interface DetectedGateway {
url: string | null;
|
||||
}
|
||||
|
||||
export type GatewayTokenSource = 'existing' | 'detected' | 'env' | 'none';
|
||||
|
||||
export interface GatewayTokenChoice {
|
||||
token: string | null;
|
||||
source: GatewayTokenSource;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to auto-detect gateway configuration from the local OpenClaw install.
|
||||
* Returns null values for anything that can't be detected.
|
||||
|
|
@@ -44,6 +52,13 @@ export interface DetectedGateway {
export function detectGatewayConfig(): DetectedGateway {
  const result: DetectedGateway = { token: null, url: null };

  // The gateway process prefers the systemd env var over the config file token,
  // so detect it first even when openclaw.json is absent or broken.
  const systemdToken = readSystemdGatewayToken();
  if (systemdToken) {
    result.token = systemdToken;
  }

  if (!existsSync(OPENCLAW_CONFIG)) {
    return result;
  }
|
|
@@ -52,13 +67,7 @@ export function detectGatewayConfig(): DetectedGateway {
const raw = readFileSync(OPENCLAW_CONFIG, 'utf-8');
|
||||
const config = JSON.parse(raw) as OpenClawConfig;
|
||||
|
||||
// Extract token — systemd env var takes priority over config file because
|
||||
// the gateway process uses the env var when both exist (known 2026.2.19 bug:
|
||||
// onboard writes different tokens to the service file and openclaw.json).
|
||||
const systemdToken = readSystemdGatewayToken();
|
||||
if (systemdToken) {
|
||||
result.token = systemdToken;
|
||||
} else if (config.gateway?.auth?.token) {
|
||||
if (!result.token && config.gateway?.auth?.token) {
|
||||
result.token = config.gateway.auth.token;
|
||||
}
|
||||
|
||||
|
|
@@ -66,7 +75,7 @@ export function detectGatewayConfig(): DetectedGateway {
const port = config.gateway?.port || 18789;
|
||||
result.url = `http://127.0.0.1:${port}`;
|
||||
} catch {
|
||||
// Config exists but can't be parsed — return nulls
|
||||
// Config exists but can't be parsed — keep any detected token and return null URL
|
||||
}
|
||||
|
||||
return result;
|
||||
|
|
@@ -100,6 +109,23 @@ export function getEnvGatewayToken(): string | null {
  return process.env.OPENCLAW_GATEWAY_TOKEN || null;
}

export function chooseSetupGatewayToken(opts: {
  existingToken?: string | null;
  detectedToken?: string | null;
  envToken?: string | null;
}): GatewayTokenChoice {
  const existingToken = opts.existingToken?.trim();
  if (existingToken) return { token: existingToken, source: 'existing' };

  const detectedToken = opts.detectedToken?.trim();
  if (detectedToken) return { token: detectedToken, source: 'detected' };

  const envToken = opts.envToken?.trim();
  if (envToken) return { token: envToken, source: 'env' };

  return { token: null, source: 'none' };
}

export interface GatewayPatchResult {
  ok: boolean;
  message: string;
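A minimal usage sketch of the token-priority helper added above (the literal token values are illustrative assumptions, not part of this change):

import { chooseSetupGatewayToken } from './gateway-detect.js';

// Priority is the existing .env value, then the locally detected token, then the environment token.
const choice = chooseSetupGatewayToken({
  existingToken: '  token-from-env-file  ',   // hypothetical value read from .env
  detectedToken: 'token-from-openclaw-json',  // hypothetical value from detectGatewayConfig()
  envToken: 'token-from-shell-env',           // hypothetical OPENCLAW_GATEWAY_TOKEN value
});
// choice => { token: 'token-from-env-file', source: 'existing' } — inputs are trimmed before use.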
|
|
@@ -146,7 +172,7 @@ export function patchGatewayAllowedOrigins(origin: string): GatewayPatchResult {
}
|
||||
}
|
||||
|
||||
const REQUIRED_HTTP_TOOLS = ['cron', 'gateway'] as const;
|
||||
const REQUIRED_HTTP_TOOLS = ['cron', 'gateway', 'sessions_spawn'] as const;
|
||||
|
||||
// Must match the connect metadata sent by Nerve's browser WS client
|
||||
// (src/hooks/useWebSocket.ts) to avoid OpenClaw 2026.2.26+ metadata-repair prompts.
|
||||
|
|
@@ -203,6 +229,116 @@ const FULL_OPERATOR_SCOPES = [
'operator.pairing',
|
||||
];
|
||||
|
||||
interface DeviceIdentityMatch {
|
||||
deviceId?: string;
|
||||
publicKey?: string;
|
||||
}
|
||||
|
||||
interface PendingDeviceRequest {
|
||||
requestId?: string;
|
||||
deviceId?: string;
|
||||
publicKey?: string;
|
||||
}
|
||||
|
||||
const SAFE_DEVICE_REQUEST_ID_RE = /^[A-Za-z0-9_-]+$/;
|
||||
|
||||
type PendingDeviceExec = (
|
||||
command: string,
|
||||
options?: Pick<ExecSyncOptions, 'timeout' | 'stdio'>,
|
||||
) => string | Buffer;
|
||||
|
||||
function hasFullOperatorScopes(scopes?: string[]): boolean {
|
||||
return FULL_OPERATOR_SCOPES.every(scope => (scopes || []).includes(scope));
|
||||
}
|
||||
|
||||
function readGatewayDeviceId(): string | null {
|
||||
const deviceJsonPath = join(HOME, '.openclaw', 'identity', 'device.json');
|
||||
if (!existsSync(deviceJsonPath)) return null;
|
||||
|
||||
try {
|
||||
const device = JSON.parse(readFileSync(deviceJsonPath, 'utf-8')) as { deviceId?: string };
|
||||
return device.deviceId || null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function readNerveDeviceIdentity(): DeviceIdentityMatch | null {
|
||||
const nerveDir = process.env.NERVE_DATA_DIR || join(process.env.HOME || HOME, '.nerve');
|
||||
const identityPath = join(nerveDir, 'device-identity.json');
|
||||
if (!existsSync(identityPath)) return null;
|
||||
|
||||
try {
|
||||
const stored = JSON.parse(readFileSync(identityPath, 'utf-8')) as {
|
||||
deviceId?: string;
|
||||
publicKeyB64url?: string;
|
||||
};
|
||||
const deviceId = stored.deviceId?.trim();
|
||||
const publicKey = stored.publicKeyB64url?.trim();
|
||||
if (!deviceId && !publicKey) return null;
|
||||
return { deviceId, publicKey };
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function matchesPendingDeviceRequest(item: PendingDeviceRequest, identity: DeviceIdentityMatch): boolean {
|
||||
const requestDeviceId = item.deviceId?.trim();
|
||||
const requestPublicKey = item.publicKey?.trim();
|
||||
|
||||
if (identity.deviceId && identity.publicKey) {
|
||||
return requestDeviceId === identity.deviceId && requestPublicKey === identity.publicKey;
|
||||
}
|
||||
|
||||
if (identity.deviceId) {
|
||||
return requestDeviceId === identity.deviceId;
|
||||
}
|
||||
|
||||
if (identity.publicKey) {
|
||||
return requestPublicKey === identity.publicKey;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
function localIdentityNeedsScopeFix(targetDeviceId: string): boolean {
|
||||
const identityPath = join(HOME, '.openclaw', 'identity', 'device-auth.json');
|
||||
if (!existsSync(identityPath)) return false;
|
||||
|
||||
try {
|
||||
const identity = JSON.parse(readFileSync(identityPath, 'utf-8')) as {
|
||||
deviceId?: string;
|
||||
tokens?: Record<string, { scopes?: string[] }>;
|
||||
};
|
||||
const identityDeviceId = identity.deviceId?.trim();
|
||||
if (identityDeviceId && identityDeviceId !== targetDeviceId) {
|
||||
return false;
|
||||
}
|
||||
return Object.values(identity.tokens || {}).some(token => !hasFullOperatorScopes(token.scopes));
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function repairPairedDeviceScopes(device: {
|
||||
scopes?: string[];
|
||||
tokens?: Record<string, { scopes?: string[] }>;
|
||||
}): boolean {
|
||||
let changed = false;
|
||||
|
||||
if (!hasFullOperatorScopes(device.scopes)) {
|
||||
device.scopes = [...FULL_OPERATOR_SCOPES];
|
||||
changed = true;
|
||||
}
|
||||
|
||||
if (device.tokens?.operator && !hasFullOperatorScopes(device.tokens.operator.scopes)) {
|
||||
device.tokens.operator.scopes = [...FULL_OPERATOR_SCOPES];
|
||||
changed = true;
|
||||
}
|
||||
|
||||
return changed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Bootstrap paired.json from scratch on a fresh install.
|
||||
* Reads the gateway's own device identity and creates the paired file
|
||||
|
|
@@ -298,7 +434,9 @@ function bootstrapPairedJson(): { ok: boolean; message: string; needsRestart: bo
* This function upgrades the gateway's own device scopes in paired.json and
|
||||
* restarts the gateway, breaking the deadlock.
|
||||
*/
|
||||
export function fixGatewayDeviceScopes(): { ok: boolean; message: string; needsRestart: boolean } {
|
||||
export function fixGatewayDeviceScopes(opts: {
|
||||
targetDeviceId?: string;
|
||||
} = {}): { ok: boolean; message: string; needsRestart: boolean } {
|
||||
const pairedPath = join(HOME, '.openclaw', 'devices', 'paired.json');
|
||||
|
||||
if (!existsSync(pairedPath)) {
|
||||
|
|
@@ -307,54 +445,51 @@ export function fixGatewayDeviceScopes(): { ok: boolean; message: string; needsR
return bootstrapPairedJson();
|
||||
}
|
||||
|
||||
const targetDeviceId = opts.targetDeviceId || readGatewayDeviceId();
|
||||
if (!targetDeviceId) {
|
||||
return { ok: false, message: 'Could not determine which gateway device to repair', needsRestart: false };
|
||||
}
|
||||
|
||||
try {
|
||||
const raw = readFileSync(pairedPath, 'utf-8');
|
||||
const paired = JSON.parse(raw) as Record<string, {
|
||||
scopes?: string[];
|
||||
tokens?: Record<string, { scopes?: string[] }>;
|
||||
clientId?: string;
|
||||
}>;
|
||||
|
||||
let fixed = false;
|
||||
for (const [, device] of Object.entries(paired)) {
|
||||
const currentScopes = device.scopes || [];
|
||||
const missing = FULL_OPERATOR_SCOPES.filter(s => !currentScopes.includes(s));
|
||||
|
||||
if (missing.length > 0) {
|
||||
device.scopes = FULL_OPERATOR_SCOPES;
|
||||
if (device.tokens?.operator) {
|
||||
device.tokens.operator.scopes = FULL_OPERATOR_SCOPES;
|
||||
}
|
||||
fixed = true;
|
||||
}
|
||||
const targetDevice = paired[targetDeviceId];
|
||||
if (!targetDevice) {
|
||||
return { ok: false, message: `Target device not found in paired.json: ${targetDeviceId}`, needsRestart: false };
|
||||
}
|
||||
|
||||
if (!fixed) {
|
||||
return { ok: true, message: 'Device scopes already correct', needsRestart: false };
|
||||
const pairedChanged = repairPairedDeviceScopes(targetDevice);
|
||||
if (pairedChanged) {
|
||||
writeFileSync(pairedPath, JSON.stringify(paired, null, 2) + '\n');
|
||||
}
|
||||
|
||||
writeFileSync(pairedPath, JSON.stringify(paired, null, 2) + '\n');
|
||||
|
||||
// Also fix the CLI's own identity file — without this the gateway sees a
|
||||
// scope mismatch (token claims operator.read, paired.json says full set)
|
||||
// and triggers a scope-upgrade request that requires approval scopes to
|
||||
// approve, creating another deadlock.
|
||||
let identityChanged = false;
|
||||
const identityPath = join(HOME, '.openclaw', 'identity', 'device-auth.json');
|
||||
if (existsSync(identityPath)) {
|
||||
try {
|
||||
const idRaw = readFileSync(identityPath, 'utf-8');
|
||||
const identity = JSON.parse(idRaw) as {
|
||||
deviceId?: string;
|
||||
tokens?: Record<string, { scopes?: string[] }>;
|
||||
};
|
||||
let idFixed = false;
|
||||
for (const [, tok] of Object.entries(identity.tokens || {})) {
|
||||
const missing = FULL_OPERATOR_SCOPES.filter(s => !(tok.scopes || []).includes(s));
|
||||
if (missing.length > 0) {
|
||||
tok.scopes = FULL_OPERATOR_SCOPES;
|
||||
idFixed = true;
|
||||
const identityDeviceId = identity.deviceId?.trim();
|
||||
if (!identityDeviceId || identityDeviceId === targetDeviceId) {
|
||||
for (const [, tok] of Object.entries(identity.tokens || {})) {
|
||||
if (!hasFullOperatorScopes(tok.scopes)) {
|
||||
tok.scopes = [...FULL_OPERATOR_SCOPES];
|
||||
identityChanged = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (idFixed) {
|
||||
if (identityChanged) {
|
||||
writeFileSync(identityPath, JSON.stringify(identity, null, 2) + '\n');
|
||||
}
|
||||
} catch {
|
||||
|
|
@@ -362,6 +497,10 @@ export function fixGatewayDeviceScopes(): { ok: boolean; message: string; needsR
}
|
||||
}
|
||||
|
||||
if (!pairedChanged && !identityChanged) {
|
||||
return { ok: true, message: 'Device scopes already correct', needsRestart: false };
|
||||
}
|
||||
|
||||
return { ok: true, message: 'Upgraded gateway device scopes', needsRestart: true };
|
||||
} catch (err) {
|
||||
return {
|
||||
|
|
@@ -373,54 +512,76 @@ export function fixGatewayDeviceScopes(): { ok: boolean; message: string; needsR
}
|
||||
|
||||
/**
|
||||
* Approve all pending device pairing requests via the CLI.
|
||||
* Call after fixing scopes + restarting the gateway.
|
||||
* Approve only the pending pairing request that can be safely matched to Nerve.
|
||||
* If the request cannot be identified unambiguously, fail closed and require manual approval.
|
||||
*/
|
||||
export function approveAllPendingDevices(): { ok: boolean; approved: number; message: string } {
|
||||
export function approvePendingNerveDevice(deps: {
|
||||
exec?: PendingDeviceExec;
|
||||
} = {}): { ok: boolean; approved: number; message: string } {
|
||||
const run = deps.exec || execSync;
|
||||
const identity = readNerveDeviceIdentity();
|
||||
if (!identity) {
|
||||
return {
|
||||
ok: false,
|
||||
approved: 0,
|
||||
message: 'Could not identify Nerve device identity, approve manually with `openclaw devices list`',
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
const listOutput = execSync('openclaw devices list --json 2>/dev/null || echo "[]"', {
|
||||
const listOutput = run('openclaw devices list --json 2>/dev/null', {
|
||||
timeout: 10000,
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
}).toString();
|
||||
|
||||
// Parse pending requests — try JSON first, fall back to box-drawing table regex
|
||||
const pendingIds: string[] = [];
|
||||
let pendingItems: PendingDeviceRequest[] = [];
|
||||
try {
|
||||
const parsed = JSON.parse(listOutput);
|
||||
const items = Array.isArray(parsed?.pending) ? parsed.pending : [];
|
||||
for (const item of items) {
|
||||
if (item.requestId && typeof item.requestId === 'string') {
|
||||
pendingIds.push(item.requestId);
|
||||
}
|
||||
if (!Array.isArray(parsed?.pending)) {
|
||||
return {
|
||||
ok: false,
|
||||
approved: 0,
|
||||
message: 'Could not safely inspect pending requests, approve Nerve manually with `openclaw devices list`',
|
||||
};
|
||||
}
|
||||
pendingItems = parsed.pending;
|
||||
} catch {
|
||||
// Not valid JSON — fall back to table regex
|
||||
const requestPattern = /│\s+([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\s+│/g;
|
||||
let match;
|
||||
while ((match = requestPattern.exec(listOutput)) !== null) {
|
||||
pendingIds.push(match[1]);
|
||||
return {
|
||||
ok: false,
|
||||
approved: 0,
|
||||
message: 'Could not safely inspect pending requests, approve Nerve manually with `openclaw devices list`',
|
||||
};
|
||||
}
|
||||
|
||||
const matches = pendingItems.filter((item) => {
|
||||
if (!item?.requestId || typeof item.requestId !== 'string') return false;
|
||||
if (!SAFE_DEVICE_REQUEST_ID_RE.test(item.requestId)) return false;
|
||||
return matchesPendingDeviceRequest(item, identity);
|
||||
});
|
||||
|
||||
if (matches.length === 0) {
|
||||
if (pendingItems.length === 0) {
|
||||
return { ok: true, approved: 0, message: 'No pending requests' };
|
||||
}
|
||||
return {
|
||||
ok: false,
|
||||
approved: 0,
|
||||
message: 'Could not safely identify the Nerve request, approve manually with `openclaw devices list`',
|
||||
};
|
||||
}
|
||||
|
||||
if (pendingIds.length === 0) {
|
||||
return { ok: true, approved: 0, message: 'No pending requests' };
|
||||
if (matches.length !== 1) {
|
||||
return {
|
||||
ok: false,
|
||||
approved: 0,
|
||||
message: 'Could not safely identify a single Nerve request, approve manually with `openclaw devices list`',
|
||||
};
|
||||
}
|
||||
|
||||
let approved = 0;
|
||||
for (const id of pendingIds) {
|
||||
try {
|
||||
execSync(`openclaw devices approve ${id}`, { timeout: 10000, stdio: 'pipe' });
|
||||
approved++;
|
||||
} catch { /* skip individual failures */ }
|
||||
}
|
||||
|
||||
return {
|
||||
ok: approved > 0,
|
||||
approved,
|
||||
message: approved > 0 ? `Approved ${approved} pending device(s)` : 'Failed to approve pending devices',
|
||||
};
|
||||
run(`openclaw devices approve ${matches[0].requestId}`, { timeout: 10000, stdio: 'pipe' });
|
||||
return { ok: true, approved: 1, message: 'Approved Nerve pending device request' };
|
||||
} catch {
|
||||
return { ok: false, approved: 0, message: 'Could not list pending devices' };
|
||||
return { ok: false, approved: 0, message: 'Could not inspect pending requests safely, approve Nerve manually with `openclaw devices list`' };
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -485,6 +646,7 @@ export function prePairNerveDevice(gatewayToken?: string): { ok: boolean; messag
// Update metadata/token if device already exists
|
||||
if (paired[deviceId]) {
|
||||
const existing = paired[deviceId] as {
|
||||
scopes?: string[];
|
||||
displayName?: string;
|
||||
platform?: string;
|
||||
clientId?: string;
|
||||
|
|
@@ -509,7 +671,7 @@ export function prePairNerveDevice(gatewayToken?: string): { ok: boolean; messag
existing.tokens.operator = {
|
||||
token,
|
||||
role: 'operator',
|
||||
scopes: FULL_OPERATOR_SCOPES,
|
||||
scopes: [...FULL_OPERATOR_SCOPES],
|
||||
createdAtMs: now,
|
||||
};
|
||||
changed = true;
|
||||
|
|
@@ -520,6 +682,11 @@ export function prePairNerveDevice(gatewayToken?: string): { ok: boolean; messag
changedFields.push('token');
|
||||
}
|
||||
|
||||
if (repairPairedDeviceScopes(existing)) {
|
||||
changed = true;
|
||||
changedFields.push('scopes');
|
||||
}
|
||||
|
||||
// OpenClaw 2026.2.26+ pins platform/device metadata on paired devices.
|
||||
// These must match the browser connect metadata Nerve sends.
|
||||
if (existing.platform !== NERVE_PAIRED_PLATFORM) {
|
||||
|
|
@@ -548,7 +715,7 @@ export function prePairNerveDevice(gatewayToken?: string): { ok: boolean; messag
}
|
||||
|
||||
writeFileSync(pairedPath, JSON.stringify(paired, null, 2) + '\n');
|
||||
const fieldsLabel = changedFields.length > 0 ? ` (${changedFields.join(', ')})` : '';
|
||||
const fieldsLabel = changedFields.length > 0 ? ` (${[...new Set(changedFields)].join(', ')})` : '';
|
||||
return {
|
||||
ok: true,
|
||||
message: `Updated Nerve paired device ${deviceId.substring(0, 12)}…${fieldsLabel}`,
|
||||
|
|
@@ -609,15 +776,21 @@ function needsDeviceScopeFix(): boolean {
return existsSync(deviceJsonPath);
|
||||
}
|
||||
|
||||
const gatewayDeviceId = readGatewayDeviceId();
|
||||
if (!gatewayDeviceId) return false;
|
||||
|
||||
try {
|
||||
const raw = readFileSync(pairedPath, 'utf-8');
|
||||
const paired = JSON.parse(raw) as Record<string, { scopes?: string[] }>;
|
||||
const paired = JSON.parse(raw) as Record<string, {
|
||||
scopes?: string[];
|
||||
tokens?: Record<string, { scopes?: string[] }>;
|
||||
}>;
|
||||
const targetDevice = paired[gatewayDeviceId];
|
||||
|
||||
if (!hasFullOperatorScopes(targetDevice?.scopes)) return true;
|
||||
if (targetDevice?.tokens?.operator && !hasFullOperatorScopes(targetDevice.tokens.operator.scopes)) return true;
|
||||
if (localIdentityNeedsScopeFix(gatewayDeviceId)) return true;
|
||||
|
||||
for (const [, device] of Object.entries(paired)) {
|
||||
const currentScopes = device.scopes || [];
|
||||
const missing = FULL_OPERATOR_SCOPES.filter(s => !currentScopes.includes(s));
|
||||
if (missing.length > 0) return true;
|
||||
}
|
||||
return false;
|
||||
} catch {
|
||||
return false;
|
||||
|
|
@@ -646,17 +819,20 @@ function needsPrePair(gatewayToken?: string): boolean {
if (!paired[deviceId]) return true; // Nerve not registered
|
||||
|
||||
const existing = paired[deviceId] as {
|
||||
scopes?: string[];
|
||||
displayName?: string;
|
||||
platform?: string;
|
||||
clientId?: string;
|
||||
clientMode?: string;
|
||||
tokens?: Record<string, { token?: string }>;
|
||||
tokens?: Record<string, { token?: string; scopes?: string[] }>;
|
||||
};
|
||||
|
||||
// Check token match — if no token is available, assume mismatch (apply will generate one)
|
||||
const token = gatewayToken || detectGatewayConfig().token;
|
||||
if (!token) return true;
|
||||
if (existing.tokens?.operator?.token !== token) return true;
|
||||
if (!hasFullOperatorScopes(existing.scopes)) return true;
|
||||
if (!hasFullOperatorScopes(existing.tokens?.operator?.scopes)) return true;
|
||||
|
||||
// OpenClaw 2026.2.26+ metadata pinning requires these to match runtime connect metadata.
|
||||
if (existing.platform !== NERVE_PAIRED_PLATFORM) return true;
|
||||
|
|
@@ -709,9 +885,11 @@ function needsOriginPatch(origin: string): boolean {
export function detectNeededConfigChanges(opts: {
|
||||
nerveOrigin?: string;
|
||||
nerveHttpsOrigin?: string;
|
||||
allowedOrigins?: string[];
|
||||
gatewayToken?: string;
|
||||
}): ConfigChange[] {
|
||||
const changes: ConfigChange[] = [];
|
||||
const pairedPath = join(HOME, '.openclaw', 'devices', 'paired.json');
|
||||
|
||||
const deviceScopeFixNeeded = needsDeviceScopeFix();
|
||||
|
||||
|
|
@@ -725,7 +903,7 @@ export function detectNeededConfigChanges(opts: {
|
||||
// If device-scopes will bootstrap paired.json, always include pre-pair
|
||||
// (paired.json won't exist yet for detection, but will after device-scopes runs)
|
||||
if (deviceScopeFixNeeded || needsPrePair(opts.gatewayToken)) {
|
||||
if ((!existsSync(pairedPath) && deviceScopeFixNeeded) || needsPrePair(opts.gatewayToken)) {
|
||||
changes.push({
|
||||
id: 'pre-pair',
|
||||
description: 'Pre-pair Nerve device identity (skip manual approval step)',
|
||||
|
|
@@ -736,7 +914,7 @@ export function detectNeededConfigChanges(opts: {
if (needsToolsAllow()) {
|
||||
changes.push({
|
||||
id: 'tools-allow',
|
||||
description: 'Allow cron + gateway tools on /tools/invoke (needed for cron and gateway management)',
|
||||
description: 'Allow cron + gateway + sessions_spawn tools on /tools/invoke (needed for cron, gateway management, and kanban task execution)',
|
||||
apply: () => {
|
||||
const r = patchGatewayToolsAllow();
|
||||
return { ok: r.ok, message: r.message, needsRestart: r.ok };
|
||||
|
|
@@ -744,23 +922,27 @@ export function detectNeededConfigChanges(opts: {
});
|
||||
}
|
||||
|
||||
if (opts.nerveOrigin && needsOriginPatch(opts.nerveOrigin)) {
|
||||
changes.push({
|
||||
id: 'allowed-origins',
|
||||
description: `Add ${opts.nerveOrigin} to allowed origins (needed for WebSocket)`,
|
||||
apply: () => {
|
||||
const r = patchGatewayAllowedOrigins(opts.nerveOrigin!);
|
||||
return { ok: r.ok, message: r.message, needsRestart: r.ok };
|
||||
},
|
||||
});
|
||||
}
|
||||
const trimmedNerveOrigin = opts.nerveOrigin?.trim() || undefined;
|
||||
const trimmedNerveHttpsOrigin = opts.nerveHttpsOrigin?.trim() || undefined;
|
||||
|
||||
const origins = [...new Set([
|
||||
...(opts.allowedOrigins || []),
|
||||
trimmedNerveOrigin,
|
||||
trimmedNerveHttpsOrigin,
|
||||
].map(origin => origin?.trim()).filter((origin): origin is string => Boolean(origin)))];
|
||||
|
||||
for (const origin of origins) {
|
||||
if (!needsOriginPatch(origin)) continue;
|
||||
|
||||
let id = `allowed-origins:${origin}`;
|
||||
if (origin === trimmedNerveOrigin) id = 'allowed-origins';
|
||||
else if (origin === trimmedNerveHttpsOrigin) id = 'allowed-origins-https';
|
||||
|
||||
if (opts.nerveHttpsOrigin && needsOriginPatch(opts.nerveHttpsOrigin)) {
|
||||
changes.push({
|
||||
id: 'allowed-origins-https',
|
||||
description: `Add ${opts.nerveHttpsOrigin} to allowed origins (needed for HTTPS WebSocket)`,
|
||||
id,
|
||||
description: `Add ${origin} to allowed origins (needed for WebSocket)`,
|
||||
apply: () => {
|
||||
const r = patchGatewayAllowedOrigins(opts.nerveHttpsOrigin!);
|
||||
const r = patchGatewayAllowedOrigins(origin);
|
||||
return { ok: r.ok, message: r.message, needsRestart: r.ok };
|
||||
},
|
||||
});
|
||||
|
|
|
|||
37
scripts/lib/prereq-check.test.ts
Normal file
|
|
@@ -0,0 +1,37 @@
import { afterEach, describe, expect, it, vi } from 'vitest';

const EXAMPLE_TS_DNS = 'example-node.tail0000.ts.net';
const EXAMPLE_TS_IPV4 = '100.64.0.42';

describe('checkPrerequisites', () => {
  const originalEnv = { ...process.env };

  afterEach(() => {
    process.env = { ...originalEnv };
    vi.resetModules();
    vi.restoreAllMocks();
  });

  it('includes Tailscale authentication, dns name, and serve origins when available', async () => {
    vi.doMock('./tailscale.js', () => ({
      getTailscaleState: () => ({
        installed: true,
        authenticated: true,
        ipv4: EXAMPLE_TS_IPV4,
        dnsName: EXAMPLE_TS_DNS,
        serveOrigins: [`https://${EXAMPLE_TS_DNS}`],
      }),
    }));

    const { checkPrerequisites } = await import('./prereq-check.js');
    const result = checkPrerequisites({ quiet: true });

    expect(result.tailscale).toEqual({
      installed: true,
      authenticated: true,
      ipv4: EXAMPLE_TS_IPV4,
      dnsName: EXAMPLE_TS_DNS,
      serveOrigins: [`https://${EXAMPLE_TS_DNS}`],
    });
  });
});
|
|
@@ -4,6 +4,7 @@
|
||||
import { execSync } from 'node:child_process';
|
||||
import { success, warn, fail } from './banner.js';
|
||||
import { getTailscaleState, type TailscaleState } from './tailscale.js';
|
||||
|
||||
export interface PrereqResult {
|
||||
nodeOk: boolean;
|
||||
|
|
@@ -13,6 +14,7 @@ export interface PrereqResult {
opensslOk: boolean;
|
||||
tailscaleOk: boolean;
|
||||
tailscaleIp: string | null;
|
||||
tailscale: TailscaleState;
|
||||
}
|
||||
|
||||
/** Check all prerequisites and print results. */
|
||||
|
|
@@ -48,19 +50,16 @@ export function checkPrerequisites(opts?: { quiet?: boolean }): PrereqResult {
else warn('openssl not found (optional — needed for self-signed HTTPS certs)');
|
||||
}
|
||||
|
||||
const tailscaleOk = commandExists('tailscale');
|
||||
let tailscaleIp: string | null = null;
|
||||
if (tailscaleOk) {
|
||||
try {
|
||||
tailscaleIp = execSync('tailscale ip -4 2>/dev/null', { timeout: 3000 }).toString().trim() || null;
|
||||
} catch { /* not connected */ }
|
||||
if (!quiet) {
|
||||
if (tailscaleIp) success(`Tailscale detected (${tailscaleIp})`);
|
||||
else warn('Tailscale installed but not connected');
|
||||
}
|
||||
const tailscale = getTailscaleState();
|
||||
const tailscaleOk = tailscale.installed;
|
||||
const tailscaleIp = tailscale.ipv4;
|
||||
if (!quiet && tailscaleOk) {
|
||||
if (tailscaleIp) success(`Tailscale detected (${tailscaleIp})`);
|
||||
else if (tailscale.authenticated && tailscale.dnsName) success(`Tailscale detected (${tailscale.dnsName})`);
|
||||
else warn('Tailscale installed but not connected');
|
||||
}
|
||||
|
||||
return { nodeOk, nodeVersion, npmOk, ffmpegOk, opensslOk, tailscaleOk, tailscaleIp };
|
||||
return { nodeOk, nodeVersion, npmOk, ffmpegOk, opensslOk, tailscaleOk, tailscaleIp, tailscale };
|
||||
}
|
||||
|
||||
/** Check if a command exists on the system. */
|
||||
|
|
|
|||
102
scripts/lib/tailscale.test.ts
Normal file
102
scripts/lib/tailscale.test.ts
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
import { describe, it, expect } from 'vitest';
|
||||
import {
|
||||
normalizeDnsName,
|
||||
extractServeOrigins,
|
||||
parseTailscaleStatus,
|
||||
getTailscaleState,
|
||||
} from './tailscale.js';
|
||||
|
||||
const EXAMPLE_TS_DNS = 'example-node.tail0000.ts.net';
|
||||
const EXAMPLE_TS_IPV4 = '100.64.0.42';
|
||||
const EXAMPLE_TS_IPV6 = 'fd7a:115c:a1e0::42';
|
||||
|
||||
describe('normalizeDnsName', () => {
|
||||
it('strips the trailing dot from Self.DNSName', () => {
|
||||
expect(normalizeDnsName(`${EXAMPLE_TS_DNS}.`)).toBe(EXAMPLE_TS_DNS);
|
||||
});
|
||||
|
||||
it('returns null for blank input', () => {
|
||||
expect(normalizeDnsName('')).toBeNull();
|
||||
expect(normalizeDnsName(' ')).toBeNull();
|
||||
expect(normalizeDnsName(undefined)).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe('extractServeOrigins', () => {
|
||||
it('returns https origins for default HTTPS listeners', () => {
|
||||
expect(extractServeOrigins({
|
||||
Web: {
|
||||
[`${EXAMPLE_TS_DNS}:443`]: {
|
||||
Handlers: {
|
||||
'/': { Proxy: 'http://127.0.0.1:3080' },
|
||||
},
|
||||
},
|
||||
},
|
||||
})).toEqual([`https://${EXAMPLE_TS_DNS}`]);
|
||||
});
|
||||
|
||||
it('maps port 80 listeners to http origins', () => {
|
||||
expect(extractServeOrigins({
|
||||
Web: {
|
||||
[`${EXAMPLE_TS_DNS}:80`]: { Handlers: {} },
|
||||
},
|
||||
})).toEqual([`http://${EXAMPLE_TS_DNS}`]);
|
||||
});
|
||||
|
||||
it('preserves non-standard HTTPS ports in the origin', () => {
|
||||
expect(extractServeOrigins({
|
||||
Web: {
|
||||
[`${EXAMPLE_TS_DNS}:8443`]: { Handlers: {} },
|
||||
},
|
||||
})).toEqual([`https://${EXAMPLE_TS_DNS}:8443`]);
|
||||
});
|
||||
|
||||
it('dedupes duplicate hosts and ignores invalid entries', () => {
|
||||
expect(extractServeOrigins({
|
||||
Web: {
|
||||
[`${EXAMPLE_TS_DNS}:443`]: { Handlers: {} },
|
||||
[EXAMPLE_TS_DNS]: { Handlers: {} },
|
||||
':443': { Handlers: {} },
|
||||
},
|
||||
})).toEqual([`https://${EXAMPLE_TS_DNS}`]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('parseTailscaleStatus', () => {
|
||||
it('marks authenticated false when tailscale is installed but Self is missing a usable address', () => {
|
||||
expect(parseTailscaleStatus({})).toMatchObject({
|
||||
authenticated: false,
|
||||
ipv4: null,
|
||||
dnsName: null,
|
||||
});
|
||||
});
|
||||
|
||||
it('extracts the first IPv4 and normalized dns name from status json', () => {
|
||||
expect(parseTailscaleStatus({
|
||||
Self: {
|
||||
DNSName: `${EXAMPLE_TS_DNS}.`,
|
||||
TailscaleIPs: [EXAMPLE_TS_IPV6, EXAMPLE_TS_IPV4],
|
||||
},
|
||||
})).toEqual({
|
||||
authenticated: true,
|
||||
ipv4: EXAMPLE_TS_IPV4,
|
||||
dnsName: EXAMPLE_TS_DNS,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('getTailscaleState', () => {
|
||||
it('returns installed false when tailscale is unavailable', () => {
|
||||
const exec = () => {
|
||||
throw new Error('not found');
|
||||
};
|
||||
|
||||
expect(getTailscaleState(exec as never)).toEqual({
|
||||
installed: false,
|
||||
authenticated: false,
|
||||
ipv4: null,
|
||||
dnsName: null,
|
||||
serveOrigins: [],
|
||||
});
|
||||
});
|
||||
});
|
||||
128
scripts/lib/tailscale.ts
Normal file
|
|
@@ -0,0 +1,128 @@
import { execSync } from 'node:child_process';

export interface TailscaleState {
  installed: boolean;
  authenticated: boolean;
  ipv4: string | null;
  dnsName: string | null;
  serveOrigins: string[];
}

type ExecLike = (command: string, options?: Record<string, unknown>) => string | Buffer;

function toText(value: string | Buffer): string {
  return typeof value === 'string' ? value : value.toString('utf8');
}

function isRecord(value: unknown): value is Record<string, unknown> {
  return !!value && typeof value === 'object' && !Array.isArray(value);
}

function findFirstIpv4(values: unknown): string | null {
  if (!Array.isArray(values)) return null;
  for (const value of values) {
    if (typeof value === 'string' && /^\d{1,3}(?:\.\d{1,3}){3}$/.test(value.trim())) {
      return value.trim();
    }
  }
  return null;
}

function parseJson(text: string): unknown {
  return JSON.parse(text) as unknown;
}

export function normalizeDnsName(value: string | null | undefined): string | null {
  const trimmed = (value || '').trim();
  if (!trimmed) return null;
  return trimmed.endsWith('.') ? trimmed.slice(0, -1) : trimmed;
}

export function extractServeOrigins(json: unknown): string[] {
  if (!isRecord(json) || !isRecord(json.Web)) return [];

  const origins = new Set<string>();
  for (const rawKey of Object.keys(json.Web)) {
    const key = rawKey.trim();
    if (!key) continue;

    const portMatch = key.match(/:(\d+)$/);
    const port = portMatch?.[1] || null;
    const hostPart = portMatch ? key.slice(0, -portMatch[0].length) : key;
    const host = normalizeDnsName(hostPart);
    if (!host) continue;

    if (port === '80') {
      origins.add(`http://${host}`);
    } else if (!port || port === '443') {
      origins.add(`https://${host}`);
    } else {
      origins.add(`https://${host}:${port}`);
    }
  }

  return [...origins];
}

export function parseTailscaleStatus(json: unknown): Pick<TailscaleState, 'authenticated' | 'ipv4' | 'dnsName'> {
  if (!isRecord(json) || !isRecord(json.Self)) {
    return {
      authenticated: false,
      ipv4: null,
      dnsName: null,
    };
  }

  const self = json.Self;
  const dnsName = normalizeDnsName(typeof self.DNSName === 'string' ? self.DNSName : null);
  const ipv4 = findFirstIpv4(self.TailscaleIPs);

  return {
    authenticated: Boolean(ipv4 || dnsName),
    ipv4,
    dnsName,
  };
}

export function getTailscaleState(exec: ExecLike = execSync): TailscaleState {
  try {
    exec('command -v tailscale', { stdio: 'pipe', timeout: 3000 });
  } catch {
    return {
      installed: false,
      authenticated: false,
      ipv4: null,
      dnsName: null,
      serveOrigins: [],
    };
  }

  let status: Pick<TailscaleState, 'authenticated' | 'ipv4' | 'dnsName'> = {
    authenticated: false,
    ipv4: null,
    dnsName: null,
  };
  let serveOrigins: string[] = [];

  try {
    const statusJson = parseJson(toText(exec('tailscale status --json 2>/dev/null', { stdio: 'pipe', timeout: 3000 })));
    status = parseTailscaleStatus(statusJson);
  } catch {
    // leave default unauthenticated state
  }

  try {
    const serveJson = parseJson(toText(exec('tailscale serve status --json 2>/dev/null', { stdio: 'pipe', timeout: 3000 })));
    serveOrigins = extractServeOrigins(serveJson);
  } catch {
    // serve may be inactive or unsupported
  }

  return {
    installed: true,
    authenticated: status.authenticated,
    ipv4: status.ipv4,
    dnsName: status.dnsName,
    serveOrigins,
  };
}
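For orientation, a small usage sketch of the new tailscale helper (the import path and printed values are illustrative assumptions, not part of this change):

import { getTailscaleState } from './scripts/lib/tailscale.js'; // path assumed for the sketch

// Every probe failure degrades to installed/authenticated = false instead of throwing.
const ts = getTailscaleState();
if (ts.installed && ts.authenticated) {
  // e.g. ipv4 '100.64.0.42', dnsName 'example-node.tail0000.ts.net'
  console.log(ts.serveOrigins); // ['https://example-node.tail0000.ts.net'] when serve listens on :443
}
// Listener-to-origin mapping in extractServeOrigins: host:80 -> http://host,
// host:443 or no port -> https://host, any other port -> https://host:port.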
74
scripts/lib/validators.test.ts
Normal file
|
|
@@ -0,0 +1,74 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
function jsonResponse(body: unknown, status = 200): Response {
|
||||
return new Response(JSON.stringify(body), {
|
||||
status,
|
||||
headers: { 'content-type': 'application/json' },
|
||||
});
|
||||
}
|
||||
|
||||
describe('testGatewayConnection', () => {
|
||||
beforeEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.unstubAllGlobals();
|
||||
});
|
||||
|
||||
it('passes only when an authenticated gateway route accepts the token', async () => {
|
||||
const fetchMock = vi.fn()
|
||||
.mockResolvedValueOnce(jsonResponse({ status: 'ok' }))
|
||||
.mockResolvedValueOnce(jsonResponse({ ok: true, result: { session: 'main' } }));
|
||||
|
||||
vi.stubGlobal('fetch', fetchMock);
|
||||
|
||||
const { testGatewayConnection } = await import('./validators.js');
|
||||
const result = await testGatewayConnection('http://127.0.0.1:18789', 'real-token');
|
||||
|
||||
expect(result.ok).toBe(true);
|
||||
expect(fetchMock).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
'http://127.0.0.1:18789/tools/invoke',
|
||||
expect.objectContaining({
|
||||
method: 'POST',
|
||||
headers: expect.objectContaining({
|
||||
Authorization: 'Bearer real-token',
|
||||
'Content-Type': 'application/json',
|
||||
}),
|
||||
body: JSON.stringify({ tool: 'sessions_list', args: { limit: 1 } }),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('fails when the auth token is wrong even if /health is healthy', async () => {
|
||||
const fetchMock = vi.fn()
|
||||
.mockResolvedValueOnce(jsonResponse({ status: 'ok' }))
|
||||
.mockResolvedValueOnce(new Response('unauthorized', { status: 401 }));
|
||||
|
||||
vi.stubGlobal('fetch', fetchMock);
|
||||
|
||||
const { testGatewayConnection } = await import('./validators.js');
|
||||
const result = await testGatewayConnection('http://127.0.0.1:18789', 'wrong-token');
|
||||
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.message.toLowerCase()).toContain('token');
|
||||
expect(result.message.toLowerCase()).toContain('reject');
|
||||
});
|
||||
|
||||
it('does not treat /health alone as sufficient auth validation when token correctness is being claimed', async () => {
|
||||
const fetchMock = vi.fn()
|
||||
.mockResolvedValueOnce(jsonResponse({ status: 'ok' }))
|
||||
.mockResolvedValueOnce(new Response('not found', { status: 404 }));
|
||||
|
||||
vi.stubGlobal('fetch', fetchMock);
|
||||
|
||||
const { testGatewayConnection } = await import('./validators.js');
|
||||
const result = await testGatewayConnection('http://127.0.0.1:18789', 'maybe-token');
|
||||
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.message.toLowerCase()).toContain('auth');
|
||||
expect(result.message.toLowerCase()).toContain('confirm');
|
||||
});
|
||||
});
|
||||
|
|
@@ -32,14 +32,44 @@ export async function isPortAvailable(port: number, host: string = '127.0.0.1'):
});
|
||||
}
|
||||
|
||||
/** Test if the OpenClaw gateway is reachable at the given URL. */
|
||||
export async function testGatewayConnection(url: string): Promise<{ ok: boolean; message: string }> {
|
||||
/** Test if the OpenClaw gateway is reachable and, when provided, the token is actually accepted. */
|
||||
export async function testGatewayConnection(url: string, token?: string): Promise<{ ok: boolean; message: string }> {
|
||||
try {
|
||||
const resp = await fetch(`${url}/health`, { signal: AbortSignal.timeout(5000) });
|
||||
if (resp.ok) {
|
||||
const healthResp = await fetch(`${url}/health`, { signal: AbortSignal.timeout(5000) });
|
||||
if (!healthResp.ok) {
|
||||
return { ok: false, message: `Gateway returned HTTP ${healthResp.status}` };
|
||||
}
|
||||
|
||||
if (!token?.trim()) {
|
||||
return { ok: true, message: 'Gateway reachable' };
|
||||
}
|
||||
return { ok: false, message: `Gateway returned HTTP ${resp.status}` };
|
||||
|
||||
const authResp = await fetch(`${url}/tools/invoke`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
body: JSON.stringify({ tool: 'sessions_list', args: { limit: 1 } }),
|
||||
signal: AbortSignal.timeout(5000),
|
||||
});
|
||||
|
||||
if (!authResp.ok) {
|
||||
if (authResp.status === 401 || authResp.status === 403) {
|
||||
return { ok: false, message: 'Gateway auth token rejected' };
|
||||
}
|
||||
return { ok: false, message: `Could not confirm gateway auth, validation returned HTTP ${authResp.status}` };
|
||||
}
|
||||
|
||||
const payload = await authResp.json() as { ok?: boolean; error?: { message?: string } };
|
||||
if (payload.ok === true) {
|
||||
return { ok: true, message: 'Gateway reachable and token validated' };
|
||||
}
|
||||
|
||||
return {
|
||||
ok: false,
|
||||
message: `Could not confirm gateway auth, tool call failed: ${payload.error?.message || 'unexpected response'}`,
|
||||
};
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
return { ok: false, message: `Cannot reach gateway: ${msg}` };
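A short usage sketch of the stricter connection test (URL and token are placeholders; behaviour follows the code above):

import { testGatewayConnection } from './scripts/lib/validators.js'; // path assumed for the sketch

// Without a token this only confirms /health; with a token it also posts a
// sessions_list call to /tools/invoke so a wrong token cannot pass as "ok".
const check = await testGatewayConnection('http://127.0.0.1:18789', 'example-token');
if (!check.ok) {
  console.error(check.message); // e.g. 'Gateway auth token rejected'
}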
|
||||
|
|
|
|||
578
scripts/setup.ts
|
|
@@ -12,6 +12,7 @@
// Show token in prompts so users can verify what they entered
|
||||
|
||||
import { existsSync, readdirSync, mkdirSync, copyFileSync, lstatSync } from 'node:fs';
|
||||
import { execSync } from 'node:child_process';
|
||||
import { resolve, join } from 'node:path';
|
||||
import { homedir } from 'node:os';
|
||||
import { randomBytes } from 'node:crypto';
|
||||
|
|
@@ -35,7 +36,11 @@ import {
type EnvConfig,
|
||||
} from './lib/env-writer.js';
|
||||
import { generateSelfSignedCert } from './lib/cert-gen.js';
|
||||
import { detectGatewayConfig, getEnvGatewayToken, restartGateway, approveAllPendingDevices, detectNeededConfigChanges, type ConfigChange } from './lib/gateway-detect.js';
|
||||
import { detectGatewayConfig, getEnvGatewayToken, chooseSetupGatewayToken, restartGateway, approvePendingNerveDevice, detectNeededConfigChanges, type ConfigChange } from './lib/gateway-detect.js';
|
||||
import { applyAccessPlanToConfig, buildAccessPlan, type InstallerAccessProfile } from './lib/access-plan.js';
|
||||
import { getTailscaleState, type TailscaleState } from './lib/tailscale.js';
|
||||
import { detectAgentDisplayNameDefault } from './lib/agent-name-default.js';
|
||||
import { printDeploymentGuides, shouldPrintDeploymentGuides } from './lib/deployment-guides.js';
|
||||
|
||||
const PROJECT_ROOT = resolve(process.cwd());
|
||||
const ENV_PATH = resolve(PROJECT_ROOT, '.env');
|
||||
|
|
@@ -48,6 +53,32 @@ const isHelp = args.includes('--help') || args.includes('-h');
const isCheck = args.includes('--check');
|
||||
const isDefaults = args.includes('--defaults');
|
||||
|
||||
type AccessMode = 'local' | 'network' | 'custom' | 'tailscale-ip' | 'tailscale-serve';
|
||||
|
||||
function getArgValue(flag: string): string | undefined {
|
||||
const index = args.indexOf(flag);
|
||||
if (index === -1) return undefined;
|
||||
return args[index + 1];
|
||||
}
|
||||
|
||||
function normalizeAccessMode(value?: string | null): AccessMode | undefined {
|
||||
if (!value) return undefined;
|
||||
|
||||
const normalized = value.trim().toLowerCase();
|
||||
if (!normalized) return undefined;
|
||||
if (normalized === 'tailscale') return 'tailscale-ip';
|
||||
|
||||
if (normalized === 'local' || normalized === 'network' || normalized === 'custom' || normalized === 'tailscale-ip' || normalized === 'tailscale-serve') {
|
||||
return normalized;
|
||||
}
|
||||
|
||||
fail(`Invalid --access-mode value: ${value}`);
|
||||
console.log(' Supported values: local, network, custom, tailscale-ip, tailscale-serve');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const requestedAccessMode = normalizeAccessMode(getArgValue('--access-mode'));
|
||||
|
||||
function detectPrimaryIpv4(): string | null {
|
||||
const nets = networkInterfaces();
|
||||
for (const addrs of Object.values(nets)) {
|
||||
|
|
@@ -63,31 +94,8 @@ function isLoopback(host: string): boolean {
return !host || host === '127.0.0.1' || host === 'localhost' || host === '::1';
|
||||
}
|
||||
|
||||
function computeGatewayOrigins(config: EnvConfig, accessMode: string): {
|
||||
nerveOrigin?: string;
|
||||
nerveHttpsOrigin?: string;
|
||||
} {
|
||||
if (accessMode === 'local') return {};
|
||||
|
||||
const nervePort = config.PORT || DEFAULTS.PORT;
|
||||
let accessIp = config.HOST === '0.0.0.0'
|
||||
? (config.ALLOWED_ORIGINS?.split(',')[0]?.trim()?.replace(/^https?:\/\//, '').replace(/:\d+$/, '') || '0.0.0.0')
|
||||
: (config.HOST || 'localhost');
|
||||
|
||||
if (accessIp === '0.0.0.0') {
|
||||
accessIp = detectPrimaryIpv4() || '';
|
||||
}
|
||||
|
||||
// If we couldn't resolve a usable IP, skip origin generation
|
||||
if (!accessIp || accessIp === '0.0.0.0') {
|
||||
return {};
|
||||
}
|
||||
|
||||
const nerveOrigin = `http://${accessIp}:${nervePort}`;
|
||||
const sslPort = config.SSL_PORT;
|
||||
const nerveHttpsOrigin = sslPort ? `https://${accessIp}:${sslPort}` : undefined;
|
||||
|
||||
return { nerveOrigin, nerveHttpsOrigin };
|
||||
function sleep(ms: number): Promise<void> {
|
||||
return new Promise(resolveTimer => setTimeout(resolveTimer, ms));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@@ -126,7 +134,7 @@ async function applyConfigChanges(changes: ConfigChange[]): Promise<void> {
if (restart.ok) {
|
||||
await new Promise(r => setTimeout(r, 3000));
|
||||
if (shouldApprovePending) {
|
||||
const approved = approveAllPendingDevices();
|
||||
const approved = approvePendingNerveDevice();
|
||||
if (approved.ok && approved.approved > 0) {
|
||||
success(approved.message);
|
||||
} else if (!approved.ok) {
|
||||
|
|
@@ -203,22 +211,31 @@ async function main(): Promise<void> {
Usage: npm run setup [options]
|
||||
|
||||
Options:
|
||||
--check Validate existing .env config and test gateway connection
|
||||
--defaults Non-interactive setup using auto-detected values
|
||||
--help, -h Show this help message
|
||||
--check Validate existing .env config and test gateway connection
|
||||
--defaults Non-interactive setup using auto-detected values
|
||||
--access-mode <mode> Explicit non-interactive access mode
|
||||
--help, -h Show this help message
|
||||
|
||||
Access modes:
|
||||
local Localhost only
|
||||
network LAN-reachable
|
||||
custom Manual bind and HTTPS choices
|
||||
tailscale-ip Direct tailnet IP access
|
||||
tailscale-serve Loopback + Tailscale Serve hostname
|
||||
|
||||
The setup wizard guides you through 6 steps:
|
||||
1. Gateway Connection — connect to your OpenClaw gateway
|
||||
2. Agent Identity — set your agent's display name
|
||||
3. Access Mode — local, Tailscale, LAN, or custom
|
||||
3. Access Mode — local, Tailscale IP, Tailscale Serve, LAN, or custom
|
||||
4. Authentication — password protection (network mode)
|
||||
5. TTS Configuration — optional text-to-speech API keys
|
||||
6. Advanced Settings — custom file paths (most users skip this)
|
||||
|
||||
Examples:
|
||||
npm run setup # Interactive setup
|
||||
npm run setup -- --check # Validate existing config
|
||||
npm run setup -- --defaults # Auto-configure with detected values
|
||||
npm run setup # Interactive setup
|
||||
npm run setup -- --check # Validate existing config
|
||||
npm run setup -- --defaults # Auto-configure with detected values
|
||||
npm run setup -- --defaults --access-mode tailscale-serve
|
||||
`);
|
||||
return;
|
||||
}
|
||||
|
|
@@ -254,7 +271,7 @@ async function main(): Promise<void> {
|
||||
// --defaults mode: non-interactive
|
||||
if (isDefaults) {
|
||||
await runDefaults(existing);
|
||||
await runDefaults(existing, prereqs);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@@ -300,6 +317,7 @@ async function main(): Promise<void> {
// When invoked from install.sh, build is already done — skip misleading "next steps"
|
||||
if (!process.env.NERVE_INSTALLER) {
|
||||
printNextSteps(config);
|
||||
printDeploymentGuides();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -321,15 +339,19 @@ async function collectInteractive(
// Auto-detect gateway config
|
||||
const detected = detectGatewayConfig();
|
||||
const envToken = getEnvGatewayToken();
|
||||
const tokenChoice = chooseSetupGatewayToken({
|
||||
existingToken: existing.GATEWAY_TOKEN,
|
||||
detectedToken: detected.token,
|
||||
envToken,
|
||||
});
|
||||
|
||||
// Determine default token (priority: existing > env > detected)
|
||||
const defaultToken = existing.GATEWAY_TOKEN || envToken || detected.token || '';
|
||||
const defaultToken = tokenChoice.token || '';
|
||||
const defaultUrl = existing.GATEWAY_URL || detected.url || DEFAULTS.GATEWAY_URL;
|
||||
|
||||
if (detected.token && !existing.GATEWAY_TOKEN) {
|
||||
success('Auto-detected gateway token from ~/.openclaw/openclaw.json');
|
||||
if (tokenChoice.source === 'detected') {
|
||||
success('Auto-detected gateway token from local gateway config');
|
||||
}
|
||||
if (envToken && !existing.GATEWAY_TOKEN && !detected.token) {
|
||||
if (tokenChoice.source === 'env') {
|
||||
success('Found OPENCLAW_GATEWAY_TOKEN in environment');
|
||||
}
|
||||
|
||||
|
|
@@ -345,9 +367,10 @@ async function collectInteractive(
|
||||
// If we have an auto-detected token, offer to use it
|
||||
if (defaultToken && !existing.GATEWAY_TOKEN) {
|
||||
const tokenLabel = tokenChoice.source === 'env' ? 'environment token' : 'detected token';
|
||||
const useDetected = await confirm({
|
||||
theme: promptTheme,
|
||||
message: `Use detected token (${defaultToken})?`,
|
||||
message: `Use ${tokenLabel} (${defaultToken})?`,
|
||||
default: true,
|
||||
});
|
||||
if (useDetected) {
|
||||
|
|
@@ -397,22 +420,14 @@ async function collectInteractive(
  const rail = ` \x1b[2m│\x1b[0m`;
  const testPrefix = process.env.NERVE_INSTALLER ? `${rail} ` : ' ';
  process.stdout.write(`${testPrefix}Testing connection... `);
  const gwTest = await testGatewayConnection(config.GATEWAY_URL!);
  const gwTest = await testGatewayConnection(config.GATEWAY_URL!, config.GATEWAY_TOKEN);
  if (gwTest.ok) {
    console.log(`\x1b[32m✓\x1b[0m ${gwTest.message}`);
  } else {
    console.log(`\x1b[31m✗\x1b[0m ${gwTest.message}`);
    dim(' Start it with: openclaw gateway start');
    const proceed = await confirm({
      theme: promptTheme,
      message: 'Gateway is unreachable. Continue with this URL anyway?',
      default: false,
    });
    if (!proceed) {
      console.log('\n Start your gateway with: \x1b[36mopenclaw gateway start\x1b[0m');
      console.log(' Then re-run: \x1b[36mnpm run setup\x1b[0m\n');
      process.exit(1);
    }
    console.log('\n Setup could not verify your gateway token. Fix the gateway or token, then re-run setup.\n');
    process.exit(1);
  }

  // ── 2/5: Agent Identity ──────────────────────────────────────────

@@ -422,29 +437,30 @@ async function collectInteractive(
  config.AGENT_NAME = await input({
    theme: promptTheme,
    message: 'Agent display name',
    default: existing.AGENT_NAME || DEFAULTS.AGENT_NAME,
    default: detectAgentDisplayNameDefault(existing.AGENT_NAME, DEFAULTS.AGENT_NAME),
  });

  // ── 3/5: Access Mode ──────────────────────────────────────────────

  section(3, TOTAL_SECTIONS, 'How will you access Nerve?');

  // Build access mode choices dynamically
  type AccessMode = 'local' | 'tailscale' | 'network' | 'custom';
  const accessChoices: { name: string; value: AccessMode; description: string }[] = [
    { name: 'This machine only (localhost)', value: 'local', description: 'Safest — only accessible from this computer' },
  ];
  if (prereqs.tailscaleIp) {
    accessChoices.push({
      name: `Via Tailscale (${prereqs.tailscaleIp})`,
      value: 'tailscale',
      description: 'Access from any device on your Tailscale network — secure, no port forwarding needed',
    });
  }
  accessChoices.push(
    { name: 'From other devices on my network', value: 'network', description: 'Opens to LAN — you may need to configure your firewall' },
    { name: 'This machine only (localhost)', value: 'local', description: 'Safest, only accessible from this computer' },
    {
      name: prereqs.tailscale.ipv4 ? `Via Tailscale tailnet IP (${prereqs.tailscale.ipv4})` : 'Via Tailscale tailnet IP',
      value: 'tailscale-ip',
      description: prereqs.tailscale.installed
        ? 'Direct access from other devices on your tailnet'
        : 'Requires Tailscale on this machine',
    },
    {
      name: prereqs.tailscale.dnsName ? `Via Tailscale Serve (${prereqs.tailscale.dnsName})` : 'Via Tailscale Serve',
      value: 'tailscale-serve',
      description: 'Private by default, Nerve stays on 127.0.0.1 and is exposed through *.ts.net',
    },
    { name: 'From other devices on my network', value: 'network', description: 'Opens to LAN, you may need to configure your firewall' },
    { name: 'Custom setup (I know what I\'m doing)', value: 'custom', description: 'Manual port, bind address, HTTPS, CORS configuration' },
  );
  ];

  const accessMode = await select<AccessMode>({
    theme: promptTheme,
@@ -452,11 +468,20 @@ async function collectInteractive(
    choices: accessChoices,
  });

  const port = existing.PORT || DEFAULTS.PORT;
  let port = existing.PORT || DEFAULTS.PORT;
  config.PORT = port;
  let sslPort: string | undefined;
  let accessPlan = buildAccessPlan({ profile: 'local', port });
  let tailscaleState: TailscaleState = prereqs.tailscale;

  // Helper: offer HTTPS setup for non-localhost access modes (voice input needs secure context)
  async function offerHttpsSetup(remoteIp: string): Promise<void> {
  function printFollowUpSteps(steps: string[]): void {
    if (steps.length === 0) return;
    for (const step of steps) {
      dim(` • ${step}`);
    }
  }

  async function offerHttpsSetup(remoteHost: string): Promise<string | undefined> {
    console.log('');
    warn('Voice input (microphone) requires HTTPS on non-localhost connections.');
    dim('Browsers block microphone access over plain HTTP for security.');
@@ -468,64 +493,197 @@ async function collectInteractive(
      default: true,
    });

    if (enableHttps) {
      let certsReady = false;
      if (prereqs.opensslOk) {
        const certResult = generateSelfSignedCert(PROJECT_ROOT);
        if (certResult.ok) {
          success(certResult.message);
          certsReady = true;
        } else {
          fail(certResult.message);
        }
      } else {
        warn('openssl not found — cannot generate self-signed certificate');
        dim('Install openssl and run: mkdir -p certs && openssl req -x509 -newkey rsa:2048 \\');
        dim(' -keyout certs/key.pem -out certs/cert.pem -days 365 -nodes -subj "/CN=localhost"');
      }
    if (!enableHttps) {
      dim('Voice input will only work when accessing Nerve from localhost');
      return undefined;
    }

      if (certsReady) {
        const sslPort = existing.SSL_PORT || DEFAULTS.SSL_PORT;
        config.SSL_PORT = sslPort;
        // Add HTTPS origins to CORS and CSP
        const httpsUrl = `https://${remoteIp}:${sslPort}`;
        const existingOrigins = config.ALLOWED_ORIGINS || '';
        config.ALLOWED_ORIGINS = existingOrigins ? `${existingOrigins},${httpsUrl}` : httpsUrl;
        const existingCsp = config.CSP_CONNECT_EXTRA || '';
        config.CSP_CONNECT_EXTRA = existingCsp
          ? `${existingCsp} ${httpsUrl} wss://${remoteIp}:${sslPort}`
          : `${httpsUrl} wss://${remoteIp}:${sslPort}`;
        success(`HTTPS will be available at ${httpsUrl}`);
        dim('Note: Self-signed certs will show a browser warning on first visit — click "Advanced" → "Proceed"');
    let certsReady = false;
    if (prereqs.opensslOk) {
      const certResult = generateSelfSignedCert(PROJECT_ROOT);
      if (certResult.ok) {
        success(certResult.message);
        certsReady = true;
      } else {
        warn('HTTPS disabled — voice input will only work on localhost');
        fail(certResult.message);
      }
    } else {
      dim('Voice input will only work when accessing Nerve from localhost');
      warn('openssl not found, cannot generate self-signed certificate');
      dim('Install openssl and run: mkdir -p certs && openssl req -x509 -newkey rsa:2048 \\');
      dim(' -keyout certs/key.pem -out certs/cert.pem -days 365 -nodes -subj "/CN=localhost"');
    }

    if (!certsReady) {
      warn('HTTPS disabled, voice input will only work on localhost');
      return undefined;
    }

    const selectedSslPort = await input({
      theme: promptTheme,
      message: 'SSL port',
      default: existing.SSL_PORT || DEFAULTS.SSL_PORT,
      validate: (val) => {
        const n = parseInt(val, 10);
        if (!isValidPort(n)) return 'Please enter a valid port (1–65535)';
        if (n === parseInt(port, 10)) return 'SSL port must differ from HTTP port';
        return true;
      },
    });

    success(`HTTPS will be available at https://${remoteHost}:${selectedSslPort}`);
    dim('Note: Self-signed certs will show a browser warning on first visit, click "Advanced" then "Proceed"');
    return selectedSslPort;
  }

  async function ensureInteractiveTailscale(): Promise<TailscaleState> {
    let state = tailscaleState;

    if (!state.installed) {
      console.log('');
      warn('Tailscale is not installed on this machine.');
      dim('Install it first, then complete browser login with: tailscale up');
      dim('Download: https://tailscale.com/download/linux');
      console.log('\n Re-run: \x1b[36mnpm run setup\x1b[0m\n');
      process.exit(1);
    }

    if (state.authenticated) {
      return state;
    }

    console.log('');
    warn('Tailscale is installed but not connected.');
    dim('In another terminal, start the browser URL login flow with: tailscale up');
    console.log('');

    const nextAction = await select<'wait' | 'exit'>({
      theme: promptTheme,
      message: 'How should setup continue?',
      choices: [
        { name: 'Wait and continue automatically once Tailscale is connected', value: 'wait' },
        { name: 'Exit and re-run setup later', value: 'exit' },
      ],
    });

    if (nextAction === 'exit') {
      console.log('\n Finish login with: \x1b[36mtailscale up\x1b[0m');
      console.log(' Then re-run: \x1b[36mnpm run setup\x1b[0m\n');
      process.exit(1);
    }

    process.stdout.write(' Waiting for Tailscale login... ');
    for (let attempt = 0; attempt < 60; attempt++) {
      await sleep(2000);
      state = getTailscaleState();
      if (state.authenticated) {
        tailscaleState = state;
        console.log(`\x1b[32m✓\x1b[0m ${state.dnsName || state.ipv4 || 'Connected'}`);
        return state;
      }
    }

    console.log('\x1b[31m✗\x1b[0m Timed out waiting for Tailscale login');
    dim('Finish login with: tailscale up');
    console.log('');
    process.exit(1);
  }

  if (accessMode === 'local') {
    config.HOST = '127.0.0.1';
    accessPlan = buildAccessPlan({ profile: 'local', port });
    success(`Nerve will be available at http://localhost:${port}`);

  } else if (accessMode === 'tailscale') {
    config.HOST = '0.0.0.0';
    const tsIp = prereqs.tailscaleIp!;
    const tsUrl = `http://${tsIp}:${port}`;
    config.ALLOWED_ORIGINS = tsUrl;
    config.WS_ALLOWED_HOSTS = tsIp;
    config.CSP_CONNECT_EXTRA = `${tsUrl} ws://${tsIp}:${port}`;
    success(`Nerve will be available at ${tsUrl}`);
  } else if (accessMode === 'tailscale-ip') {
    tailscaleState = await ensureInteractiveTailscale();
    accessPlan = buildAccessPlan({ profile: 'tailscale-ip', port, tailscale: tailscaleState });
    if (accessPlan.followUpSteps.length > 0) {
      warn('Tailscale tailnet IP access is not ready yet.');
      printFollowUpSteps(accessPlan.followUpSteps);
      console.log('');
      process.exit(1);
    }
    success(`Nerve will be available at ${accessPlan.browserOrigins[0]}`);
    dim('Accessible from any device on your Tailscale network');
    await offerHttpsSetup(tsIp);

  } else if (accessMode === 'tailscale-serve') {
    tailscaleState = await ensureInteractiveTailscale();

    console.log('');
    const configureServe = await confirm({
      theme: promptTheme,
      message: `Configure Tailscale Serve now? (tailscale serve --bg http://127.0.0.1:${port})`,
      default: true,
    });

    if (configureServe) {
      try {
        execSync(`tailscale serve --bg http://127.0.0.1:${port}`, { stdio: 'pipe', timeout: 15000, encoding: 'utf8' });
        success(`Tailscale Serve configured for http://127.0.0.1:${port}`);
      } catch (err) {
        const execErr = err as {
          stderr?: string | Buffer;
          message?: string;
          status?: number;
          signal?: string | null;
        };
        const stderr = typeof execErr.stderr === 'string'
          ? execErr.stderr.trim()
          : Buffer.isBuffer(execErr.stderr)
            ? execErr.stderr.toString('utf8').trim()
            : '';
        const status = typeof execErr.status === 'number'
          ? ` (exit ${execErr.status})`
          : execErr.signal
            ? ` (signal ${execErr.signal})`
            : '';
        const detail = stderr || execErr.message || String(err);
        const detailWithStatus = status && !detail.includes(status.trim()) ? `${detail}${status}` : detail;
        warn(`Failed to configure Tailscale Serve automatically: ${detailWithStatus}`);
      }
    } else {
      dim(`Run later: tailscale serve --bg http://127.0.0.1:${port}`);
    }

    tailscaleState = getTailscaleState();
    accessPlan = buildAccessPlan({ profile: 'tailscale-serve', port, tailscale: tailscaleState });

    if (accessPlan.followUpSteps.length > 0) {
      console.log('');
      warn('Could not confirm a usable Tailscale Serve hostname.');
      printFollowUpSteps(accessPlan.followUpSteps);
      console.log('');

      const fallback = await select<'tailscale-ip' | 'stop'>({
        theme: promptTheme,
        message: 'How should setup continue?',
        choices: [
          { name: 'Continue with tailnet IP access instead', value: 'tailscale-ip' },
          { name: 'Stop setup and finish Tailscale Serve manually', value: 'stop' },
        ],
      });

      if (fallback === 'stop') {
        console.log('\n Finish Tailscale Serve setup, then re-run: \x1b[36mnpm run setup\x1b[0m\n');
        process.exit(1);
      }

      accessPlan = buildAccessPlan({ profile: 'tailscale-ip', port, tailscale: tailscaleState });
      if (accessPlan.followUpSteps.length > 0) {
        warn('Tailnet IP fallback is also unavailable.');
        printFollowUpSteps(accessPlan.followUpSteps);
        console.log('');
        process.exit(1);
      }

      success(`Falling back to tailnet IP access at ${accessPlan.browserOrigins[0]}`);
    } else {
      success(`Nerve will be available at ${accessPlan.browserOrigins[0]}`);
      dim('Nerve will stay private on 127.0.0.1 and be reached through Tailscale Serve');
    }

  } else if (accessMode === 'network') {
    config.HOST = '0.0.0.0';
    // Auto-detect LAN IP
    const detectedIp = detectPrimaryIpv4();
    const lanIp = await input({
      theme: promptTheme,
      theme: promptTheme,
      message: 'Your LAN IP address',
      default: detectedIp || '',
      validate: (val) => {
@@ -535,19 +693,15 @@ async function collectInteractive(
      },
    });
    const ip = lanIp.trim();
    const lanUrl = `http://${ip}:${port}`;
    config.ALLOWED_ORIGINS = lanUrl;
    config.WS_ALLOWED_HOSTS = ip;
    config.CSP_CONNECT_EXTRA = `${lanUrl} ws://${ip}:${port}`;
    success(`Nerve will be available at ${lanUrl}`);
    dim('Make sure your firewall allows traffic on port ' + port);
    sslPort = await offerHttpsSetup(ip);
    accessPlan = buildAccessPlan({ profile: 'network', port, remoteHost: ip, sslPort });
    success(`Nerve will be available at http://${ip}:${port}`);
    dim(`Make sure your firewall allows traffic on port ${port}`);
    dim('Need access from multiple devices? Add more origins to ALLOWED_ORIGINS in .env');
    await offerHttpsSetup(ip);

  } else {
    // Custom — full manual control
    const portStr = await input({
      theme: promptTheme,
    port = await input({
      theme: promptTheme,
      message: 'HTTP port',
      default: existing.PORT || DEFAULTS.PORT,
      validate: (val) => {
@@ -556,74 +710,35 @@ async function collectInteractive(
        return true;
      },
    });
    config.PORT = portStr;
    config.PORT = port;

    config.HOST = await input({
      theme: promptTheme,
    const customHost = await input({
      theme: promptTheme,
      message: 'Bind address (127.0.0.1 = local only, 0.0.0.0 = all interfaces)',
      default: existing.HOST || DEFAULTS.HOST,
    });

    // HTTPS
    const enableHttps = await confirm({
      theme: promptTheme,
      message: 'Enable HTTPS? (needed for microphone access over network)',
      default: false,
    });

    if (enableHttps) {
      let certsReady = false;
      if (prereqs.opensslOk) {
        const certResult = generateSelfSignedCert(PROJECT_ROOT);
        if (certResult.ok) {
          success(certResult.message);
          certsReady = true;
        } else {
          fail(certResult.message);
        }
      } else {
        warn('openssl not found — cannot generate self-signed certificate');
        dim('Install openssl and run: mkdir -p certs && openssl req -x509 -newkey rsa:2048 \\');
        dim(' -keyout certs/key.pem -out certs/cert.pem -days 365 -nodes -subj "/CN=localhost"');
      }

      if (certsReady) {
        config.SSL_PORT = await input({
          theme: promptTheme,
          message: 'SSL port',
          default: existing.SSL_PORT || DEFAULTS.SSL_PORT,
          validate: (val) => {
            const n = parseInt(val, 10);
            if (!isValidPort(n)) return 'Please enter a valid port (1–65535)';
            if (n === parseInt(config.PORT || DEFAULTS.PORT, 10)) return 'SSL port must differ from HTTP port';
            return true;
          },
        });
        // Add HTTPS/WSS origins to CORS and CSP when bound to a non-loopback address
        const customHost = config.HOST || DEFAULTS.HOST;
        if (customHost !== '127.0.0.1' && customHost !== 'localhost' && customHost !== '::1') {
          const httpsUrl = `https://${customHost}:${config.SSL_PORT}`;
          config.ALLOWED_ORIGINS = config.ALLOWED_ORIGINS
            ? `${config.ALLOWED_ORIGINS},${httpsUrl}`
            : httpsUrl;
          config.CSP_CONNECT_EXTRA = config.CSP_CONNECT_EXTRA
            ? `${config.CSP_CONNECT_EXTRA} ${httpsUrl} wss://${customHost}:${config.SSL_PORT}`
            : `${httpsUrl} wss://${customHost}:${config.SSL_PORT}`;
        }
      } else {
        warn('HTTPS disabled — no certificates available');
        dim('You can generate certs manually and add SSL_PORT to .env later');
      }
    if (!isLoopback(customHost)) {
      sslPort = await offerHttpsSetup(customHost);
    } else {
      delete config.SSL_PORT;
    }

    accessPlan = buildAccessPlan({ profile: 'custom', port, remoteHost: customHost, sslPort });
    success(`Nerve will be available at http://${customHost}:${port}`);
  }

  delete config.ALLOWED_ORIGINS;
  delete config.CSP_CONNECT_EXTRA;
  delete config.WS_ALLOWED_HOSTS;
  delete config.SSL_PORT;
  Object.assign(config, applyAccessPlanToConfig(config, accessPlan));
  if (sslPort) config.SSL_PORT = sslPort;

  // ── Gateway config updates ─────────────────────────────────────────

  const { nerveOrigin, nerveHttpsOrigin } = computeGatewayOrigins(config, accessMode);

  const neededChanges = detectNeededConfigChanges({
    nerveOrigin,
    nerveHttpsOrigin,
    allowedOrigins: accessPlan.gatewayAllowedOrigins,
    gatewayToken: config.GATEWAY_TOKEN,
  });

@@ -654,11 +769,7 @@ async function collectInteractive(
      } else if (change.id === 'pre-pair') {
        dim(' • Pre-pair: run `openclaw devices approve` after starting Nerve');
      } else if (change.id === 'tools-allow') {
        dim(' • HTTP tools: add "cron" and "gateway" to gateway.tools.allow in ~/.openclaw/openclaw.json');
      } else if (change.id === 'allowed-origins' && nerveOrigin) {
        dim(` • Origins: add ${nerveOrigin} to gateway.controlUi.allowedOrigins in ~/.openclaw/openclaw.json`);
      } else if (change.id === 'allowed-origins-https' && nerveHttpsOrigin) {
        dim(` • Origins: add ${nerveHttpsOrigin} to gateway.controlUi.allowedOrigins in ~/.openclaw/openclaw.json`);
        dim(' • HTTP tools: add "cron", "gateway", and "sessions_spawn" to gateway.tools.allow in ~/.openclaw/openclaw.json');
      } else if (change.id.startsWith('allowed-origins')) {
        dim(' • Origins: add the required origin(s) to gateway.controlUi.allowedOrigins in ~/.openclaw/openclaw.json');
      }
@@ -919,13 +1030,14 @@ async function runCheck(config: EnvConfig): Promise<void> {
  if (isValidUrl(gwUrl)) {
    success(`GATEWAY_URL is valid: ${gwUrl}`);

    // Test connectivity
    // Test connectivity and token validity
    process.stdout.write(' Testing gateway connection... ');
    const gwTest = await testGatewayConnection(gwUrl);
    const gwTest = await testGatewayConnection(gwUrl, config.GATEWAY_TOKEN);
    if (gwTest.ok) {
      console.log(`\x1b[32m✓\x1b[0m ${gwTest.message}`);
    } else {
      console.log(`\x1b[33m⚠\x1b[0m ${gwTest.message}`);
      console.log(`\x1b[31m✗\x1b[0m ${gwTest.message}`);
      errors++;
    }
  } else {
    fail(`GATEWAY_URL is invalid: ${gwUrl}`);
@@ -1004,22 +1116,32 @@ async function runCheck(config: EnvConfig): Promise<void> {

// ── --defaults mode ──────────────────────────────────────────────────

async function runDefaults(existing: EnvConfig): Promise<void> {
async function runDefaults(existing: EnvConfig, prereqs: PrereqResult): Promise<void> {
  console.log('');
  info('Non-interactive mode — using defaults where possible');
  console.log('');

  const config: EnvConfig = { ...existing };
  const followUpSteps: string[] = [];

  function appendFollowUp(steps: string[]): void {
    for (const step of steps) {
      if (step && !followUpSteps.includes(step)) followUpSteps.push(step);
    }
  }

  // Try to auto-detect gateway token
  if (!config.GATEWAY_TOKEN) {
    const detected = detectGatewayConfig();
    const envToken = getEnvGatewayToken();
    const token = envToken || detected.token;
    const tokenChoice = chooseSetupGatewayToken({
      detectedToken: detected.token,
      envToken,
    });

    if (token) {
      config.GATEWAY_TOKEN = token;
      success('Auto-detected gateway token');
    if (tokenChoice.token) {
      config.GATEWAY_TOKEN = tokenChoice.token;
      success(`Auto-detected gateway token${tokenChoice.source === 'env' ? ' from environment' : ''}`);
    } else {
      fail('GATEWAY_TOKEN is required but could not be auto-detected');
      console.log(' Set OPENCLAW_GATEWAY_TOKEN in your environment, or run setup interactively.');
@@ -1028,12 +1150,47 @@ async function runDefaults(existing: EnvConfig): Promise<void> {
    }
  }

  // Apply defaults for everything else
  if (!config.GATEWAY_URL) config.GATEWAY_URL = DEFAULTS.GATEWAY_URL;
  if (!config.AGENT_NAME) config.AGENT_NAME = DEFAULTS.AGENT_NAME;
  if (!config.AGENT_NAME) config.AGENT_NAME = detectAgentDisplayNameDefault(undefined, DEFAULTS.AGENT_NAME);
  if (!config.PORT) config.PORT = DEFAULTS.PORT;
  if (!config.HOST) config.HOST = DEFAULTS.HOST;

  if (requestedAccessMode) {
    let accessPlan = buildAccessPlan({
      profile: requestedAccessMode as InstallerAccessProfile,
      port: config.PORT,
      sslPort: config.SSL_PORT,
      remoteHost: !isLoopback(config.HOST || '') ? config.HOST : detectPrimaryIpv4() || config.HOST || DEFAULTS.HOST,
      tailscale: prereqs.tailscale,
    });

    if (requestedAccessMode === 'tailscale-serve' && accessPlan.followUpSteps.length > 0) {
      warn('Tailscale Serve could not be confirmed in non-interactive mode. Falling back to tailnet IP support only.');
      appendFollowUp(accessPlan.followUpSteps);
      accessPlan = buildAccessPlan({
        profile: 'tailscale-ip',
        port: config.PORT,
        tailscale: prereqs.tailscale,
      });
    }

    if ((requestedAccessMode === 'tailscale-ip' || requestedAccessMode === 'tailscale-serve') && accessPlan.followUpSteps.length > 0) {
      warn('Requested Tailscale access mode is not ready in non-interactive mode. Keeping localhost-only access for now.');
      appendFollowUp(accessPlan.followUpSteps);
      accessPlan = buildAccessPlan({ profile: 'local', port: config.PORT });
    }

    delete config.ALLOWED_ORIGINS;
    delete config.CSP_CONNECT_EXTRA;
    delete config.WS_ALLOWED_HOSTS;
    Object.assign(config, applyAccessPlanToConfig(config, accessPlan));

    success(`Using access mode: ${accessPlan.profile}`);
    if (accessPlan.browserOrigins[0]) {
      dim(`Primary origin: ${accessPlan.browserOrigins[0]}`);
    }
  }

  // Auth: auto-enable when network-exposed with gateway token, generate session secret
  if (!config.NERVE_SESSION_SECRET) {
    config.NERVE_SESSION_SECRET = randomBytes(32).toString('hex');
@@ -1047,7 +1204,17 @@ async function runDefaults(existing: EnvConfig): Promise<void> {
    }
  }

  // Write
  process.stdout.write(' Testing gateway connection... ');
  const gwTest = await testGatewayConnection(config.GATEWAY_URL!, config.GATEWAY_TOKEN);
  if (gwTest.ok) {
    console.log(`\x1b[32m✓\x1b[0m ${gwTest.message}`);
  } else {
    console.log(`\x1b[31m✗\x1b[0m ${gwTest.message}`);
    fail('Refusing to write .env because gateway auth could not be verified.');
    console.log('');
    process.exit(1);
  }

  if (existsSync(ENV_PATH)) {
    const backupPath = backupExistingEnv(ENV_PATH);
    info(`Previous config backed up to ${backupPath.replace(PROJECT_ROOT + '/', '')}`);
@@ -1056,18 +1223,15 @@ async function runDefaults(existing: EnvConfig): Promise<void> {

  success('Configuration written to .env');

  // Install bundled agent skills
  installBundledSkills();

  printSummary(config);

  // Apply all gateway config patches silently (non-interactive = implicit consent)
  const defaultsAccessMode = isLoopback(config.HOST || '') ? 'local' : 'network';
  const { nerveOrigin, nerveHttpsOrigin } = computeGatewayOrigins(config, defaultsAccessMode);
  if (shouldPrintDeploymentGuides({ invokedFromInstaller: process.env.NERVE_INSTALLER === '1', defaultsMode: true })) {
    printDeploymentGuides();
  }

  const changes = detectNeededConfigChanges({
    nerveOrigin,
    nerveHttpsOrigin,
    allowedOrigins: config.ALLOWED_ORIGINS?.split(',').map(origin => origin.trim()).filter(Boolean),
    gatewayToken: config.GATEWAY_TOKEN,
  });

@@ -1075,6 +1239,14 @@ async function runDefaults(existing: EnvConfig): Promise<void> {
    await applyConfigChanges(changes);
  }

  if (followUpSteps.length > 0) {
    console.log('');
    warn('Additional follow-up is required:');
    for (const step of followUpSteps) {
      dim(` • ${step}`);
    }
  }

  console.log('');
}

@@ -45,7 +45,10 @@ import skillsRoutes from './routes/skills.js';
import filesRoutes from './routes/files.js';
import voicePhrasesRoutes from './routes/voice-phrases.js';
import fileBrowserRoutes from './routes/file-browser.js';
import uploadConfigRoutes from './routes/upload-config.js';
import uploadReferenceRoutes from './routes/upload-reference.js';
import kanbanRoutes from './routes/kanban.js';
import beadsRoutes from './routes/beads.js';
// activity routes removed — tab dropped from workspace panel

const app = new Hono();
@@ -75,7 +78,7 @@ app.use(
app.use('*', authMiddleware);
// Apply compression to all routes except SSE (compression buffers chunks and breaks streaming)
app.use('*', async (c, next) => {
  if (c.req.path === '/api/events') return next();
  if (c.req.path === '/api/events' || c.req.path === '/api/files/raw') return next();
  return compress()(c, next);
});
app.use('*', cacheHeaders);
@@ -88,7 +91,7 @@ const routes = [
  codexLimitsRoutes, claudeCodeLimitsRoutes, versionRoutes, versionCheckRoutes,
  gatewayRoutes, connectDefaultsRoutes,
  workspaceRoutes, cronsRoutes, sessionsRoutes, skillsRoutes, filesRoutes, apiKeysRoutes,
  voicePhrasesRoutes, fileBrowserRoutes, channelsRoutes, kanbanRoutes,
  voicePhrasesRoutes, fileBrowserRoutes, uploadConfigRoutes, uploadReferenceRoutes, channelsRoutes, kanbanRoutes, beadsRoutes,
];
for (const route of routes) app.route('/', route);

120 server/lib/agent-workspace.test.ts Normal file
@@ -0,0 +1,120 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import fs from 'node:fs/promises';
import os from 'node:os';
import path from 'node:path';

describe('agent-workspace', () => {
  let homeDir: string;
  let memoryPath: string;

  beforeEach(async () => {
    vi.resetModules();
    homeDir = await fs.mkdtemp(path.join(os.tmpdir(), 'agent-workspace-test-'));
    memoryPath = path.join(homeDir, '.openclaw', 'workspace', 'MEMORY.md');
  });

  afterEach(async () => {
    vi.restoreAllMocks();
    await fs.rm(homeDir, { recursive: true, force: true });
  });

  async function loadModule() {
    vi.doMock('./config.js', () => ({
      config: {
        home: homeDir,
        memoryPath,
      },
    }));

    return import('./agent-workspace.js');
  }

  it('resolves main to the default workspace root', async () => {
    const { resolveAgentWorkspace } = await loadModule();

    expect(resolveAgentWorkspace()).toEqual({
      agentId: 'main',
      workspaceRoot: path.dirname(memoryPath),
      memoryPath,
      memoryDir: path.join(path.dirname(memoryPath), 'memory'),
    });

    expect(resolveAgentWorkspace(' ')).toEqual({
      agentId: 'main',
      workspaceRoot: path.dirname(memoryPath),
      memoryPath,
      memoryDir: path.join(path.dirname(memoryPath), 'memory'),
    });
  });

  it('falls back to a per-agent workspace for non-main agents', async () => {
    const { resolveAgentWorkspace } = await loadModule();

    expect(resolveAgentWorkspace('research')).toEqual({
      agentId: 'research',
      workspaceRoot: path.join(homeDir, '.openclaw', 'workspace-research'),
      memoryPath: path.join(homeDir, '.openclaw', 'workspace-research', 'MEMORY.md'),
      memoryDir: path.join(homeDir, '.openclaw', 'workspace-research', 'memory'),
    });
  });

  it('prefers explicitly configured agent workspaces from openclaw.json', async () => {
    const configDir = path.join(homeDir, '.openclaw');
    await fs.mkdir(configDir, { recursive: true });
    await fs.writeFile(path.join(configDir, 'openclaw.json'), JSON.stringify({
      agents: {
        defaults: { workspace: '/managed/workspaces' },
        list: [
          { id: 'research', workspace: '/vaults/research' },
        ],
      },
    }, null, 2));

    const { resolveAgentWorkspace } = await loadModule();

    expect(resolveAgentWorkspace('research')).toEqual({
      agentId: 'research',
      workspaceRoot: '/vaults/research',
      memoryPath: '/vaults/research/MEMORY.md',
      memoryDir: '/vaults/research/memory',
    });
  });

  it('uses agents.defaults.workspace for new non-main agents when configured', async () => {
    const configDir = path.join(homeDir, '.openclaw');
    await fs.mkdir(configDir, { recursive: true });
    await fs.writeFile(path.join(configDir, 'openclaw.json'), JSON.stringify({
      agents: {
        defaults: { workspace: '/managed/workspaces' },
      },
    }, null, 2));

    const { resolveAgentWorkspace } = await loadModule();

    expect(resolveAgentWorkspace('research')).toEqual({
      agentId: 'research',
      workspaceRoot: '/managed/workspaces/research',
      memoryPath: '/managed/workspaces/research/MEMORY.md',
      memoryDir: '/managed/workspaces/research/memory',
    });
  });

  it('returns workspaceRoot, memoryPath, and memoryDir together', async () => {
    const { resolveAgentWorkspace } = await loadModule();

    const workspace = resolveAgentWorkspace('research');

    expect(workspace.workspaceRoot).toBe(path.join(homeDir, '.openclaw', 'workspace-research'));
    expect(workspace.memoryPath).toBe(path.join(workspace.workspaceRoot, 'MEMORY.md'));
    expect(workspace.memoryDir).toBe(path.join(workspace.workspaceRoot, 'memory'));
  });

  it('rejects invalid agent ids', async () => {
    const { resolveAgentWorkspace } = await loadModule();

    expect(() => resolveAgentWorkspace('../oops')).toThrow(/agent id/i);
    expect(() => resolveAgentWorkspace('bad/name')).toThrow(/agent id/i);
    expect(() => resolveAgentWorkspace('two words')).toThrow(/agent id/i);
    expect(() => resolveAgentWorkspace('bad_agent')).toThrow(/agent id/i);
  });
});
52 server/lib/agent-workspace.ts Normal file
@@ -0,0 +1,52 @@
import path from 'node:path';
import { config } from './config.js';
import { buildDefaultAgentWorkspacePath, getConfiguredAgentWorkspace } from './openclaw-config.js';

export interface AgentWorkspace {
  agentId: string;
  workspaceRoot: string;
  memoryPath: string;
  memoryDir: string;
}

const AGENT_ID_PATTERN = /^[a-z0-9](?:[a-z0-9-]{0,62}[a-z0-9])?$/;

export class InvalidAgentIdError extends Error {
  constructor(agentId: string) {
    super(`Invalid agent id: ${agentId}`);
    this.name = 'InvalidAgentIdError';
  }
}

export function normalizeAgentId(agentId?: string): string {
  const normalized = (agentId || '').trim();
  if (!normalized) return 'main';
  if (normalized === 'main') return 'main';
  if (!AGENT_ID_PATTERN.test(normalized)) {
    throw new InvalidAgentIdError(normalized);
  }
  return normalized;
}

export function resolveAgentWorkspace(agentId?: string): AgentWorkspace {
  const normalizedAgentId = normalizeAgentId(agentId);

  if (normalizedAgentId === 'main') {
    const workspaceRoot = path.dirname(config.memoryPath);
    return {
      agentId: 'main',
      workspaceRoot,
      memoryPath: config.memoryPath,
      memoryDir: config.memoryDir || path.join(workspaceRoot, 'memory'),
    };
  }

  const workspaceRoot = getConfiguredAgentWorkspace(normalizedAgentId)
    || buildDefaultAgentWorkspacePath(normalizedAgentId);
  return {
    agentId: normalizedAgentId,
    workspaceRoot,
    memoryPath: path.join(workspaceRoot, 'MEMORY.md'),
    memoryDir: path.join(workspaceRoot, 'memory'),
  };
}
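A minimal usage sketch for the new `resolveAgentWorkspace` helper above; it is not part of this changeset. The `workspaceRootFor` wrapper and its fall-back-to-`main` behavior are illustrative assumptions; only `resolveAgentWorkspace` and `InvalidAgentIdError` come from the module introduced here.

```ts
// Illustrative only: resolve a per-agent workspace root, falling back to the
// default 'main' workspace when the caller supplies an invalid agent id.
import { InvalidAgentIdError, resolveAgentWorkspace } from './agent-workspace.js';

export function workspaceRootFor(agentId?: string): string {
  try {
    // resolveAgentWorkspace returns { agentId, workspaceRoot, memoryPath, memoryDir }.
    return resolveAgentWorkspace(agentId).workspaceRoot;
  } catch (error) {
    if (error instanceof InvalidAgentIdError) {
      // Ids like '../oops' or 'bad_agent' are rejected; use the main workspace instead.
      return resolveAgentWorkspace('main').workspaceRoot;
    }
    throw error;
  }
}
```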
234
server/lib/beads.test.ts
Normal file
234
server/lib/beads.test.ts
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
import { mkdtempSync, mkdirSync, rmSync, symlinkSync } from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
const WORKSPACE_ROOT = path.resolve(path.sep, 'workspace');
|
||||
const RESEARCH_WORKSPACE_ROOT = path.resolve(path.sep, 'workspace-research');
|
||||
const REPO_ROOT = path.join(WORKSPACE_ROOT, 'repo', 'nerve');
|
||||
const OUTSIDE_REPO_ROOT = path.resolve(path.sep, 'repos', 'demo');
|
||||
const PROMISIFY_CUSTOM = Symbol.for('nodejs.util.promisify.custom');
|
||||
|
||||
const { execFileMock, findRepoPlanByBeadIdMock, resolveAgentWorkspaceMock } = vi.hoisted(() => {
|
||||
const execFileMock = vi.fn();
|
||||
execFileMock[Symbol.for('nodejs.util.promisify.custom')] = vi.fn();
|
||||
|
||||
return {
|
||||
execFileMock,
|
||||
findRepoPlanByBeadIdMock: vi.fn(),
|
||||
resolveAgentWorkspaceMock: vi.fn((agentId?: string) => ({
|
||||
agentId: agentId?.trim() || 'main',
|
||||
workspaceRoot: agentId?.trim() === 'research' ? RESEARCH_WORKSPACE_ROOT : WORKSPACE_ROOT,
|
||||
memoryPath: path.join(WORKSPACE_ROOT, 'MEMORY.md'),
|
||||
memoryDir: path.join(WORKSPACE_ROOT, 'memory'),
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock('node:child_process', () => ({
|
||||
execFile: execFileMock,
|
||||
default: {
|
||||
execFile: execFileMock,
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock('./plans.js', () => ({
|
||||
findRepoPlanByBeadId: findRepoPlanByBeadIdMock,
|
||||
}));
|
||||
|
||||
vi.mock('./agent-workspace.js', () => ({
|
||||
resolveAgentWorkspace: resolveAgentWorkspaceMock,
|
||||
}));
|
||||
|
||||
import { BeadValidationError, getBeadDetail, resolveBeadLookupRepoRoot } from './beads.js';
|
||||
|
||||
function resetMocks(): void {
|
||||
vi.restoreAllMocks();
|
||||
execFileMock.mockReset();
|
||||
execFileMock[PROMISIFY_CUSTOM].mockReset();
|
||||
findRepoPlanByBeadIdMock.mockReset();
|
||||
resolveAgentWorkspaceMock.mockReset();
|
||||
resolveAgentWorkspaceMock.mockImplementation((agentId?: string) => ({
|
||||
agentId: agentId?.trim() || 'main',
|
||||
workspaceRoot: agentId?.trim() === 'research' ? RESEARCH_WORKSPACE_ROOT : WORKSPACE_ROOT,
|
||||
memoryPath: path.join(WORKSPACE_ROOT, 'MEMORY.md'),
|
||||
memoryDir: path.join(WORKSPACE_ROOT, 'memory'),
|
||||
}));
|
||||
}
|
||||
|
||||
describe('resolveBeadLookupRepoRoot', () => {
|
||||
beforeEach(() => {
|
||||
resetMocks();
|
||||
});
|
||||
|
||||
it('defaults legacy lookup to process cwd for the main workspace', () => {
|
||||
const cwdSpy = vi.spyOn(process, 'cwd').mockReturnValue(REPO_ROOT);
|
||||
expect(resolveBeadLookupRepoRoot()).toBe(REPO_ROOT);
|
||||
cwdSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('maps the default repo root into the requested workspace when workspaceAgentId is provided', () => {
|
||||
const cwdSpy = vi.spyOn(process, 'cwd').mockReturnValue(REPO_ROOT);
|
||||
expect(resolveBeadLookupRepoRoot({ workspaceAgentId: 'research' })).toBe(
|
||||
path.join(RESEARCH_WORKSPACE_ROOT, 'repo', 'nerve'),
|
||||
);
|
||||
cwdSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('anchors shorthand lookup to the requested workspace when cwd is outside the default workspace', () => {
|
||||
const cwdSpy = vi.spyOn(process, 'cwd').mockReturnValue(OUTSIDE_REPO_ROOT);
|
||||
expect(resolveBeadLookupRepoRoot({ workspaceAgentId: 'research' })).toBe(RESEARCH_WORKSPACE_ROOT);
|
||||
cwdSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('anchors legacy shorthand lookup to the current document repo instead of cwd', () => {
|
||||
const tempRoot = mkdtempSync(path.join(os.tmpdir(), 'beads-legacy-context-'));
|
||||
const workspaceRoot = path.join(tempRoot, 'workspace');
|
||||
const repoOneRoot = path.join(workspaceRoot, 'repo-one');
|
||||
const repoTwoRoot = path.join(workspaceRoot, 'repo-two');
|
||||
const currentDocumentPath = path.join('repo-one', 'docs', 'beads.md');
|
||||
|
||||
mkdirSync(path.join(repoOneRoot, '.beads'), { recursive: true });
|
||||
mkdirSync(path.join(repoOneRoot, 'docs'), { recursive: true });
|
||||
mkdirSync(path.join(repoTwoRoot, '.beads'), { recursive: true });
|
||||
mkdirSync(path.join(repoTwoRoot, 'docs'), { recursive: true });
|
||||
|
||||
resolveAgentWorkspaceMock.mockImplementation(() => ({
|
||||
agentId: 'main',
|
||||
workspaceRoot,
|
||||
memoryPath: path.join(workspaceRoot, 'MEMORY.md'),
|
||||
memoryDir: path.join(workspaceRoot, 'memory'),
|
||||
}));
|
||||
|
||||
const cwdSpy = vi.spyOn(process, 'cwd').mockReturnValue(repoTwoRoot);
|
||||
|
||||
try {
|
||||
expect(resolveBeadLookupRepoRoot({ currentDocumentPath })).toBe(repoOneRoot);
|
||||
} finally {
|
||||
cwdSpy.mockRestore();
|
||||
rmSync(tempRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it('uses explicit absolute repo roots directly when they stay within the workspace', () => {
|
||||
expect(resolveBeadLookupRepoRoot({ targetPath: path.join(WORKSPACE_ROOT, 'repos', 'demo') })).toBe(
|
||||
path.join(WORKSPACE_ROOT, 'repos', 'demo'),
|
||||
);
|
||||
});
|
||||
|
||||
it('normalizes explicit .beads targets to the owning repo root', () => {
|
||||
expect(resolveBeadLookupRepoRoot({ targetPath: path.join(WORKSPACE_ROOT, 'repos', 'demo', '.beads') })).toBe(
|
||||
path.join(WORKSPACE_ROOT, 'repos', 'demo'),
|
||||
);
|
||||
});
|
||||
|
||||
it('rejects explicit absolute targets outside the workspace root', () => {
|
||||
expect(() => resolveBeadLookupRepoRoot({ targetPath: OUTSIDE_REPO_ROOT })).toThrow(BeadValidationError);
|
||||
});
|
||||
|
||||
it('rejects explicit absolute targets whose real path escapes the workspace root through a symlink', () => {
|
||||
const tempRoot = mkdtempSync(path.join(os.tmpdir(), 'beads-symlink-'));
|
||||
const workspaceRoot = path.join(tempRoot, 'workspace');
|
||||
const outsideRoot = path.join(tempRoot, 'outside');
|
||||
const linkedRepo = path.join(workspaceRoot, 'linked-repo');
|
||||
|
||||
mkdirSync(workspaceRoot, { recursive: true });
|
||||
mkdirSync(path.join(outsideRoot, 'demo'), { recursive: true });
|
||||
symlinkSync(path.join(outsideRoot, 'demo'), linkedRepo, 'dir');
|
||||
resolveAgentWorkspaceMock.mockImplementation(() => ({
|
||||
agentId: 'main',
|
||||
workspaceRoot,
|
||||
memoryPath: path.join(workspaceRoot, 'MEMORY.md'),
|
||||
memoryDir: path.join(workspaceRoot, 'memory'),
|
||||
}));
|
||||
|
||||
try {
|
||||
expect(() => resolveBeadLookupRepoRoot({ targetPath: linkedRepo })).toThrow(BeadValidationError);
|
||||
} finally {
|
||||
rmSync(tempRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it('resolves relative explicit targets against the current markdown document directory', () => {
|
||||
expect(resolveBeadLookupRepoRoot({
|
||||
targetPath: '../projects/demo/.beads',
|
||||
currentDocumentPath: path.join('docs', 'specs', 'links.md'),
|
||||
})).toBe(path.resolve(WORKSPACE_ROOT, 'docs', 'projects', 'demo'));
|
||||
});
|
||||
|
||||
it('uses the scoped workspace root when resolving relative explicit targets', () => {
|
||||
expect(resolveBeadLookupRepoRoot({
|
||||
targetPath: './repos/demo',
|
||||
currentDocumentPath: path.join('notes', 'beads.md'),
|
||||
workspaceAgentId: 'research',
|
||||
})).toBe(path.resolve(RESEARCH_WORKSPACE_ROOT, 'notes', 'repos', 'demo'));
|
||||
});
|
||||
|
||||
it('rejects absolute current document paths outside the workspace root', () => {
|
||||
expect(() => resolveBeadLookupRepoRoot({
|
||||
targetPath: './repos/demo',
|
||||
currentDocumentPath: path.resolve(path.sep, 'tmp', 'beads.md'),
|
||||
})).toThrow(BeadValidationError);
|
||||
});
|
||||
|
||||
it('rejects resolved repo roots that escape the workspace root', () => {
|
||||
expect(() => resolveBeadLookupRepoRoot({
|
||||
targetPath: '../../../outside-repo',
|
||||
currentDocumentPath: path.join('docs', 'specs', 'links.md'),
|
||||
})).toThrow(BeadValidationError);
|
||||
});
|
||||
|
||||
it('rejects relative explicit targets when no current document path is available', () => {
|
||||
expect(() => resolveBeadLookupRepoRoot({ targetPath: '../projects/demo' })).toThrow(BeadValidationError);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getBeadDetail', () => {
|
||||
beforeEach(() => {
|
||||
resetMocks();
|
||||
});
|
||||
|
||||
it('rejects blank bead ids as validation errors', async () => {
|
||||
await expect(getBeadDetail(' ')).rejects.toBeInstanceOf(BeadValidationError);
|
||||
});
|
||||
|
||||
it('rejects missing repo roots before spawning bd', async () => {
|
||||
await expect(getBeadDetail('nerve-fms2', {
|
||||
targetPath: path.join(WORKSPACE_ROOT, 'repos', 'missing-demo'),
|
||||
})).rejects.toBeInstanceOf(BeadValidationError);
|
||||
expect(execFileMock[PROMISIFY_CUSTOM]).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('degrades linked plan enrichment failures to a null linkedPlan result', async () => {
|
||||
const tempRoot = mkdtempSync(path.join(os.tmpdir(), 'beads-detail-'));
|
||||
const workspaceRoot = path.join(tempRoot, 'workspace');
|
||||
const repoRoot = path.join(workspaceRoot, 'repo');
|
||||
mkdirSync(repoRoot, { recursive: true });
|
||||
resolveAgentWorkspaceMock.mockImplementation(() => ({
|
||||
agentId: 'main',
|
||||
workspaceRoot,
|
||||
memoryPath: path.join(workspaceRoot, 'MEMORY.md'),
|
||||
memoryDir: path.join(workspaceRoot, 'memory'),
|
||||
}));
|
||||
|
||||
execFileMock[PROMISIFY_CUSTOM].mockResolvedValue({
|
||||
stdout: JSON.stringify({
|
||||
id: 'nerve-fms2',
|
||||
title: 'Demo bead',
|
||||
status: 'open',
|
||||
}),
|
||||
stderr: '',
|
||||
});
|
||||
findRepoPlanByBeadIdMock.mockRejectedValue(new Error('plan lookup failed'));
|
||||
|
||||
try {
|
||||
await expect(getBeadDetail('nerve-fms2', { targetPath: repoRoot })).resolves.toMatchObject({
|
||||
id: 'nerve-fms2',
|
||||
title: 'Demo bead',
|
||||
linkedPlan: null,
|
||||
});
|
||||
} finally {
|
||||
rmSync(tempRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
390
server/lib/beads.ts
Normal file
390
server/lib/beads.ts
Normal file
|
|
@ -0,0 +1,390 @@
|
|||
import { execFile as execFileCallback } from 'node:child_process';
|
||||
import { accessSync, constants, realpathSync, statSync } from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import { promisify } from 'node:util';
|
||||
import { findRepoPlanByBeadId } from './plans.js';
|
||||
import { resolveAgentWorkspace } from './agent-workspace.js';
|
||||
|
||||
const execFile = promisify(execFileCallback);
|
||||
const BD_TIMEOUT_MS = 15_000;
|
||||
const BD_MAX_BUFFER_BYTES = 4 * 1024 * 1024;
|
||||
|
||||
export class BeadNotFoundError extends Error {
|
||||
constructor(beadId: string) {
|
||||
super(`Bead not found: ${beadId}`);
|
||||
this.name = 'BeadNotFoundError';
|
||||
}
|
||||
}
|
||||
|
||||
export class BeadAdapterError extends Error {
|
||||
stderr: string;
|
||||
|
||||
constructor(message: string, stderr = '') {
|
||||
super(message);
|
||||
this.name = 'BeadAdapterError';
|
||||
this.stderr = stderr;
|
||||
}
|
||||
}
|
||||
|
||||
export class BeadValidationError extends Error {
|
||||
constructor(message: string) {
|
||||
super(message);
|
||||
this.name = 'BeadValidationError';
|
||||
}
|
||||
}
|
||||
|
||||
export interface BeadRelationSummary {
|
||||
id: string;
|
||||
title: string | null;
|
||||
status: string | null;
|
||||
dependencyType: string | null;
|
||||
}
|
||||
|
||||
export interface BeadLinkedPlanSummary {
|
||||
path: string;
|
||||
workspacePath: string | null;
|
||||
title: string;
|
||||
planId: string | null;
|
||||
archived: boolean;
|
||||
status: string | null;
|
||||
updatedAt: number;
|
||||
}
|
||||
|
||||
export interface BeadDetail {
|
||||
id: string;
|
||||
title: string;
|
||||
notes: string | null;
|
||||
status: string | null;
|
||||
priority: number | null;
|
||||
issueType: string | null;
|
||||
owner: string | null;
|
||||
createdAt: string | null;
|
||||
updatedAt: string | null;
|
||||
closedAt: string | null;
|
||||
closeReason: string | null;
|
||||
dependencies: BeadRelationSummary[];
|
||||
dependents: BeadRelationSummary[];
|
||||
linkedPlan: BeadLinkedPlanSummary | null;
|
||||
}
|
||||
|
||||
export interface BeadLookupOptions {
|
||||
targetPath?: string;
|
||||
currentDocumentPath?: string;
|
||||
workspaceAgentId?: string;
|
||||
}
|
||||
|
||||
interface RawBeadRelation {
|
||||
id?: unknown;
|
||||
title?: unknown;
|
||||
status?: unknown;
|
||||
dependency_type?: unknown;
|
||||
}
|
||||
|
||||
interface RawBeadRecord {
|
||||
id?: unknown;
|
||||
title?: unknown;
|
||||
notes?: unknown;
|
||||
status?: unknown;
|
||||
priority?: unknown;
|
||||
issue_type?: unknown;
|
||||
owner?: unknown;
|
||||
created_at?: unknown;
|
||||
updated_at?: unknown;
|
||||
closed_at?: unknown;
|
||||
close_reason?: unknown;
|
||||
dependencies?: RawBeadRelation[];
|
||||
dependents?: RawBeadRelation[];
|
||||
}
|
||||
|
||||
function normalizeString(value: unknown): string | null {
|
||||
return typeof value === 'string' && value.trim() ? value.trim() : null;
|
||||
}
|
||||
|
||||
function normalizeNumber(value: unknown): number | null {
|
||||
return typeof value === 'number' && Number.isFinite(value) ? value : null;
|
||||
}
|
||||
|
||||
function getPreferredLocalBinDirs(): string[] {
|
||||
const home = process.env.HOME || os.homedir();
|
||||
return [
|
||||
path.join(home, '.local', 'bin'),
|
||||
path.join(home, '.npm-global', 'bin'),
|
||||
path.join(home, '.volta', 'bin'),
|
||||
path.join(home, '.bun', 'bin'),
|
||||
];
|
||||
}
|
||||
|
||||
function buildRuntimePath(basePath?: string): string {
|
||||
const segments = [...getPreferredLocalBinDirs(), ...(basePath || '').split(path.delimiter).filter(Boolean)];
|
||||
return [...new Set(segments)].join(path.delimiter);
|
||||
}
|
||||
|
||||
function resolveBdBin(): string {
|
||||
if (process.env.BD_BIN?.trim()) return process.env.BD_BIN.trim();
|
||||
|
||||
for (const dir of getPreferredLocalBinDirs()) {
|
||||
const candidate = path.join(dir, 'bd');
|
||||
try {
|
||||
accessSync(candidate, constants.X_OK);
|
||||
return candidate;
|
||||
} catch {
|
||||
// continue
|
||||
}
|
||||
}
|
||||
|
||||
return 'bd';
|
||||
}
|
||||
|
||||
function parseJsonPayload(stdout: string): unknown {
|
||||
const trimmed = stdout.trim();
|
||||
if (!trimmed) return [];
|
||||
|
||||
try {
|
||||
return JSON.parse(trimmed);
|
||||
} catch {
|
||||
// Fall through to warning-tolerant parsing.
|
||||
}
|
||||
|
||||
for (let index = 0; index < trimmed.length; index += 1) {
|
||||
const ch = trimmed[index];
|
||||
if (ch !== '{' && ch !== '[') continue;
|
||||
try {
|
||||
return JSON.parse(trimmed.slice(index));
|
||||
} catch {
|
||||
// continue
|
||||
}
|
||||
}
|
||||
|
||||
throw new BeadAdapterError('Failed to parse bd JSON output');
|
||||
}
|
||||
|
||||
function normalizeRelations(value: unknown): BeadRelationSummary[] {
|
||||
if (!Array.isArray(value)) return [];
|
||||
|
||||
return value.flatMap((entry) => {
|
||||
const relation = entry as RawBeadRelation;
|
||||
const id = normalizeString(relation.id);
|
||||
if (!id) return [];
|
||||
return [{
|
||||
id,
|
||||
title: normalizeString(relation.title),
|
||||
status: normalizeString(relation.status),
|
||||
dependencyType: normalizeString(relation.dependency_type),
|
||||
} satisfies BeadRelationSummary];
|
||||
});
|
||||
}
|
||||
|
||||
function normalizeBeadRepoRoot(repoRoot: string): string {
|
||||
const trimmed = repoRoot.trim();
|
||||
if (!trimmed) return trimmed;
|
||||
return path.basename(trimmed) === '.beads' ? path.dirname(trimmed) : trimmed;
|
||||
}
|
||||
|
||||
function resolveExistingRealPath(candidatePath: string): string | null {
|
||||
try {
|
||||
return realpathSync.native(candidatePath);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function isPathWithinRoot(candidatePath: string, rootPath: string): boolean {
|
||||
const normalizedCandidate = path.resolve(candidatePath);
|
||||
const normalizedRoot = path.resolve(rootPath);
|
||||
|
||||
const candidateRealPath = resolveExistingRealPath(normalizedCandidate);
|
||||
const rootRealPath = resolveExistingRealPath(normalizedRoot);
|
||||
|
||||
const containmentCandidate = candidateRealPath ?? normalizedCandidate;
|
||||
const containmentRoot = rootRealPath ?? normalizedRoot;
|
||||
const relative = path.relative(containmentRoot, containmentCandidate);
|
||||
return relative === '' || (!relative.startsWith('..') && !path.isAbsolute(relative));
|
||||
}
|
||||
|
||||
function assertPathWithinWorkspace(candidatePath: string, workspaceRoot: string, label: string): string {
|
||||
const normalizedCandidate = path.resolve(candidatePath);
|
||||
if (!isPathWithinRoot(normalizedCandidate, workspaceRoot)) {
|
||||
throw new BeadValidationError(`${label} must stay within the workspace root`);
|
||||
}
|
||||
return normalizedCandidate;
|
||||
}
|
||||
|
||||
function assertRepoRootReadableDirectory(repoRoot: string): string {
|
||||
const normalizedRepoRoot = path.resolve(repoRoot);
|
||||
try {
|
||||
if (!statSync(normalizedRepoRoot).isDirectory()) {
|
||||
throw new BeadValidationError('Resolved bead repo root must be an existing directory');
|
||||
}
|
||||
} catch (error) {
|
||||
if (error instanceof BeadValidationError) {
|
||||
throw error;
|
||||
}
|
||||
throw new BeadValidationError('Resolved bead repo root must be an existing directory');
|
||||
}
|
||||
return normalizedRepoRoot;
|
||||
}
|
||||
|
||||
function resolveAbsoluteCurrentDocumentPath(currentDocumentPath: string, workspaceRoot: string): string {
|
||||
return assertPathWithinWorkspace(
|
||||
path.isAbsolute(currentDocumentPath)
|
||||
? currentDocumentPath
|
||||
: path.resolve(workspaceRoot, currentDocumentPath),
|
||||
workspaceRoot,
|
||||
'Current document path',
|
||||
);
|
||||
}
|
||||
|
||||
function resolveLegacyBeadLookupRepoRootFromCurrentDocumentPath(currentDocumentPath: string, workspaceRoot: string): string {
|
||||
const absoluteDocumentPath = resolveAbsoluteCurrentDocumentPath(currentDocumentPath, workspaceRoot);
|
||||
let currentDir = path.dirname(absoluteDocumentPath);
|
||||
|
||||
while (isPathWithinRoot(currentDir, workspaceRoot)) {
|
||||
const beadDir = path.join(currentDir, '.beads');
|
||||
try {
|
||||
if (statSync(beadDir).isDirectory()) {
|
||||
return currentDir;
|
||||
}
|
||||
} catch {
|
||||
// Keep walking toward the workspace root.
|
||||
}
|
||||
|
||||
if (currentDir === path.resolve(workspaceRoot)) {
|
||||
break;
|
||||
}
|
||||
|
||||
const parentDir = path.dirname(currentDir);
|
||||
if (parentDir === currentDir) {
|
||||
break;
|
||||
}
|
||||
currentDir = parentDir;
|
||||
}
|
||||
|
||||
return path.dirname(absoluteDocumentPath);
|
||||
}
|
||||
|
||||
export function resolveBeadLookupRepoRoot(options: BeadLookupOptions = {}): string {
|
||||
const workspaceRoot = resolveAgentWorkspace(options.workspaceAgentId).workspaceRoot;
|
||||
const currentDocumentPath = options.currentDocumentPath?.trim();
|
||||
|
||||
if (!options.targetPath?.trim()) {
|
||||
if (currentDocumentPath) {
|
||||
return normalizeBeadRepoRoot(resolveLegacyBeadLookupRepoRootFromCurrentDocumentPath(currentDocumentPath, workspaceRoot));
|
||||
}
|
||||
|
||||
const cwd = process.cwd();
|
||||
const defaultWorkspaceRoot = resolveAgentWorkspace().workspaceRoot;
|
||||
if (!isPathWithinRoot(cwd, defaultWorkspaceRoot)) {
|
||||
return options.workspaceAgentId ? normalizeBeadRepoRoot(workspaceRoot) : cwd;
|
||||
}
|
||||
return normalizeBeadRepoRoot(path.resolve(workspaceRoot, path.relative(defaultWorkspaceRoot, cwd)));
|
||||
}
|
||||
|
||||
const targetPath = options.targetPath.trim();
|
||||
if (path.isAbsolute(targetPath)) {
|
||||
return normalizeBeadRepoRoot(assertPathWithinWorkspace(targetPath, workspaceRoot, 'Explicit bead target path'));
|
||||
}
|
||||
|
||||
if (!currentDocumentPath) {
|
||||
throw new BeadValidationError('Relative explicit bead URIs require a current document path');
|
||||
}
|
||||
|
||||
const absoluteDocumentPath = resolveAbsoluteCurrentDocumentPath(currentDocumentPath, workspaceRoot);
|
||||
|
||||
const repoRoot = normalizeBeadRepoRoot(path.resolve(path.dirname(absoluteDocumentPath), targetPath));
|
||||
return assertPathWithinWorkspace(repoRoot, workspaceRoot, 'Resolved bead repo root');
|
||||
}
|
||||
|
||||
export async function getBeadDetail(beadId: string, options: BeadLookupOptions = {}): Promise<BeadDetail> {
|
||||
const normalizedBeadId = beadId.trim();
|
||||
if (!normalizedBeadId) {
|
||||
throw new BeadValidationError('Bead id is required');
|
||||
}
|
||||
|
||||
const repoRoot = assertRepoRootReadableDirectory(resolveBeadLookupRepoRoot(options));
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
|
||||
try {
|
||||
const result = await execFile(resolveBdBin(), ['show', normalizedBeadId, '--json'], {
|
||||
cwd: repoRoot,
|
||||
timeout: BD_TIMEOUT_MS,
|
||||
maxBuffer: BD_MAX_BUFFER_BYTES,
|
||||
env: {
|
||||
...process.env,
|
||||
PATH: buildRuntimePath(process.env.PATH),
|
||||
},
|
||||
});
|
||||
stdout = result.stdout;
|
||||
stderr = result.stderr;
|
||||
} catch (error) {
|
||||
const err = error as NodeJS.ErrnoException & { stderr?: string; code?: string; killed?: boolean; signal?: string };
|
||||
const stderrLine = (err.stderr || '').trim().split('\n').find(Boolean) || '';
|
||||
|
||||
if (err.code === 'ENOENT') {
|
||||
throw new BeadAdapterError('bd CLI not found in PATH', stderrLine);
|
||||
}
|
||||
|
||||
if (err.killed && err.signal === 'SIGTERM') {
|
||||
throw new BeadAdapterError(`bd show timed out after ${BD_TIMEOUT_MS}ms`, stderrLine);
|
||||
}
|
||||
|
||||
if (stderrLine.toLowerCase().includes('not found') || stderrLine.toLowerCase().includes('no issue')) {
|
||||
throw new BeadNotFoundError(normalizedBeadId);
|
||||
}
|
||||
|
||||
throw new BeadAdapterError(stderrLine || err.message || 'Failed to read bead', stderrLine);
|
||||
}
|
||||
|
||||
const payload = parseJsonPayload(stdout || stderr);
|
||||
const records = Array.isArray(payload) ? payload : [payload];
|
||||
const raw = records.find((entry) => normalizeString((entry as RawBeadRecord)?.id) === normalizedBeadId) as RawBeadRecord | undefined;
|
||||
|
||||
if (!raw || !normalizeString(raw.id) || !normalizeString(raw.title)) {
|
||||
throw new BeadNotFoundError(normalizedBeadId);
|
||||
}
|
||||
|
||||
let linkedPlan: Awaited<ReturnType<typeof findRepoPlanByBeadId>> = null;
|
||||
let linkedPlanWorkspacePath: string | null = null;
|
||||
|
||||
try {
|
||||
linkedPlan = await findRepoPlanByBeadId(normalizedBeadId, repoRoot);
|
||||
if (linkedPlan) {
|
||||
const workspaceRoot = resolveAgentWorkspace(options.workspaceAgentId).workspaceRoot;
|
||||
const absoluteLinkedPlanPath = path.resolve(repoRoot, linkedPlan.path);
|
||||
if (isPathWithinRoot(absoluteLinkedPlanPath, workspaceRoot)) {
|
||||
const relativePath = path.relative(workspaceRoot, absoluteLinkedPlanPath).split(path.sep).join('/');
|
||||
linkedPlanWorkspacePath = relativePath && relativePath !== '.' ? relativePath : null;
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
linkedPlan = null;
|
||||
linkedPlanWorkspacePath = null;
|
||||
}
|
||||
|
||||
return {
|
||||
id: normalizeString(raw.id) ?? normalizedBeadId,
|
||||
title: normalizeString(raw.title) ?? normalizedBeadId,
|
||||
notes: normalizeString(raw.notes),
|
||||
status: normalizeString(raw.status),
|
||||
priority: normalizeNumber(raw.priority),
|
||||
issueType: normalizeString(raw.issue_type),
|
||||
owner: normalizeString(raw.owner),
|
||||
createdAt: normalizeString(raw.created_at),
|
||||
updatedAt: normalizeString(raw.updated_at),
|
||||
closedAt: normalizeString(raw.closed_at),
|
||||
closeReason: normalizeString(raw.close_reason),
|
||||
dependencies: normalizeRelations(raw.dependencies),
|
||||
dependents: normalizeRelations(raw.dependents),
|
||||
linkedPlan: linkedPlan ? {
|
||||
path: linkedPlan.path,
|
||||
workspacePath: linkedPlanWorkspacePath,
|
||||
title: linkedPlan.title,
|
||||
planId: linkedPlan.planId,
|
||||
archived: linkedPlan.archived,
|
||||
status: linkedPlan.status,
|
||||
updatedAt: linkedPlan.updatedAt,
|
||||
} : null,
|
||||
};
|
||||
}
|
||||
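A minimal caller sketch for getBeadDetail, assuming the error classes used above are exported from the same module; the module path, bead id, and document path are illustrative.
// Hypothetical usage; distinguishes "bead missing" from "bd CLI failed".
import { getBeadDetail, BeadNotFoundError, BeadAdapterError } from './bead-adapter.js';
async function printBead(beadId: string): Promise<void> {
  try {
    const detail = await getBeadDetail(beadId, { currentDocumentPath: 'plans/example.md' });
    console.log(`${detail.id}: ${detail.title} [${detail.status ?? 'unknown'}]`);
    if (detail.linkedPlan) {
      console.log(`linked plan: ${detail.linkedPlan.workspacePath ?? detail.linkedPlan.path}`);
    }
  } catch (error) {
    if (error instanceof BeadNotFoundError) {
      console.warn(`No bead found for ${beadId}`);
    } else if (error instanceof BeadAdapterError) {
      console.error('bd show failed:', error.message);
    } else {
      throw error;
    }
  }
}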
34
server/lib/chat-path-links-config.test.ts
Normal file
@ -0,0 +1,34 @@
|
|||
import { describe, expect, it } from 'vitest';
|
||||
|
||||
import {
|
||||
normalizeChatPathLinkAliases,
|
||||
parseChatPathLinksConfig,
|
||||
} from './chat-path-links-config.js';
|
||||
|
||||
describe('chat-path-links-config', () => {
|
||||
it('rejects aliases that would shadow the built-in workspace shorthand', () => {
|
||||
expect(normalizeChatPathLinkAliases({
|
||||
'workspace/': '/workspace/override/',
|
||||
'workspace/projects/': '/workspace/custom-projects/',
|
||||
'docs/': '/workspace/docs/',
|
||||
})).toEqual({
|
||||
'docs/': '/workspace/docs/',
|
||||
});
|
||||
});
|
||||
|
||||
it('parsing drops reserved workspace alias keys while keeping valid aliases', () => {
|
||||
expect(parseChatPathLinksConfig(JSON.stringify({
|
||||
prefixes: ['/workspace/'],
|
||||
aliases: {
|
||||
'workspace/': '/workspace/override/',
|
||||
'workspace/projects/': '/workspace/custom-projects/',
|
||||
'docs/': 'workspace/docs',
|
||||
},
|
||||
}))).toEqual({
|
||||
prefixes: ['/workspace/'],
|
||||
aliases: {
|
||||
'docs/': '/workspace/docs/',
|
||||
},
|
||||
});
|
||||
});
|
||||
});
|
||||
186
server/lib/chat-path-links-config.ts
Normal file
@ -0,0 +1,186 @@
|
|||
export interface ChatPathLinksConfig {
|
||||
prefixes: string[];
|
||||
aliases: Record<string, string>;
|
||||
}
|
||||
|
||||
export interface ChatPathLinksSeedContext {
|
||||
platform?: string;
|
||||
homeDir?: string;
|
||||
username?: string;
|
||||
workspaceRoot?: string;
|
||||
}
|
||||
|
||||
const DEFAULT_PREFIX = '/workspace/';
|
||||
const CANONICAL_WORKSPACE_PREFIX = '/workspace/';
|
||||
const BARE_WORKSPACE_PREFIX = 'workspace/';
|
||||
const FILE_WORKSPACE_PREFIX = 'file:///workspace/';
|
||||
const SCHEME_RE = /^[a-zA-Z][a-zA-Z\d+.-]*:/;
|
||||
|
||||
function withTrailingSlash(value: string): string {
|
||||
return value.endsWith('/') ? value : `${value}/`;
|
||||
}
|
||||
|
||||
function stripTrailingSlash(value: string): string {
|
||||
return value.replace(/\/$/, '');
|
||||
}
|
||||
|
||||
function normalizePrefixPath(value: string): string {
|
||||
const trimmed = value.trim().replaceAll('\\', '/');
|
||||
if (!trimmed) return '';
|
||||
return withTrailingSlash(trimmed);
|
||||
}
|
||||
|
||||
function dedupePrefixes(prefixes: string[]): string[] {
|
||||
const seen = new Set<string>();
|
||||
const result: string[] = [];
|
||||
|
||||
for (const prefix of prefixes) {
|
||||
const normalized = normalizePrefixPath(prefix);
|
||||
if (!normalized || seen.has(normalized)) continue;
|
||||
seen.add(normalized);
|
||||
result.push(normalized);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
function inferHomeDirFromWorkspaceRoot(workspaceRoot?: string): string | null {
|
||||
if (!workspaceRoot) return null;
|
||||
|
||||
const normalizedWorkspaceRoot = stripTrailingSlash(normalizePrefixPath(workspaceRoot));
|
||||
if (!normalizedWorkspaceRoot) return null;
|
||||
|
||||
const openclawMatch = normalizedWorkspaceRoot.match(/^(.*)\/\.openclaw\/workspace(?:-[^/]+)?$/);
|
||||
if (openclawMatch?.[1]) {
|
||||
return openclawMatch[1];
|
||||
}
|
||||
|
||||
const workspaceMatch = normalizedWorkspaceRoot.match(/^(.*)\/workspace(?:-[^/]+)?$/);
|
||||
if (workspaceMatch?.[1]) {
|
||||
return workspaceMatch[1];
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function inferHomeDir(context: ChatPathLinksSeedContext): string | null {
|
||||
if (context.homeDir) return stripTrailingSlash(normalizePrefixPath(context.homeDir));
|
||||
|
||||
const workspaceRootHome = inferHomeDirFromWorkspaceRoot(context.workspaceRoot);
|
||||
if (workspaceRootHome) return workspaceRootHome;
|
||||
|
||||
const username = context.username?.trim();
|
||||
if (!username) return null;
|
||||
|
||||
const platform = (context.platform ?? '').toLowerCase();
|
||||
if (platform === 'darwin' || platform === 'mac' || platform === 'macos') {
|
||||
return `/Users/${username}`;
|
||||
}
|
||||
if (platform === 'linux') {
|
||||
return `/home/${username}`;
|
||||
}
|
||||
if (platform === 'win32' || platform === 'windows') {
|
||||
return `C:/Users/${username}`;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
export function createDefaultChatPathLinksConfig(context: ChatPathLinksSeedContext = {}): ChatPathLinksConfig {
|
||||
const prefixes: string[] = [DEFAULT_PREFIX];
|
||||
const workspaceRoot = context.workspaceRoot ? normalizePrefixPath(context.workspaceRoot) : '';
|
||||
const homeDir = inferHomeDir(context);
|
||||
|
||||
if (workspaceRoot) {
|
||||
prefixes.push(workspaceRoot);
|
||||
}
|
||||
|
||||
if (homeDir) {
|
||||
prefixes.push(`${homeDir}/.openclaw/workspace/`);
|
||||
prefixes.push(`${homeDir}/workspace/`);
|
||||
}
|
||||
|
||||
return {
|
||||
prefixes: dedupePrefixes(prefixes),
|
||||
aliases: {},
|
||||
};
|
||||
}
|
||||
|
||||
export const DEFAULT_CHAT_PATH_LINKS_CONFIG: ChatPathLinksConfig = createDefaultChatPathLinksConfig();
|
||||
|
||||
function normalizeAliasKey(value: string): string {
|
||||
const normalized = normalizePrefixPath(value);
|
||||
if (!normalized) return '';
|
||||
if (normalized.startsWith('/') || normalized.startsWith('file://') || SCHEME_RE.test(normalized)) return '';
|
||||
// Reserve workspace/... for the built-in product shorthand, not user-defined aliases.
|
||||
if (normalized.startsWith(BARE_WORKSPACE_PREFIX)) return '';
|
||||
return normalized;
|
||||
}
|
||||
|
||||
function normalizeAliasValue(value: string): string {
|
||||
const normalized = normalizePrefixPath(value);
|
||||
if (!normalized) return '';
|
||||
|
||||
if (normalized.startsWith(FILE_WORKSPACE_PREFIX)) {
|
||||
const canonical = normalized.slice('file://'.length);
|
||||
return canonical.startsWith(CANONICAL_WORKSPACE_PREFIX) ? canonical : '';
|
||||
}
|
||||
|
||||
if (normalized.startsWith(CANONICAL_WORKSPACE_PREFIX)) {
|
||||
return normalized;
|
||||
}
|
||||
|
||||
if (normalized.startsWith(BARE_WORKSPACE_PREFIX)) {
|
||||
return `${CANONICAL_WORKSPACE_PREFIX}${normalized.slice(BARE_WORKSPACE_PREFIX.length)}`;
|
||||
}
|
||||
|
||||
return '';
|
||||
}
|
||||
|
||||
export function normalizeChatPathLinkPrefixes(rawPrefixes: unknown): string[] {
|
||||
if (!Array.isArray(rawPrefixes)) return [...DEFAULT_CHAT_PATH_LINKS_CONFIG.prefixes];
|
||||
|
||||
const normalized = dedupePrefixes(
|
||||
rawPrefixes.filter((value): value is string => typeof value === 'string'),
|
||||
);
|
||||
|
||||
return normalized.length > 0 ? normalized : [...DEFAULT_CHAT_PATH_LINKS_CONFIG.prefixes];
|
||||
}
|
||||
|
||||
export function normalizeChatPathLinkAliases(rawAliases: unknown): Record<string, string> {
|
||||
if (!rawAliases || typeof rawAliases !== 'object' || Array.isArray(rawAliases)) return {};
|
||||
|
||||
const entries = Object.entries(rawAliases as Record<string, unknown>);
|
||||
const normalized: Record<string, string> = {};
|
||||
|
||||
for (const [rawKey, rawValue] of entries) {
|
||||
if (typeof rawValue !== 'string') continue;
|
||||
|
||||
const key = normalizeAliasKey(rawKey);
|
||||
const value = normalizeAliasValue(rawValue);
|
||||
if (!key || !value) continue;
|
||||
|
||||
normalized[key] = value;
|
||||
}
|
||||
|
||||
return normalized;
|
||||
}
|
||||
|
||||
export function parseChatPathLinksConfig(content: string): ChatPathLinksConfig {
|
||||
const parsed = JSON.parse(content) as { prefixes?: unknown; aliases?: unknown };
|
||||
return {
|
||||
prefixes: normalizeChatPathLinkPrefixes(parsed?.prefixes),
|
||||
aliases: normalizeChatPathLinkAliases(parsed?.aliases),
|
||||
};
|
||||
}
|
||||
|
||||
export function stringifyChatPathLinksConfig(config: ChatPathLinksConfig): string {
|
||||
return `${JSON.stringify({
|
||||
prefixes: normalizeChatPathLinkPrefixes(config.prefixes),
|
||||
aliases: normalizeChatPathLinkAliases(config.aliases),
|
||||
}, null, 2)}\n`;
|
||||
}
|
||||
|
||||
export function createChatPathLinksTemplate(context: ChatPathLinksSeedContext = {}): string {
|
||||
return stringifyChatPathLinksConfig(createDefaultChatPathLinksConfig(context));
|
||||
}
|
||||
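A short round-trip sketch of the module above; the values are illustrative, and the reserved-key handling matches the test file earlier in this diff.
// Illustrative only: shows how parsing normalizes aliases and drops reserved keys.
import { parseChatPathLinksConfig, stringifyChatPathLinksConfig } from './chat-path-links-config.js';
const parsed = parseChatPathLinksConfig(JSON.stringify({
  prefixes: ['/workspace/', '/workspace/'],   // duplicates collapse to one entry
  aliases: {
    'docs/': 'workspace/docs',                // value normalized to '/workspace/docs/'
    'workspace/': '/workspace/override/',     // reserved built-in shorthand, dropped
  },
}));
// parsed => { prefixes: ['/workspace/'], aliases: { 'docs/': '/workspace/docs/' } }
const serialized = stringifyChatPathLinksConfig(parsed); // pretty-printed JSON plus a trailing newline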
|
|
@ -58,6 +58,32 @@ describe('config module', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('workspaceWatchRecursive', () => {
|
||||
it('defaults to true when env var is unset', async () => {
|
||||
vi.resetModules();
|
||||
delete process.env.NERVE_WATCH_WORKSPACE_RECURSIVE;
|
||||
|
||||
const { config } = await import('./config.js');
|
||||
expect(config.workspaceWatchRecursive).toBe(true);
|
||||
});
|
||||
|
||||
it('can be disabled explicitly with false', async () => {
|
||||
vi.resetModules();
|
||||
process.env.NERVE_WATCH_WORKSPACE_RECURSIVE = 'false';
|
||||
|
||||
const { config } = await import('./config.js');
|
||||
expect(config.workspaceWatchRecursive).toBe(false);
|
||||
});
|
||||
|
||||
it('stays enabled when set to true', async () => {
|
||||
vi.resetModules();
|
||||
process.env.NERVE_WATCH_WORKSPACE_RECURSIVE = 'true';
|
||||
|
||||
const { config } = await import('./config.js');
|
||||
expect(config.workspaceWatchRecursive).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('SESSION_COOKIE_NAME', () => {
|
||||
it('includes the port number', async () => {
|
||||
const { SESSION_COOKIE_NAME, config } = await import('./config.js');
|
||||
|
|
|
|||
|
|
@ -51,6 +51,7 @@ export const config = {
|
|||
|
||||
openaiApiKey: process.env.OPENAI_API_KEY || '',
|
||||
replicateApiToken: process.env.REPLICATE_API_TOKEN || '',
|
||||
mimoApiKey: process.env.MIMO_API_KEY || '',
|
||||
|
||||
// Speech-to-text
|
||||
sttProvider: (process.env.STT_PROVIDER || 'local') as 'local' | 'openai',
|
||||
|
|
@ -68,6 +69,7 @@ export const config = {
|
|||
// Gateway connection
|
||||
gatewayUrl: process.env.GATEWAY_URL || DEFAULT_GATEWAY_URL,
|
||||
gatewayToken: process.env.GATEWAY_TOKEN || process.env.OPENCLAW_GATEWAY_TOKEN || '',
|
||||
publicOrigin: process.env.NERVE_PUBLIC_ORIGIN || '',
|
||||
|
||||
// Agent identity (used in UI)
|
||||
agentName: process.env.AGENT_NAME || 'Agent',
|
||||
|
|
@ -82,7 +84,8 @@ export const config = {
|
|||
memoryDir: process.env.MEMORY_DIR || path.join(HOME, '.openclaw', 'workspace', 'memory'),
|
||||
sessionsDir: process.env.SESSIONS_DIR || path.join(HOME, '.openclaw', 'agents', 'main', 'sessions'),
|
||||
usageFile: process.env.USAGE_FILE || path.join(HOME, '.openclaw', 'token-usage.json'),
|
||||
workspaceWatchRecursive: process.env.NERVE_WATCH_WORKSPACE_RECURSIVE === 'true',
|
||||
workspaceWatchRecursive: process.env.NERVE_WATCH_WORKSPACE_RECURSIVE !== 'false',
|
||||
workspaceRemote: process.env.NERVE_WORKSPACE_REMOTE === 'true',
|
||||
certPath: path.join(PROJECT_ROOT, 'certs', 'cert.pem'),
|
||||
keyPath: path.join(PROJECT_ROOT, 'certs', 'key.pem'),
|
||||
bunPath: path.join(HOME, '.bun', 'bin', 'bunx'),
|
||||
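The change from === 'true' to !== 'false' above flips the default for recursive workspace watching: only the literal string 'false' now disables it. A quick sketch of the resulting values:
// NERVE_WATCH_WORKSPACE_RECURSIVE unset     -> workspaceWatchRecursive === true (new default)
// NERVE_WATCH_WORKSPACE_RECURSIVE=true      -> true
// NERVE_WATCH_WORKSPACE_RECURSIVE=false     -> false (the only way to disable)
// any other value (e.g. "0", "no")          -> true, since only the string 'false' is checked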
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import crypto from 'node:crypto';
|
|||
import {
|
||||
getWorkspaceRoot,
|
||||
isExcluded,
|
||||
resolveWorkspacePath,
|
||||
resolveWorkspacePathForRoot,
|
||||
} from './file-utils.js';
|
||||
import { config } from './config.js';
|
||||
import { withMutex } from './mutex.js';
|
||||
|
|
@ -63,12 +63,16 @@ function toPosix(rel: string): string {
|
|||
return rel.replace(/\\/g, '/');
|
||||
}
|
||||
|
||||
function workspaceRoot(): string {
|
||||
return getWorkspaceRoot();
|
||||
function normalizeWorkspaceRoot(workspaceRoot: string): string {
|
||||
return getWorkspaceRoot(workspaceRoot);
|
||||
}
|
||||
|
||||
function toWorkspaceRelative(absPath: string): string {
|
||||
const rel = path.relative(workspaceRoot(), absPath);
|
||||
function toWorkspaceRelative(absPath: string, workspaceRoot: string): string {
|
||||
const root = normalizeWorkspaceRoot(workspaceRoot);
|
||||
const rel = path.relative(root, absPath);
|
||||
if (rel.startsWith('..') || path.isAbsolute(rel)) {
|
||||
throw new FileOpError(403, 'invalid_path', 'Invalid or excluded path');
|
||||
}
|
||||
return toPosix(rel || '.');
|
||||
}
|
||||
|
||||
|
|
@ -129,16 +133,8 @@ function assertValidNewName(newName: string): void {
|
|||
}
|
||||
}
|
||||
|
||||
async function resolveExistingPathOrThrow(relPath: string): Promise<string> {
|
||||
const resolved = await resolveWorkspacePath(relPath);
|
||||
if (!resolved) {
|
||||
throw new FileOpError(403, 'invalid_path', 'Invalid or excluded path');
|
||||
}
|
||||
return resolved;
|
||||
}
|
||||
|
||||
async function resolvePathAllowNewOrThrow(relPath: string): Promise<string> {
|
||||
const resolved = await resolveWorkspacePath(relPath, { allowNonExistent: true });
|
||||
async function resolvePathAllowNewOrThrow(workspaceRoot: string, relPath: string): Promise<string> {
|
||||
const resolved = await resolveWorkspacePathForRoot(workspaceRoot, relPath, { allowNonExistent: true });
|
||||
if (!resolved) {
|
||||
throw new FileOpError(403, 'invalid_path', 'Invalid or excluded path');
|
||||
}
|
||||
|
|
@ -170,28 +166,31 @@ async function assertTargetNotExists(targetAbs: string): Promise<void> {
|
|||
}
|
||||
}
|
||||
|
||||
function trashDirAbs(): string {
|
||||
return path.join(workspaceRoot(), TRASH_DIR);
|
||||
function trashDirAbs(workspaceRoot: string): string {
|
||||
return path.join(normalizeWorkspaceRoot(workspaceRoot), TRASH_DIR);
|
||||
}
|
||||
|
||||
function trashIndexAbs(): string {
|
||||
return path.join(trashDirAbs(), TRASH_INDEX);
|
||||
function trashIndexAbs(workspaceRoot: string): string {
|
||||
return path.join(trashDirAbs(workspaceRoot), TRASH_INDEX);
|
||||
}
|
||||
|
||||
async function ensureTrashInfra(): Promise<void> {
|
||||
async function ensureTrashInfra(workspaceRoot: string): Promise<void> {
|
||||
const trashDir = trashDirAbs(workspaceRoot);
|
||||
|
||||
try {
|
||||
await fs.mkdir(trashDirAbs(), { recursive: true });
|
||||
await fs.mkdir(trashDir, { recursive: true });
|
||||
} catch {
|
||||
throw new FileOpError(422, 'trash_path_conflict', 'Reserved .trash path is not a directory');
|
||||
}
|
||||
|
||||
const trashStat = await fs.stat(trashDirAbs()).catch(() => null);
|
||||
const trashStat = await fs.stat(trashDir).catch(() => null);
|
||||
if (!trashStat || !trashStat.isDirectory()) {
|
||||
throw new FileOpError(422, 'trash_path_conflict', 'Reserved .trash path is not a directory');
|
||||
}
|
||||
|
||||
if (!(await exists(trashIndexAbs()))) {
|
||||
await fs.writeFile(trashIndexAbs(), JSON.stringify(EMPTY_INDEX, null, 2) + '\n', 'utf-8');
|
||||
const indexPath = trashIndexAbs(workspaceRoot);
|
||||
if (!(await exists(indexPath))) {
|
||||
await fs.writeFile(indexPath, JSON.stringify(EMPTY_INDEX, null, 2) + '\n', 'utf-8');
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -206,9 +205,9 @@ function isValidTrashIndexItem(item: unknown): item is TrashIndexItem {
|
|||
);
|
||||
}
|
||||
|
||||
async function readTrashIndex(): Promise<TrashIndexDoc> {
|
||||
async function readTrashIndex(workspaceRoot: string): Promise<TrashIndexDoc> {
|
||||
try {
|
||||
const raw = await fs.readFile(trashIndexAbs(), 'utf-8');
|
||||
const raw = await fs.readFile(trashIndexAbs(workspaceRoot), 'utf-8');
|
||||
const parsed = JSON.parse(raw) as Partial<TrashIndexDoc>;
|
||||
if (parsed && parsed.version === 1 && parsed.items && typeof parsed.items === 'object') {
|
||||
const validItems: Record<string, TrashIndexItem> = {};
|
||||
|
|
@ -228,8 +227,8 @@ async function readTrashIndex(): Promise<TrashIndexDoc> {
|
|||
}
|
||||
}
|
||||
|
||||
async function writeTrashIndex(index: TrashIndexDoc): Promise<void> {
|
||||
const indexPath = trashIndexAbs();
|
||||
async function writeTrashIndex(workspaceRoot: string, index: TrashIndexDoc): Promise<void> {
|
||||
const indexPath = trashIndexAbs(workspaceRoot);
|
||||
const dirPath = path.dirname(indexPath);
|
||||
const tempPath = path.join(dirPath, `${TRASH_INDEX}.${process.pid}.${Date.now()}.${randomId()}.tmp`);
|
||||
const payload = JSON.stringify(index, null, 2) + '\n';
|
||||
|
|
@ -264,7 +263,11 @@ function randomId(): string {
|
|||
return crypto.randomBytes(4).toString('hex');
|
||||
}
|
||||
|
||||
async function buildUniqueTrashTarget(sourceAbs: string, sourceIsDirectory: boolean): Promise<string> {
|
||||
async function buildUniqueTrashTarget(
|
||||
workspaceRoot: string,
|
||||
sourceAbs: string,
|
||||
sourceIsDirectory: boolean,
|
||||
): Promise<string> {
|
||||
const base = path.basename(sourceAbs);
|
||||
const parsed = path.parse(base);
|
||||
|
||||
|
|
@ -276,7 +279,7 @@ async function buildUniqueTrashTarget(sourceAbs: string, sourceIsDirectory: bool
|
|||
? `${parsed.name}--${id}${parsed.ext}`
|
||||
: `${base}--${id}`;
|
||||
|
||||
const candidateAbs = path.join(trashDirAbs(), candidateName);
|
||||
const candidateAbs = path.join(trashDirAbs(workspaceRoot), candidateName);
|
||||
if (!(await exists(candidateAbs))) {
|
||||
return candidateAbs;
|
||||
}
|
||||
|
|
@ -285,14 +288,18 @@ async function buildUniqueTrashTarget(sourceAbs: string, sourceIsDirectory: bool
|
|||
throw new FileOpError(500, 'trash_name_generation_failed', 'Failed to allocate trash path');
|
||||
}
|
||||
|
||||
async function updateTrashIndexAfterMove(fromRel: string, toRel: string): Promise<void> {
|
||||
async function updateTrashIndexAfterMove(
|
||||
workspaceRoot: string,
|
||||
fromRel: string,
|
||||
toRel: string,
|
||||
): Promise<void> {
|
||||
const fromInTrash = isInTrash(fromRel);
|
||||
const toInTrash = isInTrash(toRel);
|
||||
|
||||
if (!fromInTrash && !toInTrash) return;
|
||||
|
||||
await ensureTrashInfra();
|
||||
const index = await readTrashIndex();
|
||||
await ensureTrashInfra(workspaceRoot);
|
||||
const index = await readTrashIndex(workspaceRoot);
|
||||
|
||||
// Move/rename inside trash => rename key.
|
||||
if (fromInTrash && toInTrash) {
|
||||
|
|
@ -300,7 +307,7 @@ async function updateTrashIndexAfterMove(fromRel: string, toRel: string): Promis
|
|||
if (item) {
|
||||
delete index.items[fromRel];
|
||||
index.items[toRel] = item;
|
||||
await writeTrashIndex(index);
|
||||
await writeTrashIndex(workspaceRoot, index);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
|
@ -309,146 +316,154 @@ async function updateTrashIndexAfterMove(fromRel: string, toRel: string): Promis
|
|||
if (fromInTrash && !toInTrash) {
|
||||
if (index.items[fromRel]) {
|
||||
delete index.items[fromRel];
|
||||
await writeTrashIndex(index);
|
||||
await writeTrashIndex(workspaceRoot, index);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
export async function renameEntry(params: { path: string; newName: string }): Promise<FileOpResult> {
|
||||
export async function renameEntry(params: {
|
||||
workspaceRoot: string;
|
||||
sourceAbs: string;
|
||||
newName: string;
|
||||
}): Promise<FileOpResult> {
|
||||
return withFileOpsLock(async () => {
|
||||
assertValidNewName(params.newName);
|
||||
|
||||
const sourceAbs = await resolveExistingPathOrThrow(params.path);
|
||||
const sourceRel = toWorkspaceRelative(sourceAbs);
|
||||
const workspaceRoot = normalizeWorkspaceRoot(params.workspaceRoot);
|
||||
const sourceRel = toWorkspaceRelative(params.sourceAbs, workspaceRoot);
|
||||
assertNotProtected(sourceRel);
|
||||
|
||||
await statOrThrow(sourceAbs);
|
||||
await statOrThrow(params.sourceAbs);
|
||||
|
||||
const targetAbs = await resolvePathAllowNewOrThrow(
|
||||
workspaceRoot,
|
||||
toPosix(path.join(path.dirname(sourceRel), params.newName.trim())),
|
||||
);
|
||||
const targetRel = toWorkspaceRelative(targetAbs);
|
||||
const targetRel = toWorkspaceRelative(targetAbs, workspaceRoot);
|
||||
assertNotProtectedTarget(targetRel);
|
||||
|
||||
if (sourceAbs === targetAbs) {
|
||||
if (params.sourceAbs === targetAbs) {
|
||||
return { from: sourceRel, to: targetRel };
|
||||
}
|
||||
|
||||
await assertTargetNotExists(targetAbs);
|
||||
await fs.rename(sourceAbs, targetAbs);
|
||||
await updateTrashIndexAfterMove(sourceRel, targetRel);
|
||||
await fs.rename(params.sourceAbs, targetAbs);
|
||||
await updateTrashIndexAfterMove(workspaceRoot, sourceRel, targetRel);
|
||||
|
||||
return { from: sourceRel, to: targetRel };
|
||||
});
|
||||
}
|
||||
|
||||
export async function moveEntry(params: { sourcePath: string; targetDirPath: string }): Promise<FileOpResult> {
|
||||
export async function moveEntry(params: {
|
||||
workspaceRoot: string;
|
||||
sourceAbs: string;
|
||||
targetDirAbs?: string;
|
||||
}): Promise<FileOpResult> {
|
||||
return withFileOpsLock(async () => {
|
||||
const sourceAbs = await resolveExistingPathOrThrow(params.sourcePath);
|
||||
const sourceRel = toWorkspaceRelative(sourceAbs);
|
||||
const workspaceRoot = normalizeWorkspaceRoot(params.workspaceRoot);
|
||||
const sourceRel = toWorkspaceRelative(params.sourceAbs, workspaceRoot);
|
||||
assertNotProtected(sourceRel);
|
||||
|
||||
const sourceStat = await statOrThrow(sourceAbs);
|
||||
|
||||
let targetDirAbs: string;
|
||||
if (!params.targetDirPath) {
|
||||
targetDirAbs = workspaceRoot();
|
||||
} else {
|
||||
targetDirAbs = await resolveExistingPathOrThrow(params.targetDirPath);
|
||||
}
|
||||
const sourceStat = await statOrThrow(params.sourceAbs);
|
||||
const targetDirAbs = params.targetDirAbs || workspaceRoot;
|
||||
// Validation-only: this throws if the caller tries to move outside the workspace.
|
||||
toWorkspaceRelative(targetDirAbs, workspaceRoot);
|
||||
|
||||
const targetDirStat = await statOrThrow(targetDirAbs);
|
||||
if (!targetDirStat.isDirectory()) {
|
||||
throw new FileOpError(400, 'target_not_directory', 'Target must be a directory');
|
||||
}
|
||||
|
||||
const targetAbs = path.join(targetDirAbs, path.basename(sourceAbs));
|
||||
const targetRel = toWorkspaceRelative(targetAbs);
|
||||
const targetAbs = path.join(targetDirAbs, path.basename(params.sourceAbs));
|
||||
const targetRel = toWorkspaceRelative(targetAbs, workspaceRoot);
|
||||
assertNotProtectedTarget(targetRel);
|
||||
|
||||
// Allow moves to .trash in custom workspaces (treated as regular directory)
|
||||
if (!isInTrash(sourceRel) && isInTrash(targetRel)) {
|
||||
const customRoot = config.fileBrowserRoot;
|
||||
if (customRoot && customRoot.trim() !== '') {
|
||||
// Custom workspace: allow .trash moves
|
||||
} else {
|
||||
const customRoot = (config.fileBrowserRoot || '').trim();
|
||||
if (!customRoot) {
|
||||
throw new FileOpError(422, 'use_trash_api', 'Use the trash action for deleting items');
|
||||
}
|
||||
}
|
||||
|
||||
if (sourceAbs === targetAbs) {
|
||||
if (params.sourceAbs === targetAbs) {
|
||||
return { from: sourceRel, to: targetRel };
|
||||
}
|
||||
|
||||
assertNotMovingDirIntoSelf(sourceAbs, targetAbs, sourceStat.isDirectory());
|
||||
assertNotMovingDirIntoSelf(params.sourceAbs, targetAbs, sourceStat.isDirectory());
|
||||
await assertTargetNotExists(targetAbs);
|
||||
|
||||
await fs.rename(sourceAbs, targetAbs);
|
||||
await updateTrashIndexAfterMove(sourceRel, targetRel);
|
||||
await fs.rename(params.sourceAbs, targetAbs);
|
||||
await updateTrashIndexAfterMove(workspaceRoot, sourceRel, targetRel);
|
||||
|
||||
return { from: sourceRel, to: targetRel };
|
||||
});
|
||||
}
|
||||
|
||||
export async function trashEntry(params: { path: string }): Promise<FileOpResult & { undoTtlMs: number }> {
|
||||
export async function trashEntry(params: {
|
||||
workspaceRoot: string;
|
||||
sourceAbs: string;
|
||||
}): Promise<FileOpResult & { undoTtlMs: number }> {
|
||||
return withFileOpsLock(async () => {
|
||||
const sourceAbs = await resolveExistingPathOrThrow(params.path);
|
||||
const sourceRel = toWorkspaceRelative(sourceAbs);
|
||||
const workspaceRoot = normalizeWorkspaceRoot(params.workspaceRoot);
|
||||
const sourceRel = toWorkspaceRelative(params.sourceAbs, workspaceRoot);
|
||||
|
||||
assertNotProtected(sourceRel);
|
||||
if (isInTrash(sourceRel)) {
|
||||
throw new FileOpError(422, 'already_in_trash', 'Path is already in trash');
|
||||
}
|
||||
|
||||
const sourceStat = await statOrThrow(sourceAbs);
|
||||
const sourceStat = await statOrThrow(params.sourceAbs);
|
||||
|
||||
await ensureTrashInfra();
|
||||
const targetAbs = await buildUniqueTrashTarget(sourceAbs, sourceStat.isDirectory());
|
||||
const targetRel = toWorkspaceRelative(targetAbs);
|
||||
await ensureTrashInfra(workspaceRoot);
|
||||
const targetAbs = await buildUniqueTrashTarget(workspaceRoot, params.sourceAbs, sourceStat.isDirectory());
|
||||
const targetRel = toWorkspaceRelative(targetAbs, workspaceRoot);
|
||||
|
||||
await fs.rename(sourceAbs, targetAbs);
|
||||
await fs.rename(params.sourceAbs, targetAbs);
|
||||
|
||||
const index = await readTrashIndex();
|
||||
const index = await readTrashIndex(workspaceRoot);
|
||||
index.items[targetRel] = {
|
||||
id: randomId(),
|
||||
originalPath: sourceRel,
|
||||
deletedAtMs: Date.now(),
|
||||
type: sourceStat.isDirectory() ? 'directory' : 'file',
|
||||
};
|
||||
await writeTrashIndex(index);
|
||||
await writeTrashIndex(workspaceRoot, index);
|
||||
|
||||
return { from: sourceRel, to: targetRel, undoTtlMs: TRASH_UNDO_TTL_MS };
|
||||
});
|
||||
}
|
||||
|
||||
export async function restoreEntry(params: { path: string }): Promise<FileOpResult> {
|
||||
export async function restoreEntry(params: {
|
||||
workspaceRoot: string;
|
||||
sourceAbs: string;
|
||||
}): Promise<FileOpResult> {
|
||||
return withFileOpsLock(async () => {
|
||||
const sourceAbs = await resolveExistingPathOrThrow(params.path);
|
||||
const sourceRel = toWorkspaceRelative(sourceAbs);
|
||||
const workspaceRoot = normalizeWorkspaceRoot(params.workspaceRoot);
|
||||
const sourceRel = toWorkspaceRelative(params.sourceAbs, workspaceRoot);
|
||||
|
||||
if (!isInTrash(sourceRel) || isTrashRoot(sourceRel)) {
|
||||
throw new FileOpError(422, 'not_restorable', 'Only trashed items can be restored');
|
||||
}
|
||||
|
||||
await ensureTrashInfra();
|
||||
const index = await readTrashIndex();
|
||||
await ensureTrashInfra(workspaceRoot);
|
||||
const index = await readTrashIndex(workspaceRoot);
|
||||
const item = index.items[sourceRel];
|
||||
|
||||
if (!item) {
|
||||
throw new FileOpError(404, 'restore_metadata_missing', 'Restore metadata not found for this item');
|
||||
}
|
||||
|
||||
const targetAbs = await resolvePathAllowNewOrThrow(item.originalPath);
|
||||
const targetRel = toWorkspaceRelative(targetAbs);
|
||||
const targetAbs = await resolvePathAllowNewOrThrow(workspaceRoot, item.originalPath);
|
||||
const targetRel = toWorkspaceRelative(targetAbs, workspaceRoot);
|
||||
assertNotProtectedTarget(targetRel);
|
||||
|
||||
await assertTargetNotExists(targetAbs);
|
||||
await fs.mkdir(path.dirname(targetAbs), { recursive: true });
|
||||
await fs.rename(sourceAbs, targetAbs);
|
||||
await fs.rename(params.sourceAbs, targetAbs);
|
||||
|
||||
delete index.items[sourceRel];
|
||||
await writeTrashIndex(index);
|
||||
await writeTrashIndex(workspaceRoot, index);
|
||||
|
||||
return { from: sourceRel, to: targetRel };
|
||||
});
|
||||
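The file-ops functions now take an explicit workspaceRoot plus an already-resolved absolute source path instead of re-resolving a relative path internally. A hedged calling sketch; the module path and directory layout are assumptions.
// Illustrative only: callers are expected to validate/resolve absolute paths first.
import path from 'node:path';
import { trashEntry, restoreEntry } from './file-ops.js';
const workspaceRoot = '/srv/agents/main'; // assumed workspace root
const sourceAbs = path.join(workspaceRoot, 'drafts/old-plan.md');
// Move into .trash; the result carries the trash-relative target and the undo window.
const trashed = await trashEntry({ workspaceRoot, sourceAbs });
console.log(trashed.to, trashed.undoTtlMs);
// Restore using the absolute path of the trashed item; `to` is the original location.
const restored = await restoreEntry({
  workspaceRoot,
  sourceAbs: path.join(workspaceRoot, trashed.to),
});
console.log(restored.to);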
|
|
|
|||
|
|
@ -38,14 +38,20 @@ const BINARY_EXTENSIONS = new Set([
|
|||
const EMPTY_EXCLUDED_NAMES = new Set<string>();
|
||||
const EMPTY_EXCLUDED_PATTERNS: RegExp[] = [];
|
||||
|
||||
export interface ResolveWorkspacePathOptions {
|
||||
allowNonExistent?: boolean;
|
||||
}
|
||||
|
||||
/** Get exclusion names based on current config state */
|
||||
function getExcludedNames(): Set<string> {
|
||||
return config.fileBrowserRoot && config.fileBrowserRoot.trim() !== '' ? EMPTY_EXCLUDED_NAMES : DEFAULT_EXCLUDED_NAMES;
|
||||
const customRoot = (config.fileBrowserRoot || '').trim();
|
||||
return customRoot ? EMPTY_EXCLUDED_NAMES : DEFAULT_EXCLUDED_NAMES;
|
||||
}
|
||||
|
||||
/** Get exclusion patterns based on current config state */
|
||||
function getExcludedPatterns(): RegExp[] {
|
||||
return config.fileBrowserRoot && config.fileBrowserRoot.trim() !== '' ? EMPTY_EXCLUDED_PATTERNS : DEFAULT_EXCLUDED_PATTERNS;
|
||||
const customRoot = (config.fileBrowserRoot || '').trim();
|
||||
return customRoot ? EMPTY_EXCLUDED_PATTERNS : DEFAULT_EXCLUDED_PATTERNS;
|
||||
}
|
||||
|
||||
/** Check if a file/directory name should be excluded from the tree. */
|
||||
|
|
@ -54,7 +60,7 @@ export function isExcluded(name: string): boolean {
|
|||
const excludedPatterns = getExcludedPatterns();
|
||||
|
||||
if (excludedNames.has(name)) return true;
|
||||
return excludedPatterns.some(p => p.test(name));
|
||||
return excludedPatterns.some((pattern) => pattern.test(name));
|
||||
}
|
||||
|
||||
/** Check if a file extension indicates binary content. */
|
||||
|
|
@ -64,9 +70,13 @@ export function isBinary(name: string): boolean {
|
|||
|
||||
// ── Workspace root ───────────────────────────────────────────────────
|
||||
|
||||
/** Resolve the workspace root directory. Uses FILE_BROWSER_ROOT if set and valid, otherwise parent of MEMORY.md. */
|
||||
export function getWorkspaceRoot(): string {
|
||||
const customRoot = config.fileBrowserRoot.trim();
|
||||
/** Resolve the workspace root directory. Uses the explicit root if provided, otherwise FILE_BROWSER_ROOT or parent of MEMORY.md. */
|
||||
export function getWorkspaceRoot(workspaceRoot?: string): string {
|
||||
if (workspaceRoot && workspaceRoot.trim()) {
|
||||
return path.resolve(workspaceRoot);
|
||||
}
|
||||
|
||||
const customRoot = (config.fileBrowserRoot || '').trim();
|
||||
return customRoot ? path.resolve(customRoot) : path.dirname(config.memoryPath);
|
||||
}
|
||||
|
||||
|
|
@ -76,22 +86,19 @@ export function getWorkspaceRoot(): string {
|
|||
export const MAX_FILE_SIZE = 1_048_576;
|
||||
|
||||
/**
|
||||
* Validate and resolve a relative path to an absolute path within the workspace.
|
||||
*
|
||||
* Returns the resolved absolute path, or `null` if:
|
||||
* - The path escapes the workspace root (traversal)
|
||||
* - The path resolves through a symlink to outside the workspace
|
||||
* - The path is excluded
|
||||
*
|
||||
* For write operations where the file may not exist yet, the parent
|
||||
* directory is validated instead.
|
||||
* Validate and resolve a relative path to an absolute path within an explicit workspace root.
|
||||
*/
|
||||
export async function resolveWorkspacePath(
|
||||
export async function resolveWorkspacePathForRoot(
|
||||
workspaceRoot: string,
|
||||
relativePath: string,
|
||||
options?: { allowNonExistent?: boolean },
|
||||
options?: ResolveWorkspacePathOptions,
|
||||
): Promise<string | null> {
|
||||
const root = getWorkspaceRoot();
|
||||
const root = getWorkspaceRoot(workspaceRoot);
|
||||
const rootPrefix = root.endsWith(path.sep) ? root : root + path.sep;
|
||||
const realRoot = await fs.realpath(root).catch(() => root);
|
||||
const realRootPrefix = realRoot.endsWith(path.sep) ? realRoot : realRoot + path.sep;
|
||||
const isWithinLexicalRoot = (candidate: string) => candidate === root || candidate.startsWith(rootPrefix);
|
||||
const isWithinRealRoot = (candidate: string) => candidate === realRoot || candidate.startsWith(realRootPrefix);
|
||||
|
||||
// Block obvious traversal attempts
|
||||
const normalized = path.normalize(relativePath);
|
||||
|
|
@ -101,38 +108,62 @@ export async function resolveWorkspacePath(
|
|||
|
||||
// Check each path segment for exclusions
|
||||
const segments = normalized.split(path.sep);
|
||||
if (segments.some(seg => seg && isExcluded(seg))) {
|
||||
if (segments.some((segment) => segment && isExcluded(segment))) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const resolved = path.resolve(root, normalized);
|
||||
|
||||
// Must be within workspace root
|
||||
if (!resolved.startsWith(rootPrefix) && resolved !== root) {
|
||||
if (!isWithinLexicalRoot(resolved)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Resolve symlinks and re-check
|
||||
try {
|
||||
const real = await fs.realpath(resolved);
|
||||
if (!real.startsWith(rootPrefix) && real !== root) {
|
||||
if (!isWithinRealRoot(real)) {
|
||||
return null;
|
||||
}
|
||||
return real;
|
||||
return resolved;
|
||||
} catch {
|
||||
// File doesn't exist
|
||||
if (!options?.allowNonExistent) return null;
|
||||
|
||||
// For new files, validate the parent directory
|
||||
const parent = path.dirname(resolved);
|
||||
try {
|
||||
const realParent = await fs.realpath(parent);
|
||||
if (!realParent.startsWith(rootPrefix) && realParent !== root) {
|
||||
return null;
|
||||
// Walk up until we find an existing ancestor. This allows creating the
|
||||
// first file in a fresh workspace, or nested paths whose parents will be
|
||||
// created later via mkdir({ recursive: true }).
|
||||
let current = path.dirname(resolved);
|
||||
while (current !== root) {
|
||||
try {
|
||||
const realCurrent = await fs.realpath(current);
|
||||
if (!isWithinRealRoot(realCurrent)) {
|
||||
return null;
|
||||
}
|
||||
return resolved;
|
||||
} catch {
|
||||
const next = path.dirname(current);
|
||||
if (next === current) {
|
||||
return null;
|
||||
}
|
||||
current = next;
|
||||
}
|
||||
return resolved;
|
||||
} catch {
|
||||
}
|
||||
|
||||
if (!isWithinRealRoot(realRoot)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return resolved;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate and resolve a relative path to an absolute path within the default workspace.
|
||||
*/
|
||||
export async function resolveWorkspacePath(
|
||||
relativePath: string,
|
||||
options?: ResolveWorkspacePathOptions,
|
||||
): Promise<string | null> {
|
||||
return resolveWorkspacePathForRoot(getWorkspaceRoot(), relativePath, options);
|
||||
}
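A minimal sketch of the root-scoped resolver introduced above. The root and file names are made up, and whether node_modules is excluded depends on the configured exclusion lists.
// Illustrative only; mirrors the traversal/symlink/exclusion checks above.
import { resolveWorkspacePathForRoot } from './file-utils.js';
const root = '/srv/agents/research';
// Existing file inside the root resolves to an absolute path.
await resolveWorkspacePathForRoot(root, 'notes/today.md');
// Traversal attempts and excluded segments return null instead of throwing.
await resolveWorkspacePathForRoot(root, '../etc/passwd');           // null
await resolveWorkspacePathForRoot(root, 'node_modules/pkg/x.js');   // null when node_modules is excluded
// New files are allowed as long as some existing ancestor stays inside the root.
await resolveWorkspacePathForRoot(root, 'drafts/new/plan.md', { allowNonExistent: true });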
|
||||
|
|
|
|||
136
server/lib/file-watcher.test.ts
Normal file
@ -0,0 +1,136 @@
|
|||
/** Tests for root workspace watcher discovery. */
|
||||
import path from 'node:path';
|
||||
import { afterEach, describe, expect, it, vi } from 'vitest';
|
||||
|
||||
type WatchCallback = (eventType: string, filename: string | Buffer | null) => void;
|
||||
type WatchRecord = { target: string; callback: WatchCallback; close: ReturnType<typeof vi.fn> };
|
||||
|
||||
const runtime = vi.hoisted(() => ({
|
||||
configPath: '/tmp/home/.openclaw/openclaw.json',
|
||||
existing: new Set<string>(),
|
||||
watched: [] as WatchRecord[],
|
||||
listConfiguredAgentWorkspaces: vi.fn(() => [] as Array<{ agentId: string; workspaceRoot: string }>),
|
||||
}));
|
||||
|
||||
vi.mock('node:fs', async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import('node:fs')>();
|
||||
const mock = {
|
||||
...actual,
|
||||
existsSync: (target: actual.PathLike) => runtime.existing.has(String(target)),
|
||||
readdirSync: vi.fn(() => []),
|
||||
watch: ((target: actual.PathLike, optionsOrListener: unknown, maybeListener?: unknown) => {
|
||||
const callback = (typeof optionsOrListener === 'function' ? optionsOrListener : maybeListener) as WatchCallback;
|
||||
const close = vi.fn();
|
||||
runtime.watched.push({ target: String(target), callback, close });
|
||||
return { close } as unknown as actual.FSWatcher;
|
||||
}) satisfies typeof actual.watch,
|
||||
};
|
||||
return { ...mock, default: mock };
|
||||
});
|
||||
|
||||
vi.mock('../routes/events.js', () => ({
|
||||
broadcast: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock('./config.js', () => ({
|
||||
config: {
|
||||
home: '/tmp/home',
|
||||
workspaceWatchRecursive: false,
|
||||
memoryPath: '/tmp/home/workspace/MEMORY.md',
|
||||
memoryDir: '/tmp/home/workspace/memory',
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock('./agent-workspace.js', () => ({
|
||||
resolveAgentWorkspace: (agentId?: string) => {
|
||||
const normalized = !agentId || agentId === 'main' ? 'main' : agentId;
|
||||
const workspaceRoot = normalized === 'main'
|
||||
? '/tmp/home/workspace'
|
||||
: path.join('/tmp/home', `workspace-${normalized}`);
|
||||
return {
|
||||
agentId: normalized,
|
||||
workspaceRoot,
|
||||
memoryPath: path.join(workspaceRoot, 'MEMORY.md'),
|
||||
memoryDir: path.join(workspaceRoot, 'memory'),
|
||||
};
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock('./file-utils.js', () => ({
|
||||
isBinary: () => false,
|
||||
isExcluded: () => false,
|
||||
}));
|
||||
|
||||
vi.mock('./openclaw-config.js', () => ({
|
||||
listConfiguredAgentWorkspaces: (...args: unknown[]) => runtime.listConfiguredAgentWorkspaces(...args),
|
||||
resolveOpenClawConfigPath: () => runtime.configPath,
|
||||
}));
|
||||
|
||||
vi.mock('./workspace-detect.js', () => ({
|
||||
isWorkspaceLocal: vi.fn(async () => true),
|
||||
}));
|
||||
|
||||
async function loadWatcherModule() {
|
||||
vi.resetModules();
|
||||
return import('./file-watcher.js');
|
||||
}
|
||||
|
||||
afterEach(async () => {
|
||||
const mod = await loadWatcherModule();
|
||||
mod.stopFileWatcher();
|
||||
runtime.watched = [];
|
||||
runtime.existing.clear();
|
||||
runtime.listConfiguredAgentWorkspaces.mockReset();
|
||||
runtime.listConfiguredAgentWorkspaces.mockReturnValue([]);
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('startFileWatcher', () => {
|
||||
it('watches the custom config directory when OPENCLAW_CONFIG_PATH points outside ~/.openclaw', async () => {
|
||||
runtime.configPath = '/tmp/custom-configs/nerve-openclaw.json';
|
||||
runtime.existing = new Set(['/tmp/home/.openclaw', '/tmp/custom-configs']);
|
||||
|
||||
const mod = await loadWatcherModule();
|
||||
await mod.startFileWatcher();
|
||||
|
||||
expect(runtime.watched.map((entry) => entry.target).sort()).toEqual([
|
||||
'/tmp/custom-configs',
|
||||
'/tmp/home/.openclaw',
|
||||
]);
|
||||
|
||||
const initialCalls = runtime.listConfiguredAgentWorkspaces.mock.calls.length;
|
||||
runtime.watched.find((entry) => entry.target === '/tmp/custom-configs')
|
||||
?.callback('rename', 'nerve-openclaw.json');
|
||||
|
||||
expect(runtime.listConfiguredAgentWorkspaces.mock.calls.length).toBeGreaterThan(initialCalls);
|
||||
});
|
||||
|
||||
it('refreshes when a custom config basename changes inside ~/.openclaw', async () => {
|
||||
runtime.configPath = '/tmp/home/.openclaw/custom-openclaw.json';
|
||||
runtime.existing = new Set(['/tmp/home/.openclaw']);
|
||||
|
||||
const mod = await loadWatcherModule();
|
||||
await mod.startFileWatcher();
|
||||
|
||||
expect(runtime.watched.map((entry) => entry.target)).toEqual(['/tmp/home/.openclaw']);
|
||||
|
||||
const initialCalls = runtime.listConfiguredAgentWorkspaces.mock.calls.length;
|
||||
runtime.watched[0]?.callback('rename', 'custom-openclaw.json');
|
||||
|
||||
expect(runtime.listConfiguredAgentWorkspaces.mock.calls.length).toBeGreaterThan(initialCalls);
|
||||
});
|
||||
|
||||
it('still refreshes when legacy workspace-* directories change', async () => {
|
||||
runtime.configPath = '/tmp/custom-configs/nerve-openclaw.json';
|
||||
runtime.existing = new Set(['/tmp/home/.openclaw', '/tmp/custom-configs']);
|
||||
|
||||
const mod = await loadWatcherModule();
|
||||
await mod.startFileWatcher();
|
||||
|
||||
const initialCalls = runtime.listConfiguredAgentWorkspaces.mock.calls.length;
|
||||
runtime.watched.find((entry) => entry.target === '/tmp/home/.openclaw')
|
||||
?.callback('rename', 'workspace-research');
|
||||
|
||||
expect(runtime.listConfiguredAgentWorkspaces.mock.calls.length).toBeGreaterThan(initialCalls);
|
||||
});
|
||||
});
|
||||
|
|
@ -1,8 +1,8 @@
|
|||
/**
|
||||
* File watcher for workspace files.
|
||||
*
|
||||
* Watches `MEMORY.md`, the `memory/` directory, and optionally the full
|
||||
* workspace directory for changes. Broadcasts SSE events so the UI can react:
|
||||
* Watches each known workspace's `MEMORY.md`, `memory/` directory, and
|
||||
* optionally the full workspace directory. Broadcasts SSE events so the UI can react:
|
||||
* - `memory.changed` — for backward compat (memory panel refresh)
|
||||
* - `file.changed` — for file browser (editor reload / AI lock)
|
||||
*
|
||||
|
|
@ -11,22 +11,25 @@
|
|||
*/
|
||||
|
||||
import path from 'node:path';
|
||||
import { watch, type FSWatcher } from 'node:fs';
|
||||
import { existsSync } from 'node:fs';
|
||||
import { existsSync, readdirSync, watch, type FSWatcher } from 'node:fs';
|
||||
import { broadcast } from '../routes/events.js';
|
||||
import { config } from './config.js';
|
||||
import { isExcluded, isBinary } from './file-utils.js';
|
||||
import { resolveAgentWorkspace, type AgentWorkspace } from './agent-workspace.js';
|
||||
import { isBinary, isExcluded } from './file-utils.js';
|
||||
import { listConfiguredAgentWorkspaces, resolveOpenClawConfigPath } from './openclaw-config.js';
|
||||
import { isWorkspaceLocal } from './workspace-detect.js';
|
||||
|
||||
let memoryWatcher: FSWatcher | null = null;
|
||||
let memoryDirWatcher: FSWatcher | null = null;
|
||||
let workspaceWatcher: FSWatcher | null = null;
|
||||
const rootDirWatchers = new Map<string, FSWatcher>();
|
||||
const memoryWatchers = new Map<string, FSWatcher>();
|
||||
const memoryDirWatchers = new Map<string, FSWatcher>();
|
||||
const workspaceWatchers = new Map<string, FSWatcher>();
|
||||
|
||||
// Per-source debounce to avoid multiple events for single save
|
||||
// (separate timers so MEMORY.md changes don't suppress daily file changes)
|
||||
const lastBroadcastBySource = new Map<string, number>();
|
||||
const DEBOUNCE_MS = 500;
|
||||
|
||||
const MAX_SOURCES = 500;
|
||||
const WORKSPACE_PREFIX = 'workspace-';
|
||||
|
||||
function shouldBroadcast(source: string): boolean {
|
||||
const now = Date.now();
|
||||
|
|
@ -41,84 +44,222 @@ function shouldBroadcast(source: string): boolean {
|
|||
return true;
|
||||
}
|
||||
|
||||
function getWatchFilename(filename: string | Buffer | null): string | null {
|
||||
if (typeof filename === 'string') return filename;
|
||||
if (filename) return filename.toString();
|
||||
return null;
|
||||
}
|
||||
|
||||
function getScopedSourceKey(agentId: string, source: string): string {
|
||||
return `${agentId}:${source}`;
|
||||
}
|
||||
|
||||
function broadcastWorkspaceFileChanged(agentId: string, filePath: string): void {
|
||||
broadcast('file.changed', {
|
||||
path: filePath,
|
||||
agentId,
|
||||
});
|
||||
}
|
||||
|
||||
function broadcastWorkspaceMemoryChanged(agentId: string, file: string): void {
|
||||
broadcast('memory.changed', {
|
||||
source: 'file',
|
||||
file,
|
||||
agentId,
|
||||
});
|
||||
}
|
||||
|
||||
function discoverWorkspaces(): AgentWorkspace[] {
|
||||
const workspaces = new Map<string, AgentWorkspace>();
|
||||
const mainWorkspace = resolveAgentWorkspace('main');
|
||||
workspaces.set(mainWorkspace.agentId, mainWorkspace);
|
||||
|
||||
for (const configured of listConfiguredAgentWorkspaces()) {
|
||||
if (configured.agentId === 'main') continue;
|
||||
workspaces.set(configured.agentId, {
|
||||
agentId: configured.agentId,
|
||||
workspaceRoot: configured.workspaceRoot,
|
||||
memoryPath: path.join(configured.workspaceRoot, 'MEMORY.md'),
|
||||
memoryDir: path.join(configured.workspaceRoot, 'memory'),
|
||||
});
|
||||
}
|
||||
|
||||
const openclawDir = path.join(config.home, '.openclaw');
|
||||
if (!existsSync(openclawDir)) {
|
||||
return [...workspaces.values()];
|
||||
}
|
||||
|
||||
for (const entry of readdirSync(openclawDir, { withFileTypes: true })) {
|
||||
if (!entry.isDirectory() || !entry.name.startsWith(WORKSPACE_PREFIX)) continue;
|
||||
|
||||
const rawAgentId = entry.name.slice(WORKSPACE_PREFIX.length);
|
||||
if (!rawAgentId) continue;
|
||||
|
||||
try {
|
||||
const workspace = resolveAgentWorkspace(rawAgentId);
|
||||
workspaces.set(workspace.agentId, workspace);
|
||||
} catch {
|
||||
// Ignore directories that are not valid agent workspaces.
|
||||
}
|
||||
}
|
||||
|
||||
return [...workspaces.values()];
|
||||
}
|
||||
|
||||
function closeWatchers(watchers: Map<string, FSWatcher>, agentIds?: Set<string>): void {
|
||||
for (const [agentId, watcher] of watchers.entries()) {
|
||||
if (agentIds && agentIds.has(agentId)) continue;
|
||||
watcher.close();
|
||||
watchers.delete(agentId);
|
||||
}
|
||||
}
|
||||
|
||||
function watchWorkspaceMemoryFile(workspace: AgentWorkspace): void {
|
||||
if (memoryWatchers.has(workspace.agentId) || !existsSync(workspace.memoryPath)) return;
|
||||
|
||||
try {
|
||||
const watcher = watch(workspace.memoryPath, (eventType) => {
|
||||
if (eventType !== 'change') return;
|
||||
if (!shouldBroadcast(getScopedSourceKey(workspace.agentId, 'MEMORY.md'))) return;
|
||||
|
||||
console.log(`[file-watcher] ${workspace.agentId}: MEMORY.md changed`);
|
||||
broadcastWorkspaceMemoryChanged(workspace.agentId, 'MEMORY.md');
|
||||
broadcastWorkspaceFileChanged(workspace.agentId, 'MEMORY.md');
|
||||
});
|
||||
|
||||
memoryWatchers.set(workspace.agentId, watcher);
|
||||
console.log(`[file-watcher] Watching ${workspace.agentId}: MEMORY.md`);
|
||||
} catch (err) {
|
||||
console.error(`[file-watcher] Failed to watch ${workspace.agentId}: MEMORY.md:`, (err as Error).message);
|
||||
}
|
||||
}
|
||||
|
||||
function watchWorkspaceMemoryDir(workspace: AgentWorkspace): void {
|
||||
if (memoryDirWatchers.has(workspace.agentId) || !existsSync(workspace.memoryDir)) return;
|
||||
|
||||
try {
|
||||
const watcher = watch(workspace.memoryDir, (_eventType, filename) => {
|
||||
const file = getWatchFilename(filename);
|
||||
if (!file?.endsWith('.md')) return;
|
||||
if (!shouldBroadcast(getScopedSourceKey(workspace.agentId, `daily:${file}`))) return;
|
||||
|
||||
console.log(`[file-watcher] ${workspace.agentId}: ${file} changed`);
|
||||
broadcastWorkspaceMemoryChanged(workspace.agentId, file);
|
||||
broadcastWorkspaceFileChanged(workspace.agentId, `memory/${file}`);
|
||||
});
|
||||
|
||||
memoryDirWatchers.set(workspace.agentId, watcher);
|
||||
console.log(`[file-watcher] Watching ${workspace.agentId}: memory/ directory`);
|
||||
} catch (err) {
|
||||
console.error(`[file-watcher] Failed to watch ${workspace.agentId}: memory/:`, (err as Error).message);
|
||||
}
|
||||
}
|
||||
|
||||
function watchWorkspaceTree(workspace: AgentWorkspace): void {
|
||||
if (!config.workspaceWatchRecursive) return;
|
||||
if (workspaceWatchers.has(workspace.agentId) || !existsSync(workspace.workspaceRoot)) return;
|
||||
|
||||
try {
|
||||
const watcher = watch(workspace.workspaceRoot, { recursive: true }, (_eventType, filename) => {
|
||||
const file = getWatchFilename(filename);
|
||||
if (!file) return;
|
||||
|
||||
const normalized = file.replace(/\\/g, '/');
|
||||
const segments = normalized.split('/');
|
||||
if (segments.some(seg => seg && (isExcluded(seg) || seg.startsWith('.')))) return;
|
||||
if (isBinary(normalized)) return;
|
||||
|
||||
if (normalized === 'MEMORY.md' || normalized.startsWith('memory/')) return;
|
||||
if (!shouldBroadcast(getScopedSourceKey(workspace.agentId, `workspace:${normalized}`))) return;
|
||||
|
||||
console.log(`[file-watcher] ${workspace.agentId}: workspace ${normalized} changed`);
|
||||
broadcastWorkspaceFileChanged(workspace.agentId, normalized);
|
||||
});
|
||||
|
||||
workspaceWatchers.set(workspace.agentId, watcher);
|
||||
console.log(`[file-watcher] Watching ${workspace.agentId}: workspace directory (recursive)`);
|
||||
} catch (err) {
|
||||
console.warn(`[file-watcher] Recursive workspace watch failed for ${workspace.agentId}:`, (err as Error).message);
|
||||
console.warn('[file-watcher] File browser still works, use manual refresh for non-memory file updates.');
|
||||
}
|
||||
}
|
||||
|
||||
function refreshWorkspaceWatchers(): void {
|
||||
const workspaces = discoverWorkspaces();
|
||||
const activeAgentIds = new Set(workspaces.map((workspace) => workspace.agentId));
|
||||
|
||||
closeWatchers(memoryWatchers, activeAgentIds);
|
||||
closeWatchers(memoryDirWatchers, activeAgentIds);
|
||||
closeWatchers(workspaceWatchers, activeAgentIds);
|
||||
|
||||
for (const workspace of workspaces) {
|
||||
watchWorkspaceMemoryFile(workspace);
|
||||
watchWorkspaceMemoryDir(workspace);
|
||||
watchWorkspaceTree(workspace);
|
||||
}
|
||||
}
|
||||
|
||||
function startRootWorkspaceWatcher(): void {
|
||||
const openclawDir = path.join(config.home, '.openclaw');
|
||||
if (rootDirWatchers.size > 0) return;
|
||||
|
||||
try {
|
||||
const configPath = resolveOpenClawConfigPath();
|
||||
const configDir = path.dirname(configPath);
|
||||
const configBasename = path.basename(configPath);
|
||||
const watchTargets = new Set<string>();
|
||||
|
||||
if (existsSync(openclawDir)) watchTargets.add(openclawDir);
|
||||
if (existsSync(configDir)) watchTargets.add(configDir);
|
||||
|
||||
for (const watchTarget of watchTargets) {
|
||||
const watcher = watch(watchTarget, (_eventType, filename) => {
|
||||
const file = getWatchFilename(filename);
|
||||
if (!file) return;
|
||||
|
||||
const isConfigUpdate = watchTarget === configDir && file === configBasename;
|
||||
const isWorkspaceDiscovery = watchTarget === openclawDir && (
|
||||
file === 'workspace' ||
|
||||
file.startsWith(WORKSPACE_PREFIX)
|
||||
);
|
||||
|
||||
if (isConfigUpdate || isWorkspaceDiscovery) {
|
||||
refreshWorkspaceWatchers();
|
||||
}
|
||||
});
|
||||
|
||||
rootDirWatchers.set(watchTarget, watcher);
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn('[file-watcher] Failed to watch workspace root for new agent workspaces:', (err as Error).message);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Start watching workspace files for changes.
|
||||
* Call this during server startup.
|
||||
*
|
||||
* When the workspace is remote (NERVE_WORKSPACE_REMOTE=true or workspace
|
||||
* directory is not locally accessible), skips all file watchers since
|
||||
* there's nothing local to watch.
|
||||
*/
|
||||
export function startFileWatcher(): void {
|
||||
const workspaceRoot = path.dirname(config.memoryPath);
|
||||
export async function startFileWatcher(): Promise<void> {
|
||||
stopFileWatcher();
|
||||
|
||||
// Watch MEMORY.md
|
||||
if (existsSync(config.memoryPath)) {
|
||||
try {
|
||||
memoryWatcher = watch(config.memoryPath, (eventType) => {
|
||||
if (eventType === 'change' && shouldBroadcast('MEMORY.md')) {
|
||||
console.log('[file-watcher] MEMORY.md changed');
|
||||
broadcast('memory.changed', {
|
||||
source: 'file',
|
||||
file: 'MEMORY.md'
|
||||
});
|
||||
broadcast('file.changed', { path: 'MEMORY.md' });
|
||||
}
|
||||
});
|
||||
console.log('[file-watcher] Watching MEMORY.md');
|
||||
} catch (err) {
|
||||
console.error('[file-watcher] Failed to watch MEMORY.md:', (err as Error).message);
|
||||
}
|
||||
}
|
||||
|
||||
// Watch memory/ directory for daily files
|
||||
if (existsSync(config.memoryDir)) {
|
||||
try {
|
||||
memoryDirWatcher = watch(config.memoryDir, (eventType, filename) => {
|
||||
if (filename?.endsWith('.md') && shouldBroadcast(`daily:${filename}`)) {
|
||||
console.log(`[file-watcher] ${filename} changed`);
|
||||
broadcast('memory.changed', {
|
||||
source: 'file',
|
||||
file: filename
|
||||
});
|
||||
broadcast('file.changed', { path: `memory/${filename}` });
|
||||
}
|
||||
});
|
||||
console.log('[file-watcher] Watching memory/ directory');
|
||||
} catch (err) {
|
||||
console.error('[file-watcher] Failed to watch memory/:', (err as Error).message);
|
||||
}
|
||||
// Check if the main workspace is local before setting up watchers
|
||||
const mainWorkspace = resolveAgentWorkspace('main');
|
||||
const isLocal = await isWorkspaceLocal(mainWorkspace.workspaceRoot);
|
||||
|
||||
if (!isLocal) {
|
||||
console.log('[file-watcher] Workspace is remote — file watching disabled');
|
||||
return;
|
||||
}
|
||||
|
||||
// Watch entire workspace directory only when explicitly enabled.
|
||||
// Default is off to avoid inotify watcher exhaustion (ENOSPC) on large Linux workspaces.
|
||||
if (config.workspaceWatchRecursive) {
|
||||
if (existsSync(workspaceRoot)) {
|
||||
try {
|
||||
workspaceWatcher = watch(workspaceRoot, { recursive: true }, (_eventType, filename) => {
|
||||
if (!filename) return;
|
||||
refreshWorkspaceWatchers();
|
||||
startRootWorkspaceWatcher();
|
||||
|
||||
// Normalize path separators (Windows compat)
|
||||
const normalized = filename.replace(/\\/g, '/');
|
||||
|
||||
// Skip excluded directories/files and binaries
|
||||
const segments = normalized.split('/');
|
||||
if (segments.some(seg => seg && (isExcluded(seg) || seg.startsWith('.')))) return;
|
||||
if (isBinary(normalized)) return;
|
||||
|
||||
// Skip memory files — already handled by dedicated watchers above
|
||||
if (normalized === 'MEMORY.md' || normalized.startsWith('memory/')) return;
|
||||
|
||||
if (shouldBroadcast(`workspace:${normalized}`)) {
|
||||
console.log(`[file-watcher] workspace: ${normalized} changed`);
|
||||
broadcast('file.changed', { path: normalized });
|
||||
}
|
||||
});
|
||||
console.log('[file-watcher] Watching workspace directory (recursive)');
|
||||
} catch (err) {
|
||||
// recursive: true may not be supported on all Linux kernels
|
||||
console.warn('[file-watcher] Recursive workspace watch failed:', (err as Error).message);
|
||||
console.warn('[file-watcher] File browser still works — use manual refresh for non-memory file updates.');
|
||||
}
|
||||
}
|
||||
} else {
|
||||
console.log('[file-watcher] Workspace recursive watch disabled (default). Set NERVE_WATCH_WORKSPACE_RECURSIVE=true to re-enable SSE file.changed events outside memory/.');
|
||||
if (!config.workspaceWatchRecursive) {
|
||||
console.log('[file-watcher] Workspace recursive watch disabled via NERVE_WATCH_WORKSPACE_RECURSIVE=false. SSE file.changed events outside memory/ are off.');
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -127,16 +268,8 @@ export function startFileWatcher(): void {
|
|||
* Call this during graceful shutdown.
|
||||
*/
|
||||
export function stopFileWatcher(): void {
|
||||
if (memoryWatcher) {
|
||||
memoryWatcher.close();
|
||||
memoryWatcher = null;
|
||||
}
|
||||
if (memoryDirWatcher) {
|
||||
memoryDirWatcher.close();
|
||||
memoryDirWatcher = null;
|
||||
}
|
||||
if (workspaceWatcher) {
|
||||
workspaceWatcher.close();
|
||||
workspaceWatcher = null;
|
||||
}
|
||||
closeWatchers(rootDirWatchers);
|
||||
closeWatchers(memoryWatchers);
|
||||
closeWatchers(memoryDirWatchers);
|
||||
closeWatchers(workspaceWatchers);
|
||||
}
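Since startFileWatcher is now async (it checks whether the main workspace is local before wiring any watchers), server startup should await it. A small wiring sketch; the signal handling is an assumption, not part of this change.
// Illustrative startup/shutdown wiring.
import { startFileWatcher, stopFileWatcher } from './file-watcher.js';
await startFileWatcher(); // no watchers are created when the workspace is remote
process.on('SIGTERM', () => {
  stopFileWatcher();      // closes root, memory, memory-dir and workspace watchers
  process.exit(0);
});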
|
||||
|
|
|
|||
server/lib/gateway-rpc.test.ts (new file, 289 lines added)
@ -0,0 +1,289 @@
|
|||
/** Tests for the shared gateway RPC client (persistent WebSocket). */
|
||||
import { describe, it, expect, vi, beforeEach, afterEach, beforeAll, afterAll } from 'vitest';
|
||||
import { WebSocketServer } from 'ws';
|
||||
|
||||
// Mock config to point at our test server
|
||||
let testPort: number;
|
||||
vi.mock('./config.js', () => ({
|
||||
get config() {
|
||||
return {
|
||||
gatewayUrl: `http://127.0.0.1:${testPort}`,
|
||||
gatewayToken: 'test-token',
|
||||
port: 3080,
|
||||
publicOrigin: process.env.NERVE_PUBLIC_ORIGIN || '',
|
||||
};
|
||||
},
|
||||
}));
|
||||
|
||||
const { createDeviceBlockMock } = vi.hoisted(() => ({
|
||||
createDeviceBlockMock: vi.fn(({ nonce, clientId, clientMode, role, scopes, token }) => ({
|
||||
id: 'device-123',
|
||||
publicKey: 'pubkey-123',
|
||||
signature: `sig-${nonce}`,
|
||||
signedAt: 1234567890,
|
||||
nonce,
|
||||
_debug: { clientId, clientMode, role, scopes, token },
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock('./device-identity.js', () => ({
|
||||
createDeviceBlock: createDeviceBlockMock,
|
||||
}));
|
||||
|
||||
import {
|
||||
gatewayRpcCall,
|
||||
gatewayFilesList,
|
||||
gatewayFilesGet,
|
||||
gatewayFilesSet,
|
||||
} from './gateway-rpc.js';
|
||||
|
||||
let wss: WebSocketServer;
|
||||
|
||||
async function importFreshGatewayRpc() {
|
||||
for (const client of wss.clients) client.close();
|
||||
await new Promise((resolve) => setTimeout(resolve, 10));
|
||||
vi.resetModules();
|
||||
return await import('./gateway-rpc.js');
|
||||
}
|
||||
|
||||
describe('gateway-rpc (persistent WebSocket)', () => {
|
||||
/** Handler for incoming RPC method calls (after connect handshake) */
|
||||
let rpcHandler: (method: string, params: unknown) => unknown;
|
||||
let lastConnectParams: unknown = null;
|
||||
let lastRequestOrigin: string | undefined;
|
||||
let connectMode: 'accept' | 'reject' | 'close' = 'accept';
|
||||
|
||||
beforeAll(async () => {
|
||||
rpcHandler = () => ({});
|
||||
|
||||
wss = new WebSocketServer({ port: 0 });
|
||||
testPort = (wss.address() as { port: number }).port;
|
||||
|
||||
wss.on('connection', (ws, req) => {
|
||||
lastRequestOrigin = req.headers.origin;
|
||||
|
||||
// Send challenge immediately
|
||||
ws.send(JSON.stringify({
|
||||
type: 'event',
|
||||
event: 'connect.challenge',
|
||||
payload: { nonce: 'test-nonce', ts: Date.now() },
|
||||
}));
|
||||
|
||||
ws.on('message', (data) => {
|
||||
const msg = JSON.parse(data.toString());
|
||||
|
||||
if (msg.method === 'connect') {
|
||||
lastConnectParams = msg.params;
|
||||
if (connectMode === 'reject') {
|
||||
ws.send(JSON.stringify({ type: 'res', id: msg.id, ok: false, error: { message: 'connect rejected by test server' } }));
|
||||
return;
|
||||
}
|
||||
if (connectMode === 'close') {
|
||||
ws.close();
|
||||
return;
|
||||
}
|
||||
ws.send(JSON.stringify({ type: 'res', id: msg.id, ok: true, payload: {} }));
|
||||
return;
|
||||
}
|
||||
|
||||
// RPC call
|
||||
try {
|
||||
const result = rpcHandler(msg.method, msg.params);
|
||||
ws.send(JSON.stringify({ type: 'res', id: msg.id, ok: true, payload: result }));
|
||||
} catch (err) {
|
||||
ws.send(JSON.stringify({
|
||||
type: 'res', id: msg.id, ok: false,
|
||||
error: { message: (err as Error).message },
|
||||
}));
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
wss.close();
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
rpcHandler = () => ({});
|
||||
lastConnectParams = null;
|
||||
lastRequestOrigin = undefined;
|
||||
connectMode = 'accept';
|
||||
delete process.env.NERVE_PUBLIC_ORIGIN;
|
||||
delete process.env.ALLOWED_ORIGINS;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('gatewayRpcCall', () => {
|
||||
it('injects device identity into the gateway connect handshake', async () => {
|
||||
rpcHandler = () => ({ ok: true });
|
||||
|
||||
await gatewayRpcCall('test.method', { foo: 'bar' });
|
||||
|
||||
expect(createDeviceBlockMock).toHaveBeenCalledWith({
|
||||
clientId: 'openclaw-control-ui',
|
||||
clientMode: 'webchat',
|
||||
role: 'operator',
|
||||
scopes: ['operator.admin', 'operator.read', 'operator.write'],
|
||||
token: 'test-token',
|
||||
nonce: 'test-nonce',
|
||||
});
|
||||
expect(lastConnectParams).toMatchObject({
|
||||
client: {
|
||||
id: 'openclaw-control-ui',
|
||||
mode: 'webchat',
|
||||
},
|
||||
auth: { token: 'test-token' },
|
||||
device: {
|
||||
id: 'device-123',
|
||||
publicKey: 'pubkey-123',
|
||||
signature: 'sig-test-nonce',
|
||||
nonce: 'test-nonce',
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it('uses the configured public origin for the gateway websocket handshake', async () => {
|
||||
process.env.NERVE_PUBLIC_ORIGIN = 'https://192.168.192.252:3443';
|
||||
rpcHandler = () => ({ ok: true });
|
||||
|
||||
const { gatewayRpcCall } = await importFreshGatewayRpc();
|
||||
await gatewayRpcCall('test.method', { foo: 'bar' });
|
||||
|
||||
expect(lastRequestOrigin).toBe('https://192.168.192.252:3443');
|
||||
});
|
||||
|
||||
it('falls back to the first non-loopback allowed origin when no public origin is configured', async () => {
|
||||
process.env.ALLOWED_ORIGINS = 'http://127.0.0.1:3080, https://192.168.192.252:3443';
|
||||
rpcHandler = () => ({ ok: true });
|
||||
|
||||
const { gatewayRpcCall } = await importFreshGatewayRpc();
|
||||
await gatewayRpcCall('test.method', { foo: 'bar' });
|
||||
|
||||
expect(lastRequestOrigin).toBe('https://192.168.192.252:3443');
|
||||
});
|
||||
|
||||
it('falls back to localhost when no public or allowed origin is configured', async () => {
|
||||
rpcHandler = () => ({ ok: true });
|
||||
|
||||
const { gatewayRpcCall } = await importFreshGatewayRpc();
|
||||
await gatewayRpcCall('test.method', { foo: 'bar' });
|
||||
|
||||
expect(lastRequestOrigin).toBe('http://127.0.0.1:3080');
|
||||
});
|
||||
|
||||
it('sends RPC request and returns payload', async () => {
|
||||
rpcHandler = (method, params) => {
|
||||
expect(method).toBe('test.method');
|
||||
expect(params).toEqual({ foo: 'bar' });
|
||||
return { result: 'ok' };
|
||||
};
|
||||
|
||||
const result = await gatewayRpcCall('test.method', { foo: 'bar' });
|
||||
expect(result).toEqual({ result: 'ok' });
|
||||
});
|
||||
|
||||
it('rejects on RPC error response', async () => {
|
||||
rpcHandler = () => { throw new Error('not found'); };
|
||||
await expect(gatewayRpcCall('test.fail', {})).rejects.toThrow('not found');
|
||||
});
|
||||
|
||||
it('handles multiple sequential calls on the same connection', async () => {
|
||||
let callCount = 0;
|
||||
rpcHandler = () => {
|
||||
callCount++;
|
||||
return { n: callCount };
|
||||
};
|
||||
|
||||
const r1 = await gatewayRpcCall('call.one', {});
|
||||
const r2 = await gatewayRpcCall('call.two', {});
|
||||
expect(r1).toEqual({ n: 1 });
|
||||
expect(r2).toEqual({ n: 2 });
|
||||
});
|
||||
|
||||
it('handles concurrent calls', async () => {
|
||||
rpcHandler = (_method, params) => {
|
||||
return { echo: (params as Record<string, unknown>).value };
|
||||
};
|
||||
|
||||
const [r1, r2, r3] = await Promise.all([
|
||||
gatewayRpcCall('echo', { value: 'a' }),
|
||||
gatewayRpcCall('echo', { value: 'b' }),
|
||||
gatewayRpcCall('echo', { value: 'c' }),
|
||||
]);
|
||||
expect(r1).toEqual({ echo: 'a' });
|
||||
expect(r2).toEqual({ echo: 'b' });
|
||||
expect(r3).toEqual({ echo: 'c' });
|
||||
});
|
||||
|
||||
it('rejects when the gateway rejects the initial connect handshake', async () => {
|
||||
connectMode = 'reject';
|
||||
const { gatewayRpcCall } = await importFreshGatewayRpc();
|
||||
await expect(gatewayRpcCall('test.method', {})).rejects.toThrow('connect rejected by test server');
|
||||
});
|
||||
|
||||
it('rejects when the socket closes before connect completes', async () => {
|
||||
connectMode = 'close';
|
||||
const { gatewayRpcCall } = await importFreshGatewayRpc();
|
||||
await expect(gatewayRpcCall('test.method', {})).rejects.toThrow(/closed before connect completed/i);
|
||||
});
|
||||
});
|
||||
|
||||
describe('gatewayFilesList', () => {
|
||||
it('returns files from gateway response', async () => {
|
||||
const mockFiles = [
|
||||
{ name: 'SOUL.md', path: 'SOUL.md', missing: false, size: 100, updatedAtMs: 1000 },
|
||||
];
|
||||
rpcHandler = () => ({ files: mockFiles });
|
||||
|
||||
const result = await gatewayFilesList('main');
|
||||
expect(result).toEqual(mockFiles);
|
||||
});
|
||||
|
||||
it('returns empty array when no files', async () => {
|
||||
rpcHandler = () => ({});
|
||||
expect(await gatewayFilesList('main')).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('gatewayFilesGet', () => {
|
||||
it('extracts content from nested file field', async () => {
|
||||
rpcHandler = () => ({
|
||||
agentId: 'main',
|
||||
workspace: '/sandbox/.openclaw/workspace',
|
||||
file: { name: 'SOUL.md', missing: false, size: 7, updatedAtMs: 1000, content: '# Soul' },
|
||||
});
|
||||
|
||||
const result = await gatewayFilesGet('main', 'SOUL.md');
|
||||
expect(result?.content).toBe('# Soul');
|
||||
});
|
||||
|
||||
it('returns null for missing files', async () => {
|
||||
rpcHandler = () => ({ file: { name: 'X.md', missing: true } });
|
||||
expect(await gatewayFilesGet('main', 'X.md')).toBeNull();
|
||||
});
|
||||
|
||||
it('returns null on error', async () => {
|
||||
rpcHandler = () => { throw new Error('unsupported'); };
|
||||
expect(await gatewayFilesGet('main', 'bad.md')).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe('gatewayFilesSet', () => {
|
||||
it('sends correct params', async () => {
|
||||
let received: unknown;
|
||||
rpcHandler = (_m, p) => { received = p; return { ok: true }; };
|
||||
|
||||
await gatewayFilesSet('main', 'SOUL.md', '# New');
|
||||
expect(received).toEqual({ agentId: 'main', name: 'SOUL.md', content: '# New' });
|
||||
});
|
||||
|
||||
it('rejects on error', async () => {
|
||||
rpcHandler = () => { throw new Error('write failed'); };
|
||||
await expect(gatewayFilesSet('main', 'X', 'y')).rejects.toThrow('write failed');
|
||||
});
|
||||
});
|
||||
});
server/lib/gateway-rpc.ts (new file, 343 lines added)
@ -0,0 +1,343 @@
|
|||
/**
|
||||
* Shared gateway RPC client.
|
||||
*
|
||||
* Makes direct WebSocket RPC calls to the OpenClaw gateway for workspace
|
||||
* file access. Uses a single persistent connection that multiplexes all
|
||||
* RPC calls, avoiding the overhead and session conflicts of per-request
|
||||
* connections.
|
||||
*
|
||||
* Used as a fallback when the workspace directory is not locally accessible
|
||||
* (e.g. Nerve on DGX host, workspace in OpenShell sandbox).
|
||||
* @module
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'node:crypto';
|
||||
import { WebSocket } from 'ws';
|
||||
import { config } from './config.js';
|
||||
import { createDeviceBlock } from './device-identity.js';
|
||||
|
||||
// ── Types ────────────────────────────────────────────────────────────
|
||||
|
||||
export interface GatewayFileEntry {
|
||||
name: string;
|
||||
path: string;
|
||||
missing: boolean;
|
||||
size: number;
|
||||
updatedAtMs: number;
|
||||
}
|
||||
|
||||
export interface GatewayFileWithContent extends GatewayFileEntry {
|
||||
content: string;
|
||||
}
|
||||
|
||||
// ── Persistent connection ────────────────────────────────────────────
|
||||
|
||||
const DEFAULT_TIMEOUT_MS = 10_000;
|
||||
const RECONNECT_DELAY_MS = 3_000;
|
||||
|
||||
/** Derive the WebSocket URL from the HTTP gateway URL. */
|
||||
function getGatewayWsUrl(): string {
|
||||
const httpUrl = config.gatewayUrl;
|
||||
let wsUrl: string;
|
||||
if (httpUrl.startsWith('ws://') || httpUrl.startsWith('wss://')) {
|
||||
wsUrl = httpUrl;
|
||||
} else {
|
||||
wsUrl = httpUrl.replace(/^http/, 'ws');
|
||||
}
|
||||
if (!wsUrl.endsWith('/ws')) {
|
||||
wsUrl = wsUrl.replace(/\/$/, '') + '/ws';
|
||||
}
|
||||
return wsUrl;
|
||||
}
|
||||
|
||||
interface PendingCall {
|
||||
resolve: (value: unknown) => void;
|
||||
reject: (err: Error) => void;
|
||||
timer: ReturnType<typeof setTimeout>;
|
||||
}
|
||||
|
||||
let ws: WebSocket | null = null;
|
||||
let connected = false;
|
||||
let connecting = false;
|
||||
const pending = new Map<string, PendingCall>();
|
||||
let reconnectTimer: ReturnType<typeof setTimeout> | null = null;
|
||||
let connectPromise: Promise<void> | null = null;
|
||||
let connectResolve: (() => void) | null = null;
|
||||
let connectReject: ((err: Error) => void) | null = null;
|
||||
|
||||
function normalizeOrigin(value: string | undefined | null): string | null {
|
||||
if (!value) return null;
|
||||
try {
|
||||
return new URL(value).origin;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function isLoopbackOrigin(origin: string): boolean {
|
||||
try {
|
||||
const { hostname } = new URL(origin);
|
||||
return hostname === '127.0.0.1' || hostname === 'localhost' || hostname === '::1';
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function getGatewayRequestOrigin(): string {
|
||||
const configuredPublicOrigin = normalizeOrigin(config.publicOrigin);
|
||||
if (configuredPublicOrigin) return configuredPublicOrigin;
|
||||
|
||||
const configuredAllowedOrigins = (process.env.ALLOWED_ORIGINS || '')
|
||||
.split(',')
|
||||
.map((value) => normalizeOrigin(value.trim()))
|
||||
.filter((value): value is string => Boolean(value));
|
||||
|
||||
const firstNonLoopbackOrigin = configuredAllowedOrigins.find((origin) => !isLoopbackOrigin(origin));
|
||||
if (firstNonLoopbackOrigin) return firstNonLoopbackOrigin;
|
||||
|
||||
const firstAllowedOrigin = configuredAllowedOrigins[0];
|
||||
if (firstAllowedOrigin) return firstAllowedOrigin;
|
||||
|
||||
return `http://127.0.0.1:${config.port}`;
|
||||
}
|
||||
|
||||
function buildConnectParams(nonce: string) {
|
||||
const clientId = 'openclaw-control-ui';
|
||||
const clientMode = 'webchat';
|
||||
const role = 'operator';
|
||||
const scopes = ['operator.admin', 'operator.read', 'operator.write'];
|
||||
const token = config.gatewayToken;
|
||||
|
||||
return {
|
||||
minProtocol: 3,
|
||||
maxProtocol: 3,
|
||||
client: {
|
||||
id: clientId,
|
||||
version: '0.1.0',
|
||||
platform: 'web',
|
||||
mode: clientMode,
|
||||
instanceId: `nerve-rpc-${randomUUID().slice(0, 8)}`,
|
||||
},
|
||||
role,
|
||||
scopes,
|
||||
auth: { token },
|
||||
device: createDeviceBlock({
|
||||
clientId,
|
||||
clientMode,
|
||||
role,
|
||||
scopes,
|
||||
token,
|
||||
nonce,
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
/** Send a raw message, ensuring the connection is ready. */
|
||||
function wsSend(data: string): boolean {
|
||||
if (ws && ws.readyState === WebSocket.OPEN) {
|
||||
ws.send(data);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Clean up all pending calls with an error. */
|
||||
function rejectAllPending(reason: string): void {
|
||||
for (const [id, call] of pending) {
|
||||
clearTimeout(call.timer);
|
||||
call.reject(new Error(reason));
|
||||
pending.delete(id);
|
||||
}
|
||||
}
|
||||
|
||||
/** Reject and clear the in-flight connect promise. */
|
||||
function rejectConnect(reason: string): void {
|
||||
if (connectReject) {
|
||||
connectReject(new Error(reason));
|
||||
}
|
||||
connectPromise = null;
|
||||
connectResolve = null;
|
||||
connectReject = null;
|
||||
}
|
||||
|
||||
/** Establish the persistent gateway connection. */
|
||||
function ensureConnection(): void {
|
||||
if (ws || connecting) return;
|
||||
if (!config.gatewayToken) return; // No token = can't connect
|
||||
|
||||
connecting = true;
|
||||
connectPromise = new Promise<void>((resolve, reject) => {
|
||||
connectResolve = resolve;
|
||||
connectReject = reject;
|
||||
});
|
||||
const wsUrl = getGatewayWsUrl();
|
||||
|
||||
const socket = new WebSocket(wsUrl, {
|
||||
headers: { Origin: getGatewayRequestOrigin() },
|
||||
});
|
||||
|
||||
socket.on('open', () => {
|
||||
// Wait for connect.challenge
|
||||
});
|
||||
|
||||
socket.on('message', (data: Buffer | string) => {
|
||||
try {
|
||||
const msg = JSON.parse(data.toString());
|
||||
|
||||
// Handle connect.challenge → send connect
|
||||
if (msg.type === 'event' && msg.event === 'connect.challenge' && msg.payload?.nonce) {
|
||||
socket.send(JSON.stringify({
|
||||
type: 'req',
|
||||
id: '__connect__',
|
||||
method: 'connect',
|
||||
params: buildConnectParams(msg.payload.nonce),
|
||||
}));
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle connect response
|
||||
if (msg.type === 'res' && msg.id === '__connect__') {
|
||||
connecting = false;
|
||||
if (msg.ok) {
|
||||
ws = socket;
|
||||
connected = true;
|
||||
if (connectResolve) {
|
||||
connectResolve();
|
||||
}
|
||||
connectResolve = null;
|
||||
connectReject = null;
|
||||
console.log('[gateway-rpc] Connected to gateway (persistent)');
|
||||
} else {
|
||||
const reason = msg.error?.message || 'Gateway connect rejected';
|
||||
console.error('[gateway-rpc] Gateway connect rejected:', reason);
|
||||
rejectConnect(reason);
|
||||
socket.close();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle RPC responses
|
||||
if (msg.type === 'res' && pending.has(msg.id)) {
|
||||
const call = pending.get(msg.id)!;
|
||||
pending.delete(msg.id);
|
||||
clearTimeout(call.timer);
|
||||
if (msg.ok === false) {
|
||||
call.reject(new Error(msg.error?.message || 'RPC error'));
|
||||
} else {
|
||||
call.resolve(msg.payload ?? msg.result ?? msg);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Ignore other events (chat messages, etc.)
|
||||
} catch {
|
||||
// Ignore parse errors
|
||||
}
|
||||
});
|
||||
|
||||
socket.on('error', (err) => {
|
||||
console.warn('[gateway-rpc] WebSocket error:', err.message);
|
||||
});
|
||||
|
||||
socket.on('close', () => {
|
||||
const wasConnected = connected;
|
||||
const wasConnecting = connecting;
|
||||
ws = null;
|
||||
connected = false;
|
||||
connecting = false;
|
||||
|
||||
if (!wasConnected && wasConnecting) {
|
||||
rejectConnect('Gateway connection closed before connect completed');
|
||||
} else {
|
||||
connectPromise = null;
|
||||
connectResolve = null;
|
||||
connectReject = null;
|
||||
}
|
||||
|
||||
rejectAllPending('Gateway connection closed');
|
||||
|
||||
// Auto-reconnect after a delay (only if we had a working connection)
|
||||
if (wasConnected && !reconnectTimer) {
|
||||
reconnectTimer = setTimeout(() => {
|
||||
reconnectTimer = null;
|
||||
ensureConnection();
|
||||
}, RECONNECT_DELAY_MS);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// ── Core RPC call ────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Execute a gateway RPC call via the persistent WebSocket connection.
|
||||
*/
|
||||
export async function gatewayRpcCall(
|
||||
method: string,
|
||||
params: Record<string, unknown>,
|
||||
timeoutMs = DEFAULT_TIMEOUT_MS,
|
||||
): Promise<unknown> {
|
||||
// Ensure connection exists
|
||||
ensureConnection();
|
||||
|
||||
// Wait for connection if not yet connected
|
||||
if (!connected && connectPromise) {
|
||||
await connectPromise;
|
||||
}
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const reqId = randomUUID();
|
||||
|
||||
const timer = setTimeout(() => {
|
||||
pending.delete(reqId);
|
||||
reject(new Error(`Gateway RPC timeout after ${timeoutMs}ms calling ${method}`));
|
||||
}, timeoutMs);
|
||||
|
||||
pending.set(reqId, { resolve, reject, timer });
|
||||
|
||||
const sent = wsSend(JSON.stringify({ type: 'req', id: reqId, method, params }));
|
||||
if (!sent) {
|
||||
pending.delete(reqId);
|
||||
clearTimeout(timer);
|
||||
reject(new Error('Gateway connection not ready'));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// ── Typed file RPC wrappers ──────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* List top-level workspace files for an agent via gateway RPC.
|
||||
*/
|
||||
export async function gatewayFilesList(agentId: string): Promise<GatewayFileEntry[]> {
|
||||
const result = await gatewayRpcCall('agents.files.list', { agentId }) as {
|
||||
files?: GatewayFileEntry[];
|
||||
};
|
||||
return result.files ?? [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Read a top-level workspace file via gateway RPC.
|
||||
* Returns null if the file is not found or unsupported.
|
||||
*
|
||||
* Gateway response shape: `{ agentId, workspace, file: { name, content, ... } }`
|
||||
*/
|
||||
export async function gatewayFilesGet(agentId: string, name: string): Promise<GatewayFileWithContent | null> {
|
||||
try {
|
||||
const result = await gatewayRpcCall('agents.files.get', { agentId, name }) as {
|
||||
file?: GatewayFileWithContent;
|
||||
} & GatewayFileWithContent;
|
||||
const file = result.file ?? result;
|
||||
if (!file || file.missing) return null;
|
||||
return file;
|
||||
} catch (err) {
|
||||
console.debug('[gateway-rpc] filesGet error:', (err as Error).message);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a top-level workspace file via gateway RPC.
|
||||
*/
|
||||
export async function gatewayFilesSet(agentId: string, name: string, content: string): Promise<void> {
|
||||
await gatewayRpcCall('agents.files.set', { agentId, name, content });
|
||||
}
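
A minimal caller sketch for the typed wrappers above, as they might be used when the agent workspace is not locally readable. Only gatewayFilesGet and gatewayFilesSet come from this file; the helper names around them are hypothetical, not part of this commit:

// Hypothetical fallback helpers built on the typed wrappers above.
import { gatewayFilesGet, gatewayFilesSet } from './gateway-rpc.js';

async function readAgentFile(agentId: string, name: string): Promise<string | null> {
  // gatewayFilesGet resolves to null when the file is missing or the RPC fails.
  const file = await gatewayFilesGet(agentId, name);
  return file ? file.content : null;
}

async function writeAgentFile(agentId: string, name: string, content: string): Promise<void> {
  // gatewayFilesSet rejects if the gateway reports a write error.
  await gatewayFilesSet(agentId, name, content);
}
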
server/lib/kanban-assignee.test.ts (new file, 55 lines added)
@ -0,0 +1,55 @@
/**
 * Tests for shared Kanban assignee normalization helpers.
 * @module
 */

import { describe, expect, it } from 'vitest';
import {
  InvalidKanbanAssigneeError,
  canonicalizeKanbanAssignee,
  resolveKanbanAssigneeRootSessionKey,
} from './kanban-assignee.js';

describe('canonicalizeKanbanAssignee', () => {
  it('keeps canonical agent assignees unchanged', () => {
    expect(canonicalizeKanbanAssignee('agent:designer')).toBe('agent:designer');
  });

  it('collapses legacy main-session assignees to the canonical agent id', () => {
    expect(canonicalizeKanbanAssignee('agent:designer:main')).toBe('agent:designer');
  });

  it('collapses nested subagent assignees to the canonical agent id', () => {
    expect(canonicalizeKanbanAssignee('agent:designer:subagent:child')).toBe('agent:designer');
  });

  it('preserves operator and unset assignees', () => {
    expect(canonicalizeKanbanAssignee('operator')).toBe('operator');
    expect(canonicalizeKanbanAssignee(undefined)).toBeUndefined();
    expect(canonicalizeKanbanAssignee(null)).toBeUndefined();
  });

  it('rejects reserved or malformed assignee values', () => {
    expect(() => canonicalizeKanbanAssignee('agent:main')).toThrow(InvalidKanbanAssigneeError);
    expect(() => canonicalizeKanbanAssignee('reviewer')).toThrow('Invalid Kanban assignee: reviewer');
  });
});

describe('resolveKanbanAssigneeRootSessionKey', () => {
  it('returns null for operator and unset assignees', () => {
    expect(resolveKanbanAssigneeRootSessionKey('operator')).toBeNull();
    expect(resolveKanbanAssigneeRootSessionKey(undefined)).toBeNull();
    expect(resolveKanbanAssigneeRootSessionKey(null)).toBeNull();
  });

  it('maps canonical and legacy agent values to the owning root session', () => {
    expect(resolveKanbanAssigneeRootSessionKey('agent:designer')).toBe('agent:designer:main');
    expect(resolveKanbanAssigneeRootSessionKey('agent:designer:main')).toBe('agent:designer:main');
    expect(resolveKanbanAssigneeRootSessionKey('agent:designer:subagent:child')).toBe('agent:designer:main');
  });

  it('returns null for reserved or malformed values', () => {
    expect(resolveKanbanAssigneeRootSessionKey('agent:main')).toBeNull();
    expect(resolveKanbanAssigneeRootSessionKey('reviewer')).toBeNull();
  });
});
server/lib/kanban-assignee.ts (new file, 31 lines added)
@ -0,0 +1,31 @@
/**
 * Shared Kanban assignee normalization helpers.
 * @module
 */

export class InvalidKanbanAssigneeError extends Error {
  constructor(value: string) {
    super(`Invalid Kanban assignee: ${value}`);
    this.name = 'InvalidKanbanAssigneeError';
  }
}

export function canonicalizeKanbanAssignee(
  value?: string | null,
): `agent:${string}` | 'operator' | undefined {
  if (value == null) return undefined;
  if (value === 'operator') return 'operator';

  const match = value.match(/^agent:([^:]+)(?::.*)?$/);
  if (!match) throw new InvalidKanbanAssigneeError(String(value));
  if (match[1] === 'main') throw new InvalidKanbanAssigneeError(value);
  return `agent:${match[1]}`;
}

export function resolveKanbanAssigneeRootSessionKey(value?: string | null): string | null {
  if (value == null || value === 'operator') return null;

  const match = value.match(/^agent:([^:]+)(?::.*)?$/);
  if (!match || match[1] === 'main') return null;
  return `agent:${match[1]}:main`;
}
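
The expected behaviour, illustrated with the same values the tests above exercise (illustrative only; return values mirror the test expectations):

import { canonicalizeKanbanAssignee, resolveKanbanAssigneeRootSessionKey } from './kanban-assignee.js';

canonicalizeKanbanAssignee('agent:designer:main');            // 'agent:designer'
canonicalizeKanbanAssignee('agent:designer:subagent:child');  // 'agent:designer'
canonicalizeKanbanAssignee('operator');                       // 'operator'
canonicalizeKanbanAssignee('agent:main');                     // throws InvalidKanbanAssigneeError
resolveKanbanAssigneeRootSessionKey('agent:designer');        // 'agent:designer:main'
resolveKanbanAssigneeRootSessionKey('operator');              // null
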
@ -1,5 +1,5 @@
|
|||
/** Tests for kanban-store: CRUD, CAS conflicts, reorder, config, filters, workflow, proposals. */
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
|
||||
import fs from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import os from 'node:os';
|
||||
|
|
@ -10,14 +10,22 @@ import {
|
|||
InvalidTransitionError,
|
||||
ProposalNotFoundError,
|
||||
ProposalAlreadyResolvedError,
|
||||
InvalidBoardConfigError,
|
||||
} from './kanban-store.js';
|
||||
import type { KanbanTask } from './kanban-store.js';
|
||||
import { InvalidKanbanAssigneeError } from './kanban-assignee.js';
|
||||
|
||||
let store: KanbanStore;
|
||||
let tmpDir: string;
|
||||
let filePath: string;
|
||||
let originalNerveDataDir: string | undefined;
|
||||
let originalNerveProjectRoot: string | undefined;
|
||||
let originalCwd: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
originalNerveDataDir = process.env.NERVE_DATA_DIR;
|
||||
originalNerveProjectRoot = process.env.NERVE_PROJECT_ROOT;
|
||||
originalCwd = process.cwd();
|
||||
tmpDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'kanban-test-'));
|
||||
filePath = path.join(tmpDir, 'tasks.json');
|
||||
store = new KanbanStore(filePath);
|
||||
|
|
@ -25,6 +33,11 @@ beforeEach(async () => {
|
|||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (originalNerveDataDir === undefined) delete process.env.NERVE_DATA_DIR;
|
||||
else process.env.NERVE_DATA_DIR = originalNerveDataDir;
|
||||
if (originalNerveProjectRoot === undefined) delete process.env.NERVE_PROJECT_ROOT;
|
||||
else process.env.NERVE_PROJECT_ROOT = originalNerveProjectRoot;
|
||||
process.chdir(originalCwd);
|
||||
await fs.promises.rm(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
|
|
@ -119,6 +132,18 @@ describe('createTask', () => {
|
|||
expect(task.sourceSessionKey).toBe('sess-123');
|
||||
});
|
||||
|
||||
it('canonicalizes assignee on create', async () => {
|
||||
const task = await createSampleTask({ assignee: 'agent:reviewer:main' });
|
||||
expect(task.assignee).toBe('agent:reviewer');
|
||||
|
||||
const persisted = await store.getTask(task.id);
|
||||
expect(persisted.assignee).toBe('agent:reviewer');
|
||||
});
|
||||
|
||||
it('rejects invalid root assignee on create', async () => {
|
||||
await expect(createSampleTask({ assignee: 'agent:main' })).rejects.toThrow(InvalidKanbanAssigneeError);
|
||||
});
|
||||
|
||||
it('persists to disk', async () => {
|
||||
await createSampleTask({ title: 'Persisted' });
|
||||
// Read directly from file
|
||||
|
|
@ -241,6 +266,26 @@ describe('listTasks', () => {
|
|||
const result = await store.listTasks();
|
||||
expect(result.items.map((t) => t.title)).toEqual(['Backlog', 'Todo 1', 'Todo 2', 'Done']);
|
||||
});
|
||||
|
||||
it('sorts by configured board column order for custom statuses', async () => {
|
||||
await store.updateConfig({
|
||||
columns: [
|
||||
{ key: 'backlog', title: 'Backlog', visible: true },
|
||||
{ key: 'blocked', title: 'Blocked', visible: true },
|
||||
{ key: 'todo', title: 'To Do', visible: true },
|
||||
{ key: 'in-progress', title: 'In Progress', visible: true },
|
||||
{ key: 'review', title: 'Review', visible: true },
|
||||
{ key: 'done', title: 'Done', visible: true },
|
||||
{ key: 'cancelled', title: 'Cancelled', visible: false },
|
||||
],
|
||||
});
|
||||
|
||||
await createSampleTask({ title: 'Todo task', status: 'todo' });
|
||||
await createSampleTask({ title: 'Blocked task', status: 'blocked' });
|
||||
|
||||
const result = await store.listTasks();
|
||||
expect(result.items.map((t) => t.title)).toEqual(['Blocked task', 'Todo task']);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Get ──────────────────────────────────────────────────────────────
|
||||
|
|
@ -315,6 +360,34 @@ describe('updateTask', () => {
|
|||
expect(found.title).toBe('Persisted update');
|
||||
expect(found.version).toBe(2);
|
||||
});
|
||||
|
||||
it('canonicalizes assignee on update when present in patch', async () => {
|
||||
const task = await createSampleTask({ assignee: 'agent:codex' });
|
||||
|
||||
const updated = await store.updateTask(task.id, task.version, {
|
||||
assignee: 'agent:reviewer:subagent:child',
|
||||
});
|
||||
|
||||
expect(updated.assignee).toBe('agent:reviewer');
|
||||
|
||||
const persisted = await store.getTask(task.id);
|
||||
expect(persisted.assignee).toBe('agent:reviewer');
|
||||
});
|
||||
|
||||
it('does not rewrite a legacy assignee during unrelated updates', async () => {
|
||||
const task = await createSampleTask({ assignee: 'agent:codex' });
|
||||
const raw = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
|
||||
raw.tasks[0].assignee = 'agent:reviewer:main';
|
||||
fs.writeFileSync(filePath, JSON.stringify(raw, null, 2));
|
||||
|
||||
const updated = await store.updateTask(task.id, task.version, { title: 'Retitled' });
|
||||
|
||||
expect(updated.assignee).toBe('agent:reviewer:main');
|
||||
|
||||
const persisted = await store.getTask(task.id);
|
||||
expect(persisted.assignee).toBe('agent:reviewer:main');
|
||||
expect(persisted.title).toBe('Retitled');
|
||||
});
|
||||
});
|
||||
|
||||
// ── Delete ───────────────────────────────────────────────────────────
|
||||
|
|
@ -435,6 +508,65 @@ describe('config', () => {
|
|||
const cfg = await store.getConfig();
|
||||
expect(cfg.quickViewLimit).toBe(20);
|
||||
});
|
||||
|
||||
it('preserves custom default status across reads', async () => {
|
||||
await store.updateConfig({
|
||||
columns: [
|
||||
{ key: 'backlog', title: 'Backlog', visible: true },
|
||||
{ key: 'todo', title: 'To Do', visible: true },
|
||||
{ key: 'in-progress', title: 'In Progress', visible: true },
|
||||
{ key: 'review', title: 'Review', visible: true },
|
||||
{ key: 'blocked', title: 'Blocked', visible: true },
|
||||
{ key: 'done', title: 'Done', visible: true },
|
||||
{ key: 'cancelled', title: 'Cancelled', visible: false },
|
||||
],
|
||||
defaults: { status: 'blocked', priority: 'normal' },
|
||||
});
|
||||
|
||||
const cfg = await store.getConfig();
|
||||
expect(cfg.defaults.status).toBe('blocked');
|
||||
|
||||
const task = await createSampleTask({ title: 'Uses custom default' });
|
||||
expect(task.status).toBe('blocked');
|
||||
});
|
||||
|
||||
it('rejects config updates that remove a status used by existing tasks', async () => {
|
||||
await store.updateConfig({
|
||||
columns: [
|
||||
{ key: 'backlog', title: 'Backlog', visible: true },
|
||||
{ key: 'todo', title: 'To Do', visible: true },
|
||||
{ key: 'in-progress', title: 'In Progress', visible: true },
|
||||
{ key: 'review', title: 'Review', visible: true },
|
||||
{ key: 'blocked', title: 'Blocked', visible: true },
|
||||
{ key: 'done', title: 'Done', visible: true },
|
||||
{ key: 'cancelled', title: 'Cancelled', visible: false },
|
||||
],
|
||||
});
|
||||
await createSampleTask({ title: 'Blocked task', status: 'blocked' });
|
||||
|
||||
await expect(store.updateConfig({
|
||||
columns: [
|
||||
{ key: 'backlog', title: 'Backlog', visible: true },
|
||||
{ key: 'todo', title: 'To Do', visible: true },
|
||||
{ key: 'in-progress', title: 'In Progress', visible: true },
|
||||
{ key: 'review', title: 'Review', visible: true },
|
||||
{ key: 'done', title: 'Done', visible: true },
|
||||
{ key: 'cancelled', title: 'Cancelled', visible: false },
|
||||
],
|
||||
})).rejects.toBeInstanceOf(InvalidBoardConfigError);
|
||||
});
|
||||
|
||||
it('rejects config updates that remove required built-in columns', async () => {
|
||||
await expect(store.updateConfig({
|
||||
columns: [
|
||||
{ key: 'backlog', title: 'Backlog', visible: true },
|
||||
{ key: 'todo', title: 'To Do', visible: true },
|
||||
{ key: 'in-progress', title: 'In Progress', visible: true },
|
||||
{ key: 'review', title: 'Review', visible: true },
|
||||
{ key: 'cancelled', title: 'Cancelled', visible: false },
|
||||
],
|
||||
})).rejects.toBeInstanceOf(InvalidBoardConfigError);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Concurrency ──────────────────────────────────────────────────────
|
||||
|
|
@ -508,6 +640,29 @@ describe('executeTask', () => {
|
|||
expect(second.run!.sessionKey).toBe(first.run!.sessionKey);
|
||||
});
|
||||
|
||||
it('uses a unique run key for same-millisecond reruns', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const nowSpy = vi.spyOn(Date, 'now').mockReturnValue(1_777_777_777_777);
|
||||
|
||||
try {
|
||||
const run1 = await store.executeTask(task.id);
|
||||
const aborted = await store.abortTask(run1.id, 'rerun');
|
||||
const run2 = await store.executeTask(aborted.id);
|
||||
|
||||
expect(run2.run!.sessionKey).not.toBe(run1.run!.sessionKey);
|
||||
|
||||
await expect(store.completeRun(run2.id, run1.run!.sessionKey, 'stale result')).rejects.toThrow(InvalidTransitionError);
|
||||
|
||||
const fresh = await store.getTask(task.id);
|
||||
expect(fresh.status).toBe('in-progress');
|
||||
expect(fresh.run?.status).toBe('running');
|
||||
expect(fresh.run?.sessionKey).toBe(run2.run!.sessionKey);
|
||||
expect(fresh.result).toBeUndefined();
|
||||
} finally {
|
||||
nowSpy.mockRestore();
|
||||
}
|
||||
});
|
||||
|
||||
it('throws InvalidTransitionError for done task', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
// Manually set to done via updateTask
|
||||
|
|
@ -545,6 +700,103 @@ describe('executeTask', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('canonical run session key', () => {
|
||||
it('accepts a precomputed run correlation key in executeTask', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const rootSessionKey = 'kanban-root:test-task';
|
||||
|
||||
const executed = await store.executeTask(task.id, { sessionKey: rootSessionKey });
|
||||
|
||||
expect(executed.status).toBe('in-progress');
|
||||
expect(executed.run).toBeDefined();
|
||||
expect(executed.run!.status).toBe('running');
|
||||
expect(executed.run!.sessionKey).toBe(rootSessionKey);
|
||||
expect(executed.version).toBe(task.version + 1);
|
||||
});
|
||||
|
||||
it('preserves stale-run protection: old run cannot rewrite active canonical session key', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const rootSessionKey1 = 'kanban-root:run-1';
|
||||
const rootSessionKey2 = 'kanban-root:run-2';
|
||||
|
||||
// First run
|
||||
const run1 = await store.executeTask(task.id, { sessionKey: rootSessionKey1 });
|
||||
expect(run1.run!.sessionKey).toBe(rootSessionKey1);
|
||||
|
||||
// Abort and re-execute with new root session key
|
||||
const aborted = await store.abortTask(run1.id);
|
||||
expect(aborted.run!.status).toBe('aborted');
|
||||
|
||||
const run2 = await store.executeTask(aborted.id, { sessionKey: rootSessionKey2 });
|
||||
expect(run2.run!.sessionKey).toBe(rootSessionKey2);
|
||||
expect(run2.run!.status).toBe('running');
|
||||
|
||||
// Stale run completion with old session key should fail
|
||||
await expect(
|
||||
store.completeRun(run2.id, rootSessionKey1, 'stale result')
|
||||
).rejects.toThrow(InvalidTransitionError);
|
||||
|
||||
// Verify active run is still intact
|
||||
const current = await store.getTask(task.id);
|
||||
expect(current.run?.status).toBe('running');
|
||||
expect(current.run?.sessionKey).toBe(rootSessionKey2);
|
||||
expect(current.result).toBeUndefined();
|
||||
});
|
||||
|
||||
it('still generates auto keys when no custom session key is provided', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
|
||||
const executed = await store.executeTask(task.id);
|
||||
|
||||
expect(executed.run!.sessionKey).toMatch(new RegExp(`^kb-${task.id}-\\d+`));
|
||||
});
|
||||
});
|
||||
|
||||
describe('attachRunIdentifiers', () => {
|
||||
it('persists stable spawned identifiers without bumping the task version', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const executed = await store.executeTask(task.id);
|
||||
|
||||
const linked = await store.attachRunIdentifiers(executed.id, executed.run!.sessionKey, {
|
||||
childSessionKey: 'agent:main:subagent:stable-child',
|
||||
runId: 'stable-run-123',
|
||||
});
|
||||
|
||||
expect(linked).not.toBeNull();
|
||||
expect(linked!.run?.sessionKey).toBe(executed.run!.sessionKey);
|
||||
expect(linked!.run?.childSessionKey).toBe('agent:main:subagent:stable-child');
|
||||
expect(linked!.run?.sessionId).toBe('agent:main:subagent:stable-child');
|
||||
expect(linked!.run?.runId).toBe('stable-run-123');
|
||||
expect(linked!.version).toBe(executed.version);
|
||||
|
||||
const fresh = await store.getTask(task.id);
|
||||
expect(fresh.run?.childSessionKey).toBe('agent:main:subagent:stable-child');
|
||||
expect(fresh.run?.sessionId).toBe('agent:main:subagent:stable-child');
|
||||
expect(fresh.run?.runId).toBe('stable-run-123');
|
||||
expect(fresh.version).toBe(executed.version);
|
||||
});
|
||||
|
||||
it('ignores stale spawned identifiers after a rerun replaces the active run', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const run1 = await store.executeTask(task.id);
|
||||
await store.abortTask(run1.id, 'rerun');
|
||||
const rerunnable = await store.getTask(task.id);
|
||||
const run2 = await store.executeTask(rerunnable.id);
|
||||
|
||||
const linked = await store.attachRunIdentifiers(run2.id, run1.run!.sessionKey, {
|
||||
childSessionKey: 'agent:main:subagent:stale-child',
|
||||
runId: 'stale-run-123',
|
||||
});
|
||||
|
||||
expect(linked).toBeNull();
|
||||
|
||||
const fresh = await store.getTask(task.id);
|
||||
expect(fresh.run?.sessionKey).toBe(run2.run!.sessionKey);
|
||||
expect(fresh.run?.childSessionKey).toBeUndefined();
|
||||
expect(fresh.run?.runId).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
// ── Approve ──────────────────────────────────────────────────────────
|
||||
|
||||
describe('approveTask', () => {
|
||||
|
|
@ -690,7 +942,7 @@ describe('completeRun', () => {
|
|||
const task = await createSampleTask({ status: 'todo' });
|
||||
const executed = await store.executeTask(task.id);
|
||||
|
||||
const completed = await store.completeRun(executed.id, 'Task output here');
|
||||
const completed = await store.completeRun(executed.id, executed.run!.sessionKey, 'Task output here');
|
||||
expect(completed.status).toBe('review');
|
||||
expect(completed.run!.status).toBe('done');
|
||||
expect(completed.run!.endedAt).toBeGreaterThan(0);
|
||||
|
|
@ -703,7 +955,7 @@ describe('completeRun', () => {
|
|||
const task = await createSampleTask({ status: 'todo' });
|
||||
const executed = await store.executeTask(task.id);
|
||||
|
||||
const completed = await store.completeRun(executed.id, undefined, 'Runtime error');
|
||||
const completed = await store.completeRun(executed.id, executed.run!.sessionKey, undefined, 'Runtime error');
|
||||
expect(completed.status).toBe('todo');
|
||||
expect(completed.run!.status).toBe('error');
|
||||
expect(completed.run!.error).toBe('Runtime error');
|
||||
|
|
@ -712,18 +964,77 @@ describe('completeRun', () => {
|
|||
|
||||
it('throws InvalidTransitionError when no active run', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
await expect(store.completeRun(task.id, 'result')).rejects.toThrow(InvalidTransitionError);
|
||||
await expect(store.completeRun(task.id, 'missing-run-key', 'result')).rejects.toThrow(InvalidTransitionError);
|
||||
});
|
||||
|
||||
it('completes when the child session key matches the active run identifiers', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const executed = await store.executeTask(task.id);
|
||||
const linked = await store.attachRunIdentifiers(executed.id, executed.run!.sessionKey, {
|
||||
childSessionKey: 'agent:main:subagent:stable-child',
|
||||
runId: 'stable-run-123',
|
||||
});
|
||||
|
||||
const completed = await store.completeRun(linked!.id, 'agent:main:subagent:stable-child', 'Task output here');
|
||||
expect(completed.status).toBe('review');
|
||||
expect(completed.run!.status).toBe('done');
|
||||
expect(completed.run!.childSessionKey).toBe('agent:main:subagent:stable-child');
|
||||
expect(completed.run!.runId).toBe('stable-run-123');
|
||||
});
|
||||
|
||||
it('completes when the runId matches the active run identifiers', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const executed = await store.executeTask(task.id);
|
||||
const linked = await store.attachRunIdentifiers(executed.id, executed.run!.sessionKey, {
|
||||
childSessionKey: 'agent:main:subagent:stable-child',
|
||||
runId: 'stable-run-123',
|
||||
});
|
||||
|
||||
const completed = await store.completeRun(linked!.id, 'stable-run-123', 'Task output here');
|
||||
expect(completed.status).toBe('review');
|
||||
expect(completed.run!.status).toBe('done');
|
||||
expect(completed.run!.childSessionKey).toBe('agent:main:subagent:stable-child');
|
||||
expect(completed.run!.runId).toBe('stable-run-123');
|
||||
});
|
||||
|
||||
it('throws InvalidTransitionError when session key does not match the active run', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const executed = await store.executeTask(task.id);
|
||||
|
||||
await expect(store.completeRun(executed.id, `${executed.run!.sessionKey}-stale`, 'result')).rejects.toThrow(InvalidTransitionError);
|
||||
|
||||
const fresh = await store.getTask(task.id);
|
||||
expect(fresh.status).toBe('in-progress');
|
||||
expect(fresh.run?.status).toBe('running');
|
||||
expect(fresh.run?.sessionKey).toBe(executed.run!.sessionKey);
|
||||
expect(fresh.result).toBeUndefined();
|
||||
});
|
||||
|
||||
it('rejects late completion from run 1 after run 2 is active', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const run1 = await store.executeTask(task.id);
|
||||
await store.abortTask(run1.id, 'rerun');
|
||||
const rerunnable = await store.getTask(task.id);
|
||||
const run2 = await store.executeTask(rerunnable.id);
|
||||
|
||||
await expect(store.completeRun(run2.id, run1.run!.sessionKey, 'stale result')).rejects.toThrow(InvalidTransitionError);
|
||||
|
||||
const fresh = await store.getTask(task.id);
|
||||
expect(fresh.status).toBe('in-progress');
|
||||
expect(fresh.run?.status).toBe('running');
|
||||
expect(fresh.run?.sessionKey).toBe(run2.run!.sessionKey);
|
||||
expect(fresh.result).toBeUndefined();
|
||||
});
|
||||
|
||||
it('throws TaskNotFoundError for missing task', async () => {
|
||||
await expect(store.completeRun('nonexistent', 'result')).rejects.toThrow(TaskNotFoundError);
|
||||
await expect(store.completeRun('nonexistent', 'missing-run-key', 'result')).rejects.toThrow(TaskNotFoundError);
|
||||
});
|
||||
|
||||
it('completes without result string on success', async () => {
|
||||
const task = await createSampleTask({ status: 'todo' });
|
||||
const executed = await store.executeTask(task.id);
|
||||
|
||||
const completed = await store.completeRun(executed.id);
|
||||
const completed = await store.completeRun(executed.id, executed.run!.sessionKey);
|
||||
expect(completed.status).toBe('review');
|
||||
expect(completed.run!.status).toBe('done');
|
||||
expect(completed.result).toBeUndefined();
|
||||
|
|
@ -802,7 +1113,7 @@ describe('full workflow', () => {
|
|||
expect(executed.status).toBe('in-progress');
|
||||
|
||||
// Complete run
|
||||
const completed = await store.completeRun(executed.id, 'Done!');
|
||||
const completed = await store.completeRun(executed.id, executed.run!.sessionKey, 'Done!');
|
||||
expect(completed.status).toBe('review');
|
||||
|
||||
// Approve
|
||||
|
|
@ -815,7 +1126,7 @@ describe('full workflow', () => {
|
|||
const task = await createSampleTask({ status: 'todo' });
|
||||
|
||||
const executed = await store.executeTask(task.id);
|
||||
const completed = await store.completeRun(executed.id, 'Half done');
|
||||
const completed = await store.completeRun(executed.id, executed.run!.sessionKey, 'Half done');
|
||||
const rejected = await store.rejectTask(completed.id, 'Not good enough');
|
||||
expect(rejected.status).toBe('todo');
|
||||
expect(rejected.run).toBeUndefined();
|
||||
|
|
@ -858,6 +1169,16 @@ describe('createProposal', () => {
|
|||
expect(proposal.proposedAt).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('canonicalizes assignee in create proposal payload', async () => {
|
||||
const proposal = await store.createProposal({
|
||||
type: 'create',
|
||||
payload: { title: 'New feature', assignee: 'agent:reviewer:main' },
|
||||
proposedBy: 'agent:codex',
|
||||
});
|
||||
|
||||
expect(proposal.payload.assignee).toBe('agent:reviewer');
|
||||
});
|
||||
|
||||
it('creates a pending update proposal', async () => {
|
||||
const task = await createSampleTask();
|
||||
const proposal = await store.createProposal({
|
||||
|
|
@ -870,6 +1191,17 @@ describe('createProposal', () => {
|
|||
expect(proposal.payload.id).toBe(task.id);
|
||||
});
|
||||
|
||||
it('canonicalizes assignee in update proposal payload', async () => {
|
||||
const task = await createSampleTask();
|
||||
const proposal = await store.createProposal({
|
||||
type: 'update',
|
||||
payload: { id: task.id, assignee: 'agent:reviewer:subagent:child' },
|
||||
proposedBy: 'agent:codex',
|
||||
});
|
||||
|
||||
expect(proposal.payload.assignee).toBe('agent:reviewer');
|
||||
});
|
||||
|
||||
it('stores sourceSessionKey', async () => {
|
||||
const proposal = await store.createProposal({
|
||||
type: 'create',
|
||||
|
|
@ -944,6 +1276,51 @@ describe('approveProposal', () => {
|
|||
});
|
||||
await expect(store.approveProposal(proposal.id)).rejects.toThrow(TaskNotFoundError);
|
||||
});
|
||||
|
||||
it('canonicalizes assignee when approving a legacy create proposal payload', async () => {
|
||||
const raw = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
|
||||
raw.proposals.push({
|
||||
id: 'legacy-create-proposal',
|
||||
type: 'create',
|
||||
payload: { title: 'Legacy create', assignee: 'agent:reviewer:main' },
|
||||
proposedBy: 'agent:codex',
|
||||
proposedAt: Date.now(),
|
||||
status: 'pending',
|
||||
version: 1,
|
||||
});
|
||||
fs.writeFileSync(filePath, JSON.stringify(raw, null, 2));
|
||||
|
||||
const { proposal, task } = await store.approveProposal('legacy-create-proposal');
|
||||
|
||||
expect(proposal.status).toBe('approved');
|
||||
expect(task.assignee).toBe('agent:reviewer');
|
||||
|
||||
const persisted = await store.getTask(task.id);
|
||||
expect(persisted.assignee).toBe('agent:reviewer');
|
||||
});
|
||||
|
||||
it('canonicalizes assignee when approving a legacy update proposal payload', async () => {
|
||||
const task = await createSampleTask({ assignee: 'agent:codex' });
|
||||
const raw = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
|
||||
raw.proposals.push({
|
||||
id: 'legacy-update-proposal',
|
||||
type: 'update',
|
||||
payload: { id: task.id, assignee: 'agent:reviewer:subagent:child' },
|
||||
proposedBy: 'agent:codex',
|
||||
proposedAt: Date.now(),
|
||||
status: 'pending',
|
||||
version: 1,
|
||||
});
|
||||
fs.writeFileSync(filePath, JSON.stringify(raw, null, 2));
|
||||
|
||||
const { proposal, task: updated } = await store.approveProposal('legacy-update-proposal');
|
||||
|
||||
expect(proposal.status).toBe('approved');
|
||||
expect(updated.assignee).toBe('agent:reviewer');
|
||||
|
||||
const persisted = await store.getTask(task.id);
|
||||
expect(persisted.assignee).toBe('agent:reviewer');
|
||||
});
|
||||
});
|
||||
|
||||
describe('rejectProposal', () => {
|
||||
|
|
@ -1069,3 +1446,118 @@ describe('migration', () => {
|
|||
expect(result.total).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('default path and legacy migration', () => {
|
||||
it('stores default data under NERVE_DATA_DIR/kanban', async () => {
|
||||
process.env.NERVE_DATA_DIR = path.join(tmpDir, 'nerve-data');
|
||||
process.env.NERVE_PROJECT_ROOT = path.join(tmpDir, 'project');
|
||||
|
||||
const defaultStore = new KanbanStore();
|
||||
await defaultStore.init();
|
||||
|
||||
const canonicalPath = path.join(process.env.NERVE_DATA_DIR, 'kanban', 'tasks.json');
|
||||
expect(fs.existsSync(canonicalPath)).toBe(true);
|
||||
|
||||
const raw = JSON.parse(fs.readFileSync(canonicalPath, 'utf-8'));
|
||||
expect(raw.tasks).toEqual([]);
|
||||
});
|
||||
|
||||
it('migrates legacy server-dist data into the canonical store', async () => {
|
||||
const projectRoot = path.join(tmpDir, 'project');
|
||||
const legacyPath = path.join(projectRoot, 'server-dist', 'data', 'kanban', 'tasks.json');
|
||||
process.env.NERVE_DATA_DIR = path.join(tmpDir, 'nerve-data');
|
||||
process.env.NERVE_PROJECT_ROOT = projectRoot;
|
||||
|
||||
const legacyStore = new KanbanStore(legacyPath);
|
||||
await legacyStore.init();
|
||||
await legacyStore.createTask({ title: 'Recovered from server-dist', createdBy: 'operator' });
|
||||
|
||||
const defaultStore = new KanbanStore();
|
||||
await defaultStore.init();
|
||||
|
||||
const result = await defaultStore.listTasks();
|
||||
expect(result.total).toBe(1);
|
||||
expect(result.items[0].title).toBe('Recovered from server-dist');
|
||||
expect(fs.existsSync(path.join(process.env.NERVE_DATA_DIR, 'kanban', 'audit.log'))).toBe(true);
|
||||
});
|
||||
|
||||
it('lazy-initializes and migrates before reads', async () => {
|
||||
const projectRoot = path.join(tmpDir, 'project');
|
||||
const legacyPath = path.join(projectRoot, 'server-dist', 'data', 'kanban', 'tasks.json');
|
||||
process.env.NERVE_DATA_DIR = path.join(tmpDir, 'nerve-data');
|
||||
process.env.NERVE_PROJECT_ROOT = projectRoot;
|
||||
|
||||
const legacyStore = new KanbanStore(legacyPath);
|
||||
await legacyStore.init();
|
||||
await legacyStore.createTask({ title: 'Recovered without explicit init', createdBy: 'operator' });
|
||||
|
||||
const defaultStore = new KanbanStore();
|
||||
const result = await defaultStore.listTasks();
|
||||
|
||||
expect(result.total).toBe(1);
|
||||
expect(result.items[0].title).toBe('Recovered without explicit init');
|
||||
});
|
||||
|
||||
it('migrates legacy server data into the canonical store', async () => {
|
||||
const projectRoot = path.join(tmpDir, 'project');
|
||||
const legacyPath = path.join(projectRoot, 'server', 'data', 'kanban', 'tasks.json');
|
||||
process.env.NERVE_DATA_DIR = path.join(tmpDir, 'nerve-data');
|
||||
process.env.NERVE_PROJECT_ROOT = projectRoot;
|
||||
|
||||
const legacyStore = new KanbanStore(legacyPath);
|
||||
await legacyStore.init();
|
||||
await legacyStore.createTask({ title: 'Recovered from server', createdBy: 'operator' });
|
||||
|
||||
const defaultStore = new KanbanStore();
|
||||
await defaultStore.init();
|
||||
|
||||
const result = await defaultStore.listTasks();
|
||||
expect(result.total).toBe(1);
|
||||
expect(result.items[0].title).toBe('Recovered from server');
|
||||
});
|
||||
|
||||
it('prefers the canonical store when canonical and legacy data both exist', async () => {
|
||||
const projectRoot = path.join(tmpDir, 'project');
|
||||
const canonicalDir = path.join(tmpDir, 'nerve-data', 'kanban');
|
||||
const legacyPath = path.join(projectRoot, 'server-dist', 'data', 'kanban', 'tasks.json');
|
||||
process.env.NERVE_DATA_DIR = path.join(tmpDir, 'nerve-data');
|
||||
process.env.NERVE_PROJECT_ROOT = projectRoot;
|
||||
|
||||
const legacyStore = new KanbanStore(legacyPath);
|
||||
await legacyStore.init();
|
||||
await legacyStore.createTask({ title: 'Legacy task', createdBy: 'operator' });
|
||||
|
||||
const canonicalStore = new KanbanStore(path.join(canonicalDir, 'tasks.json'));
|
||||
await canonicalStore.init();
|
||||
await canonicalStore.createTask({ title: 'Canonical task', createdBy: 'operator' });
|
||||
|
||||
const defaultStore = new KanbanStore();
|
||||
await defaultStore.init();
|
||||
|
||||
const result = await defaultStore.listTasks();
|
||||
expect(result.total).toBe(1);
|
||||
expect(result.items[0].title).toBe('Canonical task');
|
||||
});
|
||||
|
||||
it('prefers the richer legacy candidate over an empty one', async () => {
|
||||
const projectRoot = path.join(tmpDir, 'project');
|
||||
const emptyLegacyPath = path.join(projectRoot, 'server-dist', 'data', 'kanban', 'tasks.json');
|
||||
const richLegacyPath = path.join(projectRoot, 'server', 'data', 'kanban', 'tasks.json');
|
||||
process.env.NERVE_DATA_DIR = path.join(tmpDir, 'nerve-data');
|
||||
process.env.NERVE_PROJECT_ROOT = projectRoot;
|
||||
|
||||
const emptyStore = new KanbanStore(emptyLegacyPath);
|
||||
await emptyStore.init();
|
||||
|
||||
const richStore = new KanbanStore(richLegacyPath);
|
||||
await richStore.init();
|
||||
await richStore.createTask({ title: 'Rich legacy task', createdBy: 'operator' });
|
||||
|
||||
const defaultStore = new KanbanStore();
|
||||
await defaultStore.init();
|
||||
|
||||
const result = await defaultStore.listTasks();
|
||||
expect(result.total).toBe(1);
|
||||
expect(result.items[0].title).toBe('Rich legacy task');
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,16 +1,21 @@
|
|||
/**
|
||||
* Kanban task store — JSON file persistence with mutex-protected I/O.
|
||||
*
|
||||
* Data lives at `server/data/kanban/tasks.json`. Every mutating operation
|
||||
* acquires the store mutex, reads the file, applies the change, and writes
|
||||
* back atomically. CAS version checks prevent stale overwrites.
|
||||
* Runtime data lives under `${NERVE_DATA_DIR:-~/.nerve}/kanban/tasks.json`.
|
||||
* Legacy installs may still have data under `server-dist/data/kanban/` or
|
||||
* `server/data/kanban/`, so the store performs a one-time migration into the
|
||||
* canonical runtime directory on first init. Every mutating operation acquires
|
||||
* the store mutex, reads the file, applies the change, and writes back
|
||||
* atomically. CAS version checks prevent stale overwrites.
|
||||
* @module
|
||||
*/
|
||||
|
||||
import fs from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import crypto from 'node:crypto';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import { canonicalizeKanbanAssignee } from './kanban-assignee.js';
|
||||
import { createMutex } from './mutex.js';
|
||||
|
||||
// ── Helpers ──────────────────────────────────────────────────────────
|
||||
|
|
@ -35,9 +40,44 @@ function uniqueSlugId(title: string, existingIds: Set<string>): string {
|
|||
}
|
||||
}
|
||||
|
||||
const runKeySequenceByBase = new Map<string, number>();
|
||||
|
||||
/** Build a human-readable run key that stays unique across same-millisecond reruns. */
|
||||
function uniqueRunSessionKey(id: string, now: number): string {
|
||||
const base = `kb-${id}-${now}`;
|
||||
const nextSequence = (runKeySequenceByBase.get(base) ?? 0) + 1;
|
||||
runKeySequenceByBase.set(base, nextSequence);
|
||||
return nextSequence === 1 ? base : `${base}-${nextSequence.toString(36)}`;
|
||||
}
|
||||
|
||||
function matchesRunIdentifier(run: TaskRunLink, value: string): boolean {
|
||||
return value === run.sessionKey
|
||||
|| value === run.childSessionKey
|
||||
|| value === run.sessionId
|
||||
|| value === run.runId;
|
||||
}
|
||||
|
||||
function canonicalizeProposalPayloadAssignee(payload: Record<string, unknown>): Record<string, unknown> {
|
||||
if (!Object.prototype.hasOwnProperty.call(payload, 'assignee')) return payload;
|
||||
return {
|
||||
...payload,
|
||||
assignee: payload.assignee == null
|
||||
? undefined
|
||||
: canonicalizeKanbanAssignee(String(payload.assignee)),
|
||||
};
|
||||
}
|
||||
|
||||
// ── Types ────────────────────────────────────────────────────────────
|
||||
|
||||
export type TaskStatus = 'backlog' | 'todo' | 'in-progress' | 'review' | 'done' | 'cancelled';
|
||||
/** Built-in status keys that ship with the default board config. */
|
||||
export const BUILT_IN_STATUSES = ['backlog', 'todo', 'in-progress', 'review', 'done', 'cancelled'] as const;
|
||||
export type BuiltInStatus = typeof BUILT_IN_STATUSES[number];
|
||||
|
||||
/**
|
||||
* TaskStatus is a string so users can define custom column keys.
|
||||
* Built-in values are still the recommended defaults.
|
||||
*/
|
||||
export type TaskStatus = string;
|
||||
export type TaskPriority = 'critical' | 'high' | 'normal' | 'low';
|
||||
export type TaskActor = 'operator' | `agent:${string}`;
|
||||
|
||||
|
|
@@ -49,6 +89,7 @@ export interface TaskFeedback {
|
|||
|
||||
export interface TaskRunLink {
|
||||
sessionKey: string;
|
||||
childSessionKey?: string;
|
||||
sessionId?: string;
|
||||
runId?: string;
|
||||
startedAt: number;
|
||||
|
|
@@ -84,7 +125,7 @@ export interface KanbanTask {
|
|||
|
||||
export interface KanbanBoardConfig {
|
||||
columns: Array<{
|
||||
key: TaskStatus;
|
||||
key: string;
|
||||
title: string;
|
||||
wipLimit?: number;
|
||||
visible: boolean;
|
||||
|
|
@@ -188,6 +229,30 @@ export class TaskNotFoundError extends Error {
|
|||
}
|
||||
}
|
||||
|
||||
export class InvalidTaskStatusError extends Error {
|
||||
status: string;
|
||||
allowed: string[];
|
||||
constructor(status: string, allowed: Iterable<string>) {
|
||||
const allowedList = [...allowed];
|
||||
super(`Invalid task status: ${status}`);
|
||||
this.name = 'InvalidTaskStatusError';
|
||||
this.status = status;
|
||||
this.allowed = allowedList;
|
||||
}
|
||||
}
|
||||
|
||||
export class InvalidBoardConfigError extends Error {
|
||||
details: string;
|
||||
statuses: string[];
|
||||
constructor(details: string, statuses: Iterable<string> = []) {
|
||||
const statusList = [...statuses];
|
||||
super(details);
|
||||
this.name = 'InvalidBoardConfigError';
|
||||
this.details = details;
|
||||
this.statuses = statusList;
|
||||
}
|
||||
}
|
||||
|
||||
export class InvalidTransitionError extends Error {
|
||||
from: TaskStatus;
|
||||
to: TaskStatus;
|
||||
|
|
@@ -205,7 +270,7 @@ const CURRENT_SCHEMA_VERSION = 1;
|
|||
const DEFAULT_LIMIT = 50;
|
||||
const MAX_LIMIT = 200;
|
||||
|
||||
const STATUS_ORDER: Record<TaskStatus, number> = {
|
||||
const STATUS_ORDER: Record<string, number> = {
|
||||
backlog: 0,
|
||||
todo: 1,
|
||||
'in-progress': 2,
|
||||
|
|
@@ -214,13 +279,32 @@ const STATUS_ORDER: Record<TaskStatus, number> = {
|
|||
cancelled: 5,
|
||||
};
|
||||
|
||||
const VALID_TASK_STATUSES = new Set<TaskStatus>(['backlog', 'todo', 'in-progress', 'review', 'done', 'cancelled']);
|
||||
const REQUIRED_BOARD_COLUMNS: TaskStatus[] = ['backlog', 'todo', 'in-progress', 'review', 'done'];
|
||||
const VALID_TASK_STATUSES = new Set<string>(BUILT_IN_STATUSES);
|
||||
const VALID_TASK_PRIORITIES = new Set<TaskPriority>(['critical', 'high', 'normal', 'low']);
|
||||
|
||||
function normalizeTaskStatus(value: unknown): TaskStatus {
|
||||
return typeof value === 'string' && VALID_TASK_STATUSES.has(value as TaskStatus)
|
||||
? (value as TaskStatus)
|
||||
: DEFAULT_CONFIG.defaults.status;
|
||||
function getConfiguredStatuses(config: KanbanBoardConfig): TaskStatus[] {
|
||||
return config.columns.map((column) => column.key);
|
||||
}
|
||||
|
||||
function getStatusOrderMap(config: KanbanBoardConfig): Map<string, number> {
|
||||
return new Map(config.columns.map((column, index) => [column.key, index] as const));
|
||||
}
|
||||
|
||||
function getAllowedTaskStatuses(config: KanbanBoardConfig): Set<string> {
|
||||
return new Set([...BUILT_IN_STATUSES, ...getConfiguredStatuses(config)]);
|
||||
}
|
||||
|
||||
function isAllowedTaskStatus(value: string, config: KanbanBoardConfig): boolean {
|
||||
return getAllowedTaskStatuses(config).has(value);
|
||||
}
|
||||
|
||||
function normalizeTaskStatus(value: unknown, configColumns?: TaskStatus[]): TaskStatus {
|
||||
if (typeof value !== 'string') return DEFAULT_CONFIG.defaults.status;
|
||||
// Accept built-in statuses or any key defined in the current board config
|
||||
if (VALID_TASK_STATUSES.has(value)) return value;
|
||||
if (configColumns && configColumns.includes(value)) return value;
|
||||
return DEFAULT_CONFIG.defaults.status;
|
||||
}
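// Illustration only (not part of this diff): how normalization treats custom
// column keys. The 'qa' column here is a hypothetical user-defined status.
//   normalizeTaskStatus('qa', ['backlog', 'todo', 'qa', 'done'])  -> 'qa'           (configured key)
//   normalizeTaskStatus('in-progress', ['backlog', 'todo', 'qa']) -> 'in-progress'  (built-in key)
//   normalizeTaskStatus('nonsense', ['backlog', 'todo', 'qa'])    -> DEFAULT_CONFIG.defaults.status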
|
||||
|
||||
function normalizeTaskPriority(value: unknown): TaskPriority {
|
||||
|
|
@@ -278,12 +362,21 @@ export class KanbanStore {
|
|||
private readonly filePath: string;
|
||||
private readonly auditPath: string;
|
||||
private readonly withLock: ReturnType<typeof createMutex>;
|
||||
private readonly legacyCandidatePaths: string[];
|
||||
|
||||
constructor(filePath?: string) {
|
||||
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
||||
const dataDir = path.resolve(__dirname, '..', 'data', 'kanban');
|
||||
const projectRoot = process.env.NERVE_PROJECT_ROOT || path.resolve(__dirname, '..', '..');
|
||||
const dataRoot = process.env.NERVE_DATA_DIR || path.join(os.homedir() || process.cwd(), '.nerve');
|
||||
const dataDir = path.join(dataRoot, 'kanban');
|
||||
this.filePath = filePath || path.join(dataDir, 'tasks.json');
|
||||
this.auditPath = path.join(path.dirname(this.filePath), 'audit.log');
|
||||
this.legacyCandidatePaths = filePath
|
||||
? []
|
||||
: [
|
||||
path.join(projectRoot, 'server-dist', 'data', 'kanban', 'tasks.json'),
|
||||
path.join(projectRoot, 'server', 'data', 'kanban', 'tasks.json'),
|
||||
];
|
||||
this.withLock = createMutex();
|
||||
}
|
||||
|
||||
|
|
@@ -333,7 +426,8 @@ export class KanbanStore {
|
|||
if (!data.config.defaults || !data.config.defaults.status) {
|
||||
data.config.defaults = structuredClone(DEFAULT_CONFIG.defaults);
|
||||
}
|
||||
data.config.defaults.status = normalizeTaskStatus(data.config.defaults.status);
|
||||
const configuredStatuses = getConfiguredStatuses(data.config);
|
||||
data.config.defaults.status = normalizeTaskStatus(data.config.defaults.status, configuredStatuses);
|
||||
data.config.defaults.priority = normalizeTaskPriority(data.config.defaults.priority);
|
||||
if (!data.config.proposalPolicy) {
|
||||
data.config.proposalPolicy = 'confirm';
|
||||
|
|
@@ -347,15 +441,71 @@ export class KanbanStore {
|
|||
if (data.config.quickViewLimit === undefined) {
|
||||
data.config.quickViewLimit = DEFAULT_CONFIG.quickViewLimit;
|
||||
}
|
||||
data.tasks = data.tasks.map((task) => ({
|
||||
...task,
|
||||
status: normalizeTaskStatus(task.status),
|
||||
priority: normalizeTaskPriority(task.priority),
|
||||
}));
|
||||
data.tasks = data.tasks.map((task) => {
|
||||
const childSessionKey = task.run?.childSessionKey ?? task.run?.sessionId;
|
||||
return {
|
||||
...task,
|
||||
status: normalizeTaskStatus(task.status, configuredStatuses),
|
||||
priority: normalizeTaskPriority(task.priority),
|
||||
run: task.run
|
||||
? {
|
||||
...task.run,
|
||||
childSessionKey,
|
||||
sessionId: task.run.sessionId ?? childSessionKey,
|
||||
}
|
||||
: task.run,
|
||||
};
|
||||
});
|
||||
data.meta.schemaVersion = CURRENT_SCHEMA_VERSION;
|
||||
return data;
|
||||
}
|
||||
|
||||
private async loadLegacyCandidate(filePath: string): Promise<{
|
||||
filePath: string;
|
||||
auditPath: string;
|
||||
data: StoreData;
|
||||
contentScore: number;
|
||||
mtimeMs: number;
|
||||
} | null> {
|
||||
try {
|
||||
const raw = await fs.promises.readFile(filePath, 'utf-8');
|
||||
const parsed = JSON.parse(raw) as StoreData;
|
||||
const data = this.migrate(parsed);
|
||||
const stats = await fs.promises.stat(filePath);
|
||||
return {
|
||||
filePath,
|
||||
auditPath: path.join(path.dirname(filePath), 'audit.log'),
|
||||
data,
|
||||
contentScore: data.tasks.length + data.proposals.length,
|
||||
mtimeMs: stats.mtimeMs,
|
||||
};
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private async migrateLegacyStoreIfNeeded(): Promise<boolean> {
|
||||
if (this.legacyCandidatePaths.length === 0) return false;
|
||||
|
||||
const candidates = (await Promise.all(this.legacyCandidatePaths.map((filePath) => this.loadLegacyCandidate(filePath))))
|
||||
.filter((candidate): candidate is NonNullable<typeof candidate> => candidate !== null)
|
||||
.sort((a, b) => b.contentScore - a.contentScore || b.mtimeMs - a.mtimeMs);
|
||||
|
||||
if (candidates.length === 0) return false;
|
||||
|
||||
const selected = candidates[0];
|
||||
await this.writeRaw(selected.data);
|
||||
|
||||
try {
|
||||
await fs.promises.copyFile(selected.auditPath, this.auditPath);
|
||||
} catch {
|
||||
// audit log migration is best-effort
|
||||
}
|
||||
|
||||
console.log(`[kanban-store] migrated legacy store from ${selected.filePath} to ${this.filePath}`);
|
||||
return true;
|
||||
}
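// Illustration only (not part of this diff): candidate ordering during the
// one-time migration, with two hypothetical legacy stores on disk. A richer
// store wins; modification time only breaks ties.
//   server-dist/data/kanban/tasks.json -> 0 tasks, 0 proposals, newer mtime  (contentScore 0)
//   server/data/kanban/tasks.json      -> 3 tasks, 1 proposal,  older mtime  (contentScore 4)
// Sorting by contentScore desc, then mtimeMs desc, selects the server/ copy.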
|
||||
|
||||
private async audit(entry: AuditEntry): Promise<void> {
|
||||
try {
|
||||
const dir = path.dirname(this.auditPath);
|
||||
|
|
@@ -374,16 +524,27 @@ export class KanbanStore {
|
|||
await this.withLock(async () => {
|
||||
try {
|
||||
await fs.promises.access(this.filePath);
|
||||
return;
|
||||
} catch {
|
||||
// canonical store missing, continue
|
||||
}
|
||||
|
||||
const migrated = await this.migrateLegacyStoreIfNeeded();
|
||||
if (!migrated) {
|
||||
await this.writeRaw(emptyStore());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private async withStore<T>(fn: () => Promise<T>): Promise<T> {
|
||||
await this.init();
|
||||
return this.withLock(fn);
|
||||
}
|
||||
|
||||
// ── Tasks: List ──────────────────────────────────────────────────
|
||||
|
||||
async listTasks(filters: TaskFilters = {}): Promise<TaskListResult> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
let tasks = data.tasks;
|
||||
|
||||
|
|
@@ -412,9 +573,12 @@ export class KanbanStore {
|
|||
);
|
||||
}
|
||||
|
||||
const statusOrder = getStatusOrderMap(data.config);
|
||||
|
||||
// Sort: status order → columnOrder → updatedAt desc
|
||||
tasks.sort((a, b) => {
|
||||
const statusDiff = STATUS_ORDER[a.status] - STATUS_ORDER[b.status];
|
||||
const statusDiff = (statusOrder.get(a.status) ?? STATUS_ORDER[a.status] ?? Number.MAX_SAFE_INTEGER)
|
||||
- (statusOrder.get(b.status) ?? STATUS_ORDER[b.status] ?? Number.MAX_SAFE_INTEGER);
|
||||
if (statusDiff !== 0) return statusDiff;
|
||||
const orderDiff = a.columnOrder - b.columnOrder;
|
||||
if (orderDiff !== 0) return orderDiff;
|
||||
|
|
@@ -433,7 +597,7 @@ export class KanbanStore {
|
|||
// ── Tasks: Get ───────────────────────────────────────────────────
|
||||
|
||||
async getTask(id: string): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const task = data.tasks.find((t) => t.id === id);
|
||||
if (!task) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -457,9 +621,13 @@ export class KanbanStore {
|
|||
dueAt?: number;
|
||||
estimateMin?: number;
|
||||
}): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
|
||||
if (input.status && !isAllowedTaskStatus(input.status, data.config)) {
|
||||
throw new InvalidTaskStatusError(input.status, getAllowedTaskStatuses(data.config));
|
||||
}
|
||||
|
||||
// Compute columnOrder — append to end of target column
|
||||
const targetStatus = input.status ?? data.config.defaults.status;
|
||||
const maxOrder = data.tasks
|
||||
|
|
@@ -468,6 +636,9 @@ export class KanbanStore {
|
|||
|
||||
const now = Date.now();
|
||||
const existingIds = new Set(data.tasks.map((t) => t.id));
|
||||
const assignee = input.assignee == null
|
||||
? undefined
|
||||
: canonicalizeKanbanAssignee(input.assignee);
|
||||
const task: KanbanTask = {
|
||||
id: uniqueSlugId(input.title, existingIds),
|
||||
title: input.title,
|
||||
|
|
@@ -479,7 +650,7 @@ export class KanbanStore {
|
|||
updatedAt: now,
|
||||
version: 1,
|
||||
sourceSessionKey: input.sourceSessionKey,
|
||||
assignee: input.assignee,
|
||||
assignee,
|
||||
labels: input.labels ?? [],
|
||||
columnOrder: maxOrder + 1,
|
||||
model: input.model,
|
||||
|
|
@@ -523,7 +694,7 @@ export class KanbanStore {
|
|||
>,
|
||||
actor?: string,
|
||||
): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === id);
|
||||
if (idx === -1) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -533,14 +704,30 @@ export class KanbanStore {
|
|||
throw new VersionConflictError(task.version, task);
|
||||
}
|
||||
|
||||
if (patch.status && !isAllowedTaskStatus(patch.status, data.config)) {
|
||||
throw new InvalidTaskStatusError(patch.status, getAllowedTaskStatuses(data.config));
|
||||
}
|
||||
|
||||
// Apply patch
|
||||
const normalizedPatch = { ...patch };
|
||||
if (Object.prototype.hasOwnProperty.call(patch, 'assignee')) {
|
||||
normalizedPatch.assignee = patch.assignee == null
|
||||
? undefined
|
||||
: canonicalizeKanbanAssignee(patch.assignee);
|
||||
}
|
||||
|
||||
const now = Date.now();
|
||||
const updated: KanbanTask = { ...task, ...patch, updatedAt: now, version: task.version + 1 };
|
||||
const updated: KanbanTask = {
|
||||
...task,
|
||||
...normalizedPatch,
|
||||
updatedAt: now,
|
||||
version: task.version + 1,
|
||||
};
|
||||
|
||||
// If status changed, re-compute columnOrder (append to end of new column)
|
||||
if (patch.status && patch.status !== task.status) {
|
||||
if (normalizedPatch.status && normalizedPatch.status !== task.status) {
|
||||
const maxOrder = data.tasks
|
||||
.filter((t) => t.status === patch.status && t.id !== id)
|
||||
.filter((t) => t.status === normalizedPatch.status && t.id !== id)
|
||||
.reduce((max, t) => Math.max(max, t.columnOrder), -1);
|
||||
updated.columnOrder = maxOrder + 1;
|
||||
}
|
||||
|
|
@@ -552,7 +739,7 @@ export class KanbanStore {
|
|||
action: 'update',
|
||||
taskId: id,
|
||||
actor,
|
||||
detail: Object.keys(patch).join(','),
|
||||
detail: Object.keys(normalizedPatch).join(','),
|
||||
});
|
||||
return updated;
|
||||
});
|
||||
|
|
@@ -561,7 +748,7 @@ export class KanbanStore {
|
|||
// ── Tasks: Delete ────────────────────────────────────────────────
|
||||
|
||||
async deleteTask(id: string, actor?: string): Promise<void> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === id);
|
||||
if (idx === -1) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -581,7 +768,7 @@ export class KanbanStore {
|
|||
targetIndex: number,
|
||||
actor?: string,
|
||||
): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === id);
|
||||
if (idx === -1) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -591,6 +778,10 @@ export class KanbanStore {
|
|||
throw new VersionConflictError(task.version, task);
|
||||
}
|
||||
|
||||
if (!isAllowedTaskStatus(targetStatus, data.config)) {
|
||||
throw new InvalidTaskStatusError(targetStatus, getAllowedTaskStatuses(data.config));
|
||||
}
|
||||
|
||||
const now = Date.now();
|
||||
|
||||
// Get all tasks in target column (excluding the task being moved)
|
||||
|
|
@@ -632,18 +823,50 @@ export class KanbanStore {
|
|||
// ── Config ───────────────────────────────────────────────────────
|
||||
|
||||
async getConfig(): Promise<KanbanBoardConfig> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
return data.config;
|
||||
});
|
||||
}
|
||||
|
||||
async updateConfig(patch: Partial<KanbanBoardConfig>): Promise<KanbanBoardConfig> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
data.config = { ...data.config, ...patch };
|
||||
if (patch.columns) data.config.columns = patch.columns;
|
||||
if (patch.defaults) data.config.defaults = { ...data.config.defaults, ...patch.defaults };
|
||||
const nextConfig: KanbanBoardConfig = {
|
||||
...data.config,
|
||||
...patch,
|
||||
columns: patch.columns ?? data.config.columns,
|
||||
defaults: { ...data.config.defaults, ...patch.defaults },
|
||||
};
|
||||
|
||||
const configuredStatuses = new Set(getConfiguredStatuses(nextConfig));
|
||||
const missingBuiltIns = REQUIRED_BOARD_COLUMNS.filter((status) => !configuredStatuses.has(status));
|
||||
if (missingBuiltIns.length > 0) {
|
||||
throw new InvalidBoardConfigError(
|
||||
`Missing required board columns: ${missingBuiltIns.join(', ')}`,
|
||||
missingBuiltIns,
|
||||
);
|
||||
}
|
||||
|
||||
if (!isAllowedTaskStatus(nextConfig.defaults.status, nextConfig)) {
|
||||
throw new InvalidTaskStatusError(nextConfig.defaults.status, getAllowedTaskStatuses(nextConfig));
|
||||
}
|
||||
|
||||
const referencedStatuses = new Set<string>([
|
||||
...data.tasks.map((task) => task.status),
|
||||
...data.proposals.flatMap((proposal) => (
|
||||
typeof proposal.payload?.status === 'string' ? [proposal.payload.status] : []
|
||||
)),
|
||||
]);
|
||||
const removedReferencedStatuses = [...referencedStatuses].filter((status) => !configuredStatuses.has(status));
|
||||
if (removedReferencedStatuses.length > 0) {
|
||||
throw new InvalidBoardConfigError(
|
||||
`Cannot remove columns still in use: ${removedReferencedStatuses.join(', ')}`,
|
||||
removedReferencedStatuses,
|
||||
);
|
||||
}
|
||||
|
||||
data.config = nextConfig;
|
||||
await this.writeRaw(data);
|
||||
await this.audit({ ts: Date.now(), action: 'config_update' });
|
||||
return data.config;
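// Illustration only (not part of this diff): validation order in updateConfig,
// assuming a board with a hypothetical custom 'qa' column and one task in 'qa'.
//   patch that drops 'review'          -> InvalidBoardConfigError: Missing required board columns: review
//   patch that drops only 'qa'         -> InvalidBoardConfigError: Cannot remove columns still in use: qa
//   patch that adds a 'blocked' column -> accepted; 'blocked' becomes a valid status for this board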
|
||||
|
|
@@ -654,10 +877,10 @@ export class KanbanStore {
|
|||
|
||||
async executeTask(
|
||||
id: string,
|
||||
options?: { model?: string; thinking?: 'off' | 'low' | 'medium' | 'high' },
|
||||
options?: { model?: string; thinking?: 'off' | 'low' | 'medium' | 'high'; sessionKey?: string },
|
||||
actor?: string,
|
||||
): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === id);
|
||||
if (idx === -1) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -679,8 +902,7 @@ export class KanbanStore {
|
|||
}
|
||||
|
||||
const now = Date.now();
|
||||
// Include a short timestamp suffix so reruns of the same task get unique session keys.
|
||||
const sessionKey = `kb-${id}-${now}`;
|
||||
const sessionKey = options?.sessionKey ?? uniqueRunSessionKey(id, now);
|
||||
|
||||
task.status = 'in-progress';
|
||||
task.run = {
|
||||
|
|
@@ -688,6 +910,8 @@ export class KanbanStore {
|
|||
startedAt: now,
|
||||
status: 'running',
|
||||
};
|
||||
task.result = undefined;
|
||||
task.resultAt = undefined;
|
||||
if (options?.model) task.model = options.model;
|
||||
if (options?.thinking) task.thinking = options.thinking;
|
||||
|
||||
|
|
@@ -707,10 +931,49 @@ export class KanbanStore {
|
|||
});
|
||||
}
|
||||
|
||||
async attachRunIdentifiers(
|
||||
taskId: string,
|
||||
sessionKey: string,
|
||||
identifiers: { childSessionKey?: string; runId?: string },
|
||||
): Promise<KanbanTask | null> {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === taskId);
|
||||
if (idx === -1) throw new TaskNotFoundError(taskId);
|
||||
|
||||
const task = data.tasks[idx];
|
||||
if (!task.run || task.run.status !== 'running' || task.run.sessionKey !== sessionKey) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const nextChildSessionKey = identifiers.childSessionKey ?? task.run.childSessionKey ?? task.run.sessionId;
|
||||
const nextRunId = identifiers.runId ?? task.run.runId;
|
||||
const nextSessionId = task.run.sessionId ?? nextChildSessionKey;
|
||||
|
||||
if (
|
||||
nextChildSessionKey === task.run.childSessionKey
|
||||
&& nextRunId === task.run.runId
|
||||
&& nextSessionId === task.run.sessionId
|
||||
) {
|
||||
return task;
|
||||
}
|
||||
|
||||
task.run = {
|
||||
...task.run,
|
||||
childSessionKey: nextChildSessionKey,
|
||||
sessionId: nextSessionId,
|
||||
runId: nextRunId,
|
||||
};
|
||||
data.tasks[idx] = task;
|
||||
await this.writeRaw(data);
|
||||
return task;
|
||||
});
|
||||
}
|
||||
|
||||
// ── Workflow: Approve ────────────────────────────────────────────
|
||||
|
||||
async approveTask(id: string, note?: string, actor?: string): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === id);
|
||||
if (idx === -1) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -755,7 +1018,7 @@ export class KanbanStore {
|
|||
// ── Workflow: Reject ─────────────────────────────────────────────
|
||||
|
||||
async rejectTask(id: string, note: string, actor?: string): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === id);
|
||||
if (idx === -1) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -803,7 +1066,7 @@ export class KanbanStore {
|
|||
// ── Workflow: Abort ──────────────────────────────────────────────
|
||||
|
||||
async abortTask(id: string, note?: string, actor?: string): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === id);
|
||||
if (idx === -1) throw new TaskNotFoundError(id);
|
||||
|
|
@@ -855,10 +1118,11 @@ export class KanbanStore {
|
|||
|
||||
async completeRun(
|
||||
taskId: string,
|
||||
sessionKey: string,
|
||||
result?: string,
|
||||
error?: string,
|
||||
): Promise<KanbanTask> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const idx = data.tasks.findIndex((t) => t.id === taskId);
|
||||
if (idx === -1) throw new TaskNotFoundError(taskId);
|
||||
|
|
@@ -873,6 +1137,14 @@ export class KanbanStore {
|
|||
);
|
||||
}
|
||||
|
||||
if (!matchesRunIdentifier(task.run, sessionKey)) {
|
||||
throw new InvalidTransitionError(
|
||||
task.status,
|
||||
error ? 'todo' : 'review',
|
||||
`Run key mismatch for task "${taskId}": active run is "${task.run.sessionKey}", got "${sessionKey}"`,
|
||||
);
|
||||
}
|
||||
|
||||
const now = Date.now();
|
||||
task.run.endedAt = now;
|
||||
|
||||
|
|
@@ -881,6 +1153,8 @@ export class KanbanStore {
|
|||
task.run.status = 'error';
|
||||
task.run.error = error;
|
||||
task.status = 'todo';
|
||||
task.result = undefined;
|
||||
task.resultAt = undefined;
|
||||
|
||||
const maxOrder = data.tasks
|
||||
.filter((t) => t.status === 'todo' && t.id !== taskId)
|
||||
|
|
@@ -910,7 +1184,7 @@ export class KanbanStore {
|
|||
ts: now,
|
||||
action: 'complete_run',
|
||||
taskId,
|
||||
detail: error ? `error: ${error}` : 'success',
|
||||
detail: error ? `session=${sessionKey},error: ${error}` : `session=${sessionKey},success`,
|
||||
});
|
||||
return task;
|
||||
});
|
||||
|
|
@@ -919,7 +1193,7 @@ export class KanbanStore {
|
|||
// ── Stale run reconciliation ─────────────────────────────────────
|
||||
|
||||
async reconcileStaleRuns(maxAgeMs: number): Promise<KanbanTask[]> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const now = Date.now();
|
||||
const reconciled: KanbanTask[] = [];
|
||||
|
|
@@ -971,14 +1245,20 @@ export class KanbanStore {
|
|||
sourceSessionKey?: string;
|
||||
proposedBy: TaskActor;
|
||||
}): Promise<KanbanProposal> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const now = Date.now();
|
||||
|
||||
const payload = canonicalizeProposalPayloadAssignee(input.payload);
|
||||
|
||||
if ('status' in payload && typeof payload.status === 'string' && !isAllowedTaskStatus(payload.status, data.config)) {
|
||||
throw new InvalidTaskStatusError(payload.status, getAllowedTaskStatuses(data.config));
|
||||
}
|
||||
|
||||
const proposal: KanbanProposal = {
|
||||
id: crypto.randomUUID(),
|
||||
type: input.type,
|
||||
payload: input.payload,
|
||||
payload,
|
||||
sourceSessionKey: input.sourceSessionKey,
|
||||
proposedBy: input.proposedBy,
|
||||
proposedAt: now,
|
||||
|
|
@@ -989,17 +1269,17 @@ export class KanbanStore {
|
|||
// In auto mode, immediately execute the proposal
|
||||
if (data.config.proposalPolicy === 'auto') {
|
||||
if (input.type === 'create') {
|
||||
const task = await this._createTaskUnlocked(data, input.payload, input.proposedBy);
|
||||
const task = await this._createTaskUnlocked(data, payload, input.proposedBy);
|
||||
proposal.status = 'approved';
|
||||
proposal.resolvedAt = now;
|
||||
proposal.resolvedBy = input.proposedBy;
|
||||
proposal.resultTaskId = task.id;
|
||||
} else {
|
||||
await this._applyUpdateUnlocked(data, input.payload);
|
||||
await this._applyUpdateUnlocked(data, payload);
|
||||
proposal.status = 'approved';
|
||||
proposal.resolvedAt = now;
|
||||
proposal.resolvedBy = input.proposedBy;
|
||||
proposal.resultTaskId = input.payload.id as string;
|
||||
proposal.resultTaskId = payload.id as string;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -1014,7 +1294,7 @@ export class KanbanStore {
|
|||
id: string,
|
||||
actor: TaskActor = 'operator',
|
||||
): Promise<{ proposal: KanbanProposal; task: KanbanTask }> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const proposal = data.proposals.find((p) => p.id === id);
|
||||
if (!proposal) throw new ProposalNotFoundError(id);
|
||||
|
|
@@ -1047,7 +1327,7 @@ export class KanbanStore {
|
|||
reason?: string,
|
||||
actor: TaskActor = 'operator',
|
||||
): Promise<KanbanProposal> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
const proposal = data.proposals.find((p) => p.id === id);
|
||||
if (!proposal) throw new ProposalNotFoundError(id);
|
||||
|
|
@@ -1067,7 +1347,7 @@ export class KanbanStore {
|
|||
}
|
||||
|
||||
async listProposals(statusFilter?: ProposalStatus): Promise<KanbanProposal[]> {
|
||||
return this.withLock(async () => {
|
||||
return this.withStore(async () => {
|
||||
const data = await this.readRaw();
|
||||
let proposals = data.proposals;
|
||||
if (statusFilter) {
|
||||
|
|
@@ -1086,6 +1366,9 @@ export class KanbanStore {
|
|||
proposedBy: TaskActor,
|
||||
): Promise<KanbanTask> {
|
||||
const targetStatus = (payload.status as TaskStatus) ?? data.config.defaults.status;
|
||||
if (!isAllowedTaskStatus(targetStatus, data.config)) {
|
||||
throw new InvalidTaskStatusError(targetStatus, getAllowedTaskStatuses(data.config));
|
||||
}
|
||||
const maxOrder = data.tasks
|
||||
.filter((t) => t.status === targetStatus)
|
||||
.reduce((max, t) => Math.max(max, t.columnOrder), -1);
|
||||
|
|
@@ -1093,6 +1376,9 @@ export class KanbanStore {
|
|||
const now = Date.now();
|
||||
const existingIds = new Set(data.tasks.map((t) => t.id));
|
||||
const title = typeof payload.title === 'string' && payload.title ? payload.title : 'untitled';
|
||||
const assignee = payload.assignee == null
|
||||
? undefined
|
||||
: canonicalizeKanbanAssignee(String(payload.assignee));
|
||||
const task: KanbanTask = {
|
||||
id: uniqueSlugId(title, existingIds),
|
||||
title,
|
||||
|
|
@@ -1104,7 +1390,7 @@ export class KanbanStore {
|
|||
updatedAt: now,
|
||||
version: 1,
|
||||
sourceSessionKey: payload.sourceSessionKey as string | undefined,
|
||||
assignee: payload.assignee as TaskActor | undefined,
|
||||
assignee,
|
||||
labels: (payload.labels as string[]) ?? [],
|
||||
columnOrder: maxOrder + 1,
|
||||
model: payload.model as string | undefined,
|
||||
|
|
@@ -1138,9 +1424,17 @@ export class KanbanStore {
|
|||
for (const key of ALLOWED_UPDATE_FIELDS) {
|
||||
if (key in payload) patch[key] = payload[key];
|
||||
}
|
||||
if (Object.prototype.hasOwnProperty.call(patch, 'assignee')) {
|
||||
patch.assignee = patch.assignee == null
|
||||
? undefined
|
||||
: canonicalizeKanbanAssignee(String(patch.assignee));
|
||||
}
|
||||
|
||||
// If status changed, re-compute columnOrder
|
||||
if (patch.status && patch.status !== task.status) {
|
||||
if (typeof patch.status !== 'string' || !isAllowedTaskStatus(patch.status, data.config)) {
|
||||
throw new InvalidTaskStatusError(String(patch.status), getAllowedTaskStatuses(data.config));
|
||||
}
|
||||
const maxOrder = data.tasks
|
||||
.filter((t) => t.status === (patch.status as TaskStatus) && t.id !== taskId)
|
||||
.reduce((max, t) => Math.max(max, t.columnOrder), -1);
|
||||
|
|
|
|||
208
server/lib/kanban-subagent-fallback.test.ts
Normal file
|
|
@@ -0,0 +1,208 @@
|
|||
/**
|
||||
* Tests for Kanban subagent launch helper.
|
||||
* @module
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import {
|
||||
buildKanbanFallbackRunKey,
|
||||
launchKanbanFallbackSubagentViaRpc,
|
||||
resolveKanbanFallbackParentSessionKey,
|
||||
} from './kanban-subagent-fallback.js';
|
||||
import * as gatewayRpc from './gateway-rpc.js';
|
||||
|
||||
describe('launchKanbanFallbackSubagentViaRpc', () => {
|
||||
let calls: Array<{ method: string; params: Record<string, unknown> }>;
|
||||
|
||||
beforeEach(() => {
|
||||
calls = [];
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
calls.push({ method, params });
|
||||
|
||||
if (method === 'sessions.create') {
|
||||
return {
|
||||
ok: true,
|
||||
key: String(params.key),
|
||||
entry: {
|
||||
label: params.label,
|
||||
parentSessionKey: params.parentSessionKey,
|
||||
},
|
||||
};
|
||||
}
|
||||
if (method === 'sessions.send') {
|
||||
return { ok: true, runId: 'mock-run-id-12345' };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it('creates a real child session before sending the task', async () => {
|
||||
await launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
});
|
||||
|
||||
expect(calls.length).toBeGreaterThanOrEqual(2);
|
||||
expect(calls[0].method).toBe('sessions.create');
|
||||
expect(calls[1].method).toBe('sessions.send');
|
||||
});
|
||||
|
||||
it('fails when the parent session is not a top-level root', async () => {
|
||||
await expect(launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:subagent:existing-child',
|
||||
})).rejects.toThrow('Parent agent session must be a top-level root');
|
||||
|
||||
expect(calls).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('surfaces sessions.create failures from the gateway', async () => {
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
calls.push({ method, params });
|
||||
if (method === 'sessions.create') {
|
||||
throw new Error('Parent session not found: agent:reviewer:main');
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
await expect(launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
})).rejects.toThrow('Parent session not found');
|
||||
|
||||
expect(calls.some((call) => call.method === 'sessions.send')).toBe(false);
|
||||
});
|
||||
|
||||
it('creates the worker session under the requested parent root', async () => {
|
||||
await launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
});
|
||||
|
||||
const createCall = calls.find((c) => c.method === 'sessions.create');
|
||||
expect(createCall?.params.parentSessionKey).toBe('agent:reviewer:main');
|
||||
expect(createCall?.params.label).toBe('test-kanban-run');
|
||||
expect(createCall?.params.key).toMatch(/^agent:reviewer:subagent:/);
|
||||
});
|
||||
|
||||
it('sends the raw task to the created child session with model/thinking preserved', async () => {
|
||||
await launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
model: 'openai-codex/gpt-5.4',
|
||||
thinking: 'high',
|
||||
});
|
||||
|
||||
const createCall = calls.find((c) => c.method === 'sessions.create');
|
||||
const sendCall = calls.find((c) => c.method === 'sessions.send');
|
||||
|
||||
expect(createCall?.params.model).toBe('openai-codex/gpt-5.4');
|
||||
expect(sendCall?.params.message).toBe('Execute kanban task');
|
||||
expect(sendCall?.params.thinking).toBe('high');
|
||||
expect(sendCall?.params.key).toBe(createCall?.params.key);
|
||||
expect(String(sendCall?.params.message)).not.toContain('[spawn-subagent]');
|
||||
});
|
||||
|
||||
it('deletes the created child session when sessions.send fails after sessions.create', async () => {
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
calls.push({ method, params });
|
||||
|
||||
if (method === 'sessions.create') {
|
||||
return {
|
||||
ok: true,
|
||||
key: String(params.key),
|
||||
};
|
||||
}
|
||||
if (method === 'sessions.send') {
|
||||
throw new Error('send failed');
|
||||
}
|
||||
if (method === 'sessions.delete') {
|
||||
return { ok: true };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
await expect(launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
})).rejects.toThrow('send failed');
|
||||
|
||||
const createCall = calls.find((c) => c.method === 'sessions.create');
|
||||
const deleteCall = calls.find((c) => c.method === 'sessions.delete');
|
||||
|
||||
expect(createCall).toBeDefined();
|
||||
expect(deleteCall?.params).toEqual({
|
||||
key: createCall?.params.key,
|
||||
deleteTranscript: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('returns the deterministic run correlation key, child session key, and runId', async () => {
|
||||
const result = await launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
});
|
||||
|
||||
expect(result.sessionKey).toBe('kanban-root:test-kanban-run');
|
||||
expect(result.parentSessionKey).toBe('agent:reviewer:main');
|
||||
expect(result.childSessionKey).toMatch(/^agent:reviewer:subagent:/);
|
||||
expect(result.runId).toBe('mock-run-id-12345');
|
||||
});
|
||||
|
||||
it('returns a compatibility snapshot containing the parent key', async () => {
|
||||
const result = await launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
});
|
||||
|
||||
expect(result.knownSessionKeysBefore).toEqual(['agent:reviewer:main']);
|
||||
});
|
||||
|
||||
it('generates an idempotency key for sessions.send', async () => {
|
||||
await launchKanbanFallbackSubagentViaRpc({
|
||||
label: 'test-kanban-run',
|
||||
task: 'Execute kanban task',
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
});
|
||||
|
||||
const sendCall = calls.find((c) => c.method === 'sessions.send');
|
||||
expect(sendCall?.params.idempotencyKey).toBeDefined();
|
||||
expect(typeof sendCall?.params.idempotencyKey).toBe('string');
|
||||
expect((sendCall?.params.idempotencyKey as string).length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildKanbanFallbackRunKey', () => {
|
||||
it('returns a deterministic run correlation key derived from label', () => {
|
||||
expect(buildKanbanFallbackRunKey('test-kanban-run')).toBe('kanban-root:test-kanban-run');
|
||||
});
|
||||
});
|
||||
|
||||
describe('resolveKanbanFallbackParentSessionKey', () => {
|
||||
it('maps an assignee agent id to its top-level root session', () => {
|
||||
expect(resolveKanbanFallbackParentSessionKey('agent:reviewer')).toBe('agent:reviewer:main');
|
||||
});
|
||||
|
||||
it('normalizes full agent-flavored values back to the owning top-level root', () => {
|
||||
expect(resolveKanbanFallbackParentSessionKey('agent:reviewer:main')).toBe('agent:reviewer:main');
|
||||
expect(resolveKanbanFallbackParentSessionKey('agent:reviewer:subagent:child')).toBe('agent:reviewer:main');
|
||||
});
|
||||
|
||||
it('rejects operator, unset, and @main assignees for macOS fallback execution', () => {
|
||||
expect(resolveKanbanFallbackParentSessionKey('operator')).toBeNull();
|
||||
expect(resolveKanbanFallbackParentSessionKey(undefined)).toBeNull();
|
||||
expect(resolveKanbanFallbackParentSessionKey('agent:main')).toBeNull();
|
||||
});
|
||||
});
|
||||
115
server/lib/kanban-subagent-fallback.ts
Normal file
115
server/lib/kanban-subagent-fallback.ts
Normal file
|
|
@@ -0,0 +1,115 @@
|
|||
/**
|
||||
* Server-side Kanban subagent launch helper.
|
||||
*
|
||||
* Kanban tasks should run as real child sessions under an existing top-level
|
||||
* agent root, not as synthetic message conventions that hope the parent will
|
||||
* spawn on our behalf.
|
||||
*
|
||||
* Historical note: the surrounding route code still uses the word “fallback”
|
||||
* because assigned-root execution originally existed as a macOS-specific
|
||||
* workaround. The transport here is now a first-class session primitive.
|
||||
*
|
||||
* @module
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'node:crypto';
|
||||
import { resolveKanbanAssigneeRootSessionKey } from './kanban-assignee.js';
|
||||
import { gatewayRpcCall } from './gateway-rpc.js';
|
||||
|
||||
export interface KanbanFallbackLaunchResult {
|
||||
/** Deterministic correlation key stored on the task run link. */
|
||||
sessionKey: string;
|
||||
/** Existing top-level agent root that owns the spawned child. */
|
||||
parentSessionKey: string;
|
||||
/** Real worker session created under the selected parent root. */
|
||||
childSessionKey: string;
|
||||
/** Back-compat snapshot hook for older poller logic. */
|
||||
knownSessionKeysBefore: string[];
|
||||
/** Optional runId returned by the initial session send. */
|
||||
runId?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a deterministic Kanban run correlation key from a launch label.
|
||||
*
|
||||
* Historical note: the run link field is still named `sessionKey`, but for
|
||||
* Kanban execution this value is only a stable run correlation key. The real
|
||||
* worker session key is attached separately as `childSessionKey`.
|
||||
*/
|
||||
export function buildKanbanFallbackRunKey(label: string): string {
|
||||
const normalized = label
|
||||
.toLowerCase()
|
||||
.replace(/[^a-z0-9]+/g, '-')
|
||||
.replace(/^-+|-+$/g, '');
|
||||
|
||||
return `kanban-root:${normalized}`;
|
||||
}
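// Illustration only (not part of this diff): label normalization for the run key.
//   buildKanbanFallbackRunKey('Fix Login Flow!')  -> 'kanban-root:fix-login-flow'
//   buildKanbanFallbackRunKey('audit-auth-flow')  -> 'kanban-root:audit-auth-flow'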
|
||||
|
||||
/** Resolve the owning top-level worker root session for a task assignee. */
|
||||
export function resolveKanbanFallbackParentSessionKey(assignee?: string): string | null {
|
||||
return resolveKanbanAssigneeRootSessionKey(assignee);
|
||||
}
|
||||
|
||||
function buildChildSessionKey(parentSessionKey: string): string {
|
||||
const match = parentSessionKey.match(/^agent:([^:]+):main$/);
|
||||
if (!match) {
|
||||
throw new Error(`Parent agent session must be a top-level root: ${parentSessionKey}`);
|
||||
}
|
||||
return `agent:${match[1]}:subagent:${randomUUID()}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Launch a Kanban task as a real child session under an existing top-level
|
||||
* agent root.
|
||||
*/
|
||||
export async function launchKanbanFallbackSubagentViaRpc(params: {
|
||||
label: string;
|
||||
task: string;
|
||||
parentSessionKey: string;
|
||||
model?: string;
|
||||
thinking?: string;
|
||||
}): Promise<KanbanFallbackLaunchResult> {
|
||||
const sessionKey = buildKanbanFallbackRunKey(params.label);
|
||||
const childSessionKey = buildChildSessionKey(params.parentSessionKey);
|
||||
|
||||
const createResponse = await gatewayRpcCall('sessions.create', {
|
||||
key: childSessionKey,
|
||||
parentSessionKey: params.parentSessionKey,
|
||||
label: params.label,
|
||||
...(params.model ? { model: params.model } : {}),
|
||||
}) as { key?: string; sessionKey?: string };
|
||||
|
||||
const resolvedChildSessionKey = typeof createResponse.key === 'string' && createResponse.key.trim()
|
||||
? createResponse.key
|
||||
: typeof createResponse.sessionKey === 'string' && createResponse.sessionKey.trim()
|
||||
? createResponse.sessionKey
|
||||
: childSessionKey;
|
||||
|
||||
let sendResponse: { runId?: string };
|
||||
try {
|
||||
sendResponse = await gatewayRpcCall('sessions.send', {
|
||||
key: resolvedChildSessionKey,
|
||||
message: params.task,
|
||||
...(params.thinking ? { thinking: params.thinking } : {}),
|
||||
idempotencyKey: `kanban-subagent-${Date.now()}-${randomUUID().slice(0, 8)}`,
|
||||
}) as { runId?: string };
|
||||
} catch (error) {
|
||||
try {
|
||||
await gatewayRpcCall('sessions.delete', {
|
||||
key: resolvedChildSessionKey,
|
||||
deleteTranscript: true,
|
||||
});
|
||||
} catch {
|
||||
// Best-effort cleanup only; preserve the original launch failure.
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
return {
|
||||
sessionKey,
|
||||
parentSessionKey: params.parentSessionKey,
|
||||
childSessionKey: resolvedChildSessionKey,
|
||||
knownSessionKeysBefore: [params.parentSessionKey],
|
||||
runId: sendResponse.runId,
|
||||
};
|
||||
}
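// Illustration only (not part of this diff): one hypothetical way a route could
// wire this helper into KanbanStore execution. The store calls and variable
// names below are assumptions, not exports of this module.
//
// const parentSessionKey = resolveKanbanFallbackParentSessionKey(task.assignee);
// if (parentSessionKey) {
//   const launch = await launchKanbanFallbackSubagentViaRpc({
//     label: task.id,
//     task: task.description ?? task.title,
//     parentSessionKey,
//   });
//   // Correlate the task's run link with the real child session and runId.
//   await store.executeTask(task.id, { sessionKey: launch.sessionKey });
//   await store.attachRunIdentifiers(task.id, launch.sessionKey, {
//     childSessionKey: launch.childSessionKey,
//     runId: launch.runId,
//   });
// }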
|
||||
117
server/lib/openclaw-config.ts
Normal file
117
server/lib/openclaw-config.ts
Normal file
|
|
@@ -0,0 +1,117 @@
|
|||
import fs from 'node:fs';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import JSON5 from 'json5';
|
||||
import { config } from './config.js';
|
||||
|
||||
interface OpenClawAgentEntry {
|
||||
id?: unknown;
|
||||
workspace?: unknown;
|
||||
}
|
||||
|
||||
interface OpenClawConfigShape {
|
||||
agents?: {
|
||||
defaults?: {
|
||||
workspace?: unknown;
|
||||
};
|
||||
list?: OpenClawAgentEntry[];
|
||||
};
|
||||
}
|
||||
|
||||
type CachedConfig = {
|
||||
path: string;
|
||||
mtimeMs: number;
|
||||
parsed: OpenClawConfigShape | null;
|
||||
};
|
||||
|
||||
let cachedConfig: CachedConfig | null = null;
|
||||
|
||||
function getHomeDir(): string {
|
||||
return config.home || process.env.HOME || os.homedir();
|
||||
}
|
||||
|
||||
export function resolveOpenClawConfigPath(): string {
|
||||
return process.env.OPENCLAW_CONFIG_PATH?.trim() || path.join(getHomeDir(), '.openclaw', 'openclaw.json');
|
||||
}
|
||||
|
||||
function expandAndResolvePath(rawPath: string, configPath: string): string {
|
||||
const trimmed = rawPath.trim();
|
||||
if (!trimmed) return trimmed;
|
||||
|
||||
const expanded = trimmed === '~'
|
||||
? getHomeDir()
|
||||
: trimmed.startsWith('~/')
|
||||
? path.join(getHomeDir(), trimmed.slice(2))
|
||||
: trimmed;
|
||||
|
||||
if (path.isAbsolute(expanded)) return expanded;
|
||||
return path.resolve(path.dirname(configPath), expanded);
|
||||
}
|
||||
|
||||
function loadOpenClawConfig(): { configPath: string; parsed: OpenClawConfigShape | null } {
|
||||
const configPath = resolveOpenClawConfigPath();
|
||||
|
||||
try {
|
||||
const stat = fs.statSync(configPath);
|
||||
if (cachedConfig && cachedConfig.path === configPath && cachedConfig.mtimeMs === stat.mtimeMs) {
|
||||
return { configPath, parsed: cachedConfig.parsed };
|
||||
}
|
||||
|
||||
const raw = fs.readFileSync(configPath, 'utf8');
|
||||
const parsed = JSON5.parse(raw) as OpenClawConfigShape;
|
||||
cachedConfig = { path: configPath, mtimeMs: stat.mtimeMs, parsed };
|
||||
return { configPath, parsed };
|
||||
} catch {
|
||||
cachedConfig = { path: configPath, mtimeMs: -1, parsed: null };
|
||||
return { configPath, parsed: null };
|
||||
}
|
||||
}
|
||||
|
||||
export function getConfiguredAgentWorkspace(agentId: string): string | null {
|
||||
const { configPath, parsed } = loadOpenClawConfig();
|
||||
const agents = parsed?.agents?.list;
|
||||
if (!Array.isArray(agents)) return null;
|
||||
|
||||
const match = agents.find((entry) => typeof entry?.id === 'string' && entry.id === agentId);
|
||||
if (!match || typeof match.workspace !== 'string' || !match.workspace.trim()) return null;
|
||||
|
||||
return expandAndResolvePath(match.workspace, configPath);
|
||||
}
|
||||
|
||||
export function getDefaultAgentWorkspaceRoot(): string | null {
|
||||
const { configPath, parsed } = loadOpenClawConfig();
|
||||
const rawWorkspace = parsed?.agents?.defaults?.workspace;
|
||||
if (typeof rawWorkspace !== 'string' || !rawWorkspace.trim()) return null;
|
||||
return expandAndResolvePath(rawWorkspace, configPath);
|
||||
}
|
||||
|
||||
export function buildDefaultAgentWorkspacePath(agentId: string): string {
|
||||
const defaultWorkspaceRoot = getDefaultAgentWorkspaceRoot();
|
||||
if (defaultWorkspaceRoot) {
|
||||
return path.join(defaultWorkspaceRoot, agentId);
|
||||
}
|
||||
return path.join(getHomeDir(), '.openclaw', `workspace-${agentId}`);
|
||||
}
|
||||
|
||||
export function listConfiguredAgentWorkspaces(): Array<{ agentId: string; workspaceRoot: string }> {
|
||||
const { configPath, parsed } = loadOpenClawConfig();
|
||||
const agents = parsed?.agents?.list;
|
||||
if (!Array.isArray(agents)) return [];
|
||||
|
||||
const seen = new Set<string>();
|
||||
const workspaces: Array<{ agentId: string; workspaceRoot: string }> = [];
|
||||
|
||||
for (const entry of agents) {
|
||||
if (typeof entry?.id !== 'string' || !entry.id.trim()) continue;
|
||||
if (typeof entry.workspace !== 'string' || !entry.workspace.trim()) continue;
|
||||
if (seen.has(entry.id)) continue;
|
||||
seen.add(entry.id);
|
||||
workspaces.push({
|
||||
agentId: entry.id,
|
||||
workspaceRoot: expandAndResolvePath(entry.workspace, configPath),
|
||||
});
|
||||
}
|
||||
|
||||
return workspaces;
|
||||
}
|
||||
|
||||
80
server/lib/plans.test.ts
Normal file
80
server/lib/plans.test.ts
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
import fs from 'node:fs/promises';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
import { afterEach, describe, expect, it } from 'vitest';
|
||||
import { findRepoPlanByBeadId, listRepoPlans } from './plans.js';
|
||||
|
||||
const tempDirs: string[] = [];
|
||||
|
||||
async function createTempRepo(): Promise<string> {
|
||||
const repoRoot = await fs.mkdtemp(path.join(os.tmpdir(), 'nerve-plans-'));
|
||||
tempDirs.push(repoRoot);
|
||||
await fs.mkdir(path.join(repoRoot, '.plans'), { recursive: true });
|
||||
return repoRoot;
|
||||
}
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(tempDirs.splice(0).map((dir) => fs.rm(dir, { recursive: true, force: true })));
|
||||
});
|
||||
|
||||
describe('listRepoPlans', () => {
|
||||
it('parses CRLF frontmatter and closing delimiters at EOF', async () => {
|
||||
const repoRoot = await createTempRepo();
|
||||
await fs.writeFile(
|
||||
path.join(repoRoot, '.plans', 'crlf-plan.md'),
|
||||
[
|
||||
'---',
|
||||
'plan_id: plan-crlf',
|
||||
'plan_title: CRLF Plan',
|
||||
'status: In Progress',
|
||||
'bead_ids:',
|
||||
' - nerve-4gpd',
|
||||
'---',
|
||||
].join('\r\n'),
|
||||
'utf8',
|
||||
);
|
||||
|
||||
const plans = await listRepoPlans(repoRoot);
|
||||
|
||||
expect(plans).toHaveLength(1);
|
||||
expect(plans[0]).toMatchObject({
|
||||
path: '.plans/crlf-plan.md',
|
||||
title: 'CRLF Plan',
|
||||
planId: 'plan-crlf',
|
||||
status: 'In Progress',
|
||||
beadIds: ['nerve-4gpd'],
|
||||
archived: false,
|
||||
});
|
||||
});
|
||||
|
||||
it('finds bead ids from frontmatter when the closing delimiter is the final line', async () => {
|
||||
const repoRoot = await createTempRepo();
|
||||
await fs.writeFile(
|
||||
path.join(repoRoot, '.plans', 'final-delimiter.md'),
|
||||
'---\nplan_id: plan-final\nbead_ids: [nerve-4gpd]\n---',
|
||||
'utf8',
|
||||
);
|
||||
|
||||
await expect(findRepoPlanByBeadId('nerve-4gpd', repoRoot)).resolves.toMatchObject({
|
||||
path: '.plans/final-delimiter.md',
|
||||
planId: 'plan-final',
|
||||
beadIds: ['nerve-4gpd'],
|
||||
});
|
||||
});
|
||||
|
||||
it('parses BOM-prefixed frontmatter', async () => {
|
||||
const repoRoot = await createTempRepo();
|
||||
await fs.writeFile(
|
||||
path.join(repoRoot, '.plans', 'bom-plan.md'),
|
||||
'\uFEFF---\nplan_id: plan-bom\nplan_title: BOM Plan\nbead_ids:\n - nerve-bom1\n---\n# ignored title',
|
||||
'utf8',
|
||||
);
|
||||
|
||||
await expect(findRepoPlanByBeadId('nerve-bom1', repoRoot)).resolves.toMatchObject({
|
||||
path: '.plans/bom-plan.md',
|
||||
planId: 'plan-bom',
|
||||
title: 'BOM Plan',
|
||||
beadIds: ['nerve-bom1'],
|
||||
});
|
||||
});
|
||||
});
|
||||
182
server/lib/plans.ts
Normal file
|
|
@@ -0,0 +1,182 @@
|
|||
import fs from 'node:fs/promises';
|
||||
import path from 'node:path';
|
||||
|
||||
const PLAN_ROOT_NAME = '.plans';
|
||||
|
||||
function normalizeRepoRoot(repoRoot?: string): string {
|
||||
return path.resolve(repoRoot || process.cwd());
|
||||
}
|
||||
|
||||
export function getPlansRoot(repoRoot?: string): string {
|
||||
return path.resolve(normalizeRepoRoot(repoRoot), PLAN_ROOT_NAME);
|
||||
}
|
||||
|
||||
function isMarkdownFile(name: string): boolean {
|
||||
return name.toLowerCase().endsWith('.md');
|
||||
}
|
||||
|
||||
export function isArchivedPlanPath(relativePath: string): boolean {
|
||||
return relativePath.split(/[\\/]+/).filter(Boolean).includes('archive');
|
||||
}
|
||||
|
||||
function stripWrappingQuotes(value: string): string {
|
||||
const trimmed = value.trim();
|
||||
if ((trimmed.startsWith('"') && trimmed.endsWith('"')) || (trimmed.startsWith("'") && trimmed.endsWith("'"))) {
|
||||
return trimmed.slice(1, -1);
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
function parsePlanContent(content: string): {
|
||||
frontmatter: {
|
||||
plan_id?: string;
|
||||
plan_title?: string;
|
||||
status?: string;
|
||||
bead_ids?: string[];
|
||||
};
|
||||
body: string;
|
||||
} {
|
||||
const frontmatterMatch = content.match(/^(?:\uFEFF)?---\r?\n([\s\S]*?)\r?\n---(?:\r?\n|$)/);
|
||||
if (!frontmatterMatch) {
|
||||
return { frontmatter: {}, body: content };
|
||||
}
|
||||
|
||||
const rawFrontmatter = frontmatterMatch[1] ?? '';
|
||||
const body = content.slice(frontmatterMatch[0].length);
|
||||
const frontmatter: {
|
||||
plan_id?: string;
|
||||
plan_title?: string;
|
||||
status?: string;
|
||||
bead_ids?: string[];
|
||||
} = {};
|
||||
let activeArrayKey: 'bead_ids' | null = null;
|
||||
|
||||
for (const line of rawFrontmatter.split(/\r?\n/)) {
|
||||
if (!line.trim()) continue;
|
||||
|
||||
const arrayMatch = line.match(/^\s+-\s+(.*)$/);
|
||||
if (arrayMatch && activeArrayKey === 'bead_ids') {
|
||||
const next = stripWrappingQuotes(arrayMatch[1] ?? '');
|
||||
if (!frontmatter.bead_ids) frontmatter.bead_ids = [];
|
||||
if (next) frontmatter.bead_ids.push(next);
|
||||
continue;
|
||||
}
|
||||
|
||||
const keyValueMatch = line.match(/^([A-Za-z0-9_]+):\s*(.*)$/);
|
||||
if (!keyValueMatch) {
|
||||
activeArrayKey = null;
|
||||
continue;
|
||||
}
|
||||
|
||||
const [, key, rawValue] = keyValueMatch;
|
||||
const value = rawValue.trim();
|
||||
if (key === 'bead_ids') {
|
||||
activeArrayKey = 'bead_ids';
|
||||
if (!value) {
|
||||
frontmatter.bead_ids = [];
|
||||
} else if (value.startsWith('[') && value.endsWith(']')) {
|
||||
frontmatter.bead_ids = value.slice(1, -1)
|
||||
.split(',')
|
||||
.map((item) => stripWrappingQuotes(item))
|
||||
.filter(Boolean);
|
||||
} else {
|
||||
frontmatter.bead_ids = [stripWrappingQuotes(value)].filter(Boolean);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
activeArrayKey = null;
|
||||
if (key === 'plan_id' || key === 'plan_title' || key === 'status') {
|
||||
frontmatter[key] = stripWrappingQuotes(value);
|
||||
}
|
||||
}
|
||||
|
||||
return { frontmatter, body };
|
||||
}
|
||||
|
||||
function extractPlanTitle(content: string, frontmatter: { plan_title?: string }): string {
|
||||
if (frontmatter.plan_title?.trim()) return frontmatter.plan_title.trim();
|
||||
|
||||
const { body } = parsePlanContent(content);
|
||||
for (const line of body.split(/\r?\n/)) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed) continue;
|
||||
if (trimmed.startsWith('# ')) return trimmed.slice(2).trim();
|
||||
}
|
||||
|
||||
return 'Untitled plan';
|
||||
}
|
||||
|
||||
async function collectPlanFiles(dirPath: string, relativeDir = ''): Promise<string[]> {
|
||||
const items = await fs.readdir(dirPath, { withFileTypes: true });
|
||||
const files: string[] = [];
|
||||
|
||||
for (const item of items) {
|
||||
const childRelative = relativeDir ? path.posix.join(relativeDir, item.name) : item.name;
|
||||
const childAbsolute = path.join(dirPath, item.name);
|
||||
|
||||
if (item.isDirectory()) {
|
||||
files.push(...await collectPlanFiles(childAbsolute, childRelative));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (item.isFile() && isMarkdownFile(item.name)) {
|
||||
files.push(path.posix.join(PLAN_ROOT_NAME, childRelative));
|
||||
}
|
||||
}
|
||||
|
||||
return files;
|
||||
}
|
||||
|
||||
export interface RepoPlanSummary {
|
||||
path: string;
|
||||
title: string;
|
||||
status: string | null;
|
||||
planId: string | null;
|
||||
beadIds: string[];
|
||||
archived: boolean;
|
||||
updatedAt: number;
|
||||
}
|
||||
|
||||
export async function listRepoPlans(repoRoot?: string): Promise<RepoPlanSummary[]> {
|
||||
const plansRoot = getPlansRoot(repoRoot);
|
||||
|
||||
try {
|
||||
const stat = await fs.stat(plansRoot);
|
||||
if (!stat.isDirectory()) return [];
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
|
||||
const relativePaths = await collectPlanFiles(plansRoot);
|
||||
const plans = await Promise.all(relativePaths.map(async (relativePath) => {
|
||||
const absolutePath = path.resolve(normalizeRepoRoot(repoRoot), relativePath);
|
||||
const [content, stat] = await Promise.all([
|
||||
fs.readFile(absolutePath, 'utf-8'),
|
||||
fs.stat(absolutePath),
|
||||
]);
|
||||
const parsed = parsePlanContent(content);
|
||||
return {
|
||||
path: relativePath,
|
||||
title: extractPlanTitle(content, parsed.frontmatter),
|
||||
status: parsed.frontmatter.status?.trim() || null,
|
||||
planId: parsed.frontmatter.plan_id?.trim() || null,
|
||||
beadIds: parsed.frontmatter.bead_ids ?? [],
|
||||
archived: isArchivedPlanPath(relativePath),
|
||||
updatedAt: Math.floor(stat.mtimeMs),
|
||||
} satisfies RepoPlanSummary;
|
||||
}));
|
||||
|
||||
return plans.sort((left, right) => {
|
||||
if (left.archived !== right.archived) return left.archived ? 1 : -1;
|
||||
return right.updatedAt - left.updatedAt;
|
||||
});
|
||||
}
|
||||
|
||||
export async function findRepoPlanByBeadId(beadId: string, repoRoot?: string): Promise<RepoPlanSummary | null> {
|
||||
const normalizedBeadId = beadId.trim();
|
||||
if (!normalizedBeadId) return null;
|
||||
|
||||
const plans = await listRepoPlans(repoRoot);
|
||||
return plans.find((plan) => plan.beadIds.includes(normalizedBeadId)) ?? null;
|
||||
}
|
||||
387
server/lib/subagent-spawn.test.ts
Normal file
|
|
@@ -0,0 +1,387 @@
|
|||
/** Tests for the server-side subagent spawn helper. */
|
||||
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
||||
import * as gatewayRpc from './gateway-rpc.js';
|
||||
import {
|
||||
__resetSubagentSpawnTestState,
|
||||
buildSpawnSubagentMarkerMessage,
|
||||
buildSubagentParentCompletionMessage,
|
||||
extractAssistantResultForLaunch,
|
||||
isTopLevelRootSessionKey,
|
||||
isUnsupportedDirectSpawnError,
|
||||
pickMarkerSpawnedChildSession,
|
||||
spawnSubagent,
|
||||
} from './subagent-spawn.js';
|
||||
|
||||
describe('subagent-spawn helper', () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
__resetSubagentSpawnTestState();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
vi.useRealTimers();
|
||||
__resetSubagentSpawnTestState();
|
||||
});
|
||||
|
||||
it('recognizes top-level root session keys only', () => {
|
||||
expect(isTopLevelRootSessionKey('agent:reviewer:main')).toBe(true);
|
||||
expect(isTopLevelRootSessionKey('agent:reviewer:subagent:abc')).toBe(false);
|
||||
expect(isTopLevelRootSessionKey('agent:reviewer:cron:daily')).toBe(false);
|
||||
});
|
||||
|
||||
it('matches only narrow unsupported direct spawn errors', () => {
|
||||
expect(isUnsupportedDirectSpawnError(new Error('unknown method: sessions.create'))).toBe(true);
|
||||
expect(isUnsupportedDirectSpawnError(new Error('unknown method: sessions.send'))).toBe(true);
|
||||
expect(isUnsupportedDirectSpawnError(new Error('unknown method: chat.send'))).toBe(false);
|
||||
expect(isUnsupportedDirectSpawnError(new Error('internal server error'))).toBe(false);
|
||||
});
|
||||
|
||||
it('builds the existing spawn-subagent marker message', () => {
|
||||
expect(buildSpawnSubagentMarkerMessage({
|
||||
task: 'Reply with exactly: OK',
|
||||
label: 'audit-auth-flow',
|
||||
model: 'openai/gpt-5',
|
||||
thinking: 'high',
|
||||
cleanup: 'delete',
|
||||
})).toBe([
|
||||
'[spawn-subagent]',
|
||||
'task: Reply with exactly: OK',
|
||||
'label: audit-auth-flow',
|
||||
'model: openai/gpt-5',
|
||||
'thinking: high',
|
||||
'mode: run',
|
||||
'cleanup: delete',
|
||||
].join('\n'));
|
||||
});
|
||||
|
||||
it('builds the generic parent completion report', () => {
|
||||
const completed = buildSubagentParentCompletionMessage({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
childSessionKey: 'agent:reviewer:subagent:abc',
|
||||
label: 'audit-auth-flow',
|
||||
outcome: 'completed',
|
||||
result: 'done',
|
||||
});
|
||||
expect(completed).toContain('Subagent child session completion report.');
|
||||
expect(completed).toContain('Parent root: agent:reviewer:main');
|
||||
expect(completed).toContain('Child session: agent:reviewer:subagent:abc');
|
||||
expect(completed).toContain('Outcome: completed');
|
||||
expect(completed).toContain('Result:');
|
||||
|
||||
const failed = buildSubagentParentCompletionMessage({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
childSessionKey: 'agent:reviewer:subagent:abc',
|
||||
outcome: 'failed',
|
||||
error: 'boom',
|
||||
});
|
||||
expect(failed).toContain('Outcome: failed');
|
||||
expect(failed).toContain('Error:');
|
||||
});
|
||||
|
||||
it('picks only a new child absent from the pre-send snapshot', () => {
|
||||
const picked = pickMarkerSpawnedChildSession([
|
||||
{ sessionKey: 'agent:reviewer:main' },
|
||||
{ sessionKey: 'agent:reviewer:subagent:existing' },
|
||||
{ sessionKey: 'agent:reviewer:subagent:new-child' },
|
||||
], 'agent:reviewer:main', new Set(['agent:reviewer:main', 'agent:reviewer:subagent:existing']));
|
||||
|
||||
expect(picked?.sessionKey).toBe('agent:reviewer:subagent:new-child');
|
||||
});
|
||||
|
||||
it('extracts a launched run by runId first, before later manual follow-ups', () => {
|
||||
const extracted = extractAssistantResultForLaunch([
|
||||
{ role: 'user', content: 'launch task', runId: 'run-1', timestamp: 100 },
|
||||
{ role: 'assistant', content: 'launch result', runId: 'run-1', timestamp: 101 },
|
||||
{ role: 'user', content: 'manual follow-up', timestamp: 102 },
|
||||
{ role: 'assistant', content: 'manual answer', timestamp: 103 },
|
||||
], { runId: 'run-1', launchTimestamp: 99 });
|
||||
|
||||
expect(extracted).toEqual({ started: true, resultText: 'launch result' });
|
||||
});
|
||||
|
||||
it('extracts a launched run by launch boundary when runId is unavailable', () => {
|
||||
const extracted = extractAssistantResultForLaunch([
|
||||
{ role: 'user', content: 'older context', timestamp: 10 },
|
||||
{ role: 'assistant', content: 'older answer', timestamp: 11 },
|
||||
{ role: 'user', content: 'launch task', timestamp: 100 },
|
||||
{ role: 'assistant', content: 'launch result', timestamp: 101 },
|
||||
{ role: 'user', content: 'manual follow-up', timestamp: 102 },
|
||||
{ role: 'assistant', content: 'manual answer', timestamp: 103 },
|
||||
], { launchTimestamp: 100 });
|
||||
|
||||
expect(extracted).toEqual({ started: true, resultText: 'launch result' });
|
||||
});
|
||||
|
||||
it('resolves the canonical child key returned by sessions.create', async () => {
|
||||
const rpcMock = vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method) => {
|
||||
if (method === 'sessions.create') return { key: 'agent:reviewer:subagent:canonical' };
|
||||
if (method === 'sessions.send') return { runId: 'run-123' };
|
||||
if (method === 'sessions.list') return { sessions: [] };
|
||||
return {};
|
||||
});
|
||||
|
||||
const result = await spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Reply with exactly: OK',
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
sessionKey: 'agent:reviewer:subagent:canonical',
|
||||
runId: 'run-123',
|
||||
mode: 'direct',
|
||||
});
|
||||
expect(rpcMock).toHaveBeenNthCalledWith(1, 'sessions.create', expect.any(Object));
|
||||
expect(rpcMock).toHaveBeenNthCalledWith(2, 'sessions.send', expect.objectContaining({
|
||||
key: 'agent:reviewer:subagent:canonical',
|
||||
message: 'Reply with exactly: OK',
|
||||
}));
|
||||
});
|
||||
|
||||
it('does not delete the child when sessions.send fails after create', async () => {
|
||||
const rpcMock = vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
if (method === 'sessions.create') return { key: 'agent:reviewer:subagent:canonical' };
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:subagent:canonical') {
|
||||
throw new Error('send failed');
|
||||
}
|
||||
if (method === 'sessions.delete') return { ok: true };
|
||||
throw new Error(`unexpected call: ${method}`);
|
||||
});
|
||||
|
||||
await expect(spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Reply with exactly: OK',
|
||||
})).rejects.toThrow('send failed');
|
||||
|
||||
expect(rpcMock).not.toHaveBeenCalledWith('sessions.delete', expect.anything());
|
||||
});
|
||||
|
||||
it('reports completion back to the parent on direct success', async () => {
|
||||
const calls: Array<{ method: string; params: Record<string, unknown> }> = [];
|
||||
let listPolls = 0;
|
||||
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
calls.push({ method, params });
|
||||
|
||||
if (method === 'sessions.create') return { key: 'agent:reviewer:subagent:child-1' };
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:subagent:child-1') return { runId: 'run-1' };
|
||||
if (method === 'sessions.list') {
|
||||
listPolls += 1;
|
||||
if (listPolls === 1) {
|
||||
return {
|
||||
sessions: [{
|
||||
sessionKey: 'agent:reviewer:subagent:child-1',
|
||||
status: 'running',
|
||||
busy: true,
|
||||
runId: 'run-1',
|
||||
}],
|
||||
};
|
||||
}
|
||||
return {
|
||||
sessions: [{
|
||||
sessionKey: 'agent:reviewer:subagent:child-1',
|
||||
status: 'done',
|
||||
agentState: 'idle',
|
||||
busy: false,
|
||||
processing: false,
|
||||
runId: 'run-1',
|
||||
}],
|
||||
};
|
||||
}
|
||||
if (method === 'sessions.get') {
|
||||
return {
|
||||
messages: [
|
||||
{ role: 'user', content: 'Reply with exactly: OK', runId: 'run-1', timestamp: 100 },
|
||||
{ role: 'assistant', content: 'OK', runId: 'run-1', timestamp: 101 },
|
||||
{ role: 'user', content: 'manual follow-up', timestamp: 102 },
|
||||
{ role: 'assistant', content: 'later answer', timestamp: 103 },
|
||||
],
|
||||
};
|
||||
}
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:main') return { ok: true };
|
||||
throw new Error(`unexpected ${method}`);
|
||||
});
|
||||
|
||||
await spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Reply with exactly: OK',
|
||||
label: 'audit-auth-flow',
|
||||
cleanup: 'keep',
|
||||
});
|
||||
|
||||
await vi.advanceTimersByTimeAsync(20_000);
|
||||
|
||||
const parentReport = calls.find((call) => call.method === 'sessions.send' && call.params.key === 'agent:reviewer:main');
|
||||
expect(parentReport).toBeTruthy();
|
||||
expect(String(parentReport?.params.message ?? '')).toContain('Outcome: completed');
|
||||
expect(String(parentReport?.params.message ?? '')).toContain('Label: audit-auth-flow');
|
||||
expect(String(parentReport?.params.message ?? '')).toContain('Result:\nOK');
|
||||
});
|
||||
|
||||
it('reports failure back to the parent when the child errors', async () => {
|
||||
const calls: Array<{ method: string; params: Record<string, unknown> }> = [];
|
||||
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
calls.push({ method, params });
|
||||
if (method === 'sessions.create') return { key: 'agent:reviewer:subagent:child-2' };
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:subagent:child-2') return { runId: 'run-2' };
|
||||
if (method === 'sessions.list') {
|
||||
return {
|
||||
sessions: [{
|
||||
sessionKey: 'agent:reviewer:subagent:child-2',
|
||||
status: 'failed',
|
||||
error: 'worker crashed',
|
||||
}],
|
||||
};
|
||||
}
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:main') return { ok: true };
|
||||
throw new Error(`unexpected ${method}`);
|
||||
});
|
||||
|
||||
await spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Do something',
|
||||
cleanup: 'keep',
|
||||
});
|
||||
|
||||
await vi.advanceTimersByTimeAsync(10_000);
|
||||
|
||||
const parentReport = calls.find((call) => call.method === 'sessions.send' && call.params.key === 'agent:reviewer:main');
|
||||
expect(parentReport).toBeTruthy();
|
||||
expect(String(parentReport?.params.message ?? '')).toContain('Outcome: failed');
|
||||
expect(String(parentReport?.params.message ?? '')).toContain('worker crashed');
|
||||
});
|
||||
|
||||
it('deletes the child only after the parent report when cleanup=delete', async () => {
|
||||
const callOrder: string[] = [];
|
||||
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
callOrder.push(`${method}:${String((params as Record<string, unknown>).key ?? '')}`);
|
||||
if (method === 'sessions.create') return { key: 'agent:reviewer:subagent:child-3' };
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:subagent:child-3') return { runId: 'run-3' };
|
||||
if (method === 'sessions.list') {
|
||||
return { sessions: [{ sessionKey: 'agent:reviewer:subagent:child-3', status: 'failed', error: 'boom' }] };
|
||||
}
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:main') return { ok: true };
|
||||
if (method === 'sessions.delete') return { ok: true };
|
||||
throw new Error(`unexpected ${method}`);
|
||||
});
|
||||
|
||||
await spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Do something',
|
||||
cleanup: 'delete',
|
||||
});
|
||||
|
||||
await vi.advanceTimersByTimeAsync(10_000);
|
||||
|
||||
const reportIndex = callOrder.findIndex((entry) => entry === 'sessions.send:agent:reviewer:main');
|
||||
const deleteIndex = callOrder.findIndex((entry) => entry === 'sessions.delete:agent:reviewer:subagent:child-3');
|
||||
expect(reportIndex).toBeGreaterThan(-1);
|
||||
expect(deleteIndex).toBeGreaterThan(reportIndex);
|
||||
});
|
||||
|
||||
it('keeps the child when cleanup=keep', async () => {
|
||||
const rpcMock = vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method, params) => {
|
||||
if (method === 'sessions.create') return { key: 'agent:reviewer:subagent:child-4' };
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:subagent:child-4') return { runId: 'run-4' };
|
||||
if (method === 'sessions.list') {
|
||||
return { sessions: [{ sessionKey: 'agent:reviewer:subagent:child-4', status: 'failed', error: 'boom' }] };
|
||||
}
|
||||
if (method === 'sessions.send' && params.key === 'agent:reviewer:main') return { ok: true };
|
||||
throw new Error(`unexpected ${method}`);
|
||||
});
|
||||
|
||||
await spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Do something',
|
||||
cleanup: 'keep',
|
||||
});
|
||||
|
||||
await vi.advanceTimersByTimeAsync(10_000);
|
||||
|
||||
expect(rpcMock).not.toHaveBeenCalledWith('sessions.delete', expect.anything());
|
||||
});
|
||||
|
||||
it('falls back to marker mode only for narrow unsupported direct-RPC errors', async () => {
|
||||
let listCallCount = 0;
|
||||
const rpcMock = vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method) => {
|
||||
if (method === 'sessions.create') throw new Error('unknown method: sessions.create');
|
||||
if (method === 'sessions.list') {
|
||||
listCallCount += 1;
|
||||
if (listCallCount === 1) {
|
||||
return { sessions: [{ sessionKey: 'agent:reviewer:main' }, { sessionKey: 'agent:reviewer:subagent:existing' }] };
|
||||
}
|
||||
return { sessions: [{ sessionKey: 'agent:reviewer:main' }, { sessionKey: 'agent:reviewer:subagent:existing' }, { sessionKey: 'agent:reviewer:subagent:new-child' }] };
|
||||
}
|
||||
if (method === 'chat.send') return { ok: true };
|
||||
throw new Error(`unexpected ${method}`);
|
||||
});
|
||||
|
||||
const resultPromise = spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Do something',
|
||||
cleanup: 'delete',
|
||||
});
|
||||
|
||||
await vi.advanceTimersByTimeAsync(2_000);
|
||||
|
||||
await expect(resultPromise).resolves.toEqual({
|
||||
sessionKey: 'agent:reviewer:subagent:new-child',
|
||||
mode: 'marker',
|
||||
});
|
||||
expect(rpcMock).toHaveBeenCalledWith('chat.send', expect.objectContaining({
|
||||
sessionKey: 'agent:reviewer:main',
|
||||
message: expect.stringContaining('[spawn-subagent]'),
|
||||
}));
|
||||
});
|
||||
|
||||
it('falls back to the first new child instead of timing out when multiple candidates appear', async () => {
|
||||
let listCallCount = 0;
|
||||
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method) => {
|
||||
if (method === 'sessions.create') throw new Error('unknown method: sessions.create');
|
||||
if (method === 'sessions.list') {
|
||||
listCallCount += 1;
|
||||
if (listCallCount === 1) {
|
||||
return { sessions: [{ sessionKey: 'agent:reviewer:main' }] };
|
||||
}
|
||||
return {
|
||||
sessions: [
|
||||
{ sessionKey: 'agent:reviewer:main' },
|
||||
{ sessionKey: 'agent:reviewer:subagent:new-child-a' },
|
||||
{ sessionKey: 'agent:reviewer:subagent:new-child-b' },
|
||||
],
|
||||
};
|
||||
}
|
||||
if (method === 'chat.send') return { ok: true };
|
||||
throw new Error(`unexpected ${method}`);
|
||||
});
|
||||
|
||||
const resultPromise = spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Do something',
|
||||
cleanup: 'keep',
|
||||
});
|
||||
|
||||
await vi.advanceTimersByTimeAsync(2_000);
|
||||
|
||||
await expect(resultPromise).resolves.toEqual({
|
||||
sessionKey: 'agent:reviewer:subagent:new-child-a',
|
||||
mode: 'marker',
|
||||
});
|
||||
});
|
||||
|
||||
it('does not hide generic direct-launch errors behind marker fallback', async () => {
|
||||
vi.spyOn(gatewayRpc, 'gatewayRpcCall').mockImplementation(async (method) => {
|
||||
if (method === 'sessions.create') throw new Error('parent root not found');
|
||||
throw new Error(`unexpected ${method}`);
|
||||
});
|
||||
|
||||
await expect(spawnSubagent({
|
||||
parentSessionKey: 'agent:reviewer:main',
|
||||
task: 'Do something',
|
||||
})).rejects.toThrow('parent root not found');
|
||||
});
|
||||
});
|
||||

server/lib/subagent-spawn.ts (520 lines, Normal file)
@@ -0,0 +1,520 @@
|
|||
/**
|
||||
* Server-side subagent spawn helper.
|
||||
*
|
||||
* Owns the full lifecycle for direct child launches so the React client only
|
||||
* needs to ask the server to spawn a child and then switch focus.
|
||||
*
|
||||
* @module
|
||||
*/
|
||||
|
||||
import { randomUUID } from 'node:crypto';
|
||||
import { gatewayRpcCall } from './gateway-rpc.js';
|
||||
|
||||
export type SubagentCleanupMode = 'keep' | 'delete';
|
||||
|
||||
export interface SpawnSubagentParams {
|
||||
parentSessionKey: string;
|
||||
task: string;
|
||||
label?: string;
|
||||
model?: string;
|
||||
thinking?: string;
|
||||
cleanup?: SubagentCleanupMode;
|
||||
}
|
||||
|
||||
export interface SpawnSubagentResult {
|
||||
sessionKey: string;
|
||||
runId?: string;
|
||||
mode: 'direct' | 'marker';
|
||||
}
|
||||
|
||||
interface GatewaySessionSummary {
|
||||
key?: string;
|
||||
sessionKey?: string;
|
||||
status?: string;
|
||||
error?: string;
|
||||
agentState?: string;
|
||||
busy?: boolean;
|
||||
processing?: boolean;
|
||||
runId?: string;
|
||||
currentRunId?: string;
|
||||
latestRunId?: string;
|
||||
}
|
||||
|
||||
interface LaunchMessage {
|
||||
role?: string;
|
||||
content?: unknown;
|
||||
timestamp?: number;
|
||||
ts?: number;
|
||||
createdAt?: number;
|
||||
runId?: string;
|
||||
currentRunId?: string;
|
||||
latestRunId?: string;
|
||||
meta?: { runId?: string };
|
||||
metadata?: { runId?: string };
|
||||
}
|
||||
|
||||
interface ExtractedLaunchResult {
|
||||
started: boolean;
|
||||
resultText: string | null;
|
||||
}
|
||||
|
||||
const ROOT_SESSION_RE = /^agent:[^:]+:main$/;
|
||||
const POLL_SESSIONS_ACTIVE_MINUTES = 24 * 60;
|
||||
const POLL_SESSIONS_LIMIT = 200;
|
||||
const MONITOR_INITIAL_DELAY_MS = 3_000;
|
||||
const MONITOR_POLL_INTERVAL_MS = 5_000;
|
||||
const MONITOR_MAX_ATTEMPTS = 720;
|
||||
const MARKER_DISCOVERY_TIMEOUT_MS = 60_000;
|
||||
const MARKER_DISCOVERY_POLL_MS = 1_000;
|
||||
|
||||
const activeMonitors = new Set<string>();
|
||||
|
||||
function schedule(fn: () => void, ms: number): ReturnType<typeof setTimeout> {
|
||||
const timer = setTimeout(fn, ms);
|
||||
timer.unref?.();
|
||||
return timer;
|
||||
}
|
||||
|
||||
export function isTopLevelRootSessionKey(sessionKey: string): boolean {
|
||||
return ROOT_SESSION_RE.test(sessionKey);
|
||||
}
|
||||
|
||||
function isSubagentSessionKey(sessionKey: string): boolean {
|
||||
return /^agent:[^:]+:subagent:/.test(sessionKey);
|
||||
}
|
||||
|
||||
function isRootChildSession(sessionKey: string, parentSessionKey: string): boolean {
|
||||
const parentMatch = parentSessionKey.match(/^agent:([^:]+):main$/);
|
||||
if (!parentMatch) return false;
|
||||
return sessionKey.startsWith(`agent:${parentMatch[1]}:subagent:`);
|
||||
}
|
||||
|
||||
function buildRequestedChildSessionKey(parentSessionKey: string): string {
|
||||
const match = parentSessionKey.match(/^agent:([^:]+):main$/);
|
||||
if (!match) {
|
||||
throw new Error(`Parent agent session must be a top-level root: ${parentSessionKey}`);
|
||||
}
|
||||
return `agent:${match[1]}:subagent:${randomUUID()}`;
|
||||
}
|
||||
|
||||
function getSessionKey(session: GatewaySessionSummary): string | null {
|
||||
if (typeof session.sessionKey === 'string' && session.sessionKey.trim()) return session.sessionKey;
|
||||
if (typeof session.key === 'string' && session.key.trim()) return session.key;
|
||||
return null;
|
||||
}
|
||||
|
||||
function isBusySession(session: GatewaySessionSummary): boolean {
|
||||
if (session.busy || session.processing) return true;
|
||||
const status = String(session.status ?? '').toLowerCase();
|
||||
const agentState = String(session.agentState ?? '').toLowerCase();
|
||||
return ['running', 'thinking', 'tool_use', 'streaming', 'started', 'busy', 'working'].includes(status)
|
||||
|| ['running', 'thinking', 'tool_use', 'streaming', 'busy', 'working'].includes(agentState);
|
||||
}
|
||||
|
||||
function isTerminalFailure(session: GatewaySessionSummary): boolean {
|
||||
const status = String(session.status ?? '').toLowerCase();
|
||||
return status === 'error' || status === 'failed';
|
||||
}
|
||||
|
||||
function isTerminalSuccess(session: GatewaySessionSummary): boolean {
|
||||
const status = String(session.status ?? '').toLowerCase();
|
||||
const agentState = String(session.agentState ?? '').toLowerCase();
|
||||
return status === 'done' || (agentState === 'idle' && !session.busy && !session.processing);
|
||||
}
|
||||
|
||||
function sessionMentionsRunId(session: GatewaySessionSummary, runId?: string): boolean {
|
||||
if (!runId) return false;
|
||||
return [session.runId, session.currentRunId, session.latestRunId].some((value) => value === runId);
|
||||
}
|
||||
|
||||
function getMessageTimestamp(message: LaunchMessage): number | undefined {
|
||||
const value = message.timestamp ?? message.ts ?? message.createdAt;
|
||||
return typeof value === 'number' && Number.isFinite(value) ? value : undefined;
|
||||
}
|
||||
|
||||
function getMessageRunId(message: LaunchMessage): string | undefined {
|
||||
const direct = [message.runId, message.currentRunId, message.latestRunId, message.meta?.runId, message.metadata?.runId]
|
||||
.find((value) => typeof value === 'string' && value.trim());
|
||||
return typeof direct === 'string' ? direct : undefined;
|
||||
}
|
||||
|
||||
function getTextContent(content: unknown): string | null {
|
||||
if (typeof content === 'string') {
|
||||
const text = content.trim();
|
||||
return text ? text : null;
|
||||
}
|
||||
if (!Array.isArray(content)) return null;
|
||||
const text = content
|
||||
.map((part) => {
|
||||
if (!part || typeof part !== 'object') return null;
|
||||
const candidate = part as { type?: string; text?: string };
|
||||
if (candidate.type !== 'text' || typeof candidate.text !== 'string') return null;
|
||||
return candidate.text;
|
||||
})
|
||||
.filter((value): value is string => Boolean(value))
|
||||
.join('')
|
||||
.trim();
|
||||
return text || null;
|
||||
}
|
||||
|
||||
function trimReportText(text: string, maxChars = 4_000): string {
|
||||
const normalized = text.trim();
|
||||
if (!normalized) return 'Completed (no result text)';
|
||||
if (normalized.length <= maxChars) return normalized;
|
||||
return `${normalized.slice(0, maxChars - 13).trimEnd()}\n\n[truncated]`;
|
||||
}
|
||||
|
||||
export function buildSpawnSubagentMarkerMessage(params: {
|
||||
task: string;
|
||||
label?: string;
|
||||
model?: string;
|
||||
thinking?: string;
|
||||
cleanup: SubagentCleanupMode;
|
||||
}): string {
|
||||
const lines = ['[spawn-subagent]'];
|
||||
lines.push(`task: ${params.task}`);
|
||||
if (params.label) lines.push(`label: ${params.label}`);
|
||||
if (params.model) lines.push(`model: ${params.model}`);
|
||||
if (params.thinking && params.thinking !== 'off') lines.push(`thinking: ${params.thinking}`);
|
||||
lines.push('mode: run');
|
||||
lines.push(`cleanup: ${params.cleanup}`);
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
export function isUnsupportedDirectSpawnError(error: unknown): boolean {
|
||||
if (!(error instanceof Error)) return false;
|
||||
const message = error.message.trim();
|
||||
return message === 'unknown method: sessions.create' || message === 'unknown method: sessions.send';
|
||||
}
|
||||
|
||||
export function buildSubagentParentCompletionMessage(params: {
|
||||
parentSessionKey: string;
|
||||
childSessionKey: string;
|
||||
label?: string;
|
||||
outcome: 'completed' | 'failed';
|
||||
result?: string;
|
||||
error?: string;
|
||||
}): string {
|
||||
const lines = [
|
||||
'Subagent child session completion report.',
|
||||
'',
|
||||
'Use this as context from work that ran under this root. This is a completion update, not a fresh task unless follow-up is needed.',
|
||||
'',
|
||||
`Parent root: ${params.parentSessionKey}`,
|
||||
`Child session: ${params.childSessionKey}`,
|
||||
];
|
||||
|
||||
if (params.label) lines.push(`Label: ${params.label}`);
|
||||
lines.push(`Outcome: ${params.outcome}`);
|
||||
|
||||
if (params.outcome === 'completed') {
|
||||
lines.push('', 'Result:', trimReportText(params.result ?? 'Completed (no result text)'));
|
||||
} else {
|
||||
lines.push('', 'Error:', trimReportText(params.error ?? 'Child session failed'));
|
||||
}
|
||||
|
||||
return lines.join('\n');
|
||||
}
|
||||
|
||||
async function reportSubagentResultToParent(params: {
|
||||
parentSessionKey: string;
|
||||
childSessionKey: string;
|
||||
label?: string;
|
||||
outcome: 'completed' | 'failed';
|
||||
result?: string;
|
||||
error?: string;
|
||||
}): Promise<void> {
|
||||
const suffix = params.outcome === 'completed' ? 'done' : 'failed';
|
||||
await gatewayRpcCall('sessions.send', {
|
||||
key: params.parentSessionKey,
|
||||
message: buildSubagentParentCompletionMessage(params),
|
||||
idempotencyKey: `subagent-parent-report:${params.childSessionKey}:${suffix}`,
|
||||
});
|
||||
}
|
||||
|
||||
export function extractAssistantResultForLaunch(
|
||||
rawMessages: Array<Record<string, unknown>>,
|
||||
options: { runId?: string; launchTimestamp: number },
|
||||
): ExtractedLaunchResult {
|
||||
const messages = rawMessages as LaunchMessage[];
|
||||
|
||||
if (options.runId) {
|
||||
const runMessages = messages.filter((message) => getMessageRunId(message) === options.runId);
|
||||
const runAssistant = [...runMessages]
|
||||
.reverse()
|
||||
.map((message) => ({ role: message.role, text: getTextContent(message.content) }))
|
||||
.find((message) => message.role === 'assistant' && message.text)?.text ?? null;
|
||||
|
||||
if (runAssistant) {
|
||||
return { started: true, resultText: runAssistant };
|
||||
}
|
||||
if (runMessages.length > 0) {
|
||||
return { started: true, resultText: null };
|
||||
}
|
||||
}
|
||||
|
||||
const firstPostLaunchIndex = messages.findIndex((message) => {
|
||||
const timestamp = getMessageTimestamp(message);
|
||||
if (typeof timestamp === 'number') return timestamp >= options.launchTimestamp;
|
||||
return message.role === 'user';
|
||||
});
|
||||
|
||||
if (firstPostLaunchIndex === -1) {
|
||||
return { started: false, resultText: null };
|
||||
}
|
||||
|
||||
let endIndex = messages.length;
|
||||
for (let index = firstPostLaunchIndex + 1; index < messages.length; index += 1) {
|
||||
if (messages[index]?.role === 'user') {
|
||||
endIndex = index;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
const launchSlice = messages.slice(firstPostLaunchIndex, endIndex);
|
||||
const lastAssistantText = [...launchSlice]
|
||||
.reverse()
|
||||
.map((message) => ({ role: message.role, text: getTextContent(message.content) }))
|
||||
.find((message) => message.role === 'assistant' && message.text)?.text ?? null;
|
||||
|
||||
return {
|
||||
started: launchSlice.length > 0,
|
||||
resultText: lastAssistantText,
|
||||
};
|
||||
}
|
||||
|
||||
export function pickMarkerSpawnedChildSession(
|
||||
sessions: GatewaySessionSummary[],
|
||||
parentSessionKey: string,
|
||||
knownSessionKeysBefore: Set<string>,
|
||||
): GatewaySessionSummary | null {
|
||||
const candidates = sessions.filter((session) => {
|
||||
const sessionKey = getSessionKey(session);
|
||||
if (!sessionKey) return false;
|
||||
if (!isSubagentSessionKey(sessionKey)) return false;
|
||||
if (!isRootChildSession(sessionKey, parentSessionKey)) return false;
|
||||
return !knownSessionKeysBefore.has(sessionKey);
|
||||
});
|
||||
|
||||
return candidates[0] ?? null;
|
||||
}
|
||||
|
||||
function startCompletionMonitor(params: {
|
||||
parentSessionKey: string;
|
||||
childSessionKey: string;
|
||||
label?: string;
|
||||
cleanup: SubagentCleanupMode;
|
||||
runId?: string;
|
||||
launchTimestamp: number;
|
||||
}): void {
|
||||
if (activeMonitors.has(params.childSessionKey)) return;
|
||||
activeMonitors.add(params.childSessionKey);
|
||||
|
||||
let attempts = 0;
|
||||
let observedRunStart = false;
|
||||
|
||||
const finish = async (outcome: 'completed' | 'failed', details: { result?: string; error?: string }) => {
|
||||
activeMonitors.delete(params.childSessionKey);
|
||||
|
||||
let reportSent = false;
|
||||
try {
|
||||
await reportSubagentResultToParent({
|
||||
parentSessionKey: params.parentSessionKey,
|
||||
childSessionKey: params.childSessionKey,
|
||||
label: params.label,
|
||||
outcome,
|
||||
...details,
|
||||
});
|
||||
reportSent = true;
|
||||
} catch (error) {
|
||||
console.warn(`[subagent-spawn] Failed to report ${outcome} for ${params.childSessionKey}:`, error);
|
||||
}
|
||||
|
||||
if (reportSent && params.cleanup === 'delete') {
|
||||
try {
|
||||
await gatewayRpcCall('sessions.delete', {
|
||||
key: params.childSessionKey,
|
||||
deleteTranscript: true,
|
||||
});
|
||||
} catch (error) {
|
||||
console.warn(`[subagent-spawn] Failed to delete child ${params.childSessionKey}:`, error);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const poll = async () => {
|
||||
attempts += 1;
|
||||
if (attempts > MONITOR_MAX_ATTEMPTS) {
|
||||
await finish('failed', { error: 'Subagent timed out (polling limit reached)' });
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const listResponse = await gatewayRpcCall('sessions.list', {
|
||||
activeMinutes: POLL_SESSIONS_ACTIVE_MINUTES,
|
||||
limit: POLL_SESSIONS_LIMIT,
|
||||
}) as { sessions?: GatewaySessionSummary[] };
|
||||
const sessions = Array.isArray(listResponse.sessions) ? listResponse.sessions : [];
|
||||
const session = sessions.find((candidate) => getSessionKey(candidate) === params.childSessionKey);
|
||||
|
||||
if (!session) {
|
||||
schedule(() => { void poll(); }, MONITOR_POLL_INTERVAL_MS);
|
||||
return;
|
||||
}
|
||||
|
||||
if (sessionMentionsRunId(session, params.runId) || isBusySession(session)) {
|
||||
observedRunStart = true;
|
||||
}
|
||||
|
||||
if (isTerminalFailure(session)) {
|
||||
await finish('failed', { error: session.error || 'Child session failed' });
|
||||
return;
|
||||
}
|
||||
|
||||
if (!isTerminalSuccess(session)) {
|
||||
schedule(() => { void poll(); }, MONITOR_POLL_INTERVAL_MS);
|
||||
return;
|
||||
}
|
||||
|
||||
const historyResponse = await gatewayRpcCall('sessions.get', {
|
||||
key: params.childSessionKey,
|
||||
limit: 20,
|
||||
includeTools: true,
|
||||
}) as { messages?: Array<Record<string, unknown>> };
|
||||
const messages = Array.isArray(historyResponse.messages) ? historyResponse.messages : [];
|
||||
const extracted = extractAssistantResultForLaunch(messages, {
|
||||
runId: params.runId,
|
||||
launchTimestamp: params.launchTimestamp,
|
||||
});
|
||||
|
||||
if (extracted.started) {
|
||||
observedRunStart = true;
|
||||
}
|
||||
|
||||
if (!observedRunStart) {
|
||||
schedule(() => { void poll(); }, MONITOR_POLL_INTERVAL_MS);
|
||||
return;
|
||||
}
|
||||
|
||||
await finish('completed', {
|
||||
result: extracted.resultText ?? 'Completed (no result text)',
|
||||
});
|
||||
} catch (error) {
|
||||
console.warn(`[subagent-spawn] Poll error for ${params.childSessionKey}:`, error);
|
||||
schedule(() => { void poll(); }, MONITOR_POLL_INTERVAL_MS);
|
||||
}
|
||||
};
|
||||
|
||||
schedule(() => { void poll(); }, MONITOR_INITIAL_DELAY_MS);
|
||||
}
|
||||
|
||||
async function launchDirect(params: SpawnSubagentParams): Promise<SpawnSubagentResult> {
|
||||
if (!isTopLevelRootSessionKey(params.parentSessionKey)) {
|
||||
throw new Error(`parentSessionKey must be a top-level root session key (agent:<id>:main): ${params.parentSessionKey}`);
|
||||
}
|
||||
|
||||
const requestedKey = buildRequestedChildSessionKey(params.parentSessionKey);
|
||||
const createResponse = await gatewayRpcCall('sessions.create', {
|
||||
key: requestedKey,
|
||||
parentSessionKey: params.parentSessionKey,
|
||||
...(params.label ? { label: params.label } : {}),
|
||||
...(params.model ? { model: params.model } : {}),
|
||||
}) as { key?: string; sessionKey?: string };
|
||||
|
||||
const sessionKey = typeof createResponse.key === 'string' && createResponse.key.trim()
|
||||
? createResponse.key
|
||||
: typeof createResponse.sessionKey === 'string' && createResponse.sessionKey.trim()
|
||||
? createResponse.sessionKey
|
||||
: requestedKey;
|
||||
|
||||
const launchTimestamp = Date.now();
|
||||
|
||||
const sendResponse = await gatewayRpcCall('sessions.send', {
|
||||
key: sessionKey,
|
||||
message: params.task,
|
||||
...(params.thinking ? { thinking: params.thinking } : {}),
|
||||
idempotencyKey: `subagent-spawn:${Date.now()}:${randomUUID().slice(0, 8)}`,
|
||||
}) as { runId?: string };
|
||||
|
||||
startCompletionMonitor({
|
||||
parentSessionKey: params.parentSessionKey,
|
||||
childSessionKey: sessionKey,
|
||||
label: params.label,
|
||||
cleanup: params.cleanup ?? 'keep',
|
||||
runId: sendResponse.runId,
|
||||
launchTimestamp,
|
||||
});
|
||||
|
||||
return {
|
||||
sessionKey,
|
||||
runId: sendResponse.runId,
|
||||
mode: 'direct',
|
||||
};
|
||||
}
|
||||
|
||||
async function launchViaMarker(params: SpawnSubagentParams): Promise<SpawnSubagentResult> {
|
||||
const snapshotResponse = await gatewayRpcCall('sessions.list', {
|
||||
activeMinutes: POLL_SESSIONS_ACTIVE_MINUTES,
|
||||
limit: POLL_SESSIONS_LIMIT,
|
||||
}) as { sessions?: GatewaySessionSummary[] };
|
||||
const snapshotSessions = Array.isArray(snapshotResponse.sessions) ? snapshotResponse.sessions : [];
|
||||
const knownSessionKeysBefore = new Set(
|
||||
snapshotSessions
|
||||
.map(getSessionKey)
|
||||
.filter((value): value is string => Boolean(value)),
|
||||
);
|
||||
|
||||
await gatewayRpcCall('chat.send', {
|
||||
sessionKey: params.parentSessionKey,
|
||||
message: buildSpawnSubagentMarkerMessage({
|
||||
task: params.task,
|
||||
label: params.label,
|
||||
model: params.model,
|
||||
thinking: params.thinking,
|
||||
cleanup: params.cleanup ?? 'keep',
|
||||
}),
|
||||
idempotencyKey: `subagent-marker:${Date.now()}:${randomUUID().slice(0, 8)}`,
|
||||
});
|
||||
|
||||
const deadline = Date.now() + MARKER_DISCOVERY_TIMEOUT_MS;
|
||||
while (Date.now() < deadline) {
|
||||
await new Promise<void>((resolve) => {
|
||||
schedule(resolve, MARKER_DISCOVERY_POLL_MS);
|
||||
});
|
||||
|
||||
const listResponse = await gatewayRpcCall('sessions.list', {
|
||||
activeMinutes: POLL_SESSIONS_ACTIVE_MINUTES,
|
||||
limit: POLL_SESSIONS_LIMIT,
|
||||
}) as { sessions?: GatewaySessionSummary[] };
|
||||
const sessions = Array.isArray(listResponse.sessions) ? listResponse.sessions : [];
|
||||
const spawned = pickMarkerSpawnedChildSession(sessions, params.parentSessionKey, knownSessionKeysBefore);
|
||||
const spawnedKey = spawned ? getSessionKey(spawned) : null;
|
||||
if (spawnedKey) {
|
||||
return {
|
||||
sessionKey: spawnedKey,
|
||||
mode: 'marker',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Timed out waiting for the new subagent session to appear');
|
||||
}
|
||||
|
||||
export async function spawnSubagent(params: SpawnSubagentParams): Promise<SpawnSubagentResult> {
|
||||
if (!isTopLevelRootSessionKey(params.parentSessionKey)) {
|
||||
throw new Error(`parentSessionKey must be a top-level root session key (agent:<id>:main): ${params.parentSessionKey}`);
|
||||
}
|
||||
|
||||
try {
|
||||
return await launchDirect(params);
|
||||
} catch (error) {
|
||||
if (!isUnsupportedDirectSpawnError(error)) {
|
||||
throw error;
|
||||
}
|
||||
return launchViaMarker(params);
|
||||
}
|
||||
}
|
||||
|
||||
export function __resetSubagentSpawnTestState(): void {
|
||||
activeMonitors.clear();
|
||||
}

@@ -56,6 +56,34 @@ async function loadTtsModule(opts: {
  return mod;
}

describe('getTTSConfig', () => {
  it('returns Xiaomi defaults when config file is missing', async () => {
    const mod = await loadTtsModule({
      language: 'en',
      edgeVoiceGender: 'female',
      storedVoice: 'en-US-JennyNeural',
    });

    const cfg = mod.getTTSConfig();
    expect(cfg.xiaomi.model).toBe('mimo-v2-tts');
    expect(cfg.xiaomi.voice).toBe('mimo_default');
    expect(cfg.xiaomi.style).toBe('');
  });

  it('deep-merges Xiaomi patches without dropping defaults', async () => {
    const mod = await loadTtsModule({
      language: 'en',
      edgeVoiceGender: 'female',
      storedVoice: 'en-US-JennyNeural',
    });

    const cfg = mod.updateTTSConfig({ xiaomi: { style: 'Happy' } });
    expect(cfg.xiaomi.style).toBe('Happy');
    expect(cfg.xiaomi.model).toBe('mimo-v2-tts');
    expect(cfg.xiaomi.voice).toBe('mimo_default');
  });
});

describe('resolveEdgeTTSVoice', () => {
  it('keeps explicit non-default English override', async () => {
    const mod = await loadTtsModule({

@@ -46,6 +46,15 @@ export interface TTSVoiceConfig {
    /** Voice name (e.g. en-US-AriaNeural, en-GB-SoniaNeural) */
    voice: string;
  };
  /** Xiaomi MiMo TTS settings */
  xiaomi: {
    /** Xiaomi model name */
    model: string;
    /** Built-in Xiaomi voice name */
    voice: string;
    /** Optional default Xiaomi style prompt */
    style: string;
  };
}

const DEFAULTS: TTSVoiceConfig = {

@@ -65,6 +74,11 @@ const DEFAULTS: TTSVoiceConfig = {
  edge: {
    voice: 'en-US-AriaNeural',
  },
  xiaomi: {
    model: 'mimo-v2-tts',
    voice: 'mimo_default',
    style: '',
  },
};

let cached: TTSVoiceConfig | null = null;

server/lib/updater/health.test.ts (193 lines, Normal file)
@@ -0,0 +1,193 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { EventEmitter } from 'node:events';
import { mkdtempSync, rmSync, writeFileSync } from 'node:fs';
import { tmpdir } from 'node:os';
import { join } from 'node:path';

type MockResponse = { status: number; body: string };

const { httpGetMock } = vi.hoisted(() => ({
  httpGetMock: vi.fn(),
}));

vi.mock('node:http', () => ({
  default: {
    get: httpGetMock,
  },
  get: httpGetMock,
}));

function createMockRequest() {
  return {
    on: vi.fn().mockReturnThis(),
    destroy: vi.fn(),
  };
}

function createMockResponse({ status, body }: MockResponse) {
  const response = new EventEmitter() as EventEmitter & { statusCode?: number };
  response.statusCode = status;

  queueMicrotask(() => {
    if (body.length > 0) {
      response.emit('data', Buffer.from(body));
    }
    response.emit('end');
  });

  return response;
}

describe('updater health checks', () => {
  const originalEnv = { ...process.env };
  const tempDirs: string[] = [];

  beforeEach(() => {
    vi.resetModules();
    httpGetMock.mockReset();
    process.env = { ...originalEnv };
    delete process.env.HOST;
    delete process.env.PORT;
  });

  afterEach(() => {
    process.env = { ...originalEnv };
    while (tempDirs.length > 0) {
      const dir = tempDirs.pop();
      if (dir) rmSync(dir, { recursive: true, force: true });
    }
  });

  function createProjectEnv(lines: string[]) {
    const dir = mkdtempSync(join(tmpdir(), 'updater-health-'));
    tempDirs.push(dir);
    writeFileSync(join(dir, '.env'), `${lines.join('\n')}\n`, 'utf-8');
    return dir;
  }

  function mockHealthyVersion(version: string) {
    const responses: MockResponse[] = [
      { status: 200, body: 'ok' },
      { status: 200, body: JSON.stringify({ version }) },
    ];

    httpGetMock.mockImplementation((url: string, _options: unknown, callback: (res: EventEmitter) => void) => {
      const next = responses.shift();
      if (!next) throw new Error(`Unexpected request: ${url}`);
      queueMicrotask(() => callback(createMockResponse(next)));
      return createMockRequest();
    });
  }

  it('uses the configured loopback host for health probes', async () => {
    const cwd = createProjectEnv(['HOST=127.0.0.1', 'PORT=4310']);
    mockHealthyVersion('1.2.3');

    const { checkHealth } = await import('./health.js');
    const result = await checkHealth(cwd, '1.2.3');

    expect(result).toMatchObject({ healthy: true, versionMatch: true, reportedVersion: '1.2.3' });
    expect(httpGetMock).toHaveBeenNthCalledWith(
      1,
      'http://127.0.0.1:4310/health',
      expect.objectContaining({ timeout: 5000 }),
      expect.any(Function),
    );
  });

  it('falls back to loopback when the server binds to 0.0.0.0', async () => {
    const cwd = createProjectEnv(['HOST=0.0.0.0', 'PORT=4311']);
    mockHealthyVersion('1.2.3');

    const { checkHealth } = await import('./health.js');
    const result = await checkHealth(cwd, '1.2.3');

    expect(result).toMatchObject({ healthy: true, versionMatch: true, reportedVersion: '1.2.3' });
    expect(httpGetMock).toHaveBeenNthCalledWith(
      1,
      'http://127.0.0.1:4311/health',
      expect.objectContaining({ timeout: 5000 }),
      expect.any(Function),
    );
  });

  it('strips quotes from the configured port before probing health', async () => {
    const cwd = createProjectEnv(['HOST=127.0.0.1', 'PORT="4311"']);
    mockHealthyVersion('1.2.3');

    const { checkHealth } = await import('./health.js');
    const result = await checkHealth(cwd, '1.2.3');

    expect(result).toMatchObject({ healthy: true, versionMatch: true, reportedVersion: '1.2.3' });
    expect(httpGetMock).toHaveBeenNthCalledWith(
      1,
      'http://127.0.0.1:4311/health',
      expect.objectContaining({ timeout: 5000 }),
      expect.any(Function),
    );
  });

  it('strips quotes from the configured host before probing health', async () => {
    const cwd = createProjectEnv(['HOST="0.0.0.0"', 'PORT=4311']);
    mockHealthyVersion('1.2.3');

    const { checkHealth } = await import('./health.js');
    const result = await checkHealth(cwd, '1.2.3');

    expect(result).toMatchObject({ healthy: true, versionMatch: true, reportedVersion: '1.2.3' });
    expect(httpGetMock).toHaveBeenNthCalledWith(
      1,
      'http://127.0.0.1:4311/health',
      expect.objectContaining({ timeout: 5000 }),
      expect.any(Function),
    );
  });

  it('probes IPv6 unspecified bind through loopback', async () => {
    const cwd = createProjectEnv(['HOST=::', 'PORT=4312']);
    mockHealthyVersion('1.2.3');

    const { checkHealth } = await import('./health.js');
    const result = await checkHealth(cwd, '1.2.3');

    expect(result).toMatchObject({ healthy: true, versionMatch: true, reportedVersion: '1.2.3' });
    expect(httpGetMock).toHaveBeenNthCalledWith(
      1,
      'http://[::1]:4312/health',
      expect.objectContaining({ timeout: 5000 }),
      expect.any(Function),
    );
  });

  it('probes bracketed IPv6 unspecified bind through loopback', async () => {
    const cwd = createProjectEnv(['HOST=[::]', 'PORT=4313']);
    mockHealthyVersion('1.2.3');

    const { checkHealth } = await import('./health.js');
    const result = await checkHealth(cwd, '1.2.3');

    expect(result).toMatchObject({ healthy: true, versionMatch: true, reportedVersion: '1.2.3' });
    expect(httpGetMock).toHaveBeenNthCalledWith(
      1,
      'http://[::1]:4313/health',
      expect.objectContaining({ timeout: 5000 }),
      expect.any(Function),
    );
  });

  it('uses a specific configured LAN or tailnet host for health probes', async () => {
    const cwd = createProjectEnv(['HOST=100.92.14.6', 'PORT=4314']);
    mockHealthyVersion('1.2.3');

    const { checkHealth } = await import('./health.js');
    const result = await checkHealth(cwd, '1.2.3');

    expect(result).toMatchObject({ healthy: true, versionMatch: true, reportedVersion: '1.2.3' });
    expect(httpGetMock).toHaveBeenNthCalledWith(
      1,
      'http://100.92.14.6:4314/health',
      expect.objectContaining({ timeout: 5000 }),
      expect.any(Function),
    );
  });
});

@@ -7,18 +7,20 @@
import { readFileSync, existsSync } from 'node:fs';
import { join } from 'node:path';
import http from 'node:http';
+ import { parse as parseDotenv } from 'dotenv';
import type { HealthResult } from './types.js';

const BACKOFFS = [2_000, 4_000, 8_000];
const TOTAL_TIMEOUT = 60_000;
const REQUEST_TIMEOUT = 5_000;
+ const DEFAULT_PORT = 3080;
+ const DEFAULT_HOST = '127.0.0.1';

/**
 * Check that the server is healthy and reports the expected version.
 */
export async function checkHealth(cwd: string, targetVersion: string): Promise<HealthResult> {
-   const port = readPort(cwd);
-   const baseUrl = `http://127.0.0.1:${port}`;
+   const baseUrl = resolveHealthCheckBaseUrl(cwd);
  const deadline = Date.now() + TOTAL_TIMEOUT;

  let lastHealthy = false;

@@ -45,14 +47,14 @@ export async function checkHealth(cwd: string, targetVersion: string): Promise<H
        return { healthy: true, versionMatch: true, reportedVersion: data.version };
      }

-       // Version mismatch — stale process may still be serving, keep retrying
+       // Version mismatch, stale process may still be serving, keep retrying
      continue;
    } catch {
      continue;
    }
  }

-   // Deadline expired — report what we last saw
+   // Deadline expired, report what we last saw
  if (lastHealthy && lastReportedVersion) {
    return {
      healthy: true,

@@ -65,22 +67,50 @@ export async function checkHealth(cwd: string, targetVersion: string): Promise<H
  return {
    healthy: false,
    versionMatch: false,
-     error: `Health check timed out after ${TOTAL_TIMEOUT / 1_000}s (port ${port})`,
+     error: `Health check timed out after ${TOTAL_TIMEOUT / 1_000}s (${baseUrl})`,
  };
}

+ export function resolveHealthCheckBaseUrl(cwd: string): string {
+   const host = resolveProbeHost(readHost(cwd));
+   const port = readPort(cwd);
+   return `http://${formatHostForUrl(host)}:${port}`;
+ }

// ── Helpers ──────────────────────────────────────────────────────────

function readPort(cwd: string): number {
+   const rawPort = process.env.PORT ?? readEnvValue(cwd, 'PORT');
+   if (!rawPort) return DEFAULT_PORT;
+
+   const port = Number.parseInt(rawPort, 10);
+   return Number.isFinite(port) ? port : DEFAULT_PORT;
+ }
+
+ function readHost(cwd: string): string {
+   return process.env.HOST ?? readEnvValue(cwd, 'HOST') ?? DEFAULT_HOST;
+ }
+
+ function readEnvValue(cwd: string, key: string): string | undefined {
  const envPath = join(cwd, '.env');
-   if (existsSync(envPath)) {
-     const content = readFileSync(envPath, 'utf-8');
-     for (const line of content.split('\n')) {
-       const match = /^PORT=(\d+)/.exec(line.trim());
-       if (match) return parseInt(match[1], 10);
-     }
-   }
+   if (!existsSync(envPath)) return undefined;
+
+   const content = readFileSync(envPath, 'utf-8');
+   return parseDotenv(content)[key];
}

+ function resolveProbeHost(host: string): string {
+   const normalized = host.trim();
+   if (!normalized || normalized === '0.0.0.0') return '127.0.0.1';
+   if (normalized === '::' || normalized === '[::]') return '::1';
+   return normalized;
+ }
+
+ function formatHostForUrl(host: string): string {
+   if (host.includes(':') && !host.startsWith('[')) {
+     return `[${host}]`;
+   }
-   return 3080;
+   return host;
}

function httpGet(

@@ -14,7 +14,7 @@ import { resolveVersion } from './release-resolver.js';
import { createSnapshot } from './snapshot.js';
import { gitFetchAndCheckout, buildProject } from './installer.js';
import { detectServiceManager } from './service-manager.js';
- import { checkHealth } from './health.js';
+ import { checkHealth, resolveHealthCheckBaseUrl } from './health.js';
import { rollback } from './rollback.js';
import { EXIT_CODES, UpdateError } from './types.js';
import type { UpdateOptions, Reporter, ExitCode, ServiceManager } from './types.js';

@@ -154,7 +154,8 @@ export async function orchestrate(options: UpdateOptions, reporter: Reporter): P
  stageNum++;
  reporter.stage('Health check', stageNum, totalStages);
  if (serviceManager) {
-     reporter.verbose('Polling /health and /api/version...');
+     const healthBaseUrl = resolveHealthCheckBaseUrl(options.cwd);
+     reporter.verbose(`Polling ${healthBaseUrl}/health and ${healthBaseUrl}/api/version...`);
    const health = await checkHealth(options.cwd, resolved.version);

    if (!health.healthy || !health.versionMatch) {

server/lib/upload-config.ts (77 lines, Normal file)
@@ -0,0 +1,77 @@
export interface UploadFeatureConfig {
  twoModeEnabled: boolean;
  inlineEnabled: boolean;
  fileReferenceEnabled: boolean;
  modeChooserEnabled: boolean;
  inlineAttachmentMaxMb: number;
  inlineImageContextMaxBytes: number;
  inlineImageAutoDowngradeToFileReference: boolean;
  inlineImageShrinkMinDimension: number;
  inlineImageMaxDimension: number;
  inlineImageWebpQuality: number;
  exposeInlineBase64ToAgent: boolean;
}

const DEFAULT_UPLOAD_FEATURE_CONFIG: UploadFeatureConfig = {
  twoModeEnabled: false,
  inlineEnabled: true,
  fileReferenceEnabled: false,
  modeChooserEnabled: false,
  inlineAttachmentMaxMb: 4,
  inlineImageContextMaxBytes: 32_768,
  inlineImageAutoDowngradeToFileReference: true,
  inlineImageShrinkMinDimension: 512,
  inlineImageMaxDimension: 2048,
  inlineImageWebpQuality: 82,
  exposeInlineBase64ToAgent: false,
};

function readBooleanEnv(name: string, fallback: boolean): boolean {
  const value = process.env[name];
  if (value == null) return fallback;

  const normalized = value.trim().toLowerCase();
  if (['1', 'true', 'yes', 'on'].includes(normalized)) return true;
  if (['0', 'false', 'no', 'off'].includes(normalized)) return false;

  return fallback;
}

function readNumberEnv(name: string, fallback: number): number {
  const value = process.env[name];
  if (value == null) return fallback;

  const parsed = Number(value);
  return Number.isFinite(parsed) && parsed > 0 ? parsed : fallback;
}

export function getUploadFeatureConfig(): UploadFeatureConfig {
  return {
    twoModeEnabled: readBooleanEnv('NERVE_UPLOAD_TWO_MODE_ENABLED', DEFAULT_UPLOAD_FEATURE_CONFIG.twoModeEnabled),
    inlineEnabled: readBooleanEnv('NERVE_UPLOAD_INLINE_ENABLED', DEFAULT_UPLOAD_FEATURE_CONFIG.inlineEnabled),
    fileReferenceEnabled: readBooleanEnv('NERVE_UPLOAD_FILE_REFERENCE_ENABLED', DEFAULT_UPLOAD_FEATURE_CONFIG.fileReferenceEnabled),
    modeChooserEnabled: readBooleanEnv('NERVE_UPLOAD_MODE_CHOOSER_ENABLED', DEFAULT_UPLOAD_FEATURE_CONFIG.modeChooserEnabled),
    inlineAttachmentMaxMb: readNumberEnv('NERVE_UPLOAD_INLINE_ATTACHMENT_MAX_MB', DEFAULT_UPLOAD_FEATURE_CONFIG.inlineAttachmentMaxMb),
    inlineImageContextMaxBytes: readNumberEnv('NERVE_UPLOAD_INLINE_IMAGE_CONTEXT_MAX_BYTES', DEFAULT_UPLOAD_FEATURE_CONFIG.inlineImageContextMaxBytes),
    inlineImageAutoDowngradeToFileReference: readBooleanEnv(
      'NERVE_UPLOAD_INLINE_IMAGE_AUTO_DOWNGRADE_TO_FILE_REFERENCE',
      DEFAULT_UPLOAD_FEATURE_CONFIG.inlineImageAutoDowngradeToFileReference,
    ),
    inlineImageShrinkMinDimension: readNumberEnv(
      'NERVE_UPLOAD_INLINE_IMAGE_SHRINK_MIN_DIMENSION',
      DEFAULT_UPLOAD_FEATURE_CONFIG.inlineImageShrinkMinDimension,
    ),
    inlineImageMaxDimension: readNumberEnv(
      'NERVE_UPLOAD_IMAGE_OPTIMIZATION_MAX_DIMENSION',
      DEFAULT_UPLOAD_FEATURE_CONFIG.inlineImageMaxDimension,
    ),
    inlineImageWebpQuality: readNumberEnv(
      'NERVE_UPLOAD_IMAGE_OPTIMIZATION_WEBP_QUALITY',
      DEFAULT_UPLOAD_FEATURE_CONFIG.inlineImageWebpQuality,
    ),
    exposeInlineBase64ToAgent: readBooleanEnv(
      'NERVE_UPLOAD_EXPOSE_INLINE_BASE64_TO_AGENT',
      DEFAULT_UPLOAD_FEATURE_CONFIG.exposeInlineBase64ToAgent,
    ),
  };
}

server/lib/upload-reference.test.ts (86 lines, Normal file)
@@ -0,0 +1,86 @@
import { afterEach, describe, expect, it, vi } from 'vitest';
import fs from 'node:fs/promises';
import os from 'node:os';
import path from 'node:path';

async function importHelpers() {
  vi.resetModules();
  return import('./upload-reference.js');
}

const originalHome = process.env.HOME;
const originalFileBrowserRoot = process.env.FILE_BROWSER_ROOT;
const originalUploadStagingTempDir = process.env.NERVE_UPLOAD_STAGING_TEMP_DIR;
const tempDirs = new Set<string>();

async function makeHomeWorkspace(): Promise<{ homeDir: string; workspaceRoot: string }> {
  const homeDir = await fs.mkdtemp(path.join(os.tmpdir(), 'nerve-upload-reference-lib-home-'));
  tempDirs.add(homeDir);
  const workspaceRoot = path.join(homeDir, '.openclaw', 'workspace');
  await fs.mkdir(workspaceRoot, { recursive: true });
  process.env.HOME = homeDir;
  delete process.env.FILE_BROWSER_ROOT;
  delete process.env.NERVE_UPLOAD_STAGING_TEMP_DIR;
  return { homeDir, workspaceRoot };
}

afterEach(async () => {
  if (originalHome == null) {
    delete process.env.HOME;
  } else {
    process.env.HOME = originalHome;
  }

  if (originalFileBrowserRoot == null) {
    delete process.env.FILE_BROWSER_ROOT;
  } else {
    process.env.FILE_BROWSER_ROOT = originalFileBrowserRoot;
  }

  if (originalUploadStagingTempDir == null) {
    delete process.env.NERVE_UPLOAD_STAGING_TEMP_DIR;
  } else {
    process.env.NERVE_UPLOAD_STAGING_TEMP_DIR = originalUploadStagingTempDir;
  }

  for (const dir of tempDirs) {
    await fs.rm(dir, { recursive: true, force: true });
    tempDirs.delete(dir);
  }
});

describe('upload-reference helpers', () => {
  it('imports external uploads into canonical staged workspace references', async () => {
    const { workspaceRoot } = await makeHomeWorkspace();
    const { importExternalUploadToCanonicalReference } = await importHelpers();

    const result = await importExternalUploadToCanonicalReference({
      originalName: 'proof.txt',
      mimeType: 'text/plain',
      bytes: new TextEncoder().encode('hello import'),
    });

    expect(result.kind).toBe('imported_workspace_reference');
    expect(result.canonicalPath).toMatch(/^\.temp\/nerve-uploads\/\d{4}\/\d{2}\/\d{2}\/proof-[a-f0-9]{8}\.txt$/);
    expect(result.absolutePath).toBe(path.join(workspaceRoot, result.canonicalPath));
    expect(result.mimeType).toBe('text/plain');
    expect(result.sizeBytes).toBe(12);
    expect(result.originalName).toBe('proof.txt');
    await expect(fs.readFile(result.absolutePath, 'utf8')).resolves.toBe('hello import');
  });

  it('rejects imported staging output when the configured staging root escapes the workspace', async () => {
    const { homeDir } = await makeHomeWorkspace();
    const outsideStageRoot = path.join(homeDir, 'outside-stage');
    process.env.NERVE_UPLOAD_STAGING_TEMP_DIR = outsideStageRoot;
    const { importExternalUploadToCanonicalReference } = await importHelpers();

    await expect(importExternalUploadToCanonicalReference({
      originalName: 'proof.txt',
      mimeType: 'text/plain',
      bytes: new TextEncoder().encode('hello import'),
    })).rejects.toThrow('Resolved attachment path is outside the workspace root.');

    await expect(fs.stat(outsideStageRoot)).rejects.toMatchObject({ code: 'ENOENT' });
  });
});

server/lib/upload-reference.ts (162 lines, Normal file)
@@ -0,0 +1,162 @@
|
|||
import fs from 'node:fs/promises';
import os from 'node:os';
import path from 'node:path';
import crypto from 'node:crypto';
import { resolveAgentWorkspace } from './agent-workspace.js';
import { getWorkspaceRoot, resolveWorkspacePath, resolveWorkspacePathForRoot } from './file-utils.js';

export type CanonicalUploadReferenceKind = 'direct_workspace_reference' | 'imported_workspace_reference';

export interface CanonicalUploadReference {
  kind: CanonicalUploadReferenceKind;
  canonicalPath: string;
  absolutePath: string;
  uri: string;
  mimeType: string;
  sizeBytes: number;
  originalName: string;
}

function toFileUri(filePath: string): string {
  const normalized = filePath.replace(/\\/g, '/');
  if (/^[A-Za-z]:\//.test(normalized)) return `file:///${encodeURI(normalized)}`;
  return `file://${encodeURI(normalized)}`;
}

function isWithinDir(candidate: string, root: string): boolean {
  const relative = path.relative(root, candidate);
  return relative === '' || (!relative.startsWith('..') && !path.isAbsolute(relative));
}

function toCanonicalWorkspacePath(absolutePath: string, workspaceRoot: string): string {
  const relative = path.relative(workspaceRoot, absolutePath);
  return relative.split(path.sep).join('/');
}

function inferMimeTypeFromName(name: string): string {
  const ext = path.extname(name).toLowerCase();
  switch (ext) {
    case '.png': return 'image/png';
    case '.jpg':
    case '.jpeg': return 'image/jpeg';
    case '.gif': return 'image/gif';
    case '.webp': return 'image/webp';
    case '.avif': return 'image/avif';
    case '.svg': return 'image/svg+xml';
    case '.ico': return 'image/x-icon';
    case '.txt': return 'text/plain';
    case '.md': return 'text/markdown';
    case '.json': return 'application/json';
    case '.pdf': return 'application/pdf';
    case '.mov': return 'video/quicktime';
    case '.mp4': return 'video/mp4';
    default: return 'application/octet-stream';
  }
}

function expandHomePath(input: string): string {
  const home = process.env.HOME || os.homedir();
  if (input === '~') return home;
  if (input.startsWith('~/')) return path.join(home, input.slice(2));
  return input;
}

function sanitizeFileName(name: string): string {
  const trimmed = name.trim();
  const base = path.basename(trimmed || 'upload.bin');
  const safe = base.replace(/[^A-Za-z0-9._-]+/g, '-').replace(/^-+|-+$/g, '');
  return safe || 'upload.bin';
}

function buildStagedFileName(originalName: string): string {
  const safeName = sanitizeFileName(originalName);
  const ext = path.extname(safeName);
  const stem = ext ? safeName.slice(0, -ext.length) : safeName;
  const suffix = crypto.randomUUID().slice(0, 8);
  return `${stem || 'upload'}-${suffix}${ext}`;
}

function buildStagedSubdir(now = new Date()): string {
  const year = String(now.getUTCFullYear());
  const month = String(now.getUTCMonth() + 1).padStart(2, '0');
  const day = String(now.getUTCDate()).padStart(2, '0');
  return path.join(year, month, day);
}

function getUploadStagingDir(): string {
  const stagingRoot = process.env.NERVE_UPLOAD_STAGING_TEMP_DIR
    || path.join(getWorkspaceRoot(), '.temp', 'nerve-uploads');
  return path.resolve(expandHomePath(stagingRoot));
}

async function buildCanonicalReference(params: {
  kind: CanonicalUploadReferenceKind;
  absolutePath: string;
  originalName: string;
  mimeType?: string;
  workspaceRoot?: string;
}): Promise<CanonicalUploadReference> {
  const workspaceRoot = path.resolve(getWorkspaceRoot(params.workspaceRoot));
  const realAbsolutePath = await fs.realpath(params.absolutePath);

  if (!isWithinDir(realAbsolutePath, workspaceRoot)) {
    throw new Error('Resolved attachment path is outside the workspace root.');
  }

  const stat = await fs.stat(realAbsolutePath);
  if (!stat.isFile()) {
    throw new Error('Resolved attachment path is not a file.');
  }

  return {
    kind: params.kind,
    canonicalPath: toCanonicalWorkspacePath(realAbsolutePath, workspaceRoot),
    absolutePath: realAbsolutePath,
    uri: toFileUri(realAbsolutePath),
    mimeType: params.mimeType?.trim() || inferMimeTypeFromName(params.originalName),
    sizeBytes: stat.size,
    originalName: params.originalName,
  };
}

export async function resolveDirectWorkspaceReference(relativePath: string, agentId?: string): Promise<CanonicalUploadReference> {
  const workspaceRoot = agentId ? resolveAgentWorkspace(agentId).workspaceRoot : undefined;
  const resolved = workspaceRoot
    ? await resolveWorkspacePathForRoot(workspaceRoot, relativePath)
    : await resolveWorkspacePath(relativePath);
  if (!resolved) {
    throw new Error('Invalid or excluded workspace path.');
  }

  return buildCanonicalReference({
    kind: 'direct_workspace_reference',
    absolutePath: resolved,
    originalName: path.basename(resolved),
    workspaceRoot,
  });
}

export async function importExternalUploadToCanonicalReference(params: {
  originalName: string;
  mimeType?: string;
  bytes: Uint8Array;
}): Promise<CanonicalUploadReference> {
  const workspaceRoot = path.resolve(getWorkspaceRoot());
  const rootDir = getUploadStagingDir();
  const targetDir = path.join(rootDir, buildStagedSubdir());
  const stagedPath = path.join(targetDir, buildStagedFileName(params.originalName));

  if (!isWithinDir(stagedPath, workspaceRoot)) {
    throw new Error('Resolved attachment path is outside the workspace root.');
  }

  await fs.mkdir(targetDir, { recursive: true });
  await fs.writeFile(stagedPath, params.bytes);

  return buildCanonicalReference({
    kind: 'imported_workspace_reference',
    absolutePath: stagedPath,
    originalName: params.originalName,
    mimeType: params.mimeType,
  });
}
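The helpers above turn either a workspace-relative path or raw uploaded bytes into one canonical reference shape. A minimal usage sketch (illustrative only; the `stageUpload` wrapper and its input shape are assumptions, not part of the diff):

```ts
// Sketch: stage an uploaded file and hand back its canonical workspace reference.
// Assumes this runs inside the Nerve server where ./upload-reference.js is importable.
import { importExternalUploadToCanonicalReference } from './upload-reference.js';

async function stageUpload(file: { name: string; type?: string; bytes: Uint8Array }) {
  const ref = await importExternalUploadToCanonicalReference({
    originalName: file.name,
    mimeType: file.type,
    bytes: file.bytes,
  });
  // ref.canonicalPath looks like ".temp/nerve-uploads/2026/04/06/name-ab12cd34.ext"
  // and is guaranteed to resolve inside the workspace root.
  return { path: ref.canonicalPath, uri: ref.uri, mime: ref.mimeType, size: ref.sizeBytes };
}
```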
82 server/lib/workspace-detect.test.ts Normal file
@@ -0,0 +1,82 @@
/** Tests for workspace local/remote detection. */
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import fs from 'node:fs/promises';
import os from 'node:os';
import path from 'node:path';

describe('workspace-detect', () => {
  let tmpDir: string;

  beforeEach(async () => {
    vi.resetModules();
    tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'ws-detect-test-'));
  });

  afterEach(async () => {
    vi.restoreAllMocks();
    await fs.rm(tmpDir, { recursive: true, force: true });
  });

  async function loadModule(workspaceRemote = false) {
    vi.doMock('./config.js', () => ({
      config: { workspaceRemote },
    }));
    const mod = await import('./workspace-detect.js');
    mod.clearWorkspaceDetectCache();
    return mod;
  }

  it('returns true when workspace directory exists', async () => {
    const { isWorkspaceLocal } = await loadModule();
    const workspaceRoot = path.join(tmpDir, 'workspace');
    await fs.mkdir(workspaceRoot);

    const result = await isWorkspaceLocal(workspaceRoot);
    expect(result).toBe(true);
  });

  it('returns false when workspace directory does not exist', async () => {
    const { isWorkspaceLocal } = await loadModule();
    const workspaceRoot = path.join(tmpDir, 'nonexistent');

    const result = await isWorkspaceLocal(workspaceRoot);
    expect(result).toBe(false);
  });

  it('returns false when NERVE_WORKSPACE_REMOTE is true', async () => {
    const { isWorkspaceLocal } = await loadModule(true);
    const workspaceRoot = path.join(tmpDir, 'workspace');
    await fs.mkdir(workspaceRoot);

    const result = await isWorkspaceLocal(workspaceRoot);
    expect(result).toBe(false);
  });

  it('caches results within TTL', async () => {
    const { isWorkspaceLocal } = await loadModule();
    const workspaceRoot = path.join(tmpDir, 'workspace');
    await fs.mkdir(workspaceRoot);

    // First call — should check filesystem
    expect(await isWorkspaceLocal(workspaceRoot)).toBe(true);

    // Remove the directory
    await fs.rm(workspaceRoot, { recursive: true });

    // Second call — should use cached result (still true)
    expect(await isWorkspaceLocal(workspaceRoot)).toBe(true);
  });

  it('clearWorkspaceDetectCache resets cached values', async () => {
    const { isWorkspaceLocal, clearWorkspaceDetectCache } = await loadModule();
    const workspaceRoot = path.join(tmpDir, 'workspace');
    await fs.mkdir(workspaceRoot);

    expect(await isWorkspaceLocal(workspaceRoot)).toBe(true);
    await fs.rm(workspaceRoot, { recursive: true });

    // Clear cache and re-check
    clearWorkspaceDetectCache();
    expect(await isWorkspaceLocal(workspaceRoot)).toBe(false);
  });
});
58 server/lib/workspace-detect.ts Normal file
@@ -0,0 +1,58 @@
/**
 * Workspace locality detection.
 *
 * Determines whether the agent workspace directory exists on the local
 * filesystem. When it doesn't (e.g. Nerve running on DGX host while the
 * workspace lives inside an OpenShell sandbox), route handlers fall back
 * to gateway RPC for file access.
 *
 * The result is cached with a short TTL to avoid probing the filesystem
 * on every HTTP request. Setting `NERVE_WORKSPACE_REMOTE=true` forces
 * gateway-only mode unconditionally.
 * @module
 */

import { access } from 'node:fs/promises';
import { config } from './config.js';

const CACHE_TTL_MS = 60_000; // 60 seconds

interface CacheEntry {
  isLocal: boolean;
  expiresAt: number;
}

const cache = new Map<string, CacheEntry>();

/**
 * Check whether the workspace root directory exists locally.
 *
 * Returns `true` when the directory is accessible on the local filesystem,
 * `false` when it isn't (or when `NERVE_WORKSPACE_REMOTE=true` is set).
 */
export async function isWorkspaceLocal(workspaceRoot: string): Promise<boolean> {
  // Env override — always treat as remote
  if (config.workspaceRemote) return false;

  const now = Date.now();
  const cached = cache.get(workspaceRoot);
  if (cached && now < cached.expiresAt) {
    return cached.isLocal;
  }

  let isLocal: boolean;
  try {
    await access(workspaceRoot);
    isLocal = true;
  } catch {
    isLocal = false;
  }

  cache.set(workspaceRoot, { isLocal, expiresAt: now + CACHE_TTL_MS });
  return isLocal;
}

/** Clear the detection cache (useful for tests). */
export function clearWorkspaceDetectCache(): void {
  cache.clear();
}
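A rough sketch of how a route handler might branch on this check; the `readViaGateway` callback here is hypothetical, standing in for the gateway RPC fallback that the file-browser routes use further down:

```ts
// Sketch: read a top-level workspace file either from local disk or via the gateway.
import fs from 'node:fs/promises';
import path from 'node:path';
import { isWorkspaceLocal } from './workspace-detect.js';

async function readWorkspaceFile(
  workspaceRoot: string,
  name: string,
  readViaGateway: (name: string) => Promise<string>, // hypothetical gateway RPC wrapper
): Promise<string> {
  if (await isWorkspaceLocal(workspaceRoot)) {
    return fs.readFile(path.join(workspaceRoot, name), 'utf8');
  }
  // Workspace lives in a remote sandbox — go through the gateway instead.
  return readViaGateway(name);
}
```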
|
|
@ -195,6 +195,52 @@ describe('ws-proxy', () => {
|
|||
});
|
||||
|
||||
describe('message relaying', () => {
|
||||
it('forwards restricted session mutations for control-ui clients instead of intercepting them', async () => {
|
||||
const ws = new WebSocket(
|
||||
`ws://127.0.0.1:${proxyPort}/ws?target=${encodeURIComponent(mockGw.url + '/ws')}`,
|
||||
);
|
||||
|
||||
const challenge = await waitForMessage(ws);
|
||||
expect(JSON.parse(challenge).event).toBe('connect.challenge');
|
||||
|
||||
ws.send(JSON.stringify({
|
||||
type: 'req',
|
||||
method: 'connect',
|
||||
id: 'c-control-1',
|
||||
params: { auth: { token: 'test-token' }, client: { id: 'openclaw-control-ui', mode: 'webchat' } },
|
||||
}));
|
||||
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
const timer = setTimeout(() => reject(new Error('timeout waiting for connect response')), 5000);
|
||||
ws.on('message', (data) => {
|
||||
try {
|
||||
const msg = JSON.parse(data.toString());
|
||||
if (msg.type === 'res' && msg.id === 'c-control-1') {
|
||||
clearTimeout(timer);
|
||||
resolve();
|
||||
}
|
||||
} catch { /* ignore */ }
|
||||
});
|
||||
});
|
||||
|
||||
mockGw.clearReceived();
|
||||
ws.send(JSON.stringify({
|
||||
type: 'req',
|
||||
method: 'sessions.delete',
|
||||
id: 'delete-1',
|
||||
params: { key: 'agent:main:subagent:test', deleteTranscript: true },
|
||||
}));
|
||||
|
||||
await mockGw.expectMessages(1);
|
||||
const deleteMsg = mockGw.received.find((m) => {
|
||||
const d = m.data as Record<string, unknown>;
|
||||
return d.type === 'req' && d.method === 'sessions.delete';
|
||||
});
|
||||
expect(deleteMsg).toBeTruthy();
|
||||
|
||||
ws.close();
|
||||
});
|
||||
|
||||
it('relays gateway messages to client', async () => {
|
||||
const ws = new WebSocket(
|
||||
`ws://127.0.0.1:${proxyPort}/ws?target=${encodeURIComponent(mockGw.url + '/ws')}`,
|
||||
|
|
|
|||
|
|
@ -18,13 +18,11 @@ import type { Server as HttpServer } from 'node:http';
|
|||
import { WebSocket, WebSocketServer } from 'ws';
|
||||
import type { IncomingMessage } from 'node:http';
|
||||
import type { Duplex } from 'node:stream';
|
||||
import { execFile } from 'node:child_process';
|
||||
import { dirname } from 'node:path';
|
||||
import { randomUUID } from 'node:crypto';
|
||||
import { config, WS_ALLOWED_HOSTS, SESSION_COOKIE_NAME } from './config.js';
|
||||
import { verifySession, parseSessionCookie } from './session.js';
|
||||
import { createDeviceBlock, getDeviceIdentity } from './device-identity.js';
|
||||
import { resolveOpenclawBin } from './openclaw-bin.js';
|
||||
import { gatewayRpcCall } from './gateway-rpc.js';
|
||||
import { canInjectGatewayToken } from './trust-utils.js';
|
||||
import { isAllowedOrigin } from './origin-utils.js';
|
||||
|
||||
|
|
@ -41,30 +39,14 @@ const RESTRICTED_METHODS = new Set([
|
|||
'sessions.reset',
|
||||
'sessions.compact',
|
||||
]);
|
||||
const CONTROL_UI_CLIENT_ID = 'openclaw-control-ui';
|
||||
|
||||
/**
|
||||
* Execute a gateway RPC call via the CLI, bypassing webchat restrictions.
|
||||
* Execute a gateway RPC call, bypassing webchat restrictions.
|
||||
* Delegates to the shared gateway-rpc module.
|
||||
*/
|
||||
function gatewayCall(method: string, params: Record<string, unknown>): Promise<unknown> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const bin = resolveOpenclawBin();
|
||||
const args = ['gateway', 'call', method, '--params', JSON.stringify(params)];
|
||||
// Ensure nvm/fnm/volta node is in PATH for #!/usr/bin/env node shebangs
|
||||
const nodeBinDir = dirname(process.execPath);
|
||||
const existingPath = process.env.PATH;
|
||||
const env = { ...process.env, PATH: existingPath ? `${nodeBinDir}:${existingPath}` : nodeBinDir };
|
||||
execFile(bin, args, { timeout: 10_000, maxBuffer: 1024 * 1024, env }, (err, stdout, stderr) => {
|
||||
if (err) {
|
||||
reject(new Error(stderr?.trim() || err.message));
|
||||
return;
|
||||
}
|
||||
try {
|
||||
resolve(JSON.parse(stdout));
|
||||
} catch {
|
||||
resolve({ ok: true, raw: stdout.trim() });
|
||||
}
|
||||
});
|
||||
});
|
||||
return gatewayRpcCall(method, params);
|
||||
}
|
||||
|
||||
/** Active WSS instances — used for graceful shutdown */
|
||||
|
|
@ -221,6 +203,8 @@ function createGatewayRelay(
|
|||
let savedConnectMsg: Record<string, unknown> | null = null;
|
||||
/** Whether the saved connect message has been dispatched to the gateway */
|
||||
let connectSent = false;
|
||||
/** Whether this connection is using the privileged OpenClaw control UI client id */
|
||||
let isControlUiClient = false;
|
||||
/** Timeout handle for challenge nonce deadline */
|
||||
let challengeTimer: ReturnType<typeof setTimeout> | null = null;
|
||||
|
||||
|
|
@ -259,6 +243,11 @@ function createGatewayRelay(
|
|||
}
|
||||
}
|
||||
|
||||
function updateClientKindFromConnect(msg: Record<string, unknown>): void {
|
||||
const params = (msg.params || {}) as ConnectParams;
|
||||
isControlUiClient = params.client?.id === CONTROL_UI_CLIENT_ID;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dispatch the saved connect message to the gateway.
|
||||
* Injects device identity when `useDeviceIdentity` is true and a nonce is available.
|
||||
|
|
@ -397,6 +386,7 @@ function createGatewayRelay(
|
|||
const msg = JSON.parse(data.toString());
|
||||
if (msg.type === 'req' && msg.method === 'connect' && msg.params) {
|
||||
savedConnectMsg = msg;
|
||||
updateClientKindFromConnect(msg);
|
||||
return; // Do NOT add to pending buffer
|
||||
}
|
||||
} catch { /* pass through */ }
|
||||
|
|
@ -418,6 +408,7 @@ function createGatewayRelay(
|
|||
if (msg.type === 'req' && msg.method === 'connect' && msg.params) {
|
||||
// Last-write-wins if multiple connect frames arrive before dispatch.
|
||||
savedConnectMsg = msg;
|
||||
updateClientKindFromConnect(msg);
|
||||
if (challengeNonce) {
|
||||
dispatchConnect(challengeNonce);
|
||||
} else {
|
||||
|
|
@ -442,6 +433,7 @@ function createGatewayRelay(
|
|||
// Intercept connect request — defer until challenge nonce arrives
|
||||
if (!handshakeComplete && msg.type === 'req' && msg.method === 'connect' && msg.params) {
|
||||
savedConnectMsg = msg;
|
||||
updateClientKindFromConnect(msg);
|
||||
if (challengeNonce) {
|
||||
dispatchConnect(challengeNonce);
|
||||
} else {
|
||||
|
|
@ -450,8 +442,9 @@ function createGatewayRelay(
|
|||
return;
|
||||
}
|
||||
|
||||
// Intercept restricted RPC methods — proxy via CLI (full scopes)
|
||||
if (msg.type === 'req' && RESTRICTED_METHODS.has(msg.method)) {
|
||||
// Intercept restricted RPC methods for plain webchat clients only.
|
||||
// Control UI clients are allowed to call these directly on the gateway.
|
||||
if (msg.type === 'req' && RESTRICTED_METHODS.has(msg.method) && !isControlUiClient) {
|
||||
const reqId = msg.id;
|
||||
gatewayCall(msg.method, msg.params || {})
|
||||
.then((result) => {
|
||||
|
|
|
|||
|
|
@ -114,6 +114,20 @@ describe('authMiddleware', () => {
|
|||
expect(mockedVerifySession).not.toHaveBeenCalled();
|
||||
});
|
||||
}
|
||||
|
||||
it('keeps /api/auth/login public while protected API routes still require a session', async () => {
|
||||
const app = createTestApp();
|
||||
|
||||
const loginRes = await app.request('/api/auth/login');
|
||||
expect(loginRes.status).toBe(200);
|
||||
expect(mockedVerifySession).not.toHaveBeenCalled();
|
||||
|
||||
const protectedRes = await app.request('/api/test');
|
||||
expect(protectedRes.status).toBe(401);
|
||||
const body = (await protectedRes.json()) as { error: string };
|
||||
expect(body.error).toBe('Authentication required');
|
||||
expect(mockedVerifySession).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('non-API routes pass through (SPA/static)', () => {
|
||||
|
|
|
|||
|
|
@ -30,13 +30,13 @@ describe('securityHeaders middleware', () => {
|
|||
expect(csp).toBeTruthy();
|
||||
expect(csp).toContain("default-src 'self'");
|
||||
expect(csp).toContain("script-src 'self'");
|
||||
expect(csp).toContain("frame-ancestors 'none'");
|
||||
expect(csp).toContain("frame-ancestors 'self'");
|
||||
});
|
||||
|
||||
it('sets X-Frame-Options to DENY', async () => {
|
||||
it('sets X-Frame-Options to SAMEORIGIN', async () => {
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/test');
|
||||
expect(res.headers.get('X-Frame-Options')).toBe('DENY');
|
||||
expect(res.headers.get('X-Frame-Options')).toBe('SAMEORIGIN');
|
||||
});
|
||||
|
||||
it('sets X-Content-Type-Options to nosniff', async () => {
|
||||
|
|
|
|||
|
|
@@ -52,12 +52,12 @@ function getCspDirectives(): string {
    "default-src 'self'",
    "script-src 'self' https://s3.tradingview.com",
    "style-src 'self' 'unsafe-inline' https://fonts.googleapis.com",
    "font-src 'self' https://fonts.gstatic.com",
    "font-src 'self' https://fonts.gstatic.com data:",
    `connect-src ${connectSrc}`,
    "img-src 'self' data: blob:",
    "media-src 'self' blob:", // Allow blob: URLs for TTS audio playback
    "frame-src https://s3.tradingview.com https://www.tradingview.com https://www.tradingview-widget.com https://s.tradingview.com",
    "frame-ancestors 'none'",
    "frame-src 'self' https://s3.tradingview.com https://www.tradingview.com https://www.tradingview-widget.com https://s.tradingview.com",
    "frame-ancestors 'self'",
    "base-uri 'self'",
    "form-action 'self'",
  ].join('; ');

@@ -72,7 +72,7 @@ export const securityHeaders: MiddlewareHandler = async (c, next) => {
  c.header('Content-Security-Policy', getCspDirectives());

  // Prevent clickjacking
  c.header('X-Frame-Options', 'DENY');
  c.header('X-Frame-Options', 'SAMEORIGIN');

  // Prevent MIME type sniffing
  c.header('X-Content-Type-Options', 'nosniff');
|
|
|
|||
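These header changes relax framing from a blanket deny to same-origin embedding. A quick way to confirm what a running instance actually sends (a sketch; the host and port are placeholders for your own deployment):

```ts
// Sketch: inspect the security headers returned by a running Nerve instance.
const res = await fetch('http://127.0.0.1:3000/');
console.log(res.headers.get('x-frame-options'));         // expected: SAMEORIGIN
console.log(res.headers.get('content-security-policy')); // should include frame-ancestors 'self'
```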
68 server/routes/api-keys.test.ts Normal file
@@ -0,0 +1,68 @@
/** Tests for API key status and persistence routes. */
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { Hono } from 'hono';
|
||||
|
||||
describe('api-keys routes', () => {
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
function mockDeps(overrides: { mimoKey?: string } = {}) {
|
||||
const mockConfig: Record<string, unknown> = {
|
||||
openaiApiKey: '',
|
||||
replicateApiToken: '',
|
||||
mimoApiKey: overrides.mimoKey || '',
|
||||
};
|
||||
|
||||
vi.doMock('../lib/config.js', () => ({
|
||||
config: mockConfig,
|
||||
}));
|
||||
|
||||
vi.doMock('../lib/env-file.js', () => ({
|
||||
writeEnvKey: vi.fn(async () => {}),
|
||||
}));
|
||||
|
||||
vi.doMock('../middleware/rate-limit.js', () => ({
|
||||
rateLimitGeneral: vi.fn((_c: unknown, next: () => Promise<void>) => next()),
|
||||
}));
|
||||
}
|
||||
|
||||
async function buildApp() {
|
||||
const mod = await import('./api-keys.js');
|
||||
const app = new Hono();
|
||||
app.route('/', mod.default);
|
||||
return app;
|
||||
}
|
||||
|
||||
it('reports xiaomiKeySet from config', async () => {
|
||||
mockDeps({ mimoKey: 'sk-mimo' });
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/keys');
|
||||
expect(res.status).toBe(200);
|
||||
|
||||
const json = await res.json() as Record<string, unknown>;
|
||||
expect(json.xiaomiKeySet).toBe(true);
|
||||
});
|
||||
|
||||
it('writes MIMO_API_KEY from mimoApiKey input', async () => {
|
||||
mockDeps();
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/keys', {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ mimoApiKey: 'sk-mimo' }),
|
||||
});
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
|
||||
const json = await res.json() as Record<string, unknown>;
|
||||
expect(json.ok).toBe(true);
|
||||
expect(json.xiaomiKeySet).toBe(true);
|
||||
});
|
||||
});
|
||||
|
|
@@ -18,6 +18,7 @@ app.get('/api/keys', rateLimitGeneral, (c) => {
  return c.json({
    openaiKeySet: !!config.openaiApiKey,
    replicateKeySet: !!config.replicateApiToken,
    xiaomiKeySet: !!config.mimoApiKey,
  });
});

@@ -42,11 +43,19 @@ app.put('/api/keys', rateLimitGeneral, async (c) => {
      results.push(val ? 'REPLICATE_API_TOKEN saved' : 'REPLICATE_API_TOKEN cleared');
    }

    if (body.mimoApiKey !== undefined) {
      const val = body.mimoApiKey.trim();
      await writeEnvKey('MIMO_API_KEY', val);
      (config as Record<string, unknown>).mimoApiKey = val;
      results.push(val ? 'MIMO_API_KEY saved' : 'MIMO_API_KEY cleared');
    }

    return c.json({
      ok: true,
      message: results.join(', ') || 'No changes',
      openaiKeySet: !!config.openaiApiKey,
      replicateKeySet: !!config.replicateApiToken,
      xiaomiKeySet: !!config.mimoApiKey,
    });
  } catch {
    return c.text('Invalid request', 400);
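A minimal client-side sketch of saving the new Xiaomi MiMo key through this route; the field names come from the handler above, the key value and base URL are placeholders:

```ts
// Sketch: persist MIMO_API_KEY via the API-keys route.
const res = await fetch('/api/keys', {
  method: 'PUT',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ mimoApiKey: 'sk-mimo-...' }),
});
const status = await res.json();
console.log(status.xiaomiKeySet); // true once the key has been written to .env and config
```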
|
|
|
|||
|
|
@ -71,15 +71,17 @@ describe('auth routes', () => {
|
|||
expect(res.status).toBe(400);
|
||||
});
|
||||
|
||||
it('rejects gateway token as password', async () => {
|
||||
const app = await buildApp({ gatewayToken: 'my-secret-token' });
|
||||
it('accepts gateway token as a fallback password when no password hash is configured', async () => {
|
||||
const app = await buildApp({ passwordHash: '', gatewayToken: 'my-secret-token' });
|
||||
const res = await app.request('/api/auth/login', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ password: 'my-secret-token' }),
|
||||
});
|
||||
// Gateway token should NOT be accepted as a login password
|
||||
expect(res.status).toBe(401);
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as Record<string, unknown>;
|
||||
expect(json.ok).toBe(true);
|
||||
expect(res.headers.get('set-cookie')).toContain('nerve_session');
|
||||
});
|
||||
|
||||
it('accepts valid password with scrypt hash', async () => {
|
||||
|
|
@ -97,6 +99,20 @@ describe('auth routes', () => {
|
|||
expect(res.headers.get('set-cookie')).toContain('nerve_session');
|
||||
});
|
||||
|
||||
it('rejects gateway token when a password hash is configured', async () => {
|
||||
const hash = '2b49a0429e647f74418e40e49bfe701257b91d64a825f921fd20986defa6508f:68a86fadbec3e62c603639333693f5c64e5a5788fb4228b7f5d5dfd5804b024cb42dab05ea276c2f8a49e597ffff2f3bd1533612fbd76a4bd22019c54f794173';
|
||||
const app = await buildApp({
|
||||
passwordHash: hash,
|
||||
gatewayToken: 'my-secret-token',
|
||||
});
|
||||
const res = await app.request('/api/auth/login', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ password: 'my-secret-token' }),
|
||||
});
|
||||
expect(res.status).toBe(401);
|
||||
});
|
||||
|
||||
it('returns 401 for invalid password', async () => {
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/auth/login', {
|
||||
|
|
|
|||
|
|
@@ -7,6 +7,7 @@
 * @module
 */

import crypto from 'node:crypto';
import { Hono } from 'hono';
import { setCookie, deleteCookie, getCookie } from 'hono/cookie';
import { config, SESSION_COOKIE_NAME } from '../lib/config.js';

@@ -15,6 +16,21 @@ import { rateLimitAuth } from '../middleware/rate-limit.js';

const app = new Hono();

function matchesGatewayTokenFallback(password: string): boolean {
  if (config.passwordHash || !config.gatewayToken) {
    return false;
  }

  const provided = Buffer.from(password);
  const expected = Buffer.from(config.gatewayToken);

  if (provided.length !== expected.length) {
    return false;
  }

  return crypto.timingSafeEqual(provided, expected);
}

/**
 * POST /api/auth/login
 * Accepts { password: string }

@@ -36,9 +52,10 @@ app.post('/api/auth/login', rateLimitAuth, async (c) => {

  let valid = false;

  // Check against stored password hash
  if (config.passwordHash) {
    valid = await verifyPassword(password, config.passwordHash);
  } else if (matchesGatewayTokenFallback(password)) {
    valid = true;
  }

  if (!valid) {
|
|
|
|||
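In practice this means an instance with only GATEWAY_TOKEN set (and no password hash) can log in with that token until a real password is configured. A sketch of the login call; the token value is a placeholder and the snippet is meant to run where the server is reachable:

```ts
// Sketch: log in with the gateway token when no password hash is configured.
const res = await fetch('/api/auth/login', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ password: 'my-secret-token' }),
});
// On success the response sets the nerve_session cookie; once a password hash
// is configured, the same request returns 401 instead.
console.log(res.status, res.headers.get('set-cookie'));
```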
142 server/routes/beads.test.ts Normal file
@@ -0,0 +1,142 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { Hono } from 'hono';
|
||||
|
||||
const getBeadDetailMock = vi.fn();
|
||||
class MockBeadNotFoundError extends Error {}
|
||||
class MockBeadAdapterError extends Error {}
|
||||
class MockBeadValidationError extends Error {}
|
||||
|
||||
vi.mock('../lib/beads.js', () => ({
|
||||
getBeadDetail: (...args: unknown[]) => getBeadDetailMock(...args),
|
||||
BeadNotFoundError: MockBeadNotFoundError,
|
||||
BeadAdapterError: MockBeadAdapterError,
|
||||
BeadValidationError: MockBeadValidationError,
|
||||
}));
|
||||
|
||||
describe('beads routes', () => {
|
||||
beforeEach(() => {
|
||||
getBeadDetailMock.mockReset();
|
||||
});
|
||||
|
||||
async function buildApp() {
|
||||
vi.resetModules();
|
||||
const mod = await import('./beads.js');
|
||||
const app = new Hono();
|
||||
app.route('/', mod.default);
|
||||
return app;
|
||||
}
|
||||
|
||||
it('returns bead detail for a known bead id', async () => {
|
||||
getBeadDetailMock.mockResolvedValue({
|
||||
id: 'nerve-fms2',
|
||||
title: 'Implement read-only bead viewer tab foundation',
|
||||
notes: 'Open a bead viewer tab.',
|
||||
status: 'in_progress',
|
||||
priority: 1,
|
||||
issueType: 'task',
|
||||
owner: 'Derrick',
|
||||
createdAt: '2026-04-06T13:23:33Z',
|
||||
updatedAt: '2026-04-06T13:26:10Z',
|
||||
closedAt: null,
|
||||
closeReason: null,
|
||||
dependencies: [{ id: 'nerve-qkdo', title: 'Create branch', status: 'closed', dependencyType: 'blocks' }],
|
||||
dependents: [],
|
||||
linkedPlan: {
|
||||
path: '.plans/2026-04-06-bead-viewer-tab-foundation-execution.md',
|
||||
title: 'Bead viewer tab foundation',
|
||||
planId: 'plan-bead-viewer-tab-foundation-execution',
|
||||
archived: false,
|
||||
status: 'In Progress',
|
||||
updatedAt: 123,
|
||||
},
|
||||
});
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/beads/nerve-fms2');
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
await expect(res.json()).resolves.toEqual({
|
||||
ok: true,
|
||||
bead: expect.objectContaining({
|
||||
id: 'nerve-fms2',
|
||||
dependencies: [expect.objectContaining({ id: 'nerve-qkdo' })],
|
||||
linkedPlan: expect.objectContaining({
|
||||
path: '.plans/2026-04-06-bead-viewer-tab-foundation-execution.md',
|
||||
}),
|
||||
}),
|
||||
});
|
||||
expect(getBeadDetailMock).toHaveBeenCalledWith('nerve-fms2', {
|
||||
targetPath: undefined,
|
||||
currentDocumentPath: undefined,
|
||||
workspaceAgentId: undefined,
|
||||
});
|
||||
});
|
||||
|
||||
it('passes explicit lookup context through to the bead lookup', async () => {
|
||||
getBeadDetailMock.mockResolvedValue({
|
||||
id: 'virtra-apex-docs-id2',
|
||||
title: 'Demo',
|
||||
notes: null,
|
||||
status: null,
|
||||
priority: null,
|
||||
issueType: null,
|
||||
owner: null,
|
||||
createdAt: null,
|
||||
updatedAt: null,
|
||||
closedAt: null,
|
||||
closeReason: null,
|
||||
dependencies: [],
|
||||
dependents: [],
|
||||
linkedPlan: null,
|
||||
});
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/beads/virtra-apex-docs-id2?targetPath=../projects/virtra-apex-docs/.beads&currentDocumentPath=bead-link-dogfood.md&workspaceAgentId=main');
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
expect(getBeadDetailMock).toHaveBeenCalledWith('virtra-apex-docs-id2', {
|
||||
targetPath: '../projects/virtra-apex-docs/.beads',
|
||||
currentDocumentPath: 'bead-link-dogfood.md',
|
||||
workspaceAgentId: 'main',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns 400 when the lookup request context is invalid', async () => {
|
||||
getBeadDetailMock.mockRejectedValue(new MockBeadValidationError('Relative explicit bead URIs require a current document path'));
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/beads/nerve-fms2');
|
||||
|
||||
expect(res.status).toBe(400);
|
||||
await expect(res.json()).resolves.toEqual({
|
||||
error: 'invalid_request',
|
||||
details: 'Relative explicit bead URIs require a current document path',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns 404 when the bead is missing', async () => {
|
||||
getBeadDetailMock.mockRejectedValue(new MockBeadNotFoundError('nerve-miss'));
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/beads/nerve-miss');
|
||||
|
||||
expect(res.status).toBe(404);
|
||||
await expect(res.json()).resolves.toEqual({
|
||||
error: 'not_found',
|
||||
details: 'nerve-miss',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns 502 when the bd adapter fails', async () => {
|
||||
getBeadDetailMock.mockRejectedValue(new MockBeadAdapterError('bd failed'));
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/beads/nerve-fms2');
|
||||
|
||||
expect(res.status).toBe(502);
|
||||
await expect(res.json()).resolves.toEqual({
|
||||
error: 'beads_adapter_error',
|
||||
details: 'bd failed',
|
||||
});
|
||||
});
|
||||
});
|
||||
38 server/routes/beads.ts Normal file
@@ -0,0 +1,38 @@
import { Hono } from 'hono';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import { BeadAdapterError, BeadNotFoundError, BeadValidationError, getBeadDetail } from '../lib/beads.js';

const app = new Hono();

app.get('/api/beads/:id', rateLimitGeneral, async (c) => {
  const beadId = c.req.param('id')?.trim();
  if (!beadId) {
    return c.json({ error: 'invalid_request', details: 'bead id is required' }, 400);
  }

  const targetPath = c.req.query('targetPath')?.trim() || undefined;
  const currentDocumentPath = c.req.query('currentDocumentPath')?.trim() || undefined;
  const workspaceAgentId = c.req.query('workspaceAgentId')?.trim() || undefined;

  try {
    const bead = await getBeadDetail(beadId, {
      targetPath,
      currentDocumentPath,
      workspaceAgentId,
    });
    return c.json({ ok: true, bead });
  } catch (error) {
    if (error instanceof BeadValidationError) {
      return c.json({ error: 'invalid_request', details: error.message }, 400);
    }
    if (error instanceof BeadNotFoundError) {
      return c.json({ error: 'not_found', details: error.message }, 404);
    }
    if (error instanceof BeadAdapterError) {
      return c.json({ error: 'beads_adapter_error', details: error.message }, 502);
    }
    throw error;
  }
});

export default app;
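A sketch of calling the bead detail route with an explicit lookup context; the query parameter names match the handler above, while the bead id and paths are illustrative values taken from the tests:

```ts
// Sketch: fetch a bead with an explicit .beads target and originating document.
const params = new URLSearchParams({
  targetPath: '../projects/virtra-apex-docs/.beads',
  currentDocumentPath: 'bead-link-dogfood.md',
  workspaceAgentId: 'main',
});
const res = await fetch(`/api/beads/nerve-fms2?${params}`);
if (res.ok) {
  const { bead } = await res.json();
  console.log(bead.title, bead.status, bead.linkedPlan?.path);
} else {
  console.log(res.status); // 400 invalid_request, 404 not_found, or 502 beads_adapter_error
}
```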
|
|
@ -6,34 +6,67 @@ import path from 'node:path';
|
|||
import os from 'node:os';
|
||||
|
||||
describe('file-browser routes', () => {
|
||||
let homeDir: string;
|
||||
let tmpDir: string;
|
||||
let researchWorkspace: string;
|
||||
let remoteHomeDir: string;
|
||||
let remoteWorkspace: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.resetModules();
|
||||
tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fbrowser-test-'));
|
||||
homeDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fbrowser-test-'));
|
||||
tmpDir = path.join(homeDir, '.openclaw', 'workspace');
|
||||
researchWorkspace = path.join(homeDir, '.openclaw', 'workspace-research');
|
||||
remoteHomeDir = path.join(homeDir, 'remote-nonexistent');
|
||||
remoteWorkspace = path.join(remoteHomeDir, '.openclaw', 'workspace');
|
||||
await fs.mkdir(tmpDir, { recursive: true });
|
||||
// Create a MEMORY.md in the tmpDir so getWorkspaceRoot returns tmpDir
|
||||
await fs.writeFile(path.join(tmpDir, 'MEMORY.md'), '# Memories\n');
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
vi.restoreAllMocks();
|
||||
await fs.rm(tmpDir, { recursive: true, force: true });
|
||||
await fs.rm(homeDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
async function buildApp(opts?: { fileBrowserRoot?: string }) {
|
||||
async function buildApp(opts?: {
|
||||
fileBrowserRoot?: string;
|
||||
remote?: boolean;
|
||||
gatewayFilesListResult?: Array<{ name: string; missing?: boolean; size?: number; updatedAtMs?: number }>;
|
||||
}) {
|
||||
vi.resetModules();
|
||||
vi.doUnmock('../lib/gateway-rpc.js');
|
||||
|
||||
const useRemote = opts?.remote ?? false;
|
||||
const configuredHomeDir = useRemote ? remoteHomeDir : homeDir;
|
||||
const configuredWorkspace = useRemote ? remoteWorkspace : tmpDir;
|
||||
|
||||
vi.doMock('../lib/config.js', () => ({
|
||||
config: {
|
||||
auth: false,
|
||||
port: 3000,
|
||||
host: '127.0.0.1',
|
||||
sslPort: 3443,
|
||||
memoryPath: path.join(tmpDir, 'MEMORY.md'),
|
||||
home: configuredHomeDir,
|
||||
memoryPath: path.join(configuredWorkspace, 'MEMORY.md'),
|
||||
memoryDir: path.join(configuredWorkspace, 'memory'),
|
||||
fileBrowserRoot: opts?.fileBrowserRoot ?? '',
|
||||
workspaceRemote: false,
|
||||
},
|
||||
SESSION_COOKIE_NAME: 'nerve_session_3000',
|
||||
}));
|
||||
|
||||
if (useRemote) {
|
||||
vi.doMock('../lib/gateway-rpc.js', () => ({
|
||||
gatewayFilesList: vi.fn().mockResolvedValue(opts?.gatewayFilesListResult ?? []),
|
||||
gatewayFilesGet: vi.fn(),
|
||||
gatewayFilesSet: vi.fn(),
|
||||
}));
|
||||
|
||||
const detectMod = await import('../lib/workspace-detect.js');
|
||||
detectMod.clearWorkspaceDetectCache();
|
||||
}
|
||||
|
||||
const mod = await import('./file-browser.js');
|
||||
const app = new Hono();
|
||||
app.route('/', mod.default);
|
||||
|
|
@ -82,6 +115,197 @@ describe('file-browser routes', () => {
|
|||
expect(names).not.toContain('node_modules');
|
||||
expect(names).not.toContain('.git');
|
||||
});
|
||||
|
||||
it('hides hidden workspace entries by default', async () => {
|
||||
await fs.writeFile(path.join(tmpDir, '.hidden.md'), 'secret');
|
||||
await fs.writeFile(path.join(tmpDir, 'visible.md'), 'hello');
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/tree');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; entries: Array<{ name: string }> };
|
||||
const names = json.entries.map(e => e.name);
|
||||
|
||||
expect(names).toContain('visible.md');
|
||||
expect(names).not.toContain('.hidden.md');
|
||||
});
|
||||
|
||||
it('includes hidden workspace entries when showHidden=true', async () => {
|
||||
await fs.writeFile(path.join(tmpDir, '.hidden.md'), 'secret');
|
||||
await fs.mkdir(path.join(tmpDir, '.plans'));
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/tree?showHidden=true');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; entries: Array<{ name: string }> };
|
||||
const names = json.entries.map(e => e.name);
|
||||
|
||||
expect(names).toContain('.hidden.md');
|
||||
expect(names).toContain('.plans');
|
||||
});
|
||||
|
||||
it('includes hidden workspace entries when showHidden=true via remote gateway fallback', async () => {
|
||||
const app = await buildApp({
|
||||
remote: true,
|
||||
gatewayFilesListResult: [
|
||||
{ name: '.hidden.md', missing: false, size: 6, updatedAtMs: 1000 },
|
||||
{ name: '.plans', missing: false, size: 0, updatedAtMs: 1001 },
|
||||
{ name: 'visible.md', missing: false, size: 5, updatedAtMs: 1002 },
|
||||
],
|
||||
});
|
||||
|
||||
const res = await app.request('/api/files/tree?showHidden=true');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; entries: Array<{ name: string }>; remoteWorkspace?: boolean };
|
||||
const names = json.entries.map((e) => e.name);
|
||||
|
||||
expect(json.ok).toBe(true);
|
||||
expect(json.remoteWorkspace).toBe(true);
|
||||
expect(names).toContain('.hidden.md');
|
||||
expect(names).toContain('.plans');
|
||||
expect(names).toContain('visible.md');
|
||||
});
|
||||
});
|
||||
|
||||
describe('GET /api/files/resolve', () => {
|
||||
it('classifies workspace files as openable targets', async () => {
|
||||
await fs.writeFile(path.join(tmpDir, 'docs-note.md'), '# hi');
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/files/resolve?path=docs-note.md');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'docs-note.md', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('classifies workspace directories as revealable targets', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'docs'), { recursive: true });
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/files/resolve?path=docs');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'docs', type: 'directory', binary: false });
|
||||
});
|
||||
|
||||
it('resolves current-document-relative file links safely within the workspace', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'docs', 'guide'), { recursive: true });
|
||||
await fs.writeFile(path.join(tmpDir, 'docs', 'guide', 'advanced.md'), '# Advanced');
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/files/resolve?path=advanced.md&relativeTo=docs/guide/index.md');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'docs/guide/advanced.md', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('supports workspace-root links from markdown docs via a leading slash', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'docs'), { recursive: true });
|
||||
await fs.writeFile(path.join(tmpDir, 'docs', 'todo.md'), '# Todo');
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/files/resolve?path=/docs/todo.md&relativeTo=notes/index.md');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'docs/todo.md', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('resolves workspace-root-document relative links even when relativeTo is slash-prefixed', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'projects', 'demo'), { recursive: true });
|
||||
await fs.writeFile(path.join(tmpDir, 'projects', 'demo', 'notes.md'), '# Notes');
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/files/resolve?path=./projects/demo/notes.md&relativeTo=/README.md');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'projects/demo/notes.md', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('returns 404 for safe missing targets inside the workspace root', async () => {
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/resolve?path=missing-note.md');
|
||||
expect(res.status).toBe(404);
|
||||
});
|
||||
|
||||
it('accepts /workspace-prefixed paths by normalizing to workspace-relative', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'src'));
|
||||
await fs.writeFile(path.join(tmpDir, 'src', 'main.ts'), 'export {};');
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/files/resolve?path=%2Fworkspace%2Fsrc%2Fmain.ts');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'src/main.ts', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('keeps /workspace-prefixed links rooted even when relativeTo is provided', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'src'));
|
||||
await fs.mkdir(path.join(tmpDir, 'notes'));
|
||||
await fs.writeFile(path.join(tmpDir, 'src', 'main.ts'), 'export {};');
|
||||
const app = await buildApp();
|
||||
|
||||
const res = await app.request('/api/files/resolve?path=%2Fworkspace%2Fsrc%2Fmain.ts&relativeTo=notes/index.md');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'src/main.ts', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('accepts absolute host paths rooted at the real workspace by normalizing them to workspace-relative', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'src'));
|
||||
await fs.writeFile(path.join(tmpDir, 'src', 'main.ts'), 'export {};');
|
||||
const app = await buildApp();
|
||||
const absoluteTarget = path.join(tmpDir, 'src', 'main.ts').split(path.sep).join('/');
|
||||
|
||||
const res = await app.request(`/api/files/resolve?path=${encodeURIComponent(absoluteTarget)}`);
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'src/main.ts', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('accepts symlink-expanded absolute host paths for the same workspace root', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'src'));
|
||||
await fs.writeFile(path.join(tmpDir, 'src', 'main.ts'), 'export {};');
|
||||
const app = await buildApp();
|
||||
const realTarget = (await fs.realpath(path.join(tmpDir, 'src', 'main.ts'))).split(path.sep).join('/');
|
||||
|
||||
const res = await app.request(`/api/files/resolve?path=${encodeURIComponent(realTarget)}`);
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'src/main.ts', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('keeps absolute host workspace paths rooted even when relativeTo is provided', async () => {
|
||||
await fs.mkdir(path.join(tmpDir, 'src'));
|
||||
await fs.mkdir(path.join(tmpDir, 'notes'));
|
||||
await fs.writeFile(path.join(tmpDir, 'src', 'main.ts'), 'export {};');
|
||||
const app = await buildApp();
|
||||
const absoluteTarget = path.join(tmpDir, 'src', 'main.ts').split(path.sep).join('/');
|
||||
|
||||
const res = await app.request(`/api/files/resolve?path=${encodeURIComponent(absoluteTarget)}&relativeTo=notes/index.md`);
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; path: string; type: string; binary: boolean };
|
||||
expect(json).toEqual({ ok: true, path: 'src/main.ts', type: 'file', binary: false });
|
||||
});
|
||||
|
||||
it('treats the absolute workspace root itself as a non-openable root target', async () => {
|
||||
const app = await buildApp();
|
||||
const absoluteRoot = tmpDir.split(path.sep).join('/');
|
||||
|
||||
const res = await app.request(`/api/files/resolve?path=${encodeURIComponent(absoluteRoot)}`);
|
||||
expect(res.status).toBe(404);
|
||||
});
|
||||
|
||||
it('returns 403 for invalid or excluded targets', async () => {
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/resolve?path=../../etc');
|
||||
expect(res.status).toBe(403);
|
||||
});
|
||||
|
||||
it('returns 403 when a current-document-relative link escapes the workspace', async () => {
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/resolve?path=../../../etc/passwd&relativeTo=docs/guide/index.md');
|
||||
expect(res.status).toBe(403);
|
||||
});
|
||||
});
|
||||
|
||||
describe('GET /api/files/read', () => {
|
||||
|
|
@ -397,6 +621,128 @@ describe('file-browser routes', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('agent-scoped workspaces', () => {
|
||||
it('lists only files from the requested agent workspace', async () => {
|
||||
await fs.mkdir(researchWorkspace, { recursive: true });
|
||||
await fs.writeFile(path.join(tmpDir, 'main-only.md'), 'main');
|
||||
await fs.writeFile(path.join(researchWorkspace, 'research-only.md'), 'research');
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/tree?agentId=research');
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; entries: Array<{ name: string }> };
|
||||
const names = json.entries.map((entry) => entry.name);
|
||||
expect(names).toContain('research-only.md');
|
||||
expect(names).not.toContain('main-only.md');
|
||||
});
|
||||
|
||||
it('reads files from the requested agent workspace', async () => {
|
||||
await fs.mkdir(researchWorkspace, { recursive: true });
|
||||
await fs.writeFile(path.join(tmpDir, 'notes.md'), 'main notes');
|
||||
await fs.writeFile(path.join(researchWorkspace, 'notes.md'), 'research notes');
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/read?agentId=research&path=notes.md');
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { ok: boolean; content: string };
|
||||
expect(json.ok).toBe(true);
|
||||
expect(json.content).toBe('research notes');
|
||||
});
|
||||
|
||||
it('serves raw assets from the requested agent workspace', async () => {
|
||||
await fs.mkdir(researchWorkspace, { recursive: true });
|
||||
const mainBytes = Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x00]);
|
||||
const researchBytes = Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x01]);
|
||||
await fs.writeFile(path.join(tmpDir, 'photo.png'), mainBytes);
|
||||
await fs.writeFile(path.join(researchWorkspace, 'photo.png'), researchBytes);
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/raw?agentId=research&path=photo.png');
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
expect(Buffer.from(await res.arrayBuffer())).toEqual(researchBytes);
|
||||
});
|
||||
|
||||
it('writes files into the requested agent workspace', async () => {
|
||||
await fs.mkdir(researchWorkspace, { recursive: true });
|
||||
await fs.writeFile(path.join(tmpDir, 'notes.md'), 'main notes');
|
||||
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/write', {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ agentId: 'research', path: 'notes.md', content: 'research notes' }),
|
||||
});
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
await expect(fs.readFile(path.join(researchWorkspace, 'notes.md'), 'utf-8')).resolves.toBe('research notes');
|
||||
await expect(fs.readFile(path.join(tmpDir, 'notes.md'), 'utf-8')).resolves.toBe('main notes');
|
||||
});
|
||||
|
||||
it('bootstraps the first write into a fresh agent workspace', async () => {
|
||||
const app = await buildApp();
|
||||
const res = await app.request('/api/files/write', {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ agentId: 'research', path: 'notes.md', content: 'research notes' }),
|
||||
});
|
||||
|
||||
expect(res.status).toBe(200);
|
||||
await expect(fs.readFile(path.join(researchWorkspace, 'notes.md'), 'utf-8')).resolves.toBe('research notes');
|
||||
await expect(fs.access(path.join(tmpDir, 'notes.md'))).rejects.toThrow();
|
||||
});
|
||||
|
||||
it('keeps rename, move, trash, and restore scoped to the requested agent workspace', async () => {
|
||||
await fs.mkdir(researchWorkspace, { recursive: true });
|
||||
await fs.mkdir(path.join(tmpDir, 'archive'), { recursive: true });
|
||||
await fs.mkdir(path.join(researchWorkspace, 'archive'), { recursive: true });
|
||||
await fs.writeFile(path.join(tmpDir, 'draft.md'), 'main draft');
|
||||
await fs.writeFile(path.join(researchWorkspace, 'draft.md'), 'research draft');
|
||||
|
||||
const app = await buildApp();
|
||||
|
||||
const renameRes = await app.request('/api/files/rename', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ agentId: 'research', path: 'draft.md', newName: 'renamed.md' }),
|
||||
});
|
||||
expect(renameRes.status).toBe(200);
|
||||
await expect(fs.readFile(path.join(researchWorkspace, 'renamed.md'), 'utf-8')).resolves.toBe('research draft');
|
||||
await expect(fs.readFile(path.join(tmpDir, 'draft.md'), 'utf-8')).resolves.toBe('main draft');
|
||||
|
||||
const moveRes = await app.request('/api/files/move', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ agentId: 'research', sourcePath: 'renamed.md', targetDirPath: 'archive' }),
|
||||
});
|
||||
expect(moveRes.status).toBe(200);
|
||||
await expect(fs.readFile(path.join(researchWorkspace, 'archive', 'renamed.md'), 'utf-8')).resolves.toBe('research draft');
|
||||
await expect(fs.access(path.join(tmpDir, 'archive', 'renamed.md'))).rejects.toThrow();
|
||||
|
||||
const trashRes = await app.request('/api/files/trash', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ agentId: 'research', path: 'archive/renamed.md' }),
|
||||
});
|
||||
expect(trashRes.status).toBe(200);
|
||||
const trashJson = (await trashRes.json()) as { ok: boolean; to: string };
|
||||
expect(trashJson.ok).toBe(true);
|
||||
expect(trashJson.to.startsWith('.trash/')).toBe(true);
|
||||
await expect(fs.readFile(path.join(tmpDir, 'draft.md'), 'utf-8')).resolves.toBe('main draft');
|
||||
|
||||
const restoreRes = await app.request('/api/files/restore', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ agentId: 'research', path: trashJson.to }),
|
||||
});
|
||||
expect(restoreRes.status).toBe(200);
|
||||
await expect(fs.readFile(path.join(researchWorkspace, 'archive', 'renamed.md'), 'utf-8')).resolves.toBe('research draft');
|
||||
await expect(fs.readFile(path.join(tmpDir, 'draft.md'), 'utf-8')).resolves.toBe('main draft');
|
||||
});
|
||||
});
|
||||
|
||||
describe('workspace info in tree response', () => {
|
||||
it('includes workspace info when FILE_BROWSER_ROOT is not set', async () => {
|
||||
await fs.writeFile(path.join(tmpDir, 'test.md'), '# Test');
|
||||
|
|
|
|||
|
|
@ -5,6 +5,10 @@
|
|||
* file browser UI. All paths are relative to the workspace root
|
||||
* (~/.openclaw/workspace/) and validated against traversal + exclusion rules.
|
||||
*
|
||||
* When the workspace is not locally accessible, falls back to gateway RPC
|
||||
* for top-level persona files. Mutation endpoints (rename, move, trash,
|
||||
* restore) return 501 for remote workspaces.
|
||||
*
|
||||
* GET /api/files/tree — List directory entries (lazy, depth-limited)
|
||||
* GET /api/files/read — Read a text file's content
|
||||
* PUT /api/files/write — Write/update a text file
|
||||
|
|
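Because mutation endpoints return 501 for remote workspaces, a UI consuming these routes can key off the `remoteWorkspace` flag in the tree response before offering rename/move/trash actions. A sketch (client-side, field names taken from the tree handler and its tests):

```ts
// Sketch: disable file mutations in the UI when the workspace is served via gateway RPC.
const tree = await (await fetch('/api/files/tree?showHidden=true')).json();
const mutationsAvailable = tree.ok && !tree.remoteWorkspace;
if (!mutationsAvailable) {
  // rename/move/trash/restore would respond with 501 — hide those controls.
}
```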
@ -13,10 +17,12 @@
|
|||
|
||||
import { Hono, type Context } from 'hono';
|
||||
import fs from 'node:fs/promises';
|
||||
import fsSync from 'node:fs';
|
||||
import { Readable } from 'node:stream';
|
||||
import path from 'node:path';
|
||||
import {
|
||||
getWorkspaceRoot,
|
||||
resolveWorkspacePath,
|
||||
resolveWorkspacePathForRoot,
|
||||
isExcluded,
|
||||
isBinary,
|
||||
MAX_FILE_SIZE,
|
||||
|
|
@ -29,6 +35,9 @@ import {
|
|||
restoreEntry,
|
||||
trashEntry,
|
||||
} from '../lib/file-ops.js';
|
||||
import { InvalidAgentIdError, resolveAgentWorkspace } from '../lib/agent-workspace.js';
|
||||
import { isWorkspaceLocal } from '../lib/workspace-detect.js';
|
||||
import { gatewayFilesList, gatewayFilesGet, gatewayFilesSet } from '../lib/gateway-rpc.js';
|
||||
|
||||
const app = new Hono();
|
||||
|
||||
|
|
@@ -44,12 +53,45 @@ interface TreeEntry {
children?: TreeEntry[] | null; // null = not loaded, [] = empty dir
}

interface ScopedWorkspace {
agentId: string;
workspaceRoot: string;
isCustomWorkspace: boolean;
}

// ── Helpers ──────────────────────────────────────────────────────────

function resolveScopedWorkspace(agentId?: string): ScopedWorkspace {
const customRoot = (config.fileBrowserRoot || '').trim();
if (customRoot) {
return {
agentId: 'main',
workspaceRoot: getWorkspaceRoot(),
isCustomWorkspace: true,
};
}

const workspace = resolveAgentWorkspace(agentId);
return {
agentId: workspace.agentId,
workspaceRoot: workspace.workspaceRoot,
isCustomWorkspace: false,
};
}

function handleAgentWorkspaceError(c: Context, err: unknown) {
if (err instanceof InvalidAgentIdError) {
return c.json({ ok: false, error: err.message }, 400);
}
const message = err instanceof Error ? err.message : 'Invalid workspace request';
return c.json({ ok: false, error: message }, 500);
}

async function listDirectory(
dirPath: string,
basePath: string,
depth: number,
showHidden: boolean,
): Promise<TreeEntry[]> {
const entries: TreeEntry[] = [];
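// Illustrative sketch of the per-route pattern used by the handlers in this file: resolve the
// scoped workspace from the agentId query parameter, map failures through
// handleAgentWorkspaceError, then operate on workspace.workspaceRoot. The '/api/files/example'
// route is hypothetical, not part of the diff.
app.get('/api/files/example', async (c) => {
  let workspace: ScopedWorkspace;
  try {
    workspace = resolveScopedWorkspace(c.req.query('agentId'));
  } catch (err) {
    return handleAgentWorkspaceError(c, err); // 400 for invalid agent ids, 500 otherwise
  }
  return c.json({ ok: true, rootPath: workspace.workspaceRoot });
});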
@@ -74,8 +116,8 @@ async function listDirectory(
if (inTrash) {
// Internal metadata file for restore bookkeeping.
if (item.name === '.index.json') continue;
// FILE_BROWSER_ROOT: Show all files when custom root is set, but always hide .trash folder
} else if (!config.fileBrowserRoot && item.name.startsWith('.') && item.name !== '.nerveignore' && item.name !== '.trash') {
// Hide dotfiles unless showHidden=true, except for .nerveignore and .trash; custom roots still hide .trash.
} else if (!showHidden && item.name.startsWith('.') && item.name !== '.nerveignore' && item.name !== '.trash') {
continue;
} else if (config.fileBrowserRoot && item.name === '.trash') {
continue;

@@ -90,7 +132,7 @@
path: relativePath,
type: 'directory',
children: depth > 1
? await listDirectory(fullPath, relativePath, depth - 1)
? await listDirectory(fullPath, relativePath, depth - 1, showHidden)
: null,
});
} else if (item.isFile()) {
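// Illustrative summary of the dotfile filtering above, written as a standalone predicate
// (an assumption drawn from the new showHidden branch; workspaces with a custom
// FILE_BROWSER_ROOT additionally hide the .trash folder):
function isListedByDefault(name: string, showHidden: boolean): boolean {
  if (showHidden) return true;
  if (!name.startsWith('.')) return true;
  return name === '.nerveignore' || name === '.trash'; // the only dotfiles shown by default
}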
@@ -121,45 +163,216 @@ function handleFileOpError(c: Context, err: unknown) {
return c.json({ ok: false, error: message }, 500);
}

/** Convert gateway file list to TreeEntry format for the UI. */
function gatewayFilesToTree(
files: Awaited<ReturnType<typeof gatewayFilesList>>,
showHidden: boolean,
): TreeEntry[] {
return files
.filter((f) => !f.missing)
.filter((f) => showHidden || !f.name.startsWith('.') || f.name === '.nerveignore' || f.name === '.trash')
.map((f) => ({
name: f.name,
path: f.name,
type: 'file' as const,
size: f.size,
mtime: f.updatedAtMs,
}));
}

function normalizeWorkspaceLookupPath(input: string, workspaceRoots: string[] = []): string {
const trimmed = input.trim();
if (trimmed === '/workspace' || trimmed === '/workspace/') {
return '.';
}

if (trimmed.startsWith('/workspace/')) {
return trimmed.slice('/workspace/'.length);
}

const normalizedWorkspaceRoots = workspaceRoots
.map((root) => getWorkspaceRoot(root).split(path.sep).join('/').replace(/\/+$/, ''))
.filter((root, index, array) => Boolean(root) && array.indexOf(root) === index);

for (const normalizedWorkspaceRoot of normalizedWorkspaceRoots) {
if (trimmed === normalizedWorkspaceRoot || trimmed === `${normalizedWorkspaceRoot}/`) {
return '.';
}

if (trimmed.startsWith(`${normalizedWorkspaceRoot}/`)) {
return trimmed.slice(normalizedWorkspaceRoot.length + 1);
}
}

return trimmed;
}

async function getWorkspaceLookupRoots(workspaceRoot: string): Promise<string[]> {
const root = getWorkspaceRoot(workspaceRoot);
const realRoot = await fs.realpath(root).catch(() => root);
return realRoot === root ? [root] : [root, realRoot];
}
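// Illustrative expectations for normalizeWorkspaceLookupPath (file names and the workspace
// root value are assumptions):
//   normalizeWorkspaceLookupPath('/workspace/notes/plan.md')  → 'notes/plan.md'
//   normalizeWorkspaceLookupPath('/workspace')                → '.'
//   normalizeWorkspaceLookupPath('/home/user/.openclaw/workspace/a.md', ['/home/user/.openclaw/workspace']) → 'a.md'
//   normalizeWorkspaceLookupPath('plain/relative.md')         → 'plain/relative.md'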
// ── GET /api/files/tree ──────────────────────────────────────────────
|
||||
|
||||
app.get('/api/files/tree', async (c) => {
|
||||
const root = getWorkspaceRoot();
|
||||
const subPath = c.req.query('path') || '';
|
||||
const depth = Math.min(Math.max(Number(c.req.query('depth')) || 1, 1), 5);
|
||||
|
||||
// Resolve the target directory
|
||||
let targetDir: string;
|
||||
if (subPath) {
|
||||
const resolved = await resolveWorkspacePath(subPath);
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid path' }, 400);
|
||||
}
|
||||
targetDir = resolved;
|
||||
|
||||
// Ensure it's a directory
|
||||
try {
|
||||
const stat = await fs.stat(targetDir);
|
||||
if (!stat.isDirectory()) {
|
||||
return c.json({ ok: false, error: 'Not a directory' }, 400);
|
||||
}
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'Directory not found' }, 404);
|
||||
}
|
||||
} else {
|
||||
targetDir = root;
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
workspace = resolveScopedWorkspace(c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
const entries = await listDirectory(targetDir, subPath, depth);
|
||||
const root = workspace.workspaceRoot;
|
||||
const subPath = c.req.query('path') || '';
|
||||
const depth = Math.min(Math.max(Number(c.req.query('depth')) || 1, 1), 5);
|
||||
const showHidden = c.req.query('showHidden') === 'true';
|
||||
|
||||
return c.json({
|
||||
ok: true,
|
||||
root: subPath || '.',
|
||||
entries,
|
||||
workspaceInfo: {
|
||||
isCustomWorkspace: !!config.fileBrowserRoot,
|
||||
rootPath: getWorkspaceRoot(),
|
||||
// Check if workspace is local
|
||||
const isLocal = await isWorkspaceLocal(root);
|
||||
|
||||
if (isLocal) {
|
||||
// Resolve the target directory
|
||||
let targetDir: string;
|
||||
if (subPath) {
|
||||
const resolved = await resolveWorkspacePathForRoot(root, subPath);
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid path' }, 400);
|
||||
}
|
||||
targetDir = resolved;
|
||||
|
||||
// Ensure it's a directory
|
||||
try {
|
||||
const stat = await fs.stat(targetDir);
|
||||
if (!stat.isDirectory()) {
|
||||
return c.json({ ok: false, error: 'Not a directory' }, 400);
|
||||
}
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'Directory not found' }, 404);
|
||||
}
|
||||
} else {
|
||||
targetDir = root;
|
||||
}
|
||||
|
||||
const entries = await listDirectory(targetDir, subPath, depth, showHidden);
|
||||
|
||||
return c.json({
|
||||
ok: true,
|
||||
root: subPath || '.',
|
||||
entries,
|
||||
workspaceInfo: {
|
||||
isCustomWorkspace: workspace.isCustomWorkspace,
|
||||
rootPath: root,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Remote workspace — gateway fallback (top-level only)
|
||||
if (subPath) {
|
||||
// Gateway only supports top-level files
|
||||
return c.json({
|
||||
ok: true,
|
||||
root: subPath,
|
||||
entries: [],
|
||||
remoteWorkspace: true,
|
||||
workspaceInfo: {
|
||||
isCustomWorkspace: workspace.isCustomWorkspace,
|
||||
rootPath: root,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const remoteFiles = await gatewayFilesList(workspace.agentId);
|
||||
const entries = gatewayFilesToTree(remoteFiles, showHidden);
|
||||
return c.json({
|
||||
ok: true,
|
||||
root: '.',
|
||||
entries,
|
||||
remoteWorkspace: true,
|
||||
workspaceInfo: {
|
||||
isCustomWorkspace: workspace.isCustomWorkspace,
|
||||
rootPath: root,
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
console.warn('[file-browser] Gateway tree fallback failed:', (err as Error).message);
|
||||
return c.json({
|
||||
ok: true,
|
||||
root: '.',
|
||||
entries: [],
|
||||
remoteWorkspace: true,
|
||||
workspaceInfo: {
|
||||
isCustomWorkspace: workspace.isCustomWorkspace,
|
||||
rootPath: root,
|
||||
},
|
||||
});
|
||||
}
|
||||
});
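// Shape of the gateway-RPC fallback response returned above for a remote workspace
// (values are illustrative):
// {
//   ok: true,
//   root: '.',
//   entries: [{ name: 'MEMORY.md', path: 'MEMORY.md', type: 'file', size: 1024, mtime: 1700000000000 }],
//   remoteWorkspace: true,
//   workspaceInfo: { isCustomWorkspace: false, rootPath: '/remote/agent/workspace' }
// }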
|
||||
|
||||
// ── GET /api/files/resolve ───────────────────────────────────────────
|
||||
|
||||
app.get('/api/files/resolve', async (c) => {
|
||||
const targetPath = c.req.query('path');
|
||||
const relativeTo = c.req.query('relativeTo');
|
||||
if (!targetPath) {
|
||||
return c.json({ ok: false, error: 'Missing path parameter' }, 400);
|
||||
}
|
||||
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
workspace = resolveScopedWorkspace(c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
if (!(await isWorkspaceLocal(workspace.workspaceRoot))) {
|
||||
return c.json({ ok: false, error: 'Not supported for remote workspaces', code: 'REMOTE_WORKSPACE' }, 501);
|
||||
}
|
||||
|
||||
const workspaceLookupRoots = await getWorkspaceLookupRoots(workspace.workspaceRoot);
|
||||
const rawTargetPath = targetPath.trim().replace(/\\/g, '/');
|
||||
const normalizedTargetPath = normalizeWorkspaceLookupPath(rawTargetPath, workspaceLookupRoots);
|
||||
const workspaceRelativePath = (() => {
|
||||
if (!relativeTo) return normalizedTargetPath;
|
||||
if (normalizedTargetPath === '.') return '.';
|
||||
if (normalizedTargetPath !== rawTargetPath) return normalizedTargetPath;
|
||||
if (rawTargetPath.startsWith('/')) return rawTargetPath.replace(/^\/+/, '');
|
||||
|
||||
const normalizedRelativeTo = normalizeWorkspaceLookupPath(
|
||||
relativeTo.replace(/\\/g, '/'),
|
||||
workspaceLookupRoots,
|
||||
).replace(/^\/+/, '');
|
||||
const relativeDir = path.posix.dirname(normalizedRelativeTo);
|
||||
return path.posix.normalize(path.posix.join(relativeDir === '.' ? '' : relativeDir, normalizedTargetPath));
|
||||
})();
|
||||
|
||||
const resolved = await resolveWorkspacePathForRoot(
|
||||
workspace.workspaceRoot,
|
||||
workspaceRelativePath,
|
||||
{ allowNonExistent: true },
|
||||
);
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
let stat;
|
||||
try {
|
||||
stat = await fs.stat(resolved);
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'Path not found' }, 404);
|
||||
}
|
||||
|
||||
const relative = path.relative(workspace.workspaceRoot, resolved).split(path.sep).join('/');
|
||||
if (!relative || relative === '.') {
|
||||
return c.json({ ok: false, error: 'Path not found' }, 404);
|
||||
}
|
||||
|
||||
return c.json({
|
||||
ok: true,
|
||||
path: relative,
|
||||
type: stat.isDirectory() ? 'directory' : 'file',
|
||||
binary: stat.isFile() ? isBinary(path.basename(resolved)) : false,
|
||||
});
|
||||
});
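// Illustrative resolve request (file names are assumptions): a target given relative to another
// workspace file is joined against that file's directory before validation.
//   GET /api/files/resolve?agentId=research&path=./notes.md&relativeTo=archive/index.md
//   → { ok: true, path: 'archive/notes.md', type: 'file', binary: false }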
|
||||
|
||||
|
|
@ -171,49 +384,85 @@ app.get('/api/files/read', async (c) => {
|
|||
return c.json({ ok: false, error: 'Missing path parameter' }, 400);
|
||||
}
|
||||
|
||||
const resolved = await resolveWorkspacePath(filePath);
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
workspace = resolveScopedWorkspace(c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
// Check if binary
|
||||
if (isBinary(path.basename(resolved))) {
|
||||
// Note: Write endpoint uses config.workspaceRemote instead to allow bootstrapping new workspaces
|
||||
const isLocal = await isWorkspaceLocal(workspace.workspaceRoot);
|
||||
|
||||
if (isLocal) {
|
||||
const resolved = await resolveWorkspacePathForRoot(workspace.workspaceRoot, filePath);
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
// Check if binary
|
||||
if (isBinary(path.basename(resolved))) {
|
||||
return c.json({ ok: false, error: 'Binary file', binary: true }, 415);
|
||||
}
|
||||
|
||||
// Stat the file
|
||||
let stat;
|
||||
try {
|
||||
stat = await fs.stat(resolved);
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'File not found' }, 404);
|
||||
}
|
||||
|
||||
if (!stat.isFile()) {
|
||||
return c.json({ ok: false, error: 'Not a file' }, 400);
|
||||
}
|
||||
|
||||
if (stat.size > MAX_FILE_SIZE) {
|
||||
return c.json({ ok: false, error: `File too large (${(stat.size / 1024).toFixed(0)}KB, max 1MB)` }, 413);
|
||||
}
|
||||
|
||||
try {
|
||||
const content = await fs.readFile(resolved, 'utf-8');
|
||||
return c.json({
|
||||
ok: true,
|
||||
content,
|
||||
size: stat.size,
|
||||
mtime: Math.floor(stat.mtimeMs),
|
||||
});
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'Failed to read file' }, 500);
|
||||
}
|
||||
}
|
||||
|
||||
// Remote workspace fallback — only top-level files
|
||||
const basename = path.basename(filePath);
|
||||
if (filePath !== basename) {
|
||||
// Subdirectory path — not supported via gateway
|
||||
return c.json({ ok: false, error: 'File not found', remoteWorkspace: true }, 404);
|
||||
}
|
||||
|
||||
if (isBinary(basename)) {
|
||||
return c.json({ ok: false, error: 'Binary file', binary: true }, 415);
|
||||
}
|
||||
|
||||
// Stat the file
|
||||
let stat;
|
||||
try {
|
||||
stat = await fs.stat(resolved);
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'File not found' }, 404);
|
||||
}
|
||||
|
||||
if (!stat.isFile()) {
|
||||
return c.json({ ok: false, error: 'Not a file' }, 400);
|
||||
}
|
||||
|
||||
if (stat.size > MAX_FILE_SIZE) {
|
||||
return c.json({ ok: false, error: `File too large (${(stat.size / 1024).toFixed(0)}KB, max 1MB)` }, 413);
|
||||
}
|
||||
|
||||
try {
|
||||
const content = await fs.readFile(resolved, 'utf-8');
|
||||
const file = await gatewayFilesGet(workspace.agentId, basename);
|
||||
if (file) {
|
||||
return c.json({
|
||||
ok: true,
|
||||
content,
|
||||
size: stat.size,
|
||||
mtime: Math.floor(stat.mtimeMs),
|
||||
content: file.content,
|
||||
size: file.size,
|
||||
mtime: file.updatedAtMs,
|
||||
remoteWorkspace: true,
|
||||
});
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'Failed to read file' }, 500);
|
||||
}
|
||||
|
||||
return c.json({ ok: false, error: 'File not found', remoteWorkspace: true }, 404);
|
||||
});
|
||||
|
||||
// ── PUT /api/files/write ─────────────────────────────────────────────
|
||||
|
||||
app.put('/api/files/write', async (c) => {
|
||||
let body: { path?: string; content?: string; expectedMtime?: number };
|
||||
let body: { path?: string; content?: string; expectedMtime?: number; agentId?: string };
|
||||
try {
|
||||
body = await c.req.json();
|
||||
} catch {
|
||||
|
|
@ -232,52 +481,100 @@ app.put('/api/files/write', async (c) => {
|
|||
return c.json({ ok: false, error: 'Content too large (max 1MB)' }, 413);
|
||||
}
|
||||
|
||||
const resolved = await resolveWorkspacePath(filePath, { allowNonExistent: true });
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
workspace = resolveScopedWorkspace(body.agentId ?? c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
if (isBinary(path.basename(resolved))) {
|
||||
return c.json({ ok: false, error: 'Cannot write binary files' }, 415);
|
||||
}
|
||||
// For writes, treat workspace as local unless explicitly forced remote.
|
||||
// This allows bootstrapping new agent workspaces (directory doesn't exist yet).
|
||||
const isLocal = !config.workspaceRemote;
|
||||
|
||||
// Conflict detection: check mtime if expectedMtime provided
|
||||
if (typeof expectedMtime === 'number') {
|
||||
try {
|
||||
const stat = await fs.stat(resolved);
|
||||
const currentMtime = Math.floor(stat.mtimeMs);
|
||||
if (currentMtime !== expectedMtime) {
|
||||
return c.json({
|
||||
ok: false,
|
||||
error: 'File was modified since you loaded it',
|
||||
currentMtime,
|
||||
}, 409);
|
||||
if (isLocal) {
|
||||
const resolved = await resolveWorkspacePathForRoot(workspace.workspaceRoot, filePath, { allowNonExistent: true });
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
if (isBinary(path.basename(resolved))) {
|
||||
return c.json({ ok: false, error: 'Cannot write binary files' }, 415);
|
||||
}
|
||||
|
||||
// Conflict detection: check mtime if expectedMtime provided
|
||||
if (typeof expectedMtime === 'number') {
|
||||
try {
|
||||
const stat = await fs.stat(resolved);
|
||||
const currentMtime = Math.floor(stat.mtimeMs);
|
||||
if (currentMtime !== expectedMtime) {
|
||||
return c.json({
|
||||
ok: false,
|
||||
error: 'File was modified since you loaded it',
|
||||
currentMtime,
|
||||
}, 409);
|
||||
}
|
||||
} catch {
|
||||
// File doesn't exist yet — no conflict possible
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure parent directory exists
|
||||
await fs.mkdir(path.dirname(resolved), { recursive: true });
|
||||
|
||||
// Write the file
|
||||
try {
|
||||
await fs.writeFile(resolved, content, 'utf-8');
|
||||
const stat = await fs.stat(resolved);
|
||||
return c.json({
|
||||
ok: true,
|
||||
mtime: Math.floor(stat.mtimeMs),
|
||||
});
|
||||
} catch {
|
||||
// File doesn't exist yet — no conflict possible
|
||||
return c.json({ ok: false, error: 'Failed to write file' }, 500);
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure parent directory exists
|
||||
await fs.mkdir(path.dirname(resolved), { recursive: true });
|
||||
|
||||
// Write the file
|
||||
try {
|
||||
await fs.writeFile(resolved, content, 'utf-8');
|
||||
const stat = await fs.stat(resolved);
|
||||
// Remote workspace fallback — only top-level files
|
||||
const basename = path.basename(filePath);
|
||||
if (filePath !== basename) {
|
||||
return c.json({
|
||||
ok: true,
|
||||
mtime: Math.floor(stat.mtimeMs),
|
||||
});
|
||||
} catch {
|
||||
ok: false,
|
||||
error: 'Not supported for remote workspaces',
|
||||
code: 'REMOTE_WORKSPACE',
|
||||
}, 501);
|
||||
}
|
||||
|
||||
if (isBinary(basename)) {
|
||||
return c.json({ ok: false, error: 'Cannot write binary files' }, 415);
|
||||
}
|
||||
|
||||
try {
|
||||
await gatewayFilesSet(workspace.agentId, basename, content);
|
||||
return c.json({ ok: true, remoteWorkspace: true, mtime: Date.now() });
|
||||
} catch (err) {
|
||||
console.error('[file-browser] Gateway write fallback failed:', (err as Error).message);
|
||||
return c.json({ ok: false, error: 'Failed to write file' }, 500);
|
||||
}
|
||||
});
|
||||
|
||||
// ── Mutation endpoints — 501 for remote workspaces ───────────────────
|
||||
|
||||
async function requireLocalWorkspace(c: Context, workspace: ScopedWorkspace): Promise<Response | null> {
|
||||
if (!(await isWorkspaceLocal(workspace.workspaceRoot))) {
|
||||
return c.json({
|
||||
ok: false,
|
||||
error: 'Not supported for remote workspaces',
|
||||
code: 'REMOTE_WORKSPACE',
|
||||
}, 501);
|
||||
}
|
||||
return null;
|
||||
}
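// Client-side sketch (illustrative): each mutation endpoint below answers 501 with
// code 'REMOTE_WORKSPACE' when the workspace is not locally accessible.
const renameRes = await fetch('/api/files/rename', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ agentId: 'research', path: 'draft.md', newName: 'final.md' }),
});
if (renameRes.status === 501) {
  const { code } = (await renameRes.json()) as { ok: boolean; code?: string };
  if (code === 'REMOTE_WORKSPACE') {
    // Hide rename/move/trash/restore affordances for this workspace in the UI.
  }
}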
|
||||
|
||||
// ── POST /api/files/rename ────────────────────────────────────────────
|
||||
|
||||
app.post('/api/files/rename', async (c) => {
|
||||
let body: { path?: string; newName?: string };
|
||||
let body: { path?: string; newName?: string; agentId?: string };
|
||||
try {
|
||||
body = await c.req.json();
|
||||
} catch {
|
||||
|
|
@ -291,8 +588,27 @@ app.post('/api/files/rename', async (c) => {
|
|||
return c.json({ ok: false, error: 'Missing newName' }, 400);
|
||||
}
|
||||
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
const result = await renameEntry({ path: body.path, newName: body.newName });
|
||||
workspace = resolveScopedWorkspace(body.agentId ?? c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
const remoteBlock = await requireLocalWorkspace(c, workspace);
|
||||
if (remoteBlock) return remoteBlock;
|
||||
|
||||
const sourceAbs = await resolveWorkspacePathForRoot(workspace.workspaceRoot, body.path);
|
||||
if (!sourceAbs) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await renameEntry({
|
||||
workspaceRoot: workspace.workspaceRoot,
|
||||
sourceAbs,
|
||||
newName: body.newName,
|
||||
});
|
||||
return c.json({ ok: true, ...result });
|
||||
} catch (err) {
|
||||
return handleFileOpError(c, err);
|
||||
|
|
@ -302,7 +618,7 @@ app.post('/api/files/rename', async (c) => {
|
|||
// ── POST /api/files/move ──────────────────────────────────────────────
|
||||
|
||||
app.post('/api/files/move', async (c) => {
|
||||
let body: { sourcePath?: string; targetDirPath?: string };
|
||||
let body: { sourcePath?: string; targetDirPath?: string; agentId?: string };
|
||||
try {
|
||||
body = await c.req.json();
|
||||
} catch {
|
||||
|
|
@ -316,10 +632,33 @@ app.post('/api/files/move', async (c) => {
|
|||
return c.json({ ok: false, error: 'Missing targetDirPath' }, 400);
|
||||
}
|
||||
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
workspace = resolveScopedWorkspace(body.agentId ?? c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
const remoteBlock = await requireLocalWorkspace(c, workspace);
|
||||
if (remoteBlock) return remoteBlock;
|
||||
|
||||
const sourceAbs = await resolveWorkspacePathForRoot(workspace.workspaceRoot, body.sourcePath);
|
||||
if (!sourceAbs) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
const targetDirAbs = body.targetDirPath
|
||||
? await resolveWorkspacePathForRoot(workspace.workspaceRoot, body.targetDirPath)
|
||||
: workspace.workspaceRoot;
|
||||
if (!targetDirAbs) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await moveEntry({
|
||||
sourcePath: body.sourcePath,
|
||||
targetDirPath: body.targetDirPath,
|
||||
workspaceRoot: workspace.workspaceRoot,
|
||||
sourceAbs,
|
||||
targetDirAbs,
|
||||
});
|
||||
return c.json({ ok: true, ...result });
|
||||
} catch (err) {
|
||||
|
|
@ -330,7 +669,7 @@ app.post('/api/files/move', async (c) => {
|
|||
// ── POST /api/files/trash ─────────────────────────────────────────────
|
||||
|
||||
app.post('/api/files/trash', async (c) => {
|
||||
let body: { path?: string };
|
||||
let body: { path?: string; agentId?: string };
|
||||
try {
|
||||
body = await c.req.json();
|
||||
} catch {
|
||||
|
|
@ -341,31 +680,48 @@ app.post('/api/files/trash', async (c) => {
|
|||
return c.json({ ok: false, error: 'Missing path' }, 400);
|
||||
}
|
||||
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
workspace = resolveScopedWorkspace(body.agentId ?? c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
const remoteBlock = await requireLocalWorkspace(c, workspace);
|
||||
if (remoteBlock) return remoteBlock;
|
||||
|
||||
try {
|
||||
// Custom directory browser root uses permanent deletion (no trash)
|
||||
if (config.fileBrowserRoot) {
|
||||
if (workspace.isCustomWorkspace) {
|
||||
const requestedPath = body.path.trim();
|
||||
if (requestedPath === '.' || requestedPath === './') {
|
||||
return c.json({ ok: false, error: 'Deleting workspace root is not allowed' }, 400);
|
||||
}
|
||||
|
||||
const resolved = await resolveWorkspacePath(requestedPath);
|
||||
|
||||
const resolved = await resolveWorkspacePathForRoot(workspace.workspaceRoot, requestedPath);
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
const rootRealPath = await fs.realpath(getWorkspaceRoot()).catch(() => getWorkspaceRoot());
|
||||
const rootRealPath = await fs.realpath(workspace.workspaceRoot).catch(() => workspace.workspaceRoot);
|
||||
if (resolved === rootRealPath) {
|
||||
return c.json({ ok: false, error: 'Deleting workspace root is not allowed' }, 400);
|
||||
}
|
||||
|
||||
await fs.rm(resolved, { recursive: true, force: true });
|
||||
return c.json({ ok: true, from: body.path, to: '' });
|
||||
} else {
|
||||
// Default workspace: use trash
|
||||
const result = await trashEntry({ path: body.path });
|
||||
return c.json({ ok: true, ...result });
|
||||
}
|
||||
|
||||
const sourceAbs = await resolveWorkspacePathForRoot(workspace.workspaceRoot, body.path);
|
||||
if (!sourceAbs) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
const result = await trashEntry({
|
||||
workspaceRoot: workspace.workspaceRoot,
|
||||
sourceAbs,
|
||||
});
|
||||
return c.json({ ok: true, ...result });
|
||||
} catch (err) {
|
||||
return handleFileOpError(c, err);
|
||||
}
|
||||
|
|
@ -374,7 +730,7 @@ app.post('/api/files/trash', async (c) => {
|
|||
// ── POST /api/files/restore ───────────────────────────────────────────
|
||||
|
||||
app.post('/api/files/restore', async (c) => {
|
||||
let body: { path?: string };
|
||||
let body: { path?: string; agentId?: string };
|
||||
try {
|
||||
body = await c.req.json();
|
||||
} catch {
|
||||
|
|
@ -385,8 +741,26 @@ app.post('/api/files/restore', async (c) => {
|
|||
return c.json({ ok: false, error: 'Missing path' }, 400);
|
||||
}
|
||||
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
const result = await restoreEntry({ path: body.path });
|
||||
workspace = resolveScopedWorkspace(body.agentId ?? c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
const remoteBlock = await requireLocalWorkspace(c, workspace);
|
||||
if (remoteBlock) return remoteBlock;
|
||||
|
||||
const sourceAbs = await resolveWorkspacePathForRoot(workspace.workspaceRoot, body.path);
|
||||
if (!sourceAbs) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await restoreEntry({
|
||||
workspaceRoot: workspace.workspaceRoot,
|
||||
sourceAbs,
|
||||
});
|
||||
return c.json({ ok: true, ...result });
|
||||
} catch (err) {
|
||||
return handleFileOpError(c, err);
|
||||
|
|
@ -406,6 +780,7 @@ const MIME_TYPES: Record<string, string> = {
|
|||
'.avif': 'image/avif',
|
||||
'.svg': 'image/svg+xml',
|
||||
'.ico': 'image/x-icon',
|
||||
'.pdf': 'application/pdf',
|
||||
};
|
||||
|
||||
/** Check if a file is a supported image. */
|
||||
|
|
@ -419,7 +794,19 @@ app.get('/api/files/raw', async (c) => {
|
|||
return c.json({ ok: false, error: 'Missing path parameter' }, 400);
|
||||
}
|
||||
|
||||
const resolved = await resolveWorkspacePath(filePath);
|
||||
let workspace: ScopedWorkspace;
|
||||
try {
|
||||
workspace = resolveScopedWorkspace(c.req.query('agentId'));
|
||||
} catch (err) {
|
||||
return handleAgentWorkspaceError(c, err);
|
||||
}
|
||||
|
||||
// Raw/binary endpoints don't support gateway fallback
|
||||
if (!(await isWorkspaceLocal(workspace.workspaceRoot))) {
|
||||
return c.json({ ok: false, error: 'Binary files not available for remote workspaces', remoteWorkspace: true }, 404);
|
||||
}
|
||||
|
||||
const resolved = await resolveWorkspacePathForRoot(workspace.workspaceRoot, filePath);
|
||||
if (!resolved) {
|
||||
return c.json({ ok: false, error: 'Invalid or excluded path' }, 403);
|
||||
}
|
||||
|
|
@ -435,18 +822,94 @@ app.get('/api/files/raw', async (c) => {
|
|||
if (!stat.isFile()) {
|
||||
return c.json({ ok: false, error: 'Not a file' }, 400);
|
||||
}
|
||||
// Cap at 10MB for images
|
||||
if (stat.size > 10_485_760) {
|
||||
return c.json({ ok: false, error: 'File too large (max 10MB)' }, 413);
|
||||
// Cap at 10MB for images, 50 MB for PDFs (can adjust as needed)
|
||||
const maxSize = ext === '.pdf' ? 52_428_800 : 10_485_760;
|
||||
if (stat.size > maxSize) {
|
||||
return c.json({ ok: false, error: `File too large (max ${ext === '.pdf' ? '50MB' : '10MB'})` }, 413);
|
||||
}
|
||||
|
||||
const buffer = await fs.readFile(resolved);
|
||||
return new Response(buffer, {
|
||||
headers: {
|
||||
'Content-Type': mime,
|
||||
'Content-Length': String(stat.size),
|
||||
'Cache-Control': 'no-cache',
|
||||
},
|
||||
// Parse Range header for PDFs to support partial content requests
|
||||
let start = 0;
|
||||
let end = stat.size - 1;
|
||||
let statusCode = 200;
|
||||
let rangeHeader: string | undefined;
|
||||
|
||||
const rangeHeaderValue = c.req.header('range');
|
||||
if (rangeHeaderValue && ext === '.pdf') {
|
||||
// Match both explicit ranges (bytes=100-200) and suffix ranges (bytes=-500)
|
||||
const rangeMatch = rangeHeaderValue.match(/^bytes=(\d*)-(\d*)$/);
|
||||
if (rangeMatch) {
|
||||
const hasSuffix = rangeMatch[1] === '';
|
||||
let rangeStart: number;
|
||||
let rangeEnd: number;
|
||||
let isValid = false;
|
||||
|
||||
if (hasSuffix) {
|
||||
// Suffix range: bytes=-500 means last 500 bytes
|
||||
const suffixLen = parseInt(rangeMatch[2], 10);
|
||||
rangeStart = Math.max(0, stat.size - suffixLen);
|
||||
rangeEnd = stat.size - 1;
|
||||
isValid = suffixLen > 0;
|
||||
} else {
|
||||
rangeStart = parseInt(rangeMatch[1], 10);
|
||||
rangeEnd = rangeMatch[2] ? parseInt(rangeMatch[2], 10) : stat.size - 1;
|
||||
isValid = rangeStart >= 0 && rangeStart <= rangeEnd && rangeEnd < stat.size;
|
||||
}
|
||||
|
||||
// Validate and apply range
|
||||
if (isValid) {
|
||||
start = rangeStart;
|
||||
end = rangeEnd;
|
||||
statusCode = 206;
|
||||
rangeHeader = `bytes ${start}-${end}/${stat.size}`;
|
||||
} else {
|
||||
// Invalid range
|
||||
return new Response('', {
|
||||
status: 416,
|
||||
headers: {
|
||||
'Content-Range': `bytes */${stat.size}`,
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
}
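// Example exchange (illustrative; the file name and sizes are assumptions): a PDF viewer
// requesting the first 64 KiB of a 1 MiB file.
//   GET /api/files/raw?path=report.pdf   with header   Range: bytes=0-65535
//   → 206 Partial Content
//     Content-Type: application/pdf
//     Content-Length: 65536
//     Content-Range: bytes 0-65535/1048576
//     Accept-Ranges: bytes
// A suffix request (Range: bytes=-500) returns the last 500 bytes; an unsatisfiable range
// yields 416 with Content-Range: bytes */1048576.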
|
||||
|
||||
// Stream file with optional range support
|
||||
const fileStream = fsSync.createReadStream(resolved, { start, end });
|
||||
fileStream.on('error', (err) => {
|
||||
console.error(`[file-browser] Stream error for ${filePath}:`, err.message);
|
||||
});
|
||||
|
||||
// Add error listener to surface stream failures for easier debugging
|
||||
fileStream.on('error', (err) => {
|
||||
console.error('[file-browser] fileStream error:', {
|
||||
path: resolved,
|
||||
rangeStart: start,
|
||||
rangeEnd: end,
|
||||
error: (err as Error).message,
|
||||
});
|
||||
});
|
||||
|
||||
// Convert Node.js stream to Web Stream using Node's built-in conversion
|
||||
const webStream = Readable.toWeb(fileStream);
|
||||
|
||||
const responseHeaders: Record<string, string> = {
|
||||
'Content-Type': mime,
|
||||
'Content-Length': String(end - start + 1),
|
||||
'Cache-Control': 'no-cache',
|
||||
};
|
||||
|
||||
// Add Range headers for partial content responses
|
||||
if (ext === '.pdf') {
|
||||
responseHeaders['Accept-Ranges'] = 'bytes';
|
||||
if (rangeHeader) {
|
||||
responseHeaders['Content-Range'] = rangeHeader;
|
||||
}
|
||||
}
|
||||
|
||||
return new Response(webStream, {
|
||||
status: statusCode,
|
||||
headers: responseHeaders,
|
||||
});
|
||||
} catch {
|
||||
return c.json({ ok: false, error: 'Failed to read file' }, 500);
@ -3,6 +3,7 @@ import { describe, it, expect, vi, afterEach } from 'vitest';
|
|||
import { Hono } from 'hono';
|
||||
|
||||
let execFileImpl: (...args: unknown[]) => void;
|
||||
let readFileImpl: (...args: unknown[]) => Promise<string>;
|
||||
let invokeGatewayImpl: (tool: string, args: Record<string, unknown>) => unknown;
|
||||
|
||||
vi.mock('node:child_process', async (importOriginal) => {
|
||||
|
|
@ -11,6 +12,12 @@ vi.mock('node:child_process', async (importOriginal) => {
|
|||
return { ...mock, default: mock };
|
||||
});
|
||||
|
||||
vi.mock('node:fs/promises', async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import('node:fs/promises')>();
|
||||
const mock = { ...actual, readFile: (...args: unknown[]) => readFileImpl(...args) };
|
||||
return { ...mock, default: mock };
|
||||
});
|
||||
|
||||
vi.mock('../lib/config.js', () => ({
|
||||
config: {
|
||||
auth: false, port: 3000, host: '127.0.0.1', sslPort: 3443,
|
||||
|
|
@ -60,12 +67,26 @@ vi.mock('node:net', () => {
|
|||
return { Socket: MockSocket, default: { Socket: MockSocket } };
|
||||
});
|
||||
|
||||
const GOOD_MODELS = JSON.stringify({
|
||||
models: [
|
||||
{ key: 'anthropic/claude-opus-4', name: 'Claude Opus 4', available: true },
|
||||
{ key: 'openai/gpt-4o', name: 'GPT-4o', available: true },
|
||||
],
|
||||
});
|
||||
const OPENCLAW_CONFIG = {
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: 'zai/glm-4.7',
|
||||
fallbacks: [
|
||||
'openrouter/xiaomi/mimo-v2-pro',
|
||||
'zai/glm-4.5',
|
||||
'ollama/qwen2.5:7b-instruct-q5_K_M',
|
||||
],
|
||||
},
|
||||
models: {
|
||||
'zai/glm-4.7': { alias: 'glm-4.7' },
|
||||
'openrouter/xiaomi/mimo-v2-pro': { alias: 'mimo-v2-pro' },
|
||||
'zai/glm-4.5': { alias: 'glm-4.5' },
|
||||
'ollama/qwen2.5:7b-instruct-q5_K_M': { alias: 'qwen-local' },
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
import gatewayRoutes from './gateway.js';
|
||||
|
||||
|
|
@ -78,40 +99,340 @@ function buildApp() {
|
|||
describe('gateway routes', () => {
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
delete process.env.OPENCLAW_CONFIG_PATH;
|
||||
});
|
||||
|
||||
function setDefaults() {
|
||||
execFileImpl = (_bin: unknown, _args: unknown, _opts: unknown, cb: unknown) => {
|
||||
(cb as (err: null, stdout: string) => void)(null, GOOD_MODELS);
|
||||
(cb as (err: null, stdout: string) => void)(null, '');
|
||||
};
|
||||
readFileImpl = async () => JSON.stringify(OPENCLAW_CONFIG);
|
||||
invokeGatewayImpl = () => ({});
|
||||
}
|
||||
|
||||
describe('GET /api/gateway/models', () => {
|
||||
it('returns parsed model list', async () => {
|
||||
it('returns the configured primary model', async () => {
|
||||
setDefaults();
|
||||
process.env.OPENCLAW_CONFIG_PATH = '/tmp/openclaw.json';
|
||||
readFileImpl = async (path: unknown) => {
|
||||
expect(path).toBe('/tmp/openclaw.json');
|
||||
return JSON.stringify({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: 'zai/glm-4.7',
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
const app = buildApp();
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { models: Array<{ id: string; label: string; provider: string }> };
|
||||
expect(json.models.length).toBeGreaterThanOrEqual(1);
|
||||
for (const m of json.models) {
|
||||
expect(m).toHaveProperty('id');
|
||||
expect(m).toHaveProperty('label');
|
||||
expect(m).toHaveProperty('provider');
|
||||
}
|
||||
expect(await res.json()).toEqual({
|
||||
models: [
|
||||
{
|
||||
id: 'zai/glm-4.7',
|
||||
label: 'glm-4.7',
|
||||
provider: 'zai',
|
||||
configured: true,
|
||||
role: 'primary',
|
||||
},
|
||||
],
|
||||
error: null,
|
||||
source: 'config',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns empty array when openclaw binary fails', async () => {
|
||||
execFileImpl = (_bin: unknown, _args: unknown, _opts: unknown, cb: unknown) => {
|
||||
(cb as (err: Error, stdout: string) => void)(new Error('not found'), '');
|
||||
};
|
||||
invokeGatewayImpl = () => ({});
|
||||
it('returns primary plus fallbacks in declared order', async () => {
|
||||
setDefaults();
|
||||
readFileImpl = async () => JSON.stringify({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: 'zai/glm-4.7',
|
||||
fallbacks: [
|
||||
'openrouter/xiaomi/mimo-v2-pro',
|
||||
'zai/glm-4.5',
|
||||
],
|
||||
},
|
||||
models: {
|
||||
'zai/glm-4.7': { alias: 'glm-4.7' },
|
||||
'openrouter/xiaomi/mimo-v2-pro': { alias: 'mimo-v2-pro' },
|
||||
'zai/glm-4.5': { alias: 'glm-4.5' },
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const app = buildApp();
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
expect(await res.json()).toEqual({
|
||||
models: [
|
||||
{
|
||||
id: 'zai/glm-4.7',
|
||||
label: 'glm-4.7',
|
||||
provider: 'zai',
|
||||
alias: 'glm-4.7',
|
||||
configured: true,
|
||||
role: 'primary',
|
||||
},
|
||||
{
|
||||
id: 'openrouter/xiaomi/mimo-v2-pro',
|
||||
label: 'mimo-v2-pro',
|
||||
provider: 'openrouter',
|
||||
alias: 'mimo-v2-pro',
|
||||
configured: true,
|
||||
role: 'fallback',
|
||||
},
|
||||
{
|
||||
id: 'zai/glm-4.5',
|
||||
label: 'glm-4.5',
|
||||
provider: 'zai',
|
||||
alias: 'glm-4.5',
|
||||
configured: true,
|
||||
role: 'fallback',
|
||||
},
|
||||
],
|
||||
error: null,
|
||||
source: 'config',
|
||||
});
|
||||
});
|
||||
|
||||
it('includes remaining allowlist entries after primary and fallbacks', async () => {
|
||||
setDefaults();
|
||||
readFileImpl = async () => JSON.stringify({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: 'zai/glm-4.7',
|
||||
fallbacks: [
|
||||
'openrouter/xiaomi/mimo-v2-pro',
|
||||
'zai/glm-4.5',
|
||||
'ollama/qwen2.5:7b-instruct-q5_K_M',
|
||||
],
|
||||
},
|
||||
models: {
|
||||
'zai/glm-4.7': { alias: 'glm-4.7' },
|
||||
'openrouter/xiaomi/mimo-v2-pro': { alias: 'mimo-v2-pro' },
|
||||
'zai/glm-4.5': { alias: 'glm-4.5' },
|
||||
'ollama/qwen2.5:7b-instruct-q5_K_M': { alias: 'qwen-local' },
|
||||
'anthropic/claude-sonnet-4': { alias: 'claude-sonnet-4' },
|
||||
'openai/gpt-4o-mini': { alias: 'gpt-4o-mini' },
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const app = buildApp();
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
expect(await res.json()).toEqual({
|
||||
models: [
|
||||
{
|
||||
id: 'zai/glm-4.7',
|
||||
label: 'glm-4.7',
|
||||
provider: 'zai',
|
||||
alias: 'glm-4.7',
|
||||
configured: true,
|
||||
role: 'primary',
|
||||
},
|
||||
{
|
||||
id: 'openrouter/xiaomi/mimo-v2-pro',
|
||||
label: 'mimo-v2-pro',
|
||||
provider: 'openrouter',
|
||||
alias: 'mimo-v2-pro',
|
||||
configured: true,
|
||||
role: 'fallback',
|
||||
},
|
||||
{
|
||||
id: 'zai/glm-4.5',
|
||||
label: 'glm-4.5',
|
||||
provider: 'zai',
|
||||
alias: 'glm-4.5',
|
||||
configured: true,
|
||||
role: 'fallback',
|
||||
},
|
||||
{
|
||||
id: 'ollama/qwen2.5:7b-instruct-q5_K_M',
|
||||
label: 'qwen-local',
|
||||
provider: 'ollama',
|
||||
alias: 'qwen-local',
|
||||
configured: true,
|
||||
role: 'fallback',
|
||||
},
|
||||
{
|
||||
id: 'anthropic/claude-sonnet-4',
|
||||
label: 'claude-sonnet-4',
|
||||
provider: 'anthropic',
|
||||
alias: 'claude-sonnet-4',
|
||||
configured: true,
|
||||
role: 'allowed',
|
||||
},
|
||||
{
|
||||
id: 'openai/gpt-4o-mini',
|
||||
label: 'gpt-4o-mini',
|
||||
provider: 'openai',
|
||||
alias: 'gpt-4o-mini',
|
||||
configured: true,
|
||||
role: 'allowed',
|
||||
},
|
||||
],
|
||||
error: null,
|
||||
source: 'config',
|
||||
});
|
||||
});
|
||||
|
||||
it('dedupes repeated model refs while preserving stable role order', async () => {
|
||||
setDefaults();
|
||||
readFileImpl = async () => JSON.stringify({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: 'zai/glm-4.7',
|
||||
fallbacks: [
|
||||
'zai/glm-4.7',
|
||||
'openrouter/xiaomi/mimo-v2-pro',
|
||||
'openrouter/xiaomi/mimo-v2-pro',
|
||||
],
|
||||
},
|
||||
models: {
|
||||
'zai/glm-4.7': { alias: 'glm-4.7' },
|
||||
'openrouter/xiaomi/mimo-v2-pro': { alias: 'mimo-v2-pro' },
|
||||
'anthropic/claude-sonnet-4': { alias: 'claude-sonnet-4' },
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const app = buildApp();
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
expect(await res.json()).toEqual({
|
||||
models: [
|
||||
{
|
||||
id: 'zai/glm-4.7',
|
||||
label: 'glm-4.7',
|
||||
provider: 'zai',
|
||||
alias: 'glm-4.7',
|
||||
configured: true,
|
||||
role: 'primary',
|
||||
},
|
||||
{
|
||||
id: 'openrouter/xiaomi/mimo-v2-pro',
|
||||
label: 'mimo-v2-pro',
|
||||
provider: 'openrouter',
|
||||
alias: 'mimo-v2-pro',
|
||||
configured: true,
|
||||
role: 'fallback',
|
||||
},
|
||||
{
|
||||
id: 'anthropic/claude-sonnet-4',
|
||||
label: 'claude-sonnet-4',
|
||||
provider: 'anthropic',
|
||||
alias: 'claude-sonnet-4',
|
||||
configured: true,
|
||||
role: 'allowed',
|
||||
},
|
||||
],
|
||||
error: null,
|
||||
source: 'config',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns an explicit error when the config has no configured models', async () => {
|
||||
setDefaults();
|
||||
readFileImpl = async () => JSON.stringify({
|
||||
agents: {
|
||||
defaults: {},
|
||||
},
|
||||
});
|
||||
|
||||
const app = buildApp();
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
expect(await res.json()).toEqual({
|
||||
models: [],
|
||||
error: 'No models configured in OpenClaw config.',
|
||||
source: 'config',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns configured models when the OpenClaw config uses JSON5 syntax', async () => {
|
||||
setDefaults();
|
||||
readFileImpl = async () => `
|
||||
{
|
||||
// OpenClaw config often includes comments and trailing commas
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: 'zai/glm-4.7',
|
||||
fallbacks: [
|
||||
'openrouter/xiaomi/mimo-v2-pro',
|
||||
],
|
||||
},
|
||||
models: {
|
||||
'zai/glm-4.7': { alias: 'glm-4.7' },
|
||||
'openrouter/xiaomi/mimo-v2-pro': { alias: 'mimo-v2-pro' },
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
`;
|
||||
|
||||
const app = buildApp();
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
expect(await res.json()).toEqual({
|
||||
models: [
|
||||
{
|
||||
id: 'zai/glm-4.7',
|
||||
label: 'glm-4.7',
|
||||
provider: 'zai',
|
||||
alias: 'glm-4.7',
|
||||
configured: true,
|
||||
role: 'primary',
|
||||
},
|
||||
{
|
||||
id: 'openrouter/xiaomi/mimo-v2-pro',
|
||||
label: 'mimo-v2-pro',
|
||||
provider: 'openrouter',
|
||||
alias: 'mimo-v2-pro',
|
||||
configured: true,
|
||||
role: 'fallback',
|
||||
},
|
||||
],
|
||||
error: null,
|
||||
source: 'config',
|
||||
});
|
||||
});
|
||||
|
||||
it('returns an explicit error when the config is unreadable', async () => {
|
||||
setDefaults();
|
||||
readFileImpl = async () => {
|
||||
throw new Error('ENOENT: no such file or directory');
|
||||
};
|
||||
|
||||
const app = buildApp();
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
expect(await res.json()).toEqual({
|
||||
models: [],
|
||||
error: 'Could not read OpenClaw config.',
|
||||
source: 'config',
|
||||
});
|
||||
});
|
||||
|
||||
it('uses a long enough timeout for model catalog fetches', async () => {
|
||||
vi.resetModules();
|
||||
vi.doMock('node:child_process', () => ({
|
||||
execFile: (...args: unknown[]) => execFileImpl(...args),
|
||||
}));
|
||||
vi.doMock('node:fs/promises', () => ({
|
||||
readFile: (...args: unknown[]) => readFileImpl(...args),
|
||||
}));
|
||||
vi.doMock('../lib/config.js', () => ({
|
||||
config: {
|
||||
auth: false, port: 3000, host: '127.0.0.1', sslPort: 3443,
|
||||
|
|
@ -131,13 +452,7 @@ describe('gateway routes', () => {
|
|||
}));
|
||||
|
||||
const mod = await import('./gateway.js');
|
||||
const app = new Hono();
|
||||
app.route('/', mod.default);
|
||||
|
||||
const res = await app.request('/api/gateway/models');
|
||||
expect(res.status).toBe(200);
|
||||
const json = (await res.json()) as { models: unknown[] };
|
||||
expect(json.models).toEqual([]);
|
||||
expect(mod.MODEL_LIST_TIMEOUT_MS).toBeGreaterThanOrEqual(15_000);
|
||||
});
|
||||
});
|
|
@@ -1,22 +1,24 @@
/**
* Gateway API Routes
*
* GET /api/gateway/models — Returns available models via `openclaw models list`.
* Respects allowlist if configured; falls back to all available.
* GET /api/gateway/models — Returns configured models from the active OpenClaw config.
* GET /api/gateway/session-info — Returns the current session's runtime info (model, thinking level).
* POST /api/gateway/session-patch — Change model/effort for a session via HTTP (reliable fallback).
* POST /api/gateway/restart — Restart the OpenClaw gateway service via `openclaw gateway restart`.
*
* Response (models): { models: Array<{ id: string; label: string; provider: string }> }
* Response (models): { models: Array<{ id: string; label: string; provider: string; configured: true; role: string }>, error: string | null, source: 'config' }
* Response (session-info): { model?: string; thinking?: string }
* Response (session-patch): { ok: boolean; model?: string; thinking?: string; error?: string }
* Response (restart): { ok: boolean; output: string }
*/

import { Hono } from 'hono';
import JSON5 from 'json5';
import { execFile } from 'node:child_process';
import { homedir } from 'node:os';
import { readFile } from 'node:fs/promises';
import { Socket } from 'node:net';
import { homedir } from 'node:os';
import path from 'node:path';
import { z } from 'zod';
import { invokeGatewayTool } from '../lib/gateway-client.js';
import { rateLimitGeneral, rateLimitRestart } from '../middleware/rate-limit.js';

@@ -26,6 +28,7 @@ import { config } from '../lib/config.js';
const app = new Hono();

const GATEWAY_TIMEOUT_MS = 8_000;
export const MODEL_LIST_TIMEOUT_MS = 15_000;
const SESSIONS_ACTIVE_MINUTES = 24 * 60;
const SESSIONS_LIMIT = 200;
|
||||
|
|
@@ -33,6 +36,9 @@ export interface GatewayModelInfo {
id: string;
label: string;
provider: string;
alias?: string;
configured: true;
role: 'primary' | 'fallback' | 'allowed';
}

interface GatewaySessionSummary {
|
|
@ -43,143 +49,134 @@ interface GatewaySessionSummary {
|
|||
thinkingLevel?: string;
|
||||
}
|
||||
|
||||
// ─── Model catalog via `openclaw models list` CLI ──────────────────────────────
|
||||
|
||||
/** How long to cache the model catalog (ms). Models don't change often. */
|
||||
const MODEL_CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes
|
||||
|
||||
interface ModelCache {
|
||||
models: GatewayModelInfo[];
|
||||
fetchedAt: number;
|
||||
}
|
||||
let modelCache: ModelCache | null = null;
|
||||
|
||||
interface CliModelEntry {
|
||||
key: string;
|
||||
name?: string;
|
||||
available?: boolean;
|
||||
}
|
||||
interface CliModelsOutput {
|
||||
models?: CliModelEntry[];
|
||||
}
|
||||
|
||||
/** Parse CLI JSON output into GatewayModelInfo[].
|
||||
* When `configuredOnly` is true, include all models regardless of `available` flag
|
||||
* (user explicitly configured them). Otherwise filter to available only. */
|
||||
function parseModelsOutput(stdout: string, configuredOnly = false): GatewayModelInfo[] {
|
||||
const data = JSON.parse(stdout) as CliModelsOutput;
|
||||
if (!Array.isArray(data.models)) return [];
|
||||
const out: GatewayModelInfo[] = [];
|
||||
for (const m of data.models) {
|
||||
if (!configuredOnly && !m.available) continue;
|
||||
const id = m.key;
|
||||
if (!id) continue;
|
||||
const [provider, ...rest] = id.split('/');
|
||||
out.push({
|
||||
id,
|
||||
label: rest.join('/') || id,
|
||||
provider: provider || 'unknown',
|
||||
});
|
||||
}
|
||||
return out.sort((a, b) => a.id.localeCompare(b.id));
|
||||
}
|
||||
// ─── Model catalog via active OpenClaw config ──────────────────────────────────
|
||||
|
||||
const openclawBin = resolveOpenclawBin();
|
||||
|
||||
/** Directory containing the node binary — needed in PATH for `#!/usr/bin/env node` shims. */
|
||||
const nodeBinDir = process.execPath.replace(/\/node$/, '');
|
||||
|
||||
const CONFIG_READ_ERROR = 'Could not read OpenClaw config.';
|
||||
const NO_CONFIGURED_MODELS_ERROR = 'No models configured in OpenClaw config.';
|
||||
|
||||
interface OpenClawModelConfigEntry {
|
||||
alias?: string;
|
||||
}
|
||||
|
||||
interface OpenClawConfig {
|
||||
agents?: {
|
||||
defaults?: {
|
||||
model?: {
|
||||
primary?: string;
|
||||
fallbacks?: string[];
|
||||
};
|
||||
models?: Record<string, OpenClawModelConfigEntry | undefined>;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Infer the HOME directory for openclaw execution.
|
||||
* When server runs as root but openclaw is installed under a user account
|
||||
* (e.g., /home/username/.nvm/...), we need to use that user's HOME so openclaw
|
||||
* can find its config at ~/.openclaw/config.yaml.
|
||||
*
|
||||
* can find its config at ~/.openclaw/openclaw.json.
|
||||
*
|
||||
* Extracts home from paths like:
|
||||
* /home/username/.nvm/... → /home/username
|
||||
* /Users/username/.nvm/... → /Users/username
|
||||
*
|
||||
*
|
||||
* Falls back to process.env.HOME if extraction fails.
|
||||
*/
|
||||
function inferOpenclawHome(): string {
|
||||
// Try to extract from openclaw binary path
|
||||
const match = openclawBin.match(/^(\/home\/[^/]+|\/Users\/[^/]+)/);
|
||||
if (match) return match[1];
|
||||
|
||||
// Fallback: use actual user home (works for any user, not just root)
|
||||
|
||||
return process.env.HOME || homedir();
|
||||
}
|
||||
|
||||
const openclawHome = inferOpenclawHome();
|
||||
|
||||
/** Run `openclaw models list` with the given args. */
|
||||
function runModelsList(args: string[]): Promise<string> {
|
||||
return new Promise((resolve, reject) => {
|
||||
execFile(openclawBin, ['models', 'list', ...args], {
|
||||
timeout: GATEWAY_TIMEOUT_MS,
|
||||
maxBuffer: 2 * 1024 * 1024,
|
||||
env: {
|
||||
...process.env,
|
||||
HOME: openclawHome,
|
||||
PATH: `${nodeBinDir}:${process.env.PATH || '/usr/bin:/bin'}`
|
||||
},
|
||||
}, (err, stdout) => {
|
||||
if (err) reject(err);
|
||||
else resolve(stdout);
|
||||
});
|
||||
});
|
||||
function resolveOpenClawConfigPath(): string {
|
||||
return process.env.OPENCLAW_CONFIG_PATH?.trim() || path.join(openclawHome, '.openclaw', 'openclaw.json');
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch models available for the model selector.
|
||||
*
|
||||
* Strategy (works for any OpenClaw install):
|
||||
* 1. Run `openclaw models list --json` (returns configured/allowlisted models)
|
||||
* 2. If that yields ≤1 model (no allowlist, just primary), fall back to
|
||||
* `openclaw models list --all --json` filtered to available models
|
||||
*
|
||||
* This respects `agents.defaults.models` when configured, and gracefully
|
||||
* shows all available models when it isn't.
|
||||
*/
|
||||
async function execOpenclawModels(): Promise<GatewayModelInfo[]> {
|
||||
try {
|
||||
// First: try configured models (respects allowlist)
|
||||
// Always include configured models regardless of `available` flag —
|
||||
// if the user configured them, they should appear.
|
||||
const configured = await runModelsList(['--json']);
|
||||
const models = parseModelsOutput(configured, true);
|
||||
if (models.length > 0) return models;
|
||||
|
||||
// Fallback: no allowlist configured — show all available (filter by available)
|
||||
const all = await runModelsList(['--all', '--json']);
|
||||
const allModels = parseModelsOutput(all, false);
|
||||
if (allModels.length === 0) {
|
||||
console.warn('[gateway/models] openclaw models list returned 0 models.',
|
||||
`Binary: ${openclawBin}, PATH includes: ${nodeBinDir}`);
|
||||
}
|
||||
return allModels;
|
||||
} catch (err) {
|
||||
console.warn('[gateway/models] openclaw models list failed.',
|
||||
`Binary: ${openclawBin}, Error: ${(err as Error).message}`);
|
||||
return [];
|
||||
}
|
||||
function normalizeAlias(entry: OpenClawModelConfigEntry | undefined): string | undefined {
|
||||
const alias = entry?.alias;
|
||||
return typeof alias === 'string' && alias.trim() ? alias.trim() : undefined;
|
||||
}
|
||||
|
||||
/** Get models from cache or fetch fresh. */
|
||||
async function getModelCatalog(): Promise<GatewayModelInfo[]> {
|
||||
if (modelCache && Date.now() - modelCache.fetchedAt < MODEL_CACHE_TTL_MS) {
|
||||
return modelCache.models;
|
||||
function buildGatewayModelInfo(
|
||||
id: string,
|
||||
role: GatewayModelInfo['role'],
|
||||
entry: OpenClawModelConfigEntry | undefined,
|
||||
): GatewayModelInfo {
|
||||
const alias = normalizeAlias(entry);
|
||||
const [provider, ...rest] = id.split('/');
|
||||
|
||||
return {
|
||||
id,
|
||||
label: alias || rest.join('/') || id,
|
||||
provider: provider || 'unknown',
|
||||
...(alias ? { alias } : {}),
|
||||
configured: true,
|
||||
role,
|
||||
};
|
||||
}
|
||||
|
||||
function readConfiguredModels(configData: OpenClawConfig): GatewayModelInfo[] {
|
||||
const defaults = configData.agents?.defaults;
|
||||
const modelDefaults = defaults?.model;
|
||||
const allowlist = defaults?.models || {};
|
||||
const seen = new Set<string>();
|
||||
const models: GatewayModelInfo[] = [];
|
||||
|
||||
const addModel = (value: unknown, role: GatewayModelInfo['role']) => {
|
||||
if (typeof value !== 'string' || !value.trim()) return;
|
||||
const id = value.trim();
|
||||
if (seen.has(id)) return;
|
||||
seen.add(id);
|
||||
models.push(buildGatewayModelInfo(id, role, allowlist[id]));
|
||||
};
|
||||
|
||||
addModel(modelDefaults?.primary, 'primary');
|
||||
|
||||
for (const fallback of modelDefaults?.fallbacks || []) {
|
||||
addModel(fallback, 'fallback');
|
||||
}
|
||||
const models = await execOpenclawModels();
|
||||
if (models.length > 0) {
|
||||
modelCache = { models, fetchedAt: Date.now() };
|
||||
|
||||
const remainingAllowlistEntries = Object.keys(allowlist)
|
||||
.filter((id) => !seen.has(id))
|
||||
.sort((a, b) => a.localeCompare(b));
|
||||
|
||||
for (const id of remainingAllowlistEntries) {
|
||||
addModel(id, 'allowed');
|
||||
}
|
||||
|
||||
return models;
|
||||
}
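// Illustrative ~/.openclaw/openclaw.json excerpt (JSON5) and the roles readConfiguredModels
// derives from it; the model ids mirror the test fixtures and are not required values:
// {
//   agents: {
//     defaults: {
//       model: {
//         primary: 'zai/glm-4.7',                      // → role 'primary'
//         fallbacks: ['openrouter/xiaomi/mimo-v2-pro'], // → role 'fallback'
//       },
//       models: {
//         'zai/glm-4.7': { alias: 'glm-4.7' },
//         'openrouter/xiaomi/mimo-v2-pro': { alias: 'mimo-v2-pro' },
//         'anthropic/claude-sonnet-4': { alias: 'claude-sonnet-4' }, // not primary/fallback → role 'allowed'
//       },
//     },
//   },
// }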
|
||||
|
||||
async function getModelCatalog(): Promise<{ models: GatewayModelInfo[]; error: string | null }> {
|
||||
const configPath = resolveOpenClawConfigPath();
|
||||
|
||||
try {
|
||||
const raw = await readFile(configPath, 'utf8');
|
||||
const configData = JSON5.parse(raw) as OpenClawConfig;
|
||||
const models = readConfiguredModels(configData);
|
||||
|
||||
if (models.length === 0) {
|
||||
return { models: [], error: NO_CONFIGURED_MODELS_ERROR };
|
||||
}
|
||||
|
||||
return { models, error: null };
|
||||
} catch (err) {
|
||||
console.warn('[gateway/models] failed to read configured models from config:', configPath, (err as Error).message);
|
||||
return { models: [], error: CONFIG_READ_ERROR };
|
||||
}
|
||||
}
|
||||
|
||||
app.get('/api/gateway/models', rateLimitGeneral, async (c) => {
|
||||
const models = await getModelCatalog();
|
||||
return c.json({ models });
|
||||
const { models, error } = await getModelCatalog();
|
||||
return c.json({ models, error, source: 'config' });
|
||||
});
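// Illustrative request/response for the route above (the model id matches the test fixtures):
//   GET /api/gateway/models
//   → {
//       "models": [
//         { "id": "zai/glm-4.7", "label": "glm-4.7", "provider": "zai", "alias": "glm-4.7", "configured": true, "role": "primary" }
//       ],
//       "error": null,
//       "source": "config"
//     }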
|
||||
|
||||
/**
|
||||
Two file diffs suppressed because they are too large.
|
|
@@ -6,27 +6,36 @@ import path from 'node:path';
import os from 'node:os';

describe('memories routes', () => {
let homeDir: string;
let tmpDir: string;
let researchWorkspace: string;
let memoryPath: string;
let memoryDir: string;
let broadcastMock: ReturnType<typeof vi.fn>;

beforeEach(async () => {
vi.resetModules();
tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'memories-test-'));
homeDir = await fs.mkdtemp(path.join(os.tmpdir(), 'memories-test-'));
tmpDir = path.join(homeDir, '.openclaw', 'workspace');
researchWorkspace = path.join(homeDir, '.openclaw', 'workspace-research');
memoryDir = path.join(tmpDir, 'memory');
memoryPath = path.join(tmpDir, 'MEMORY.md');
await fs.mkdir(memoryDir, { recursive: true });
await fs.mkdir(researchWorkspace, { recursive: true });
broadcastMock = vi.fn();
});

afterEach(async () => {
vi.restoreAllMocks();
await fs.rm(tmpDir, { recursive: true, force: true });
await fs.rm(homeDir, { recursive: true, force: true });
});

async function buildApp() {
vi.resetModules();
vi.doMock('../lib/config.js', () => ({
config: {
auth: false, port: 3000, host: '127.0.0.1', sslPort: 3443,
home: homeDir,
memoryPath,
memoryDir,
},

@@ -38,9 +47,8 @@ describe('memories routes', () => {
vi.doMock('../lib/gateway-client.js', () => ({
invokeGatewayTool: vi.fn(async () => ({})),
}));
// Mock broadcast from events
vi.doMock('./events.js', () => ({
broadcast: vi.fn(),
broadcast: broadcastMock,
}));

const mod = await import('./memories.js');

@@ -49,6 +57,46 @@ describe('memories routes', () => {
return app;
}

async function loadFileWatcher() {
vi.resetModules();

vi.doMock('../lib/config.js', () => ({
config: {
auth: false, port: 3000, host: '127.0.0.1', sslPort: 3443,
home: homeDir,
memoryPath,
memoryDir,
workspaceWatchRecursive: false,
},
SESSION_COOKIE_NAME: 'nerve_session_3000',
}));
vi.doMock('../routes/events.js', () => ({
broadcast: broadcastMock,
}));

return import('../lib/file-watcher.js');
}

async function waitForBroadcast(
matcher: (call: Array<unknown>) => boolean,
timeoutMs = 2000,
) {
const deadline = Date.now() + timeoutMs;

while (Date.now() < deadline) {
if (broadcastMock.mock.calls.some((call) => matcher(call as Array<unknown>))) {
return;
}
await new Promise((resolve) => setTimeout(resolve, 25));
}

throw new Error(`Timed out waiting for broadcast. Calls: ${JSON.stringify(broadcastMock.mock.calls)}`);
}

async function allowWatcherToSettle() {
await new Promise((resolve) => setTimeout(resolve, 50));
}

describe('GET /api/memories', () => {
it('returns empty array when no memories exist', async () => {
const app = await buildApp();

@@ -100,6 +148,49 @@ describe('memories routes', () => {
});
});

describe('agent-scoped memories', () => {
it('reads MEMORY.md from the requested agent workspace', async () => {
await fs.mkdir(researchWorkspace, { recursive: true });
await fs.writeFile(memoryPath, '# MEMORY.md\n\n## Main Facts\n- Main only\n');
await fs.writeFile(
path.join(researchWorkspace, 'MEMORY.md'),
'# MEMORY.md\n\n## Research Facts\n- Research only\n',
);

const app = await buildApp();
const res = await app.request('/api/memories?agentId=research');

expect(res.status).toBe(200);
const json = (await res.json()) as Array<{ type: string; text: string }>;
const texts = json.map((item) => item.text);
expect(texts).toContain('Research Facts');
expect(texts).toContain('Research only');
expect(texts).not.toContain('Main Facts');
expect(texts).not.toContain('Main only');
});

it('writes memories into the requested agent workspace and broadcasts agentId', async () => {
await fs.mkdir(researchWorkspace, { recursive: true });
await fs.writeFile(memoryPath, '# MEMORY.md\n');
await fs.writeFile(path.join(researchWorkspace, 'MEMORY.md'), '# MEMORY.md\n');

const app = await buildApp();
const res = await app.request('/api/memories', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ agentId: 'research', text: 'Research fact', section: 'Facts' }),
});

expect(res.status).toBe(200);
await expect(fs.readFile(path.join(researchWorkspace, 'MEMORY.md'), 'utf-8')).resolves.toContain('Research fact');
await expect(fs.readFile(memoryPath, 'utf-8')).resolves.not.toContain('Research fact');
expect(broadcastMock).toHaveBeenCalledWith(
'memory.changed',
expect.objectContaining({ agentId: 'research', action: 'create', section: 'Facts' }),
);
});
});

describe('POST /api/memories', () => {
it('returns 400 when text is empty', async () => {
const app = await buildApp();

@@ -296,4 +387,119 @@ describe('memories routes', () => {
expect(res.status).toBe(404);
});
});

describe('file watcher broadcasts', () => {
it('includes agentId when MEMORY.md changes on disk', async () => {
await fs.writeFile(memoryPath, '# MEMORY.md\n');
const { startFileWatcher, stopFileWatcher } = await loadFileWatcher();

startFileWatcher();
await allowWatcherToSettle();
await fs.writeFile(memoryPath, '# MEMORY.md\n\n## Facts\n- Updated\n');

await waitForBroadcast(([event, payload]) => (
event === 'memory.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { file?: string; agentId?: string }).file === 'MEMORY.md'
&& (payload as { file?: string; agentId?: string }).agentId === 'main'
));

await waitForBroadcast(([event, payload]) => (
event === 'file.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { path?: string; agentId?: string }).path === 'MEMORY.md'
&& (payload as { path?: string; agentId?: string }).agentId === 'main'
));

stopFileWatcher();
});

it('includes agentId when daily memory files change on disk', async () => {
const dailyPath = path.join(memoryDir, '2026-02-26.md');
await fs.writeFile(dailyPath, '## Morning\n- First\n');
const { startFileWatcher, stopFileWatcher } = await loadFileWatcher();

startFileWatcher();
await allowWatcherToSettle();
await fs.writeFile(dailyPath, '## Morning\n- Updated\n');

await waitForBroadcast(([event, payload]) => (
event === 'memory.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { file?: string; agentId?: string }).file === '2026-02-26.md'
&& (payload as { file?: string; agentId?: string }).agentId === 'main'
));

await waitForBroadcast(([event, payload]) => (
event === 'file.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { path?: string; agentId?: string }).path === 'memory/2026-02-26.md'
&& (payload as { path?: string; agentId?: string }).agentId === 'main'
));

stopFileWatcher();
});

it('includes scoped agentId when a non-main MEMORY.md changes on disk', async () => {
const researchMemoryPath = path.join(researchWorkspace, 'MEMORY.md');
await fs.writeFile(researchMemoryPath, '# MEMORY.md\n');
const { startFileWatcher, stopFileWatcher } = await loadFileWatcher();

startFileWatcher();
await allowWatcherToSettle();
await fs.writeFile(researchMemoryPath, '# MEMORY.md\n\n## Facts\n- Research update\n');

await waitForBroadcast(([event, payload]) => (
event === 'memory.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { file?: string; agentId?: string }).file === 'MEMORY.md'
&& (payload as { file?: string; agentId?: string }).agentId === 'research'
));

await waitForBroadcast(([event, payload]) => (
event === 'file.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { path?: string; agentId?: string }).path === 'MEMORY.md'
&& (payload as { path?: string; agentId?: string }).agentId === 'research'
));

stopFileWatcher();
});

it('includes scoped agentId when a non-main daily memory file changes on disk', async () => {
const researchMemoryDir = path.join(researchWorkspace, 'memory');
const researchDailyPath = path.join(researchMemoryDir, '2026-02-26.md');
await fs.mkdir(researchMemoryDir, { recursive: true });
await fs.writeFile(researchDailyPath, '## Morning\n- First\n');
const { startFileWatcher, stopFileWatcher } = await loadFileWatcher();

startFileWatcher();
await allowWatcherToSettle();
await fs.writeFile(researchDailyPath, '## Morning\n- Updated\n');

await waitForBroadcast(([event, payload]) => (
event === 'memory.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { file?: string; agentId?: string }).file === '2026-02-26.md'
&& (payload as { file?: string; agentId?: string }).agentId === 'research'
));

await waitForBroadcast(([event, payload]) => (
event === 'file.changed'
&& payload !== null
&& typeof payload === 'object'
&& (payload as { path?: string; agentId?: string }).path === 'memory/2026-02-26.md'
&& (payload as { path?: string; agentId?: string }).agentId === 'research'
));

stopFileWatcher();
});
});
});
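The tests above pin down the event payloads: `memory.changed` carries `file` and `agentId`, and `file.changed` carries `path` and `agentId`. A small sketch of how a frontend might filter those events per agent; the `/api/events` SSE path is an assumption, only the event names and payload fields come from the tests.

// Hypothetical SSE consumer; only event names and payload fields are taken from the tests above.
const events = new EventSource('/api/events'); // endpoint path is assumed
events.addEventListener('memory.changed', (ev) => {
  const payload = JSON.parse((ev as MessageEvent).data) as { file?: string; agentId?: string };
  if (payload.agentId === 'research') {
    console.log('research workspace memory changed:', payload.file);
  }
});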
@@ -8,18 +8,20 @@
* Response: Array of { type: "section"|"item"|"daily", text, date? }
*/

import { Hono } from 'hono';
import { Hono, type Context } from 'hono';
import { zValidator } from '@hono/zod-validator';
import { z } from 'zod';
import fs from 'node:fs/promises';
import path from 'node:path';
import { config } from '../lib/config.js';
import { invokeGatewayTool } from '../lib/gateway-client.js';
import { readText } from '../lib/files.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';
import { broadcast } from './events.js';
import { withMutex } from '../lib/mutex.js';
import type { MemoryItem } from '../types.js';
import { resolveAgentWorkspace, type AgentWorkspace } from '../lib/agent-workspace.js';
import { isWorkspaceLocal } from '../lib/workspace-detect.js';
import { gatewayFilesGet, gatewayFilesSet } from '../lib/gateway-rpc.js';

const app = new Hono();

@@ -35,6 +37,7 @@ const createMemorySchema = z.object({
section: z.string().max(200, 'Section name too long').optional(),
category: z.enum(['preference', 'fact', 'decision', 'entity', 'other']).optional(),
importance: z.number().min(0).max(1).optional(),
agentId: z.string().max(200).optional(),
});

/** Safe filename pattern: alphanumeric, hyphens, underscores, dots. No slashes, no `..` */

@@ -45,6 +48,7 @@ const deleteMemorySchema = z.object({
query: z.string().min(1, 'Query is required').max(1000, 'Query too long'),
type: z.enum(['section', 'item', 'daily']).optional(),
date: z.string().max(100).optional(),
agentId: z.string().max(200).optional(),
});

/**

@@ -96,7 +100,7 @@ function deleteSectionFromLines(lines: string[], sectionTitle: string): string[]
*/
function deleteItemFromLines(lines: string[], itemText: string): string[] | null {
const originalLength = lines.length;

const filtered = lines.filter((line) => {
const trimmed = line.trim();
// Match bullet points or numbered lists

@@ -125,13 +129,35 @@ interface DeleteOptions {
date?: string; // For daily files: YYYY-MM-DD
}

function getMemoryFilePath(workspace: Pick<AgentWorkspace, 'memoryPath' | 'memoryDir'>, date?: string): string {
if (date) {
return path.join(workspace.memoryDir, `${date}.md`);
}
return workspace.memoryPath;
}

function getMutexKey(agentId: string): string {
return `memory-file:${agentId}`;
}

function resolveWorkspaceOrResponse(c: Context, agentId?: string): AgentWorkspace | Response {
try {
return resolveAgentWorkspace(agentId);
} catch {
return c.json({ ok: false, error: 'Invalid agentId' }, 400);
}
}

/**
* Delete from MEMORY.md or daily files
* - If type is 'section': delete the section header AND all items until the next section
* - If type is 'item': delete just that one line from MEMORY.md
* - If type is 'daily': delete the section from the daily file (memory/YYYY-MM-DD.md)
*/
async function deleteMemory(opts: DeleteOptions): Promise<{ deleted: boolean; file?: string }> {
async function deleteMemory(
opts: DeleteOptions,
workspace: Pick<AgentWorkspace, 'memoryPath' | 'memoryDir'>,
): Promise<{ deleted: boolean; file?: string }> {
const { text, type, date } = opts;

// Validate filename to prevent path traversal

@@ -140,14 +166,7 @@ async function deleteMemory(opts: DeleteOptions): Promise<{ deleted: boolean; fi
}

try {
// Determine which file to edit
let filePath: string;
if (type === 'daily' && date) {
filePath = path.join(config.memoryDir, `${date}.md`);
} else {
filePath = config.memoryPath;
}

const filePath = getMemoryFilePath(workspace, type === 'daily' ? date : undefined);
const content = await readText(filePath);
if (!content) {
return { deleted: false };

@@ -171,7 +190,7 @@ async function deleteMemory(opts: DeleteOptions): Promise<{ deleted: boolean; fi
// Clean up and save
const cleaned = cleanBlankLines(result);
await fs.writeFile(filePath, cleaned.join('\n'), 'utf-8');

return { deleted: true, file: path.basename(filePath) };
} catch (err) {
console.error('[memories] Failed to delete:', (err as Error).message);

@@ -185,8 +204,7 @@ async function deleteMemory(opts: DeleteOptions): Promise<{ deleted: boolean; fi
* Append a bullet point to MEMORY.md under the given section heading.
* If the section doesn't exist, create it at the end of the file.
*/
async function appendToMemoryFile(text: string, section: string): Promise<void> {
const filePath = config.memoryPath;
async function appendToMemoryFile(text: string, section: string, filePath: string): Promise<void> {
let content = '';
try {
content = await fs.readFile(filePath, 'utf-8');

@@ -224,6 +242,7 @@ async function appendToMemoryFile(text: string, section: string): Promise<void>
// Ensure trailing newline before new section
const trimmedEnd = content.trimEnd();
const newContent = `${trimmedEnd}\n\n${sectionHeader}\n${bulletLine}\n`;
await fs.mkdir(path.dirname(filePath), { recursive: true });
await fs.writeFile(filePath, newContent, 'utf-8');
} else {
// Section exists — find the last non-blank line within the section to append after

@@ -239,42 +258,68 @@ async function appendToMemoryFile(text: string, section: string): Promise<void>
// Insert the bullet line
lines.splice(insertAt, 0, bulletLine);
const cleaned = cleanBlankLines(lines);
await fs.mkdir(path.dirname(filePath), { recursive: true });
await fs.writeFile(filePath, cleaned.join('\n'), 'utf-8');
}
}

/** Parse MEMORY.md content into MemoryItem array. */
function parseMemoryContent(content: string): MemoryItem[] {
const memories: MemoryItem[] = [];
for (const line of content.split('\n')) {
const trimmed = line.trim();
if (trimmed.startsWith('## ')) {
memories.push({ type: 'section', text: trimmed.slice(3).trim() });
} else if (/^[-*]\s+/.test(trimmed) || /^\d+\.\s/.test(trimmed)) {
const clean = trimmed
.replace(/^[-*]\s+|^\d+\.\s+/, '')
.replace(/\*\*/g, '')
.replace(/`/g, '');
if (clean.length > 0) {
memories.push({ type: 'item', text: clean });
}
}
}
return memories;
}

app.get('/api/memories', rateLimitGeneral, async (c) => {
const workspace = resolveWorkspaceOrResponse(c, c.req.query('agentId'));
if (workspace instanceof Response) return workspace;

const memories: MemoryItem[] = [];

// Parse MEMORY.md — sections and bullet points
const content = await readText(config.memoryPath);
if (content) {
for (const line of content.split('\n')) {
const trimmed = line.trim();
if (trimmed.startsWith('## ')) {
memories.push({ type: 'section', text: trimmed.slice(3).trim() });
} else if (/^[-*]\s+/.test(trimmed) || /^\d+\.\s/.test(trimmed)) {
const clean = trimmed
.replace(/^[-*]\s+|^\d+\.\s+/, '')
.replace(/\*\*/g, '')
.replace(/`/g, '');
if (clean.length > 0) {
memories.push({ type: 'item', text: clean });
// Parse MEMORY.md — try local first, then gateway fallback
let content = await readText(workspace.memoryPath);
if (!content) {
// Local read failed — try gateway
const isLocal = await isWorkspaceLocal(path.dirname(workspace.memoryPath));
if (!isLocal) {
try {
const file = await gatewayFilesGet(workspace.agentId, 'MEMORY.md');
if (file) {
content = file.content;
}
} catch (err) {
console.warn('[memories] Gateway MEMORY.md fallback failed:', (err as Error).message);
}
}
}

// Parse recent daily files — section headers only
if (content) {
memories.push(...parseMemoryContent(content));
}

// Parse recent daily files — local only (gateway doesn't support subdirectories)
try {
const files = (await fs.readdir(config.memoryDir))
const files = (await fs.readdir(workspace.memoryDir))
.filter((f) => f.endsWith('.md'))
.sort()
.reverse()
.slice(0, 7);

for (const f of files) {
const dailyContent = await readText(path.join(config.memoryDir, f));
const dailyContent = await readText(path.join(workspace.memoryDir, f));
for (const line of dailyContent.split('\n')) {
const trimmed = line.trim();
if (trimmed.startsWith('## ')) {

@@ -315,14 +360,11 @@ app.get('/api/memories/section', rateLimitGeneral, async (c) => {
return c.json({ ok: false, error: 'Invalid filename' }, 400);
}

try {
let filePath: string;
if (date) {
filePath = path.join(config.memoryDir, `${date}.md`);
} else {
filePath = config.memoryPath;
}
const workspace = resolveWorkspaceOrResponse(c, c.req.query('agentId'));
if (workspace instanceof Response) return workspace;

try {
const filePath = getMemoryFilePath(workspace, date);
const content = await readText(filePath);
if (!content) {
return c.json({ ok: false, error: 'File not found' }, 404);

@@ -382,12 +424,56 @@ app.post(
async (c) => {
try {
const body = c.req.valid('json');
const workspace = resolveWorkspaceOrResponse(c, body.agentId ?? c.req.query('agentId'));
if (workspace instanceof Response) return workspace;

const trimmedText = body.text.trim();
const safeSection = (body.section ?? '').replace(/[\r\n]/g, ' ').trim();
const section = safeSection || 'General';

// 1. Write to MEMORY.md (primary display source)
await withMutex('memory-file', () => appendToMemoryFile(trimmedText, section));
const isLocal = await isWorkspaceLocal(path.dirname(workspace.memoryPath));
if (isLocal) {
await withMutex(getMutexKey(workspace.agentId), () => appendToMemoryFile(trimmedText, section, workspace.memoryPath));
} else {
// Remote workspace — read via gateway, modify in memory, write back
try {
await withMutex(getMutexKey(workspace.agentId), async () => {
const file = await gatewayFilesGet(workspace.agentId, 'MEMORY.md');
let content = file?.content || '# MEMORY.md\n';
const sectionHeader = `## ${section}`;
const bulletLine = `- ${trimmedText}`;

if (content.includes(sectionHeader)) {
// Find section and append bullet after it
const lines = content.split('\n');
let sectionStart = -1;
let sectionEnd = lines.length;
for (let i = 0; i < lines.length; i++) {
if (sectionStart === -1) {
if (lines[i].trim().toLowerCase() === sectionHeader.toLowerCase()) sectionStart = i;
} else if (lines[i].trim().startsWith('## ')) {
sectionEnd = i;
break;
}
}
// Insert before next section
let insertAt = sectionEnd;
for (let i = sectionEnd - 1; i > sectionStart; i--) {
if (lines[i].trim() !== '') { insertAt = i + 1; break; }
}
lines.splice(insertAt, 0, bulletLine);
content = lines.join('\n');
} else {
content = `${content.trimEnd()}\n\n${sectionHeader}\n${bulletLine}\n`;
}
await gatewayFilesSet(workspace.agentId, 'MEMORY.md', content);
});
} catch (err) {
console.error('[memories] Gateway MEMORY.md write failed:', (err as Error).message);
return c.json({ ok: false, error: 'Failed to store memory (remote workspace)' }, 500);
}
}

// 2. Also store in gateway LanceDB (for vector search) — best effort
try {

@@ -402,7 +488,7 @@ app.post(
}

// Broadcast memory change to all SSE clients
broadcast('memory.changed', { source: 'api', action: 'create', section });
broadcast('memory.changed', { source: 'api', action: 'create', section, agentId: workspace.agentId });

return c.json({ ok: true, result: { written: true, section } });
} catch (err) {

@@ -421,6 +507,7 @@ const updateSectionSchema = z.object({
title: z.string().min(1, 'Title is required').max(200, 'Title too long'),
content: z.string().max(50000, 'Content too long'),
date: z.string().max(100).optional(),
agentId: z.string().max(200).optional(),
});

app.put(

@@ -434,32 +521,23 @@ app.put(
}),
async (c) => {
try {
const { title, content, date } = c.req.valid('json');
const { title, content, date, agentId } = c.req.valid('json');

// Validate filename to prevent path traversal
if (date && (!SAFE_FILENAME.test(date) || date.includes('..'))) {
return c.json({ ok: false, error: 'Invalid filename' }, 400);
}

// Determine which file to edit
let filePath: string;
if (date) {
filePath = path.join(config.memoryDir, `${date}.md`);
} else {
filePath = config.memoryPath;
}

const result = await withMutex('memory-file', async () => {
const fileContent = await readText(filePath);
if (!fileContent) {
return { ok: false as const, error: 'File not found', status: 404 as const };
}
const workspace = resolveWorkspaceOrResponse(c, agentId ?? c.req.query('agentId'));
if (workspace instanceof Response) return workspace;
const filePath = getMemoryFilePath(workspace, date);

/** Replace a section's content in a markdown string. */
function replaceSectionContent(fileContent: string): { ok: true; newContent: string } | { ok: false; error: string; status: 404 } {
const lines = fileContent.split('\n');
let sectionStart = -1;
let sectionEnd = lines.length;

// Find the section
for (let i = 0; i < lines.length; i++) {
const trimmed = lines[i].trim();
if (sectionStart === -1) {

@@ -475,42 +553,75 @@ app.put(
}

if (sectionStart === -1) {
return { ok: false as const, error: 'Section not found', status: 404 as const };
return { ok: false, error: 'Section not found', status: 404 };
}

// Replace the section content (keep the header, replace everything until next section)
const newLines = [
...lines.slice(0, sectionStart + 1), // Everything before section + section header
content, // New content
'', // Blank line before next section
...lines.slice(sectionEnd), // Everything from next section onwards
...lines.slice(0, sectionStart + 1),
content,
'',
...lines.slice(sectionEnd),
];

// Clean up multiple consecutive blank lines
const cleaned = cleanBlankLines(newLines);
await fs.writeFile(filePath, cleaned.join('\n'), 'utf-8');
return { ok: true as const };
});
return { ok: true, newContent: cleaned.join('\n') };
}

// Try local first
const isLocal = await isWorkspaceLocal(path.dirname(workspace.memoryPath));
let result: { ok: true } | { ok: false; error: string; status: 404 };

if (isLocal || date) {
// Daily files are always local-only; MEMORY.md tries local first
result = await withMutex(getMutexKey(workspace.agentId), async () => {
const fileContent = await readText(filePath);
if (!fileContent) {
return { ok: false as const, error: 'File not found', status: 404 as const };
}
const replaced = replaceSectionContent(fileContent);
if (!replaced.ok) return replaced;
await fs.writeFile(filePath, replaced.newContent, 'utf-8');
return { ok: true as const };
});
} else {
// Gateway fallback for MEMORY.md
try {
result = await withMutex(getMutexKey(workspace.agentId), async () => {
const file = await gatewayFilesGet(workspace.agentId, 'MEMORY.md');
if (!file) {
return { ok: false as const, error: 'File not found', status: 404 as const };
}
const replaced = replaceSectionContent(file.content);
if (!replaced.ok) {
return replaced;
}
await gatewayFilesSet(workspace.agentId, 'MEMORY.md', replaced.newContent);
return { ok: true as const };
});
} catch (err) {
console.error('[memories] Gateway section update failed:', (err as Error).message);
return c.json({ ok: false, error: 'Failed to update memory section' }, 500);
}
}

if (!result.ok) {
return c.json({ ok: false, error: result.error }, result.status);
}

// Broadcast memory change to all SSE clients
broadcast('memory.changed', {
source: 'api',
action: 'update',
broadcast('memory.changed', {
source: 'api',
action: 'update',
file: path.basename(filePath),
section: title
section: title,
agentId: workspace.agentId,
});

return c.json({
ok: true,
result: {
updated: true,
return c.json({
ok: true,
result: {
updated: true,
file: path.basename(filePath),
section: title
}
section: title,
}
});
} catch (err) {
console.error('[memories] PUT section error:', (err as Error).message);

@@ -536,32 +647,70 @@ app.delete(
async (c) => {
try {
const body = c.req.valid('json');
const workspace = resolveWorkspaceOrResponse(c, body.agentId ?? c.req.query('agentId'));
if (workspace instanceof Response) return workspace;

const isDailyType = body.type === 'daily';
const isLocal = await isWorkspaceLocal(path.dirname(workspace.memoryPath));

let result: { deleted: boolean; file?: string };

if (isLocal || isDailyType) {
// Local delete (daily files are always local-only)
result = await withMutex(getMutexKey(workspace.agentId), () => deleteMemory({
text: body.query,
type: body.type,
date: body.date,
}, workspace));
} else {
// Gateway fallback for MEMORY.md
try {
result = await withMutex(getMutexKey(workspace.agentId), async () => {
const file = await gatewayFilesGet(workspace.agentId, 'MEMORY.md');
if (!file) {
return { deleted: false };
}
const lines = file.content.split('\n');
let modified: string[] | null = null;

if (body.type === 'section') {
modified = deleteSectionFromLines(lines, body.query);
} else {
modified = deleteItemFromLines(lines, body.query);
}

if (modified) {
const cleaned = cleanBlankLines(modified);
await gatewayFilesSet(workspace.agentId, 'MEMORY.md', cleaned.join('\n'));
return { deleted: true, file: 'MEMORY.md' };
}
return { deleted: false };
});
} catch (err) {
console.error('[memories] Gateway DELETE failed:', (err as Error).message);
return c.json({ ok: false, error: 'Failed to delete memory' }, 500);
}
}

const result = await withMutex('memory-file', () => deleteMemory({
text: body.query,
type: body.type,
date: body.date,
}));

if (result.deleted) {
// Broadcast memory change to all SSE clients
broadcast('memory.changed', {
source: 'api',
action: 'delete',
file: result.file
broadcast('memory.changed', {
source: 'api',
action: 'delete',
file: result.file,
agentId: workspace.agentId,
});

return c.json({
ok: true,
result: {
deleted: 1,
source: 'file',
return c.json({
ok: true,
result: {
deleted: 1,
source: 'file',
file: result.file,
type: body.type || 'item'
}
type: body.type || 'item',
}
});
} else {
const file = body.type === 'daily' ? `memory/${body.date}.md` : 'MEMORY.md';
const file = isDailyType ? `memory/${body.date}.md` : 'MEMORY.md';
return c.json({ ok: false, error: `Memory not found in ${file}` }, 404);
}
} catch (err) {
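For orientation, a sketch of a request body that the createMemorySchema above accepts; the field names (`text`, `section`, `category`, `importance`, `agentId`) come from the schema in this diff, while the concrete values are illustrative.

// Hypothetical call against the POST /api/memories route defined above.
await fetch('/api/memories', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    agentId: 'research',      // optional: scope the write to a non-main agent workspace
    text: 'Prefers TypeScript examples',
    section: 'Preferences',
    category: 'preference',   // one of: preference | fact | decision | entity | other
    importance: 0.7,          // 0..1
  }),
});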
server/routes/server-info.test.ts (new file, 127 lines)
@@ -0,0 +1,127 @@
/** Tests for the GET /api/server-info endpoint. */
import { describe, it, expect, vi, afterEach } from 'vitest';
import { Hono } from 'hono';

let execFileImpl: (...args: unknown[]) => void;
let readFileImpl: (...args: unknown[]) => Promise<string>;

const runtime = vi.hoisted(() => ({
platform: 'linux' as NodeJS.Platform,
}));

vi.mock('node:os', async (importOriginal) => {
const actual = await importOriginal<typeof import('node:os')>();
const mock = { ...actual, platform: () => runtime.platform };
return { ...mock, default: mock };
});

vi.mock('node:child_process', async (importOriginal) => {
const actual = await importOriginal<typeof import('node:child_process')>();
const mock = { ...actual, execFile: (...args: unknown[]) => execFileImpl(...args) };
return { ...mock, default: mock };
});

vi.mock('node:fs', async (importOriginal) => {
const actual = await importOriginal<typeof import('node:fs')>();
const mock = {
...actual,
promises: {
...actual.promises,
readFile: (...args: unknown[]) => readFileImpl(...args),
},
};
return { ...mock, default: mock };
});

vi.mock('../lib/config.js', () => ({
config: { agentName: 'Jen' },
}));

vi.mock('../lib/openclaw-config.js', () => ({
getDefaultAgentWorkspaceRoot: () => '/mock/workspaces',
}));

vi.mock('../middleware/rate-limit.js', () => ({
rateLimitGeneral: vi.fn(async (_c: unknown, next: () => Promise<void>) => next()),
}));

async function buildApp(platform: NodeJS.Platform) {
runtime.platform = platform;
vi.resetModules();
const mod = await import('./server-info.js');
const app = new Hono();
app.route('/', mod.default);
return app;
}

afterEach(() => {
vi.clearAllMocks();
vi.resetModules();
});

describe('GET /api/server-info', () => {
it('returns Linux gateway start time from /proc', async () => {
execFileImpl = (file, args, _opts, cb) => {
expect(file).toBe('pgrep');
expect(args).toEqual(['-f', 'openclaw-gatewa']);
(cb as (err: Error | null, stdout: string) => void)(null, '72246\n');
};

readFileImpl = async (filePath) => {
if (filePath === '/proc/72246/stat') {
const afterCommFields = [
'S', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '1234',
];
return `72246 (openclaw-gateway) ${afterCommFields.join(' ')}`;
}
if (filePath === '/proc/stat') {
return 'cpu 1 2 3 4\nbtime 1700000000\n';
}
throw new Error(`Unexpected read: ${String(filePath)}`);
};

const app = await buildApp('linux');
const res = await app.request('/api/server-info');
expect(res.status).toBe(200);

const json = (await res.json()) as Record<string, unknown>;
expect(json.gatewayStartedAt).toBe(1700000012340);
expect(typeof json.serverTime).toBe('number');
expect(json.agentName).toBe('Jen');
expect(json.defaultAgentWorkspaceRoot).toBe('/mock/workspaces');
});

it('returns macOS gateway start time from ps output', async () => {
const execCalls: Array<{ file: unknown; args: unknown }> = [];

execFileImpl = (file, args, _opts, cb) => {
execCalls.push({ file, args });
if (file === 'ps' && Array.isArray(args) && args[0] === '-axo') {
(cb as (err: Error | null, stdout: string) => void)(null, '72245 openclaw\n72246 openclaw-gateway\n');
return;
}
if (file === 'ps' && Array.isArray(args) && args[0] === '-p') {
(cb as (err: Error | null, stdout: string) => void)(null, 'Tue Mar 31 20:14:31 2026\n');
return;
}
throw new Error(`Unexpected exec: ${String(file)}`);
};

readFileImpl = async () => {
throw new Error('macOS path should not read /proc');
};

const app = await buildApp('darwin');
const res = await app.request('/api/server-info');
expect(res.status).toBe(200);

const json = (await res.json()) as Record<string, unknown>;
expect(json.gatewayStartedAt).toBe(new Date('Tue Mar 31 20:14:31 2026').getTime());
expect(json.defaultAgentWorkspaceRoot).toBe('/mock/workspaces');
expect(execCalls).toEqual([
{ file: 'ps', args: ['-axo', 'pid=,comm='] },
{ file: 'ps', args: ['-p', '72246', '-o', 'lstart='] },
]);
});
});
@@ -4,7 +4,7 @@
* Returns `serverTime` (epoch ms), `gatewayStartedAt` (epoch ms), `timezone`,
* and `agentName` so the frontend can show a real-time server clock and true
* gateway uptime. Gateway start time is derived from `/proc` on Linux and
* cached for 30 s.
* from `ps -o lstart` on platforms like macOS, then cached for 30 s.
* @module
*/

@@ -13,6 +13,7 @@ import { execFile } from 'node:child_process';
import fs from 'node:fs';
import os from 'node:os';
import { config } from '../lib/config.js';
import { getDefaultAgentWorkspaceRoot } from '../lib/openclaw-config.js';
import { rateLimitGeneral } from '../middleware/rate-limit.js';

const app = new Hono();

@@ -27,45 +28,94 @@ let gatewayStartedAtCache: number | null = null;
let cacheTs = 0;
const CACHE_TTL = 30_000;

async function execFileText(file: string, args: string[]): Promise<string> {
return await new Promise<string>((resolve, reject) => {
execFile(file, args, { timeout: 2000 }, (err, stdout) => {
if (err) return reject(err);
resolve(stdout.trim());
});
});
}

const GATEWAY_COMM_PREFIX = 'openclaw-gatewa';

async function getGatewayPidFromPgrep(): Promise<string> {
const stdout = await execFileText('pgrep', ['-f', GATEWAY_COMM_PREFIX]);
return stdout.split('\n')[0] || '';
}

async function getGatewayPidFromPs(): Promise<string> {
const stdout = await execFileText('ps', ['-axo', 'pid=,comm=']);
for (const line of stdout.split('\n')) {
const match = line.trim().match(/^(\d+)\s+(.+)$/);
if (!match) continue;
const [, pid, comm] = match;
if (comm === 'openclaw-gateway' || comm.startsWith(GATEWAY_COMM_PREFIX)) return pid;
}
return '';
}

async function getGatewayPid(): Promise<string> {
if (isLinux) {
try {
const pid = await getGatewayPidFromPgrep();
if (pid) return pid;
} catch {
// Fall through to ps-based lookup below.
}
}

return await getGatewayPidFromPs();
}

async function getGatewayStartedAtLinux(pidStr: string): Promise<number | null> {
const stat = await fs.promises.readFile(`/proc/${pidStr}/stat`, 'utf8');
// Parse starttime (field 22, 0-indexed 21) after the comm field.
// comm can contain spaces/parens, so find the last ')' first.
const afterComm = stat.slice(stat.lastIndexOf(')') + 2);
const startTimeTicks = parseInt(afterComm.split(' ')[19], 10); // field 22 = index 19 after pid+comm

const procStat = await fs.promises.readFile('/proc/stat', 'utf8');
const btimeLine = procStat.split('\n').find((l) => l.startsWith('btime'));
if (!btimeLine) return null;
const btime = parseInt(btimeLine.split(' ')[1], 10);

const startSecs = btime + startTimeTicks / CLK_TCK;
return Math.round(startSecs * 1000);
}

async function getGatewayStartedAtPs(pidStr: string): Promise<number | null> {
const lstart = await execFileText('ps', ['-p', pidStr, '-o', 'lstart=']);
if (!lstart) return null;
const startedAt = new Date(lstart).getTime();
return Number.isFinite(startedAt) ? startedAt : null;
}

/**
* Determine when the OpenClaw gateway process started (Linux only).
* Determine when the OpenClaw gateway process started.
*
* Uses `pgrep` to find the gateway PID, then reads `/proc/<pid>/stat`
* to extract the start time in clock ticks, converting to epoch ms
* via the system boot time from `/proc/stat`. Result is cached for 30 s.
* Uses `pgrep` to find the gateway PID, then reads `/proc/<pid>/stat` on
* Linux or `ps -p <pid> -o lstart=` elsewhere. Result is cached for 30 s.
*
* @returns Epoch ms of gateway start, or `null` on non-Linux / if not running.
* @returns Epoch ms of gateway start, or `null` if not running / unavailable.
*/
async function getGatewayStartedAt(): Promise<number | null> {
if (!isLinux) return null; // /proc and pgrep are Linux-only

const now = Date.now();
if (gatewayStartedAtCache && now - cacheTs < CACHE_TTL) return gatewayStartedAtCache;

try {
const pidStr = await new Promise<string>((resolve, reject) => {
execFile('pgrep', ['-f', 'openclaw-gatewa'], { timeout: 2000 }, (err, stdout) => {
if (err) return reject(err);
resolve(stdout.trim().split('\n')[0] || '');
});
});
const pidStr = await getGatewayPid();
if (!pidStr) return null;

const stat = await fs.promises.readFile(`/proc/${pidStr}/stat`, 'utf8');
// Parse starttime (field 22, 0-indexed 21) after the comm field.
// comm can contain spaces/parens, so find the last ')' first.
const afterComm = stat.slice(stat.lastIndexOf(')') + 2);
const startTimeTicks = parseInt(afterComm.split(' ')[19], 10); // field 22 = index 19 after pid+comm
const startedAt = isLinux
? await getGatewayStartedAtLinux(pidStr)
: await getGatewayStartedAtPs(pidStr);

const procStat = await fs.promises.readFile('/proc/stat', 'utf8');
const btimeLine = procStat.split('\n').find((l) => l.startsWith('btime'));
if (!btimeLine) return null;
const btime = parseInt(btimeLine.split(' ')[1], 10);

const startSecs = btime + startTimeTicks / CLK_TCK;
gatewayStartedAtCache = Math.round(startSecs * 1000);
cacheTs = now;
return gatewayStartedAtCache;
if (startedAt !== null) {
gatewayStartedAtCache = startedAt;
cacheTs = now;
}
return startedAt;
} catch {
return gatewayStartedAtCache; // return stale if available
}

@@ -77,6 +127,7 @@ app.get('/api/server-info', rateLimitGeneral, async (c) => {
gatewayStartedAt: await getGatewayStartedAt(),
timezone: Intl.DateTimeFormat().resolvedOptions().timeZone,
agentName: config.agentName,
defaultAgentWorkspaceRoot: getDefaultAgentWorkspaceRoot(),
});
});
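A small sketch of how a frontend could turn the /api/server-info fields into an uptime label; `serverTime`, `gatewayStartedAt`, `timezone`, `agentName`, and `defaultAgentWorkspaceRoot` are the fields returned above, the helper itself is illustrative.

// Hypothetical uptime helper built on the response fields shown above.
interface ServerInfo {
  serverTime: number;              // epoch ms
  gatewayStartedAt: number | null; // epoch ms, or null when unavailable
  timezone: string;
  agentName: string;
  defaultAgentWorkspaceRoot: string;
}

function formatGatewayUptime(info: ServerInfo): string {
  if (info.gatewayStartedAt === null) return 'unknown';
  const upMs = info.serverTime - info.gatewayStartedAt;
  const hours = Math.floor(upMs / 3_600_000);
  const minutes = Math.floor((upMs % 3_600_000) / 60_000);
  return `${hours}h ${minutes}m`;
}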
@@ -1,16 +1,18 @@
/** Tests for the sessions API route (GET /api/sessions/:id/model). */
/** Tests for the sessions API routes. */
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { Hono } from 'hono';
import fs from 'node:fs/promises';
import path from 'node:path';
import os from 'node:os';

describe('GET /api/sessions/:id/model', () => {
describe('sessions routes', () => {
let tmpDir: string;
let spawnSubagentMock: ReturnType<typeof vi.fn>;

beforeEach(async () => {
vi.resetModules();
tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'sessions-test-'));
spawnSubagentMock = vi.fn();
});

afterEach(async () => {

@@ -22,6 +24,7 @@ describe('GET /api/sessions/:id/model', () => {
// Mock config to use our temp sessions dir
vi.doMock('../lib/config.js', () => ({
config: {
home: tmpDir,
sessionsDir: tmpDir,
auth: false,
port: 3000,

@@ -33,6 +36,9 @@ describe('GET /api/sessions/:id/model', () => {
vi.doMock('../middleware/rate-limit.js', () => ({
rateLimitGeneral: vi.fn((_c: unknown, next: () => Promise<void>) => next()),
}));
vi.doMock('../lib/subagent-spawn.js', () => ({
spawnSubagent: spawnSubagentMock,
}));

const mod = await import('./sessions.js');
const app = new Hono();

@@ -57,15 +63,17 @@ describe('GET /api/sessions/:id/model', () => {
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.model).toBeNull();
expect(json.thinking).toBeNull();
expect(json.missing).toBe(true);
});

it('returns model from transcript with model_change entry', async () => {
it('returns runtime defaults from transcript entries near the top', async () => {
const app = await buildApp();
const uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee';
const transcript = [
JSON.stringify({ type: 'session_start', ts: Date.now() }),
JSON.stringify({ type: 'model_change', modelId: 'anthropic/claude-opus-4', ts: Date.now() }),
JSON.stringify({ type: 'thinking_level_change', thinkingLevel: 'medium', ts: Date.now() }),
JSON.stringify({ type: 'message', role: 'user', content: 'hello' }),
].join('\n');
await fs.writeFile(path.join(tmpDir, `${uuid}.jsonl`), transcript);

@@ -75,10 +83,11 @@ describe('GET /api/sessions/:id/model', () => {
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.model).toBe('anthropic/claude-opus-4');
expect(json.thinking).toBe('medium');
expect(json.missing).toBe(false);
});

it('returns model: null when transcript has no model_change', async () => {
it('returns model: null when transcript has no runtime defaults', async () => {
const app = await buildApp();
const uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee';
const transcript = [

@@ -92,6 +101,7 @@ describe('GET /api/sessions/:id/model', () => {
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.model).toBeNull();
expect(json.thinking).toBeNull();
expect(json.missing).toBe(false);
});

@@ -106,6 +116,243 @@ describe('GET /api/sessions/:id/model', () => {
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.model).toBe('openai/gpt-4o');
expect(json.thinking).toBeNull();
expect(json.missing).toBe(false);
});

it('reads non-main agent transcripts when agentId is provided', async () => {
const app = await buildApp();
const uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee';
const agentSessionsDir = path.join(tmpDir, '.openclaw', 'agents', 'smoke257', 'sessions');
await fs.mkdir(agentSessionsDir, { recursive: true });
await fs.writeFile(path.join(agentSessionsDir, `${uuid}.jsonl`), [
JSON.stringify({ type: 'model_change', modelId: 'openai-codex/gpt-5.4', ts: Date.now() }),
JSON.stringify({ type: 'thinking_level_change', thinkingLevel: 'medium', ts: Date.now() }),
].join('\n'));

const res = await app.request(`/api/sessions/${uuid}/model?agentId=smoke257`);
expect(res.status).toBe(200);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.model).toBe('openai-codex/gpt-5.4');
expect(json.thinking).toBe('medium');
expect(json.missing).toBe(false);
});

it('resolves runtime defaults by sessionKey for non-main agents', async () => {
const app = await buildApp();
const uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee';
const sessionKey = 'agent:smoke257:main';
const agentSessionsDir = path.join(tmpDir, '.openclaw', 'agents', 'smoke257', 'sessions');
await fs.mkdir(agentSessionsDir, { recursive: true });
await fs.writeFile(path.join(agentSessionsDir, 'sessions.json'), JSON.stringify({
[sessionKey]: { sessionId: uuid },
}));
await fs.writeFile(path.join(agentSessionsDir, `${uuid}.jsonl`), [
JSON.stringify({ type: 'model_change', modelId: 'openai-codex/gpt-5.4', ts: Date.now() }),
JSON.stringify({ type: 'thinking_level_change', thinkingLevel: 'medium', ts: Date.now() }),
].join('\n'));

const res = await app.request(`/api/sessions/runtime?sessionKey=${encodeURIComponent(sessionKey)}`);
expect(res.status).toBe(200);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.model).toBe('openai-codex/gpt-5.4');
expect(json.thinking).toBe('medium');
expect(json.missing).toBe(false);
});

it('serves omitted image bytes from a session transcript', async () => {
const app = await buildApp();
const sessionKey = 'agent:main:main';
const sessionId = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee';
const timestamp = 1775131617235;
const base64 = Buffer.from('hello-image').toString('base64');

await fs.writeFile(path.join(tmpDir, 'sessions.json'), JSON.stringify({
[sessionKey]: { sessionId },
}));
await fs.writeFile(path.join(tmpDir, `${sessionId}.jsonl`), [
JSON.stringify({ type: 'session_start', ts: Date.now() }),
JSON.stringify({
type: 'message',
message: {
timestamp,
content: [
{ type: 'text', text: 'testing' },
{ type: 'image', mimeType: 'image/png', data: base64 },
],
},
}),
].join('\n'));

const res = await app.request(`/api/sessions/media?sessionKey=${encodeURIComponent(sessionKey)}&timestamp=${timestamp}&imageIndex=0`);
expect(res.status).toBe(200);
expect(res.headers.get('content-type')).toBe('image/png');
expect(res.headers.get('content-disposition')).toContain(`message-${timestamp}-image-0.png`);
const body = Buffer.from(await res.arrayBuffer()).toString('utf-8');
expect(body).toBe('hello-image');
});

it('returns 404 when session transcript media cannot be resolved', async () => {
const app = await buildApp();
const sessionKey = 'agent:main:main';
await fs.writeFile(path.join(tmpDir, 'sessions.json'), JSON.stringify({
[sessionKey]: { sessionId: 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' },
}));

const res = await app.request(`/api/sessions/media?sessionKey=${encodeURIComponent(sessionKey)}&timestamp=1775131617235&imageIndex=0`);
expect(res.status).toBe(404);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(false);
});

// ── POST /api/sessions/spawn-subagent ────────────────────────────

it('rejects missing body with 400', async () => {
const app = await buildApp();
const res = await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: 'not json',
});
expect(res.status).toBe(400);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(false);
expect(typeof json.error).toBe('string');
});

it('rejects body with missing required fields', async () => {
const app = await buildApp();
const res = await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ task: 'do something' }), // missing parentSessionKey
});
expect(res.status).toBe(400);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(false);
expect(String(json.error)).toContain('parentSessionKey');
});

it('rejects parentSessionKey that is not a top-level root key', async () => {
const app = await buildApp();
const res = await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
parentSessionKey: 'agent:reviewer:subagent:child',
task: 'do something',
}),
});
expect(res.status).toBe(400);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(false);
expect(String(json.error)).toContain('parentSessionKey');
});

it('rejects empty task string', async () => {
const app = await buildApp();
const res = await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
parentSessionKey: 'agent:reviewer:main',
task: '',
}),
});
expect(res.status).toBe(400);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(false);
});

it('returns direct success payload when helper succeeds with direct mode', async () => {
spawnSubagentMock.mockResolvedValueOnce({
sessionKey: 'agent:reviewer:subagent:abc-123',
runId: 'run-xyz',
mode: 'direct',
});

const app = await buildApp();
const res = await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
parentSessionKey: 'agent:reviewer:main',
task: 'Reply with exactly: OK',
label: 'audit-auth-flow',
model: 'claude-sonnet-4-6',
thinking: 'medium',
cleanup: 'keep',
}),
});
expect(res.status).toBe(200);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.sessionKey).toBe('agent:reviewer:subagent:abc-123');
expect(json.runId).toBe('run-xyz');
expect(json.mode).toBe('direct');
});

it('returns marker success payload when helper falls back to marker mode', async () => {
spawnSubagentMock.mockResolvedValueOnce({
sessionKey: 'agent:reviewer:subagent:from-marker',
mode: 'marker',
});

const app = await buildApp();
const res = await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
parentSessionKey: 'agent:reviewer:main',
task: 'do something',
}),
});
expect(res.status).toBe(200);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(true);
expect(json.sessionKey).toBe('agent:reviewer:subagent:from-marker');
expect(json.mode).toBe('marker');
expect(json.runId).toBeUndefined();
});

it('returns 500 with error message when helper throws', async () => {
spawnSubagentMock.mockRejectedValueOnce(new Error('Gateway connection failed'));

const app = await buildApp();
const res = await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
parentSessionKey: 'agent:reviewer:main',
task: 'do something',
}),
});
expect(res.status).toBe(500);
const json = (await res.json()) as Record<string, unknown>;
expect(json.ok).toBe(false);
expect(String(json.error)).toContain('Gateway connection failed');
});

it('defaults cleanup to keep when not specified', async () => {
spawnSubagentMock.mockResolvedValueOnce({
sessionKey: 'agent:reviewer:subagent:test',
mode: 'direct',
});

const app = await buildApp();
await app.request('/api/sessions/spawn-subagent', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
parentSessionKey: 'agent:reviewer:main',
task: 'do something',
}),
});

expect(spawnSubagentMock).toHaveBeenCalledWith(expect.objectContaining({
cleanup: 'keep',
}));
});
});
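The spawn-subagent tests above exercise the request and response shapes end to end. A sketch of one valid call; field names are taken from the tests, every value is illustrative.

// Hypothetical request; parentSessionKey must be a top-level root key per the tests above.
const res = await fetch('/api/sessions/spawn-subagent', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    parentSessionKey: 'agent:reviewer:main',
    task: 'Audit the auth flow and report findings',
    label: 'audit-auth-flow',   // optional
    model: 'claude-sonnet-4-6', // optional
    thinking: 'medium',         // optional
    cleanup: 'keep',            // defaults to 'keep' when omitted
  }),
});
const json = await res.json(); // { ok, sessionKey, runId?, mode: 'direct' | 'marker' }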
Some files were not shown because too many files have changed in this diff.