mirror of
https://github.com/stablyai/orca
synced 2026-04-21 14:17:16 +00:00
feat(settings): gate persistent terminal daemon behind experimental toggle
The out-of-process terminal daemon (PR #729) can leave sessions visibly unresponsive when its internal state drifts. Rather than revert, hide it behind an Experimental setting (default OFF) so users get a fast off-ramp while the daemon stabilizes.

- New `experimentalTerminalDaemon` setting with an Experimental settings pane, a "Restart required" banner, and `app:relaunch` IPC.
- Graceful v1.3.0 upgrade path: `cleanupOrphanedDaemon()` on startup kills surviving sessions and shuts down the stale daemon; a one-shot toast informs the user and links to the new pane.
- Zod validation (`parseWorkspaceSession`) at the persistence read boundary, so a schema drift or truncated write falls back to defaults instead of poisoning Zustand state.
This commit is contained in:
parent
1ea2a6d768
commit
a29dc3e9d1
22 changed files with 975 additions and 35 deletions
|
|
@ -94,6 +94,7 @@
|
|||
"ssh2": "^1.17.0",
|
||||
"tailwind-merge": "^3.5.0",
|
||||
"tw-animate-css": "^1.4.0",
|
||||
"zod": "^4.3.6",
|
||||
"zustand": "^5.0.12"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
|
|
|||
|
|
@ -175,6 +175,9 @@ importers:
|
|||
tw-animate-css:
|
||||
specifier: ^1.4.0
|
||||
version: 1.4.0
|
||||
zod:
|
||||
specifier: ^4.3.6
|
||||
version: 4.3.6
|
||||
zustand:
|
||||
specifier: ^5.0.12
|
||||
version: 5.0.12(@types/react@19.2.14)(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4))
|
||||
|
|
@ -6014,6 +6017,9 @@ packages:
|
|||
zod@3.25.76:
|
||||
resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==}
|
||||
|
||||
zod@4.3.6:
|
||||
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
|
||||
|
||||
zustand@5.0.12:
|
||||
resolution: {integrity: sha512-i77ae3aZq4dhMlRhJVCYgMLKuSiZAaUPAct2AksxQ+gOtimhGMdXljRT21P5BNpeT4kXlLIckvkPM029OljD7g==}
|
||||
engines: {node: '>=12.20.0'}
|
||||
|
|
@ -12254,6 +12260,8 @@ snapshots:
|
|||
|
||||
zod@3.25.76: {}
|
||||
|
||||
zod@4.3.6: {}
|
||||
|
||||
zustand@5.0.12(@types/react@19.2.14)(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)):
|
||||
optionalDependencies:
|
||||
'@types/react': 19.2.14
|
||||
|
|
|
|||
116
src/main/daemon/daemon-init.test.ts
Normal file
116
src/main/daemon/daemon-init.test.ts
Normal file
|
|
@ -0,0 +1,116 @@
|
|||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { tmpdir } from 'os'
|
||||
import { join } from 'path'
|
||||
import { mkdtempSync, rmSync, existsSync } from 'fs'
|
||||
import type { SubprocessHandle } from './session'
|
||||
import type * as DaemonInitModule from './daemon-init'
|
||||
|
||||
const { getPathMock } = vi.hoisted(() => ({
|
||||
getPathMock: vi.fn()
|
||||
}))
|
||||
|
||||
vi.mock('electron', () => ({
|
||||
app: {
|
||||
getPath: getPathMock,
|
||||
getAppPath: () => process.cwd(),
|
||||
isPackaged: false
|
||||
}
|
||||
}))
|
||||
|
||||
// Why: we want the real DaemonServer + DaemonClient but not electron-based
|
||||
// subprocess spawning. createTestDaemon() wires a mock subprocess harness
|
||||
// compatible with daemon-spawner.test.ts.
|
||||
function createMockSubprocess(): SubprocessHandle {
|
||||
let onExitCb: ((code: number) => void) | null = null
|
||||
return {
|
||||
pid: 77777,
|
||||
write: vi.fn(),
|
||||
resize: vi.fn(),
|
||||
kill: vi.fn(() => setTimeout(() => onExitCb?.(0), 5)),
|
||||
forceKill: vi.fn(),
|
||||
signal: vi.fn(),
|
||||
onData(_cb: (data: string) => void) {},
|
||||
onExit(cb: (code: number) => void) {
|
||||
onExitCb = cb
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function importFreshDaemonInit(): Promise<typeof DaemonInitModule> {
|
||||
vi.resetModules()
|
||||
return import('./daemon-init')
|
||||
}
|
||||
|
||||
describe('cleanupOrphanedDaemon', () => {
|
||||
let userDataDir: string
|
||||
|
||||
beforeEach(() => {
|
||||
userDataDir = mkdtempSync(join(tmpdir(), 'daemon-init-test-'))
|
||||
getPathMock.mockImplementation(() => userDataDir)
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
rmSync(userDataDir, { recursive: true, force: true })
|
||||
vi.restoreAllMocks()
|
||||
})
|
||||
|
||||
it('returns cleaned=false when no daemon socket exists', async () => {
|
||||
const { cleanupOrphanedDaemon } = await importFreshDaemonInit()
|
||||
|
||||
const result = await cleanupOrphanedDaemon()
|
||||
expect(result.cleaned).toBe(false)
|
||||
expect(result.killedCount).toBe(0)
|
||||
})
|
||||
|
||||
it('kills live sessions and shuts down a running daemon', async () => {
|
||||
const { cleanupOrphanedDaemon } = await importFreshDaemonInit()
|
||||
const { DaemonSpawner, getDaemonSocketPath } = await import('./daemon-spawner')
|
||||
const { startDaemon } = await import('./daemon-main')
|
||||
const { DaemonClient } = await import('./client')
|
||||
|
||||
const runtimeDir = join(userDataDir, 'daemon')
|
||||
const { mkdirSync } = await import('fs')
|
||||
mkdirSync(runtimeDir, { recursive: true })
|
||||
|
||||
// Spin up a real daemon exactly where cleanupOrphanedDaemon will look.
|
||||
const daemonHandles: { shutdown: () => Promise<void> }[] = []
|
||||
const spawner = new DaemonSpawner({
|
||||
runtimeDir,
|
||||
launcher: async (socketPath, tokenPath) => {
|
||||
const handle = await startDaemon({
|
||||
socketPath,
|
||||
tokenPath,
|
||||
spawnSubprocess: () => createMockSubprocess()
|
||||
})
|
||||
daemonHandles.push(handle)
|
||||
return { shutdown: () => handle.shutdown() }
|
||||
}
|
||||
})
|
||||
const info = await spawner.ensureRunning()
|
||||
|
||||
// Create two sessions so killedCount is non-zero.
|
||||
const client = new DaemonClient({
|
||||
socketPath: info.socketPath,
|
||||
tokenPath: info.tokenPath
|
||||
})
|
||||
await client.ensureConnected()
|
||||
await client.request('createOrAttach', { sessionId: 'a', cols: 80, rows: 24 })
|
||||
await client.request('createOrAttach', { sessionId: 'b', cols: 80, rows: 24 })
|
||||
client.disconnect()
|
||||
|
||||
// Now the daemon looks "orphaned" from cleanupOrphanedDaemon's POV.
|
||||
const result = await cleanupOrphanedDaemon()
|
||||
expect(result.cleaned).toBe(true)
|
||||
expect(result.killedCount).toBeGreaterThanOrEqual(2)
|
||||
|
||||
// Socket file should be gone so a later opt-in relaunch can bind cleanly.
|
||||
if (process.platform !== 'win32') {
|
||||
expect(existsSync(getDaemonSocketPath(runtimeDir))).toBe(false)
|
||||
}
|
||||
|
||||
// Best-effort teardown of any surviving handles from the spawner side.
|
||||
for (const handle of daemonHandles) {
|
||||
await handle.shutdown().catch(() => {})
|
||||
}
|
||||
})
|
||||
})
|
||||
|
|
@ -3,8 +3,15 @@ import { app } from 'electron'
|
|||
import { mkdirSync, existsSync, unlinkSync } from 'fs'
|
||||
import { fork } from 'child_process'
|
||||
import { connect } from 'net'
|
||||
import { DaemonSpawner, type DaemonLauncher } from './daemon-spawner'
|
||||
import {
|
||||
DaemonSpawner,
|
||||
getDaemonSocketPath,
|
||||
getDaemonTokenPath,
|
||||
type DaemonLauncher
|
||||
} from './daemon-spawner'
|
||||
import { DaemonPtyAdapter } from './daemon-pty-adapter'
|
||||
import { DaemonClient } from './client'
|
||||
import type { ListSessionsResult } from './types'
|
||||
import { setLocalPtyProvider } from '../ipc/pty'
|
||||
|
||||
let spawner: DaemonSpawner | null = null
|
||||
|
|
@ -177,3 +184,75 @@ export async function shutdownDaemon(): Promise<void> {
|
|||
await spawner?.shutdown()
|
||||
spawner = null
|
||||
}
|
||||
|
||||
export type OrphanedDaemonCleanupResult = {
|
||||
/** True when we detected a live daemon socket and connected to tear it down.
|
||||
* False when no daemon was running (fresh install or clean previous quit). */
|
||||
cleaned: boolean
|
||||
/** Number of live PTY sessions killed during cleanup. The caller surfaces this
|
||||
* to the user so they know what background work was stopped. */
|
||||
killedCount: number
|
||||
}
|
||||
|
||||
/** Detect and tear down an orphaned daemon left behind by a previous app
|
||||
* session (e.g. a user who had `experimentalTerminalDaemon` enabled on an
|
||||
* older build and is now launching a build where the feature is disabled).
|
||||
*
|
||||
* Why it matters: the daemon is designed to outlive the Electron process.
|
||||
* If we just skip `initDaemonPtyProvider()` on this launch, any live sessions
|
||||
* from the previous session keep running invisibly — consuming CPU / holding
|
||||
* files open / re-launching on every boot because nothing ever kills them.
|
||||
* This helper connects to the existing socket, enumerates sessions, and asks
|
||||
* the daemon to shut itself down (which terminates all PTYs). */
|
||||
export async function cleanupOrphanedDaemon(): Promise<OrphanedDaemonCleanupResult> {
|
||||
const runtimeDir = getRuntimeDir()
|
||||
const socketPath = getDaemonSocketPath(runtimeDir)
|
||||
const tokenPath = getDaemonTokenPath(runtimeDir)
|
||||
|
||||
const alive = await probeSocket(socketPath)
|
||||
if (!alive) {
|
||||
// Why: still best-effort remove a stale socket file so a future opt-in
|
||||
// launch doesn't hit EADDRINUSE when the daemon tries to bind.
|
||||
if (process.platform !== 'win32' && existsSync(socketPath)) {
|
||||
try {
|
||||
unlinkSync(socketPath)
|
||||
} catch {
|
||||
// Best-effort
|
||||
}
|
||||
}
|
||||
return { cleaned: false, killedCount: 0 }
|
||||
}
|
||||
|
||||
const client = new DaemonClient({ socketPath, tokenPath })
|
||||
let killedCount = 0
|
||||
try {
|
||||
await client.ensureConnected()
|
||||
const sessions = await client
|
||||
.request<ListSessionsResult>('listSessions', undefined)
|
||||
.catch(() => ({ sessions: [] }))
|
||||
killedCount = sessions.sessions.filter((s) => s.isAlive).length
|
||||
|
||||
// Why: the daemon exposes a single-shot `shutdown` RPC (daemon-server.ts:263)
|
||||
// that kills every session and then terminates its own process. Using it
|
||||
// avoids the race between per-session `kill` calls and the daemon exiting.
|
||||
await client.request('shutdown', { killSessions: true }).catch(() => {
|
||||
// Daemon exits immediately after handling the RPC — the socket may close
|
||||
// before the reply round-trips. Treat that as success.
|
||||
})
|
||||
} finally {
|
||||
client.disconnect()
|
||||
}
|
||||
|
||||
// Why: after `shutdown`, the daemon unlinks its socket itself — but on some
|
||||
// crash paths the file lingers. Clean up defensively so a later opt-in
|
||||
// relaunch can bind cleanly.
|
||||
if (process.platform !== 'win32' && existsSync(socketPath)) {
|
||||
try {
|
||||
unlinkSync(socketPath)
|
||||
} catch {
|
||||
// Best-effort
|
||||
}
|
||||
}
|
||||
|
||||
return { cleaned: true, killedCount }
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,7 +6,12 @@ import { StatsCollector, initStatsPath } from './stats/collector'
|
|||
import { ClaudeUsageStore, initClaudeUsagePath } from './claude-usage/store'
|
||||
import { CodexUsageStore, initCodexUsagePath } from './codex-usage/store'
|
||||
import { killAllPty } from './ipc/pty'
|
||||
import { initDaemonPtyProvider, disconnectDaemon } from './daemon/daemon-init'
|
||||
import {
|
||||
initDaemonPtyProvider,
|
||||
disconnectDaemon,
|
||||
cleanupOrphanedDaemon
|
||||
} from './daemon/daemon-init'
|
||||
import { recordPendingDaemonTransitionNotice, setAppRuntimeFlags } from './ipc/app'
|
||||
import { closeAllWatchers } from './ipc/filesystem-watcher'
|
||||
import { registerCoreHandlers } from './ipc/register-core-handlers'
|
||||
import { triggerStartupNotificationRegistration } from './ipc/notifications'
|
||||
|
|
@ -152,15 +157,39 @@ app.whenReady().then(async () => {
|
|||
userDataPath: app.getPath('userData')
|
||||
})
|
||||
|
||||
// Why: daemon must start before openMainWindow because registerPtyHandlers
|
||||
// (called inside) relies on the provider already being set. Starting it
|
||||
// alongside the other parallel servers keeps cold-start latency flat.
|
||||
// Why: catch so the app still opens even if the daemon fails. The local
|
||||
// PTY provider remains as the fallback — terminals will still work, just
|
||||
// without cross-restart persistence.
|
||||
await initDaemonPtyProvider().catch((error) => {
|
||||
console.error('[daemon] Failed to start daemon PTY provider, falling back to local:', error)
|
||||
})
|
||||
// Why: persistent terminal sessions (the out-of-process daemon) are gated
|
||||
// behind an experimental setting that defaults to OFF. Users on v1.3.0 had
|
||||
// the daemon on by default, so on upgrade we may need to clean up a live
|
||||
// daemon from their previous session before continuing with the local
|
||||
// provider. `registerPtyHandlers` (called inside openMainWindow) relies on
|
||||
// the provider being set, so whichever branch runs must complete first.
|
||||
const daemonEnabled = store.getSettings().experimentalTerminalDaemon === true
|
||||
let daemonStarted = false
|
||||
if (daemonEnabled) {
|
||||
// Why: catch so the app still opens even if the daemon fails. The local
|
||||
// PTY provider remains as the fallback — terminals will still work, just
|
||||
// without cross-restart persistence.
|
||||
try {
|
||||
await initDaemonPtyProvider()
|
||||
daemonStarted = true
|
||||
} catch (error) {
|
||||
console.error('[daemon] Failed to start daemon PTY provider, falling back to local:', error)
|
||||
}
|
||||
} else {
|
||||
// Why: stash the cleanup result so the renderer's one-shot transition
|
||||
// toast can tell the user how many background sessions were stopped. Only
|
||||
// record when `cleaned: true` — i.e. an orphan daemon was actually found.
|
||||
// Fresh installs (no socket) skip the toast entirely.
|
||||
try {
|
||||
const result = await cleanupOrphanedDaemon()
|
||||
if (result.cleaned) {
|
||||
recordPendingDaemonTransitionNotice({ killedCount: result.killedCount })
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[daemon] Failed to clean up orphaned daemon:', error)
|
||||
}
|
||||
}
|
||||
setAppRuntimeFlags({ daemonEnabledAtStartup: daemonStarted })
|
||||
|
||||
// Why: both server binds are independent and neither blocks window creation.
|
||||
// Parallelizing them with the window open shaves ~100-200ms off cold start.
|
||||
|
|
|
|||
52
src/main/ipc/app.ts
Normal file
52
src/main/ipc/app.ts
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
import { app, ipcMain } from 'electron'
|
||||
|
||||
export type AppRuntimeFlags = {
|
||||
/** Whether the persistent terminal daemon was actually started this session.
|
||||
* The renderer compares this against the current setting to decide whether
|
||||
* a "restart required" banner needs to be shown on the Experimental pane. */
|
||||
daemonEnabledAtStartup: boolean
|
||||
}
|
||||
|
||||
export type DaemonTransitionNotice = {
|
||||
/** Number of live daemon PTY sessions that were killed when the app booted
|
||||
* with `experimentalTerminalDaemon: false` but discovered a leftover daemon
|
||||
* from a previous session. Non-zero values are surfaced in a one-shot
|
||||
* toast so the user knows background work was stopped. */
|
||||
killedCount: number
|
||||
}
|
||||
|
||||
let runtimeFlags: AppRuntimeFlags = { daemonEnabledAtStartup: false }
|
||||
let pendingDaemonTransitionNotice: DaemonTransitionNotice | null = null
|
||||
|
||||
export function setAppRuntimeFlags(flags: AppRuntimeFlags): void {
|
||||
runtimeFlags = flags
|
||||
}
|
||||
|
||||
export function recordPendingDaemonTransitionNotice(notice: DaemonTransitionNotice): void {
|
||||
pendingDaemonTransitionNotice = notice
|
||||
}
|
||||
|
||||
export function registerAppHandlers(): void {
|
||||
ipcMain.handle('app:getRuntimeFlags', (): AppRuntimeFlags => runtimeFlags)
|
||||
|
||||
ipcMain.handle('app:consumeDaemonTransitionNotice', (): DaemonTransitionNotice | null => {
|
||||
// Why: one-shot consumption — clear after reading so the renderer's
|
||||
// post-hydration effect can't fire the same toast twice (e.g. after a
|
||||
// window reload during dev). The persisted `experimentalTerminalDaemonNoticeShown`
|
||||
// flag is the cross-session guard; this clear handles within-session races.
|
||||
const notice = pendingDaemonTransitionNotice
|
||||
pendingDaemonTransitionNotice = null
|
||||
return notice
|
||||
})
|
||||
|
||||
ipcMain.handle('app:relaunch', () => {
|
||||
// Why: small delay lets the renderer finish painting any "Restarting…"
|
||||
// UI state before the window tears down. `app.relaunch()` schedules a
|
||||
// spawn; `app.exit(0)` triggers the actual quit without invoking
|
||||
// before-quit handlers that could block on confirmation dialogs.
|
||||
setTimeout(() => {
|
||||
app.relaunch()
|
||||
app.exit(0)
|
||||
}, 150)
|
||||
})
|
||||
}
|
||||
|
|
@ -20,7 +20,8 @@ const {
|
|||
registerRateLimitHandlersMock,
|
||||
registerBrowserHandlersMock,
|
||||
setTrustedBrowserRendererWebContentsIdMock,
|
||||
registerFilesystemWatcherHandlersMock
|
||||
registerFilesystemWatcherHandlersMock,
|
||||
registerAppHandlersMock
|
||||
} = vi.hoisted(() => ({
|
||||
registerCliHandlersMock: vi.fn(),
|
||||
registerPreflightHandlersMock: vi.fn(),
|
||||
|
|
@ -41,7 +42,8 @@ const {
|
|||
registerRateLimitHandlersMock: vi.fn(),
|
||||
registerBrowserHandlersMock: vi.fn(),
|
||||
setTrustedBrowserRendererWebContentsIdMock: vi.fn(),
|
||||
registerFilesystemWatcherHandlersMock: vi.fn()
|
||||
registerFilesystemWatcherHandlersMock: vi.fn(),
|
||||
registerAppHandlersMock: vi.fn()
|
||||
}))
|
||||
|
||||
vi.mock('./cli', () => ({
|
||||
|
|
@ -118,6 +120,10 @@ vi.mock('./browser', () => ({
|
|||
setTrustedBrowserRendererWebContentsId: setTrustedBrowserRendererWebContentsIdMock
|
||||
}))
|
||||
|
||||
vi.mock('./app', () => ({
|
||||
registerAppHandlers: registerAppHandlersMock
|
||||
}))
|
||||
|
||||
import { registerCoreHandlers } from './register-core-handlers'
|
||||
|
||||
describe('registerCoreHandlers', () => {
|
||||
|
|
@ -142,6 +148,7 @@ describe('registerCoreHandlers', () => {
|
|||
registerBrowserHandlersMock.mockReset()
|
||||
setTrustedBrowserRendererWebContentsIdMock.mockReset()
|
||||
registerFilesystemWatcherHandlersMock.mockReset()
|
||||
registerAppHandlersMock.mockReset()
|
||||
})
|
||||
|
||||
it('passes the store through to handler registrars that need it', () => {
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
import { registerAppHandlers } from './app'
|
||||
import { registerCliHandlers } from './cli'
|
||||
import { registerPreflightHandlers } from './preflight'
|
||||
import type { Store } from '../persistence'
|
||||
|
|
@ -52,6 +53,7 @@ export function registerCoreHandlers(
|
|||
}
|
||||
registered = true
|
||||
|
||||
registerAppHandlers()
|
||||
registerCliHandlers()
|
||||
registerPreflightHandlers()
|
||||
registerClaudeUsageHandlers(claudeUsage)
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ import {
|
|||
getDefaultRepoHookSettings,
|
||||
getDefaultWorkspaceSession
|
||||
} from '../shared/constants'
|
||||
import { parseWorkspaceSession } from '../shared/workspace-session-schema'
|
||||
|
||||
// Why: the data-file path must not be a module-level constant. Module-level
|
||||
// code runs at import time — before configureDevUserDataPath() redirects the
|
||||
|
|
@ -101,7 +102,27 @@ export class Store {
|
|||
_sortBySmartMigrated: true
|
||||
}
|
||||
})(),
|
||||
workspaceSession: { ...defaults.workspaceSession, ...parsed.workspaceSession },
|
||||
// Why: the workspace session is the most volatile persisted surface
|
||||
// (schema evolves per release, daemon session IDs embedded in it).
|
||||
// Zod-validate at the read boundary so a field-type flip from an
|
||||
// older build — or a truncated write from a crash — gets rejected
|
||||
// cleanly instead of poisoning Zustand state and crashing the
|
||||
// renderer on mount. On validation failure, fall back to defaults
|
||||
// and log; a corrupt session file shouldn't trap the user out.
|
||||
workspaceSession: (() => {
|
||||
if (parsed.workspaceSession === undefined) {
|
||||
return defaults.workspaceSession
|
||||
}
|
||||
const result = parseWorkspaceSession(parsed.workspaceSession)
|
||||
if (!result.ok) {
|
||||
console.error(
|
||||
'[persistence] Corrupt workspace session, using defaults:',
|
||||
result.error
|
||||
)
|
||||
return defaults.workspaceSession
|
||||
}
|
||||
return { ...defaults.workspaceSession, ...result.value }
|
||||
})(),
|
||||
sshTargets: (parsed.sshTargets ?? []).map(normalizeSshTarget)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
25
src/preload/api-types.d.ts
vendored
25
src/preload/api-types.d.ts
vendored
|
|
@ -215,7 +215,32 @@ export type CodexUsageApi = {
|
|||
}) => Promise<CodexUsageSessionRow[]>
|
||||
}
|
||||
|
||||
export type AppRuntimeFlags = {
|
||||
daemonEnabledAtStartup: boolean
|
||||
}
|
||||
|
||||
export type DaemonTransitionNotice = {
|
||||
killedCount: number
|
||||
}
|
||||
|
||||
export type AppApi = {
|
||||
/** Returns flags about the main-process state that was set at startup
|
||||
* (e.g. whether the persistent terminal daemon actually started). The
|
||||
* renderer uses this to show a "restart required" banner when the user
|
||||
* toggles a setting that only applies across a full relaunch. */
|
||||
getRuntimeFlags: () => Promise<AppRuntimeFlags>
|
||||
/** Reads and clears any pending one-shot notice about a daemon cleanup
|
||||
* that ran during startup (e.g. when upgrading from v1.3.0 where the
|
||||
* daemon was on by default to a build where it's opt-in). Returns null
|
||||
* when there is nothing to show. */
|
||||
consumeDaemonTransitionNotice: () => Promise<DaemonTransitionNotice | null>
|
||||
/** Relaunches the app via Electron's app.relaunch() + app.exit(0). Used
|
||||
* by the "Restart now" button on the Experimental settings pane. */
|
||||
relaunch: () => Promise<void>
|
||||
}
|
||||
|
||||
export type PreloadApi = {
|
||||
app: AppApi
|
||||
repos: {
|
||||
list: () => Promise<Repo[]>
|
||||
add: (args: { path: string; kind?: 'git' | 'folder' }) => Promise<Repo>
|
||||
|
|
|
|||
|
|
@ -159,6 +159,14 @@ document.addEventListener(
|
|||
|
||||
// Custom APIs for renderer
|
||||
const api = {
|
||||
app: {
|
||||
getRuntimeFlags: (): Promise<{ daemonEnabledAtStartup: boolean }> =>
|
||||
ipcRenderer.invoke('app:getRuntimeFlags'),
|
||||
consumeDaemonTransitionNotice: (): Promise<{ killedCount: number } | null> =>
|
||||
ipcRenderer.invoke('app:consumeDaemonTransitionNotice'),
|
||||
relaunch: (): Promise<void> => ipcRenderer.invoke('app:relaunch')
|
||||
},
|
||||
|
||||
repos: {
|
||||
list: (): Promise<unknown[]> => ipcRenderer.invoke('repos:list'),
|
||||
|
||||
|
|
|
|||
|
|
@ -153,6 +153,12 @@ function App(): React.JSX.Element {
|
|||
await actions.fetchAllWorktrees()
|
||||
const persistedUI = await window.api.ui.get()
|
||||
const session = await window.api.session.get()
|
||||
// Why: settings must be loaded before hydrateWorkspaceSession so that
|
||||
// it can read experimentalTerminalDaemon to decide whether to stage
|
||||
// pendingReconnectPtyIdByTabId. Without this, opted-in daemon users
|
||||
// would silently lose session reattach on every launch because
|
||||
// s.settings would still be null at hydration time.
|
||||
await actions.fetchSettings()
|
||||
if (!cancelled) {
|
||||
actions.hydratePersistedUI(persistedUI)
|
||||
actions.hydrateWorkspaceSession(session)
|
||||
|
|
@ -197,7 +203,6 @@ function App(): React.JSX.Element {
|
|||
await actions.reconnectPersistedTerminals()
|
||||
}
|
||||
}
|
||||
void actions.fetchSettings()
|
||||
void actions.initGitHubCache()
|
||||
})()
|
||||
|
||||
|
|
@ -368,6 +373,65 @@ function App(): React.JSX.Element {
|
|||
return () => document.removeEventListener('visibilitychange', handler)
|
||||
}, [actions])
|
||||
|
||||
// Why: v1.3.0 shipped the persistent-terminal daemon ON by default. v1.3.1+
|
||||
// defaults it OFF and gates it behind an Experimental toggle. On the first
|
||||
// launch after that upgrade, main detects a still-running daemon, shuts it
|
||||
// down (killing any surviving `sleep 9999`-style sessions), and stashes a
|
||||
// one-shot notice. We consume that notice here and inform the user so their
|
||||
// vanished sessions don't look like a bug. The renderer-side
|
||||
// `experimentalTerminalDaemonNoticeShown` flag guarantees the toast fires at
|
||||
// most once per install, even if main stashes a notice again on a later
|
||||
// launch.
|
||||
const transitionNoticeHandledRef = useRef(false)
|
||||
useEffect(() => {
|
||||
if (!settings || transitionNoticeHandledRef.current) {
|
||||
return
|
||||
}
|
||||
if (settings.experimentalTerminalDaemonNoticeShown) {
|
||||
transitionNoticeHandledRef.current = true
|
||||
return
|
||||
}
|
||||
transitionNoticeHandledRef.current = true
|
||||
void (async () => {
|
||||
let notice: { killedCount: number } | null = null
|
||||
try {
|
||||
notice = await window.api.app.consumeDaemonTransitionNotice()
|
||||
} catch {
|
||||
// Informational only — if the IPC fails, don't fire the toast and
|
||||
// don't flip the "shown" flag so we can retry on next launch.
|
||||
return
|
||||
}
|
||||
if (!notice) {
|
||||
return
|
||||
}
|
||||
const killedCount = notice.killedCount
|
||||
const killedClause =
|
||||
killedCount > 0
|
||||
? ` Cleaned up ${killedCount} background session${killedCount === 1 ? '' : 's'} from the previous version.`
|
||||
: ''
|
||||
toast.info('Persistent terminal sessions are now opt-in.', {
|
||||
description: `${killedClause} You can re-enable them in Settings → Experimental.`.trim(),
|
||||
duration: 15000,
|
||||
action: {
|
||||
label: 'Open settings',
|
||||
onClick: () => {
|
||||
useAppStore.getState().openSettingsTarget({
|
||||
pane: 'experimental',
|
||||
repoId: null
|
||||
})
|
||||
useAppStore.getState().openSettingsPage()
|
||||
}
|
||||
}
|
||||
})
|
||||
try {
|
||||
await actions.updateSettings({ experimentalTerminalDaemonNoticeShown: true })
|
||||
} catch {
|
||||
// If persistence fails, the toast may re-fire on a later launch —
|
||||
// acceptable tradeoff vs. silently dropping the notification.
|
||||
}
|
||||
})()
|
||||
}, [actions, settings])
|
||||
|
||||
const tabs = activeWorktreeId ? (tabsByWorktree[activeWorktreeId] ?? []) : []
|
||||
const hasTabBar = tabs.length >= 2
|
||||
const effectiveActiveTabId = activeTabId ?? tabs[0]?.id ?? null
|
||||
|
|
|
|||
139
src/renderer/src/components/settings/ExperimentalPane.tsx
Normal file
139
src/renderer/src/components/settings/ExperimentalPane.tsx
Normal file
|
|
@ -0,0 +1,139 @@
|
|||
import { useEffect, useState } from 'react'
|
||||
import { RotateCw } from 'lucide-react'
|
||||
import type { GlobalSettings } from '../../../../shared/types'
|
||||
import { Button } from '../ui/button'
|
||||
import { Label } from '../ui/label'
|
||||
import { useAppStore } from '../../store'
|
||||
import { SearchableSetting } from './SearchableSetting'
|
||||
import { matchesSettingsSearch } from './settings-search'
|
||||
import { EXPERIMENTAL_PANE_SEARCH_ENTRIES } from './experimental-search'
|
||||
|
||||
export { EXPERIMENTAL_PANE_SEARCH_ENTRIES }
|
||||
|
||||
type ExperimentalPaneProps = {
|
||||
settings: GlobalSettings
|
||||
updateSettings: (updates: Partial<GlobalSettings>) => void
|
||||
}
|
||||
|
||||
export function ExperimentalPane({
|
||||
settings,
|
||||
updateSettings
|
||||
}: ExperimentalPaneProps): React.JSX.Element {
|
||||
const searchQuery = useAppStore((s) => s.settingsSearchQuery)
|
||||
// Why: "daemon enabled at startup" is the effective runtime state, read
|
||||
// directly from main once on mount. The banner compares the user's current
|
||||
// setting against this snapshot to tell them a restart is still required.
|
||||
// null = not yet fetched (banner stays hidden to avoid a flash).
|
||||
const [daemonEnabledAtStartup, setDaemonEnabledAtStartup] = useState<boolean | null>(null)
|
||||
const [relaunching, setRelaunching] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
let cancelled = false
|
||||
void window.api.app
|
||||
.getRuntimeFlags()
|
||||
.then((flags) => {
|
||||
if (!cancelled) {
|
||||
setDaemonEnabledAtStartup(flags.daemonEnabledAtStartup)
|
||||
}
|
||||
})
|
||||
.catch(() => {
|
||||
// Non-fatal; banner will just never show if the IPC is unavailable.
|
||||
})
|
||||
return () => {
|
||||
cancelled = true
|
||||
}
|
||||
}, [])
|
||||
|
||||
const showDaemon = matchesSettingsSearch(searchQuery, [EXPERIMENTAL_PANE_SEARCH_ENTRIES[0]])
|
||||
const pendingRestart =
|
||||
daemonEnabledAtStartup !== null &&
|
||||
settings.experimentalTerminalDaemon !== daemonEnabledAtStartup
|
||||
|
||||
const handleRelaunch = async (): Promise<void> => {
|
||||
if (relaunching) {
|
||||
return
|
||||
}
|
||||
setRelaunching(true)
|
||||
try {
|
||||
await window.api.app.relaunch()
|
||||
} catch {
|
||||
setRelaunching(false)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-4">
|
||||
{showDaemon ? (
|
||||
<SearchableSetting
|
||||
title="Persistent terminal sessions"
|
||||
description="Keeps terminal sessions alive across app restarts via a background daemon."
|
||||
keywords={[
|
||||
'experimental',
|
||||
'terminal',
|
||||
'daemon',
|
||||
'persistent',
|
||||
'background',
|
||||
'sessions',
|
||||
'restart',
|
||||
'reattach'
|
||||
]}
|
||||
className="space-y-3 px-1 py-2"
|
||||
>
|
||||
<div className="flex items-start justify-between gap-4">
|
||||
<div className="min-w-0 shrink space-y-0.5">
|
||||
<Label>Persistent terminal sessions</Label>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Keeps terminals alive in a background daemon so they survive app restarts, with full
|
||||
scrollback. Experimental — some sessions may become unresponsive after internal
|
||||
state drift. Requires an app restart to take effect.
|
||||
</p>
|
||||
</div>
|
||||
<button
|
||||
role="switch"
|
||||
aria-checked={settings.experimentalTerminalDaemon}
|
||||
onClick={() =>
|
||||
updateSettings({
|
||||
experimentalTerminalDaemon: !settings.experimentalTerminalDaemon
|
||||
})
|
||||
}
|
||||
className={`relative inline-flex h-5 w-9 shrink-0 cursor-pointer items-center rounded-full border border-transparent transition-colors ${
|
||||
settings.experimentalTerminalDaemon ? 'bg-foreground' : 'bg-muted-foreground/30'
|
||||
}`}
|
||||
>
|
||||
<span
|
||||
className={`inline-block h-3.5 w-3.5 transform rounded-full bg-background shadow-sm transition-transform ${
|
||||
settings.experimentalTerminalDaemon ? 'translate-x-4' : 'translate-x-0.5'
|
||||
}`}
|
||||
/>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{pendingRestart ? (
|
||||
<div className="flex items-center justify-between gap-3 rounded-md border border-yellow-500/50 bg-yellow-500/10 px-3 py-2.5">
|
||||
<div className="min-w-0 flex-1 space-y-0.5">
|
||||
<p className="text-sm font-medium text-yellow-700 dark:text-yellow-300">
|
||||
Restart required
|
||||
</p>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
{settings.experimentalTerminalDaemon
|
||||
? 'Restart Orca to start the background session daemon.'
|
||||
: 'Restart Orca to stop the background session daemon. Any running background sessions will be closed.'}
|
||||
</p>
|
||||
</div>
|
||||
<Button
|
||||
size="sm"
|
||||
variant="default"
|
||||
className="shrink-0 gap-1.5"
|
||||
disabled={relaunching}
|
||||
onClick={handleRelaunch}
|
||||
>
|
||||
<RotateCw className={`size-3 ${relaunching ? 'animate-spin' : ''}`} />
|
||||
{relaunching ? 'Restarting…' : 'Restart now'}
|
||||
</Button>
|
||||
</div>
|
||||
) : null}
|
||||
</SearchableSetting>
|
||||
) : null}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
|
@ -4,6 +4,7 @@ import {
|
|||
BarChart3,
|
||||
Bell,
|
||||
Bot,
|
||||
FlaskConical,
|
||||
GitBranch,
|
||||
Globe,
|
||||
Keyboard,
|
||||
|
|
@ -28,6 +29,7 @@ import { getTerminalPaneSearchEntries } from './terminal-search'
|
|||
import { GitPane, GIT_PANE_SEARCH_ENTRIES } from './GitPane'
|
||||
import { NotificationsPane, NOTIFICATIONS_PANE_SEARCH_ENTRIES } from './NotificationsPane'
|
||||
import { SshPane, SSH_PANE_SEARCH_ENTRIES } from './SshPane'
|
||||
import { ExperimentalPane, EXPERIMENTAL_PANE_SEARCH_ENTRIES } from './ExperimentalPane'
|
||||
import { AgentsPane, AGENTS_PANE_SEARCH_ENTRIES } from './AgentsPane'
|
||||
import { StatsPane, STATS_PANE_SEARCH_ENTRIES } from '../stats/StatsPane'
|
||||
import { SettingsSidebar } from './SettingsSidebar'
|
||||
|
|
@ -44,6 +46,7 @@ type SettingsNavTarget =
|
|||
| 'shortcuts'
|
||||
| 'stats'
|
||||
| 'ssh'
|
||||
| 'experimental'
|
||||
| 'agents'
|
||||
| 'repo'
|
||||
|
||||
|
|
@ -302,6 +305,13 @@ function Settings(): React.JSX.Element {
|
|||
searchEntries: SSH_PANE_SEARCH_ENTRIES,
|
||||
badge: 'Beta'
|
||||
},
|
||||
{
|
||||
id: 'experimental',
|
||||
title: 'Experimental',
|
||||
description: 'Features that are still being stabilized. Enable at your own risk.',
|
||||
icon: FlaskConical,
|
||||
searchEntries: EXPERIMENTAL_PANE_SEARCH_ENTRIES
|
||||
},
|
||||
...repos.map((repo) => ({
|
||||
id: `repo-${repo.id}`,
|
||||
title: repo.displayName,
|
||||
|
|
@ -543,6 +553,15 @@ function Settings(): React.JSX.Element {
|
|||
<SshPane />
|
||||
</SettingsSection>
|
||||
|
||||
<SettingsSection
|
||||
id="experimental"
|
||||
title="Experimental"
|
||||
description="Features that are still being stabilized. Enable at your own risk."
|
||||
searchEntries={EXPERIMENTAL_PANE_SEARCH_ENTRIES}
|
||||
>
|
||||
<ExperimentalPane settings={settings} updateSettings={updateSettings} />
|
||||
</SettingsSection>
|
||||
|
||||
{repos.map((repo) => {
|
||||
const repoSectionId = `repo-${repo.id}`
|
||||
const repoHooksState = repoHooksMap[repo.id]
|
||||
|
|
|
|||
20
src/renderer/src/components/settings/experimental-search.ts
Normal file
20
src/renderer/src/components/settings/experimental-search.ts
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
import type { SettingsSearchEntry } from './settings-search'
|
||||
|
||||
export const EXPERIMENTAL_PANE_SEARCH_ENTRIES: SettingsSearchEntry[] = [
|
||||
{
|
||||
title: 'Persistent terminal sessions',
|
||||
description:
|
||||
'Keeps terminal sessions alive across app restarts via a background daemon. Experimental — some sessions may become unresponsive.',
|
||||
keywords: [
|
||||
'experimental',
|
||||
'terminal',
|
||||
'daemon',
|
||||
'persistent',
|
||||
'background',
|
||||
'sessions',
|
||||
'restart',
|
||||
'scrollback',
|
||||
'reattach'
|
||||
]
|
||||
}
|
||||
]
|
||||
|
|
@ -654,6 +654,22 @@ vi.mock('@/components/terminal-pane/pty-transport', () => ({
|
|||
describe('reconnectPersistedTerminals', () => {
|
||||
let ptyIdCounter: number
|
||||
|
||||
// Why: reconnect-by-daemon-session-ID is an opt-in path (the experimental
|
||||
// daemon toggle). These tests exercise that path, so each store created here
|
||||
// must have the toggle set to true before hydrateWorkspaceSession runs —
|
||||
// otherwise hydration clears pendingReconnectPtyIdByTabId and tab.ptyId
|
||||
// never gets rehydrated.
|
||||
function createDaemonEnabledStore(): ReturnType<typeof createTestStore> {
|
||||
const store = createTestStore()
|
||||
store.setState((prev) => ({
|
||||
settings: {
|
||||
...(prev.settings ?? ({} as AppState['settings'])),
|
||||
experimentalTerminalDaemon: true
|
||||
} as AppState['settings']
|
||||
}))
|
||||
return store
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
ptyIdCounter = 0
|
||||
|
|
@ -666,7 +682,7 @@ describe('reconnectPersistedTerminals', () => {
|
|||
})
|
||||
|
||||
it('records daemon session IDs for deferred reattach and sets workspaceSessionReady', async () => {
|
||||
const store = createTestStore()
|
||||
const store = createDaemonEnabledStore()
|
||||
const wt1 = 'repo1::/path/wt1'
|
||||
const wt2 = 'repo1::/path/wt2'
|
||||
|
||||
|
|
@ -775,7 +791,7 @@ describe('reconnectPersistedTerminals', () => {
|
|||
})
|
||||
|
||||
it('falls back to tab ptyIds when activeWorktreeIdsOnShutdown is absent (upgrade)', async () => {
|
||||
const store = createTestStore()
|
||||
const store = createDaemonEnabledStore()
|
||||
const wt1 = 'repo1::/path/wt1'
|
||||
|
||||
store.setState({
|
||||
|
|
@ -808,7 +824,7 @@ describe('reconnectPersistedTerminals', () => {
|
|||
})
|
||||
|
||||
it('reconnects the correct tab per worktree (not always tabs[0])', async () => {
|
||||
const store = createTestStore()
|
||||
const store = createDaemonEnabledStore()
|
||||
const wt1 = 'repo1::/path/wt1'
|
||||
|
||||
store.setState({
|
||||
|
|
@ -843,7 +859,7 @@ describe('reconnectPersistedTerminals', () => {
|
|||
})
|
||||
|
||||
it('reconnects multiple live tabs in the same worktree', async () => {
|
||||
const store = createTestStore()
|
||||
const store = createDaemonEnabledStore()
|
||||
const wt1 = 'repo1::/path/wt1'
|
||||
|
||||
store.setState({
|
||||
|
|
@ -908,7 +924,7 @@ describe('reconnectPersistedTerminals', () => {
|
|||
})
|
||||
|
||||
it('skips deleted worktrees in activeWorktreeIdsOnShutdown', async () => {
|
||||
const store = createTestStore()
|
||||
const store = createDaemonEnabledStore()
|
||||
const existing = 'repo1::/path/wt1'
|
||||
const deleted = 'repo1::/path/deleted'
|
||||
|
||||
|
|
@ -943,7 +959,7 @@ describe('reconnectPersistedTerminals', () => {
|
|||
})
|
||||
|
||||
it('preserves split-pane ptyIdsByLeafId for deferred reattach by connectPanePty', async () => {
|
||||
const store = createTestStore()
|
||||
const store = createDaemonEnabledStore()
|
||||
const wt1 = 'repo1::/path/wt1'
|
||||
|
||||
store.setState({
|
||||
|
|
|
|||
|
|
@ -1068,19 +1068,26 @@ export const createTerminalSlice: StateCreator<AppState, [], [], TerminalSlice>
|
|||
// Why: preserve the previous session's ptyId for each tab so that
|
||||
// reconnectPersistedTerminals can pass it as sessionId to the daemon's
|
||||
// createOrAttach RPC, triggering reattach instead of a fresh spawn.
|
||||
// When the experimental daemon is disabled, the LocalPtyProvider will
|
||||
// ignore any sessionId we pass anyway — populating this map just
|
||||
// persists stale daemon-era session IDs into the next session save,
|
||||
// which confuses debugging and bloats the session file. Skip it.
|
||||
const daemonEnabled = s.settings?.experimentalTerminalDaemon === true
|
||||
const pendingReconnectPtyIdByTabId: Record<string, string> = {}
|
||||
for (const worktreeId of pendingReconnectWorktreeIds) {
|
||||
const worktree = Object.values(s.worktreesByRepo)
|
||||
.flat()
|
||||
.find((entry) => entry.id === worktreeId)
|
||||
const repo = worktree ? s.repos.find((entry) => entry.id === worktree.repoId) : null
|
||||
if (repo?.connectionId) {
|
||||
continue
|
||||
}
|
||||
const rawTabs = session.tabsByWorktree[worktreeId] ?? []
|
||||
for (const tab of rawTabs) {
|
||||
if (tab.ptyId && validTabIds.has(tab.id)) {
|
||||
pendingReconnectPtyIdByTabId[tab.id] = tab.ptyId
|
||||
if (daemonEnabled) {
|
||||
for (const worktreeId of pendingReconnectWorktreeIds) {
|
||||
const worktree = Object.values(s.worktreesByRepo)
|
||||
.flat()
|
||||
.find((entry) => entry.id === worktreeId)
|
||||
const repo = worktree ? s.repos.find((entry) => entry.id === worktree.repoId) : null
|
||||
if (repo?.connectionId) {
|
||||
continue
|
||||
}
|
||||
const rawTabs = session.tabsByWorktree[worktreeId] ?? []
|
||||
for (const tab of rawTabs) {
|
||||
if (tab.ptyId && validTabIds.has(tab.id)) {
|
||||
pendingReconnectPtyIdByTabId[tab.id] = tab.ptyId
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -84,7 +84,15 @@ export type UISlice = {
|
|||
openSettingsPage: () => void
|
||||
closeSettingsPage: () => void
|
||||
settingsNavigationTarget: {
|
||||
pane: 'general' | 'browser' | 'appearance' | 'terminal' | 'shortcuts' | 'repo' | 'agents'
|
||||
pane:
|
||||
| 'general'
|
||||
| 'browser'
|
||||
| 'appearance'
|
||||
| 'terminal'
|
||||
| 'shortcuts'
|
||||
| 'repo'
|
||||
| 'agents'
|
||||
| 'experimental'
|
||||
repoId: string | null
|
||||
sectionId?: string
|
||||
} | null
|
||||
|
|
|
|||
|
|
@ -122,7 +122,9 @@ export function getDefaultSettings(homedir: string): GlobalSettings {
|
|||
defaultTuiAgent: null,
|
||||
defaultTaskViewPreset: 'all',
|
||||
agentCmdOverrides: {},
|
||||
terminalMacOptionAsAlt: 'true'
|
||||
terminalMacOptionAsAlt: 'true',
|
||||
experimentalTerminalDaemon: false,
|
||||
experimentalTerminalDaemonNoticeShown: false
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -627,6 +627,16 @@ export type GlobalSettings = {
|
|||
* 'true' = full Meta on both Option keys;
|
||||
* 'left' / 'right' = only that Option key acts as Meta, the other composes. */
|
||||
terminalMacOptionAsAlt: 'true' | 'false' | 'left' | 'right'
|
||||
/** Experimental: persist terminal sessions across app restarts via an
|
||||
* out-of-process daemon (src/main/daemon/**). Opt-in because the daemon
|
||||
* protocol is still stabilizing — some sessions have been observed to go
|
||||
* unresponsive after internal state drift. Disabled sessions fall back to
|
||||
* the in-process LocalPtyProvider. Requires an app restart to apply. */
|
||||
experimentalTerminalDaemon: boolean
|
||||
/** One-shot flag for the "persistent sessions are now opt-in" transition
|
||||
* toast shown to users upgrading from v1.3.0 (where the daemon was on by
|
||||
* default). Set to true the first time the toast fires so it never repeats. */
|
||||
experimentalTerminalDaemonNoticeShown: boolean
|
||||
}
|
||||
|
||||
export type NotificationEventSource = 'agent-task-complete' | 'terminal-bell' | 'test'
|
||||
|
|
|
|||
98
src/shared/workspace-session-schema.test.ts
Normal file
98
src/shared/workspace-session-schema.test.ts
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
import { describe, it, expect } from 'vitest'
|
||||
import { parseWorkspaceSession } from './workspace-session-schema'
|
||||
|
||||
describe('parseWorkspaceSession', () => {
|
||||
it('accepts a minimal valid session', () => {
|
||||
const result = parseWorkspaceSession({
|
||||
activeRepoId: null,
|
||||
activeWorktreeId: null,
|
||||
activeTabId: null,
|
||||
tabsByWorktree: {},
|
||||
terminalLayoutsByTabId: {}
|
||||
})
|
||||
expect(result.ok).toBe(true)
|
||||
})
|
||||
|
||||
it('accepts a fully populated session with optional fields', () => {
|
||||
const result = parseWorkspaceSession({
|
||||
activeRepoId: 'repo1',
|
||||
activeWorktreeId: 'repo1::/path/wt1',
|
||||
activeTabId: 'tab1',
|
||||
tabsByWorktree: {
|
||||
'repo1::/path/wt1': [
|
||||
{
|
||||
id: 'tab1',
|
||||
ptyId: 'daemon-session-abc',
|
||||
worktreeId: 'repo1::/path/wt1',
|
||||
title: 'bash',
|
||||
customTitle: null,
|
||||
color: null,
|
||||
sortOrder: 0,
|
||||
createdAt: 1_700_000_000_000
|
||||
}
|
||||
]
|
||||
},
|
||||
terminalLayoutsByTabId: {
|
||||
tab1: {
|
||||
root: {
|
||||
type: 'split',
|
||||
direction: 'vertical',
|
||||
first: { type: 'leaf', leafId: 'pane:1' },
|
||||
second: { type: 'leaf', leafId: 'pane:2' }
|
||||
},
|
||||
activeLeafId: 'pane:1',
|
||||
expandedLeafId: null,
|
||||
ptyIdsByLeafId: { 'pane:1': 'daemon-session-A' }
|
||||
}
|
||||
},
|
||||
activeWorktreeIdsOnShutdown: ['repo1::/path/wt1']
|
||||
})
|
||||
expect(result.ok).toBe(true)
|
||||
})
|
||||
|
||||
it('rejects a session where ptyId is a number (schema drift)', () => {
|
||||
const result = parseWorkspaceSession({
|
||||
activeRepoId: null,
|
||||
activeWorktreeId: null,
|
||||
activeTabId: null,
|
||||
tabsByWorktree: {
|
||||
wt: [
|
||||
{
|
||||
id: 'tab1',
|
||||
ptyId: 42,
|
||||
worktreeId: 'wt',
|
||||
title: 'bash',
|
||||
customTitle: null,
|
||||
color: null,
|
||||
sortOrder: 0,
|
||||
createdAt: 0
|
||||
}
|
||||
]
|
||||
},
|
||||
terminalLayoutsByTabId: {}
|
||||
})
|
||||
expect(result.ok).toBe(false)
|
||||
if (!result.ok) {
|
||||
expect(result.error).toContain('ptyId')
|
||||
}
|
||||
})
|
||||
|
||||
it('rejects a session with missing required top-level fields', () => {
|
||||
const result = parseWorkspaceSession({
|
||||
activeRepoId: null
|
||||
// missing activeWorktreeId, tabsByWorktree, etc.
|
||||
})
|
||||
expect(result.ok).toBe(false)
|
||||
})
|
||||
|
||||
it('rejects a truncated JSON object', () => {
|
||||
const result = parseWorkspaceSession({})
|
||||
expect(result.ok).toBe(false)
|
||||
})
|
||||
|
||||
it('rejects non-object input (e.g. corrupted file contents)', () => {
|
||||
expect(parseWorkspaceSession(null).ok).toBe(false)
|
||||
expect(parseWorkspaceSession('garbage').ok).toBe(false)
|
||||
expect(parseWorkspaceSession(42).ok).toBe(false)
|
||||
})
|
||||
})
|
||||
209
src/shared/workspace-session-schema.ts
Normal file
209
src/shared/workspace-session-schema.ts
Normal file
|
|
@ -0,0 +1,209 @@
|
|||
/* Why: the workspace session JSON is written to disk by older builds and read
|
||||
* back by newer ones. A field type flip (e.g. ptyId going from string to an
|
||||
* object) or a truncated write could poison Zustand state and crash the
|
||||
* renderer on mount. Schema-validating at the read boundary gives us a single
|
||||
* "reject and fall back to defaults" point so garbage never reaches React.
|
||||
*
|
||||
* Policy: be tolerant of extra fields (future builds may add more) but strict
|
||||
* about the types of fields we actually read. Unknown enum values, wrong types,
|
||||
* and wrong shapes all collapse to "use defaults" — never throw into main.
|
||||
*/
|
||||
import { z } from 'zod'
|
||||
import type {
|
||||
BrowserWorkspace,
|
||||
TabGroupLayoutNode,
|
||||
TerminalPaneLayoutNode,
|
||||
WorkspaceSessionState
|
||||
} from './types'
|
||||
|
||||
// ─── Terminal pane layout (recursive) ───────────────────────────────
|
||||
|
||||
const terminalPaneSplitDirectionSchema = z.enum(['vertical', 'horizontal'])
|
||||
|
||||
// Why: z.lazy + type annotation keeps the recursive inference working without
|
||||
// forcing zod to resolve the whole tree at definition time.
|
||||
const terminalPaneLayoutNodeSchema: z.ZodType<TerminalPaneLayoutNode> = z.lazy(() =>
|
||||
z.union([
|
||||
z.object({
|
||||
type: z.literal('leaf'),
|
||||
leafId: z.string()
|
||||
}),
|
||||
z.object({
|
||||
type: z.literal('split'),
|
||||
direction: terminalPaneSplitDirectionSchema,
|
||||
first: terminalPaneLayoutNodeSchema,
|
||||
second: terminalPaneLayoutNodeSchema,
|
||||
ratio: z.number().optional()
|
||||
})
|
||||
])
|
||||
)
|
||||
|
||||
const terminalLayoutSnapshotSchema = z.object({
|
||||
root: terminalPaneLayoutNodeSchema.nullable(),
|
||||
activeLeafId: z.string().nullable(),
|
||||
expandedLeafId: z.string().nullable(),
|
||||
ptyIdsByLeafId: z.record(z.string(), z.string()).optional(),
|
||||
buffersByLeafId: z.record(z.string(), z.string()).optional(),
|
||||
titlesByLeafId: z.record(z.string(), z.string()).optional()
|
||||
})
|
||||
|
||||
// ─── Terminal tab (legacy) ──────────────────────────────────────────
|
||||
|
||||
const terminalTabSchema = z.object({
|
||||
id: z.string(),
|
||||
ptyId: z.string().nullable(),
|
||||
worktreeId: z.string(),
|
||||
title: z.string(),
|
||||
defaultTitle: z.string().optional(),
|
||||
customTitle: z.string().nullable(),
|
||||
color: z.string().nullable(),
|
||||
sortOrder: z.number(),
|
||||
createdAt: z.number(),
|
||||
generation: z.number().optional()
|
||||
})
|
||||
|
||||
// ─── Unified tab model ──────────────────────────────────────────────
|
||||
|
||||
const tabContentTypeSchema = z.enum(['terminal', 'editor', 'diff', 'conflict-review', 'browser'])
|
||||
|
||||
const workspaceVisibleTabTypeSchema = z.enum(['terminal', 'editor', 'browser'])
|
||||
|
||||
const tabSchema = z.object({
|
||||
id: z.string(),
|
||||
entityId: z.string(),
|
||||
groupId: z.string(),
|
||||
worktreeId: z.string(),
|
||||
contentType: tabContentTypeSchema,
|
||||
label: z.string(),
|
||||
customLabel: z.string().nullable(),
|
||||
color: z.string().nullable(),
|
||||
sortOrder: z.number(),
|
||||
createdAt: z.number(),
|
||||
isPreview: z.boolean().optional(),
|
||||
isPinned: z.boolean().optional()
|
||||
})
|
||||
|
||||
const tabGroupSchema = z.object({
|
||||
id: z.string(),
|
||||
worktreeId: z.string(),
|
||||
activeTabId: z.string().nullable(),
|
||||
tabOrder: z.array(z.string())
|
||||
})
|
||||
|
||||
const tabGroupSplitDirectionSchema = z.enum(['horizontal', 'vertical'])
|
||||
|
||||
const tabGroupLayoutNodeSchema: z.ZodType<TabGroupLayoutNode> = z.lazy(() =>
|
||||
z.union([
|
||||
z.object({
|
||||
type: z.literal('leaf'),
|
||||
groupId: z.string()
|
||||
}),
|
||||
z.object({
|
||||
type: z.literal('split'),
|
||||
direction: tabGroupSplitDirectionSchema,
|
||||
first: tabGroupLayoutNodeSchema,
|
||||
second: tabGroupLayoutNodeSchema,
|
||||
ratio: z.number().optional()
|
||||
})
|
||||
])
|
||||
)
|
||||
|
||||
// ─── Editor ─────────────────────────────────────────────────────────
|
||||
|
||||
const persistedOpenFileSchema = z.object({
|
||||
filePath: z.string(),
|
||||
relativePath: z.string(),
|
||||
worktreeId: z.string(),
|
||||
language: z.string(),
|
||||
isPreview: z.boolean().optional()
|
||||
})
|
||||
|
||||
// ─── Browser ────────────────────────────────────────────────────────
|
||||
|
||||
const browserLoadErrorSchema = z.object({
|
||||
code: z.number(),
|
||||
description: z.string(),
|
||||
validatedUrl: z.string()
|
||||
})
|
||||
|
||||
// Why: cast to WorkspaceSessionState's embedded BrowserWorkspace so future
|
||||
// additive fields in the type flow through without requiring a schema edit.
|
||||
const browserWorkspaceSchema: z.ZodType<BrowserWorkspace> = z.object({
|
||||
id: z.string(),
|
||||
worktreeId: z.string(),
|
||||
label: z.string().optional(),
|
||||
sessionProfileId: z.string().nullable().optional(),
|
||||
activePageId: z.string().nullable().optional(),
|
||||
pageIds: z.array(z.string()).optional(),
|
||||
url: z.string(),
|
||||
title: z.string(),
|
||||
loading: z.boolean(),
|
||||
faviconUrl: z.string().nullable(),
|
||||
canGoBack: z.boolean(),
|
||||
canGoForward: z.boolean(),
|
||||
loadError: browserLoadErrorSchema.nullable(),
|
||||
createdAt: z.number()
|
||||
})
|
||||
|
||||
const browserPageSchema = z.object({
|
||||
id: z.string(),
|
||||
workspaceId: z.string(),
|
||||
worktreeId: z.string(),
|
||||
url: z.string(),
|
||||
title: z.string(),
|
||||
loading: z.boolean(),
|
||||
faviconUrl: z.string().nullable(),
|
||||
canGoBack: z.boolean(),
|
||||
canGoForward: z.boolean(),
|
||||
loadError: browserLoadErrorSchema.nullable(),
|
||||
createdAt: z.number()
|
||||
})
|
||||
|
||||
const browserHistoryEntrySchema = z.object({
|
||||
url: z.string(),
|
||||
normalizedUrl: z.string(),
|
||||
title: z.string(),
|
||||
lastVisitedAt: z.number(),
|
||||
visitCount: z.number()
|
||||
})
|
||||
|
||||
// ─── Workspace session ──────────────────────────────────────────────
|
||||
|
||||
export const workspaceSessionStateSchema: z.ZodType<WorkspaceSessionState> = z.object({
|
||||
activeRepoId: z.string().nullable(),
|
||||
activeWorktreeId: z.string().nullable(),
|
||||
activeTabId: z.string().nullable(),
|
||||
tabsByWorktree: z.record(z.string(), z.array(terminalTabSchema)),
|
||||
terminalLayoutsByTabId: z.record(z.string(), terminalLayoutSnapshotSchema),
|
||||
activeWorktreeIdsOnShutdown: z.array(z.string()).optional(),
|
||||
openFilesByWorktree: z.record(z.string(), z.array(persistedOpenFileSchema)).optional(),
|
||||
activeFileIdByWorktree: z.record(z.string(), z.string().nullable()).optional(),
|
||||
browserTabsByWorktree: z.record(z.string(), z.array(browserWorkspaceSchema)).optional(),
|
||||
browserPagesByWorkspace: z.record(z.string(), z.array(browserPageSchema)).optional(),
|
||||
activeBrowserTabIdByWorktree: z.record(z.string(), z.string().nullable()).optional(),
|
||||
activeTabTypeByWorktree: z.record(z.string(), workspaceVisibleTabTypeSchema).optional(),
|
||||
browserUrlHistory: z.array(browserHistoryEntrySchema).optional(),
|
||||
activeTabIdByWorktree: z.record(z.string(), z.string().nullable()).optional(),
|
||||
unifiedTabs: z.record(z.string(), z.array(tabSchema)).optional(),
|
||||
tabGroups: z.record(z.string(), z.array(tabGroupSchema)).optional(),
|
||||
tabGroupLayouts: z.record(z.string(), tabGroupLayoutNodeSchema).optional(),
|
||||
activeGroupIdByWorktree: z.record(z.string(), z.string()).optional()
|
||||
})
|
||||
|
||||
export type ParsedWorkspaceSession =
|
||||
| { ok: true; value: WorkspaceSessionState }
|
||||
| { ok: false; error: string }
|
||||
|
||||
/** Validate raw JSON as a WorkspaceSessionState. Returns a discriminated union
|
||||
* so callers can fall back to defaults on failure without a try/catch. */
|
||||
export function parseWorkspaceSession(raw: unknown): ParsedWorkspaceSession {
|
||||
const result = workspaceSessionStateSchema.safeParse(raw)
|
||||
if (result.success) {
|
||||
return { ok: true, value: result.data }
|
||||
}
|
||||
// Why: keep the error compact — a zod issue dump is noisy and most of the
|
||||
// time only the first divergent field is actionable for debugging.
|
||||
const firstIssue = result.error.issues[0]
|
||||
const path = firstIssue?.path.join('.') || '<root>'
|
||||
return { ok: false, error: `${path}: ${firstIssue?.message ?? 'invalid session'}` }
|
||||
}
|
||||
Loading…
Reference in a new issue