feat: Add SSH remote support (beta) (#590)

This commit is contained in:
Jinwoo Hong 2026-04-13 22:23:09 -04:00 committed by GitHub
parent 8d6caa1f11
commit cc66e120eb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
129 changed files with 16181 additions and 2160 deletions

View file

@ -16,6 +16,9 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Install native build tools
run: sudo apt-get update && sudo apt-get install -y build-essential python3
- name: Setup Node.js
uses: actions/setup-node@v4
with:

View file

@ -1,6 +1,12 @@
{
"extends": "@electron-toolkit/tsconfig/tsconfig.node.json",
"include": ["../electron.vite.config.*", "../src/main/**/*", "../src/preload/**/*", "../src/shared/**/*"],
"include": [
"../electron.vite.config.*",
"../src/main/**/*",
"../src/preload/**/*",
"../src/shared/**/*",
"../src/relay/**/*"
],
"compilerOptions": {
"composite": true,
"types": ["electron-vite/node"]

View file

@ -0,0 +1,10 @@
{
"extends": "@electron-toolkit/tsconfig/tsconfig.node.json",
"include": ["../src/relay/**/*"],
"exclude": ["../src/relay/integration.test.ts"],
"compilerOptions": {
"composite": true,
"rootDir": "../src",
"outDir": "../out/relay"
}
}

View file

@ -23,10 +23,11 @@
"typecheck": "tsc --noEmit -p config/tsconfig.node.json --composite false && tsc --noEmit -p config/tsconfig.cli.json --composite false && tsc --noEmit -p config/tsconfig.web.json --composite false",
"start": "electron-vite preview",
"dev": "node config/scripts/run-electron-vite-dev.mjs",
"build:relay": "node scripts/build-relay.mjs",
"build:cli": "tsc -p config/tsconfig.cli.json --outDir out --composite false --incremental false",
"build:electron-vite": "node config/scripts/run-electron-vite-build.mjs",
"build": "pnpm run typecheck && pnpm run build:electron-vite && pnpm run build:cli",
"build:release": "pnpm run build:electron-vite && pnpm run build:cli",
"build": "pnpm run typecheck && pnpm run build:relay && pnpm run build:electron-vite && pnpm run build:cli",
"build:release": "pnpm run build:relay && pnpm run build:electron-vite && pnpm run build:cli",
"postinstall": "pnpm rebuild electron && electron-builder install-app-deps",
"build:unpack": "pnpm run build && electron-builder --config config/electron-builder.config.cjs --dir",
"build:win": "pnpm run build && electron-builder --config config/electron-builder.config.cjs --win",
@ -89,6 +90,7 @@
"shadcn": "^4.1.0",
"simple-git": "^3.33.0",
"sonner": "^2.0.7",
"ssh2": "^1.17.0",
"tailwind-merge": "^3.5.0",
"tw-animate-css": "^1.4.0",
"zustand": "^5.0.12"
@ -99,6 +101,7 @@
"@types/node": "^25.5.0",
"@types/react": "^19.2.14",
"@types/react-dom": "^19.2.3",
"@types/ssh2": "^1.15.5",
"@typescript/native-preview": "7.0.0-dev.20260406.1",
"@vitejs/plugin-react": "^5.2.0",
"electron": "^41.0.3",
@ -129,6 +132,7 @@
"pnpm": {
"onlyBuiltDependencies": [
"@parcel/watcher",
"cpu-features",
"electron",
"esbuild",
"node-pty"

View file

@ -163,6 +163,9 @@ importers:
sonner:
specifier: ^2.0.7
version: 2.0.7(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
ssh2:
specifier: ^1.17.0
version: 1.17.0
tailwind-merge:
specifier: ^3.5.0
version: 3.5.0
@ -188,6 +191,9 @@ importers:
'@types/react-dom':
specifier: ^19.2.3
version: 19.2.3(@types/react@19.2.14)
'@types/ssh2':
specifier: ^1.15.5
version: 1.15.5
'@typescript/native-preview':
specifier: 7.0.0-dev.20260406.1
version: 7.0.0-dev.20260406.1
@ -2644,6 +2650,9 @@ packages:
'@types/ms@2.1.0':
resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==}
'@types/node@18.19.130':
resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==}
'@types/node@24.12.0':
resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==}
@ -2669,6 +2678,9 @@ packages:
'@types/responselike@1.0.3':
resolution: {integrity: sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw==}
'@types/ssh2@1.15.5':
resolution: {integrity: sha512-N1ASjp/nXH3ovBHddRJpli4ozpk6UdDYIX4RJWFa9L1YKnzdhTlVmiGHm4DZnj/jLbqZpes4aeR30EFGQtvhQQ==}
'@types/statuses@2.0.6':
resolution: {integrity: sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==}
@ -2872,6 +2884,9 @@ packages:
resolution: {integrity: sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==}
engines: {node: '>=10'}
asn1@0.2.6:
resolution: {integrity: sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==}
assert-plus@1.0.0:
resolution: {integrity: sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==}
engines: {node: '>=0.8'}
@ -2920,6 +2935,9 @@ packages:
engines: {node: '>=6.0.0'}
hasBin: true
bcrypt-pbkdf@1.0.2:
resolution: {integrity: sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==}
bippy@0.5.32:
resolution: {integrity: sha512-yt1mC8eReTxjfg41YBZdN4PvsDwHFWxltoiQX0Q+Htlbf41aSniopb7ECZits01HwNAvXEh69RGk/ImlswDTEw==}
peerDependencies:
@ -2964,6 +2982,10 @@ packages:
buffer@5.7.1:
resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==}
buildcheck@0.0.7:
resolution: {integrity: sha512-lHblz4ahamxpTmnsk+MNTRWsjYKv965MwOrSJyeD588rR3Jcu7swE+0wN5F+PbL5cjgu/9ObkhfzEPuofEMwLA==}
engines: {node: '>=10.0.0'}
builder-util-runtime@9.5.1:
resolution: {integrity: sha512-qt41tMfgHTllhResqM5DcnHyDIWNgzHvuY2jDcYP9iaGpkWxTUzV6GQjDeLnlR1/DtdlcsWQbA7sByMpmJFTLQ==}
engines: {node: '>=12.0.0'}
@ -3208,6 +3230,10 @@ packages:
typescript:
optional: true
cpu-features@0.0.10:
resolution: {integrity: sha512-9IkYqtX3YHPCzoVg1Py+o9057a3i0fp7S530UWokCSaFVTc7CwXPRiOjRjBQQ18ZCNafx78YfnG+HALxtVmOGA==}
engines: {node: '>=10.0.0'}
crc@3.8.0:
resolution: {integrity: sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ==}
@ -4717,6 +4743,9 @@ packages:
resolution: {integrity: sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==}
engines: {node: ^18.17.0 || >=20.5.0}
nan@2.26.2:
resolution: {integrity: sha512-0tTvBTYkt3tdGw22nrAy50x7gpbGCCFH3AFcyS5WiUu7Eu4vWlri1woE6qHBSfy11vksDqkiwjOnlR7WV8G1Hw==}
nanoid@3.3.11:
resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==}
engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
@ -5425,6 +5454,10 @@ packages:
sprintf-js@1.1.3:
resolution: {integrity: sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==}
ssh2@1.17.0:
resolution: {integrity: sha512-wPldCk3asibAjQ/kziWQQt1Wh3PgDFpC0XpwclzKcdT1vql6KeYxf5LIt4nlFkUeR8WuphYMKqUA56X4rjbfgQ==}
engines: {node: '>=10.16.0'}
ssri@12.0.0:
resolution: {integrity: sha512-S7iGNosepx9RadX82oimUkvr0Ct7IjJbEbs4mJcTxst8um95J3sDYU1RBEOvdu6oL1Wek2ODI5i4MAw+dZ6cAQ==}
engines: {node: ^18.17.0 || >=20.5.0}
@ -5625,6 +5658,9 @@ packages:
tw-animate-css@1.4.0:
resolution: {integrity: sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ==}
tweetnacl@0.14.5:
resolution: {integrity: sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==}
type-fest@0.13.1:
resolution: {integrity: sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==}
engines: {node: '>=10'}
@ -5648,6 +5684,9 @@ packages:
ufo@1.6.3:
resolution: {integrity: sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==}
undici-types@5.26.5:
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
undici-types@7.16.0:
resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==}
@ -8256,6 +8295,10 @@ snapshots:
'@types/ms@2.1.0': {}
'@types/node@18.19.130':
dependencies:
undici-types: 5.26.5
'@types/node@24.12.0':
dependencies:
undici-types: 7.16.0
@ -8286,6 +8329,10 @@ snapshots:
dependencies:
'@types/node': 25.5.0
'@types/ssh2@1.15.5':
dependencies:
'@types/node': 18.19.130
'@types/statuses@2.0.6': {}
'@types/trusted-types@2.0.7':
@ -8513,6 +8560,10 @@ snapshots:
dependencies:
tslib: 2.8.1
asn1@0.2.6:
dependencies:
safer-buffer: 2.1.2
assert-plus@1.0.0:
optional: true
@ -8543,6 +8594,10 @@ snapshots:
baseline-browser-mapping@2.10.10: {}
bcrypt-pbkdf@1.0.2:
dependencies:
tweetnacl: 0.14.5
bippy@0.5.32(@types/react@19.2.14)(react@19.2.4):
dependencies:
'@types/react-reconciler': 0.28.9(@types/react@19.2.14)
@ -8607,6 +8662,9 @@ snapshots:
base64-js: 1.5.1
ieee754: 1.2.1
buildcheck@0.0.7:
optional: true
builder-util-runtime@9.5.1:
dependencies:
debug: 4.4.3
@ -8849,6 +8907,12 @@ snapshots:
optionalDependencies:
typescript: 5.9.3
cpu-features@0.0.10:
dependencies:
buildcheck: 0.0.7
nan: 2.26.2
optional: true
crc@3.8.0:
dependencies:
buffer: 5.7.1
@ -10719,6 +10783,9 @@ snapshots:
mute-stream@2.0.0: {}
nan@2.26.2:
optional: true
nanoid@3.3.11: {}
negotiator@1.0.0: {}
@ -11685,6 +11752,14 @@ snapshots:
sprintf-js@1.1.3:
optional: true
ssh2@1.17.0:
dependencies:
asn1: 0.2.6
bcrypt-pbkdf: 1.0.2
optionalDependencies:
cpu-features: 0.0.10
nan: 2.26.2
ssri@12.0.0:
dependencies:
minipass: 7.1.3
@ -11871,6 +11946,8 @@ snapshots:
tw-animate-css@1.4.0: {}
tweetnacl@0.14.5: {}
type-fest@0.13.1:
optional: true
@ -11890,6 +11967,8 @@ snapshots:
ufo@1.6.3: {}
undici-types@5.26.5: {}
undici-types@7.16.0: {}
undici-types@7.18.2: {}

55
scripts/build-relay.mjs Normal file
View file

@ -0,0 +1,55 @@
#!/usr/bin/env node
/**
* Bundle the relay daemon into a single relay.js file per platform.
*
* The relay runs on remote hosts via `node relay.js`, so it must be a
* self-contained CommonJS bundle with no external dependencies beyond
* Node.js built-ins. Native addons (node-pty, @parcel/watcher) are
* marked external and expected to be installed on the remote or
* gracefully degraded.
*/
import { build } from 'esbuild'
import { createHash } from 'crypto'
import { mkdirSync, readFileSync, writeFileSync } from 'fs'
import { join, dirname } from 'path'
import { fileURLToPath } from 'url'
const __dirname = dirname(fileURLToPath(import.meta.url))
const ROOT = join(__dirname, '..')
const RELAY_ENTRY = join(ROOT, 'src', 'relay', 'relay.ts')
const PLATFORMS = ['linux-x64', 'linux-arm64', 'darwin-x64', 'darwin-arm64']
const RELAY_VERSION = '0.1.0'
for (const platform of PLATFORMS) {
const outDir = join(ROOT, 'out', 'relay', platform)
mkdirSync(outDir, { recursive: true })
await build({
entryPoints: [RELAY_ENTRY],
bundle: true,
platform: 'node',
target: 'node18',
format: 'cjs',
outfile: join(outDir, 'relay.js'),
// Native addons cannot be bundled — they must exist on the remote host.
// The relay gracefully degrades when they are absent.
external: ['node-pty', '@parcel/watcher'],
sourcemap: false,
minify: true,
define: {
'process.env.NODE_ENV': '"production"'
}
})
// Why: include a content hash so the deploy check detects code changes
// even when RELAY_VERSION hasn't been bumped (common during development).
const relayContent = readFileSync(join(outDir, 'relay.js'))
const hash = createHash('sha256').update(relayContent).digest('hex').slice(0, 12)
writeFileSync(join(outDir, '.version'), `${RELAY_VERSION}+${hash}`)
console.log(`Built relay for ${platform}${outDir}/relay.js`)
}
console.log('Relay build complete.')

View file

@ -3,6 +3,7 @@ import { lstat, mkdir, rename, writeFile } from 'fs/promises'
import { basename, dirname } from 'path'
import type { Store } from '../persistence'
import { resolveAuthorizedPath, isENOENT } from './filesystem-auth'
import { getSshFilesystemProvider } from '../providers/ssh-filesystem-dispatch'
/**
* Re-throw filesystem errors with user-friendly messages.
@ -49,29 +50,59 @@ async function assertNotExists(targetPath: string): Promise<void> {
* Deletion is handled separately via `fs:deletePath` (shell.trashItem).
*/
export function registerFilesystemMutationHandlers(store: Store): void {
ipcMain.handle('fs:createFile', async (_event, args: { filePath: string }): Promise<void> => {
const filePath = await resolveAuthorizedPath(args.filePath, store)
await mkdir(dirname(filePath), { recursive: true })
try {
// Use the 'wx' flag for atomic create-if-not-exists, avoiding TOCTOU races
await writeFile(filePath, '', { encoding: 'utf-8', flag: 'wx' })
} catch (error) {
rethrowWithUserMessage(error, filePath)
ipcMain.handle(
'fs:createFile',
async (_event, args: { filePath: string; connectionId?: string }): Promise<void> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.createFile(args.filePath)
}
const filePath = await resolveAuthorizedPath(args.filePath, store)
await mkdir(dirname(filePath), { recursive: true })
try {
// Use the 'wx' flag for atomic create-if-not-exists, avoiding TOCTOU races
await writeFile(filePath, '', { encoding: 'utf-8', flag: 'wx' })
} catch (error) {
rethrowWithUserMessage(error, filePath)
}
}
})
)
ipcMain.handle('fs:createDir', async (_event, args: { dirPath: string }): Promise<void> => {
const dirPath = await resolveAuthorizedPath(args.dirPath, store)
await assertNotExists(dirPath)
await mkdir(dirPath, { recursive: true })
})
ipcMain.handle(
'fs:createDir',
async (_event, args: { dirPath: string; connectionId?: string }): Promise<void> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.createDir(args.dirPath)
}
const dirPath = await resolveAuthorizedPath(args.dirPath, store)
await assertNotExists(dirPath)
await mkdir(dirPath, { recursive: true })
}
)
// Note: fs.rename throws EXDEV if old and new paths are on different
// filesystems/volumes. This is unlikely since both paths are under the same
// workspace root, but a cross-drive rename would surface as an IPC error.
ipcMain.handle(
'fs:rename',
async (_event, args: { oldPath: string; newPath: string }): Promise<void> => {
async (
_event,
args: { oldPath: string; newPath: string; connectionId?: string }
): Promise<void> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.rename(args.oldPath, args.newPath)
}
const oldPath = await resolveAuthorizedPath(args.oldPath, store)
const newPath = await resolveAuthorizedPath(args.newPath, store)
await assertNotExists(newPath)

View file

@ -1,3 +1,8 @@
/* eslint-disable max-lines -- Why: filesystem-watcher centralizes native
(@parcel/watcher), WSL (inotifywait), and SSH remote watcher lifecycles in
one module so subscription/cleanup invariants stay auditable from a single
file. Splitting by transport would scatter the shared debounce/coalesce
helpers and the common batch-flush path across three files. */
import { ipcMain, type WebContents } from 'electron'
import * as path from 'path'
import { stat } from 'fs/promises'
@ -6,6 +11,7 @@ import type { FsChangeEvent, FsChangedPayload } from '../../shared/types'
import { isWslPath } from '../wsl'
import { createWslWatcher } from './filesystem-watcher-wsl'
import type { WatchedRoot } from './filesystem-watcher-wsl'
import { getSshFilesystemProvider } from '../providers/ssh-filesystem-dispatch'
// ── Ignore patterns ──────────────────────────────────────────────────
// Why: high-churn directories are suppressed at the native watcher level
@ -385,20 +391,65 @@ function unsubscribe(worktreePath: string, senderId: number): void {
}
}
// ── Remote watcher state ─────────────────────────────────────────────
// Key: `${connectionId}:${worktreePath}`, Value: unwatch function
const remoteWatchers = new Map<string, () => void>()
// ── Public API ───────────────────────────────────────────────────────
export function registerFilesystemWatcherHandlers(): void {
ipcMain.handle(
'fs:watchWorktree',
async (event, args: { worktreePath: string }): Promise<void> => {
async (event, args: { worktreePath: string; connectionId?: string }): Promise<void> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
const key = `${args.connectionId}:${args.worktreePath}`
if (remoteWatchers.has(key)) {
return
}
const unwatch = await provider.watch(args.worktreePath, (events) => {
if (!event.sender.isDestroyed()) {
event.sender.send('fs:changed', {
worktreePath: args.worktreePath,
events
} satisfies FsChangedPayload)
}
})
remoteWatchers.set(key, unwatch)
event.sender.once('destroyed', () => {
const unwatchFn = remoteWatchers.get(key)
if (unwatchFn) {
unwatchFn()
remoteWatchers.delete(key)
}
})
return
}
await subscribe(args.worktreePath, event.sender)
}
)
ipcMain.handle('fs:unwatchWorktree', (_event, args: { worktreePath: string }): void => {
const senderId = _event.sender.id
unsubscribe(args.worktreePath, senderId)
})
ipcMain.handle(
'fs:unwatchWorktree',
(_event, args: { worktreePath: string; connectionId?: string }): void => {
if (args.connectionId) {
const key = `${args.connectionId}:${args.worktreePath}`
const unwatchFn = remoteWatchers.get(key)
if (unwatchFn) {
unwatchFn()
remoteWatchers.delete(key)
}
return
}
const senderId = _event.sender.id
unsubscribe(args.worktreePath, senderId)
}
)
}
/** Tear down all watchers on app shutdown. */
@ -414,4 +465,17 @@ export async function closeAllWatchers(): Promise<void> {
}
}
watchedRoots.clear()
// Why: remote watchers are tracked separately from local @parcel/watcher
// subscriptions. Without cleaning them up here, their unwatch callbacks
// would never fire, leaving the relay polling for FS changes after the
// app has shut down.
for (const [key, unwatchFn] of remoteWatchers) {
try {
unwatchFn()
} catch (err) {
console.error(`[filesystem-watcher] remote unwatch error for ${key}:`, err)
}
}
remoteWatchers.clear()
}

View file

@ -41,6 +41,8 @@ import { listQuickOpenFiles } from './filesystem-list-files'
import { registerFilesystemMutationHandlers } from './filesystem-mutations'
import { searchWithGitGrep } from './filesystem-search-git'
import { checkRgAvailable } from './rg-availability'
import { getSshFilesystemProvider } from '../providers/ssh-filesystem-dispatch'
import { getSshGitProvider } from '../providers/ssh-git-dispatch'
const MAX_FILE_SIZE = 5 * 1024 * 1024 // 5MB
const DEFAULT_SEARCH_MAX_RESULTS = 2000
@ -80,30 +82,46 @@ export function registerFilesystemHandlers(store: Store): void {
const activeTextSearches = new Map<string, ChildProcess>()
// ─── Filesystem ─────────────────────────────────────────
ipcMain.handle('fs:readDir', async (_event, args: { dirPath: string }): Promise<DirEntry[]> => {
const dirPath = await resolveAuthorizedPath(args.dirPath, store)
const entries = await readdir(dirPath, { withFileTypes: true })
return entries
.map((entry) => ({
name: entry.name,
isDirectory: entry.isDirectory(),
isSymlink: entry.isSymbolicLink()
}))
.sort((a, b) => {
// Directories first, then alphabetical
if (a.isDirectory !== b.isDirectory) {
return a.isDirectory ? -1 : 1
ipcMain.handle(
'fs:readDir',
async (_event, args: { dirPath: string; connectionId?: string }): Promise<DirEntry[]> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return a.name.localeCompare(b.name)
})
})
return provider.readDir(args.dirPath)
}
const dirPath = await resolveAuthorizedPath(args.dirPath, store)
const entries = await readdir(dirPath, { withFileTypes: true })
return entries
.map((entry) => ({
name: entry.name,
isDirectory: entry.isDirectory(),
isSymlink: entry.isSymbolicLink()
}))
.sort((a, b) => {
if (a.isDirectory !== b.isDirectory) {
return a.isDirectory ? -1 : 1
}
return a.name.localeCompare(b.name)
})
}
)
ipcMain.handle(
'fs:readFile',
async (
_event,
args: { filePath: string }
args: { filePath: string; connectionId?: string }
): Promise<{ content: string; isBinary: boolean; isImage?: boolean; mimeType?: string }> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.readFile(args.filePath)
}
const filePath = await resolveAuthorizedPath(args.filePath, store)
const stats = await stat(filePath)
if (stats.size > MAX_FILE_SIZE) {
@ -136,7 +154,17 @@ export function registerFilesystemHandlers(store: Store): void {
ipcMain.handle(
'fs:writeFile',
async (_event, args: { filePath: string; content: string }): Promise<void> => {
async (
_event,
args: { filePath: string; content: string; connectionId?: string }
): Promise<void> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.writeFile(args.filePath, args.content)
}
const filePath = await resolveAuthorizedPath(args.filePath, store)
try {
@ -154,21 +182,31 @@ export function registerFilesystemHandlers(store: Store): void {
}
)
ipcMain.handle('fs:deletePath', async (_event, args: { targetPath: string }): Promise<void> => {
const targetPath = await resolveAuthorizedPath(args.targetPath, store)
// Why: once auto-refresh exists, an external delete can race with a
// UI-initiated delete. Swallowing ENOENT keeps the action idempotent
// from the user's perspective (design §7.1).
try {
await shell.trashItem(targetPath)
} catch (error) {
if (isENOENT(error)) {
return
ipcMain.handle(
'fs:deletePath',
async (_event, args: { targetPath: string; connectionId?: string }): Promise<void> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.deletePath(args.targetPath)
}
const targetPath = await resolveAuthorizedPath(args.targetPath, store)
// Why: once auto-refresh exists, an external delete can race with a
// UI-initiated delete. Swallowing ENOENT keeps the action idempotent
// from the user's perspective (design §7.1).
try {
await shell.trashItem(targetPath)
} catch (error) {
if (isENOENT(error)) {
return
}
throw error
}
throw error
}
})
)
registerFilesystemMutationHandlers(store)
@ -180,8 +218,16 @@ export function registerFilesystemHandlers(store: Store): void {
'fs:stat',
async (
_event,
args: { filePath: string }
args: { filePath: string; connectionId?: string }
): Promise<{ size: number; isDirectory: boolean; mtime: number }> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
const s = await provider.stat(args.filePath)
return { size: s.size, isDirectory: s.type === 'directory', mtime: s.mtime }
}
const filePath = await resolveAuthorizedPath(args.filePath, store)
const stats = await stat(filePath)
return {
@ -193,190 +239,218 @@ export function registerFilesystemHandlers(store: Store): void {
)
// ─── Search ────────────────────────────────────────────
ipcMain.handle('fs:search', async (event, args: SearchOptions): Promise<SearchResult> => {
const rootPath = await resolveAuthorizedPath(args.rootPath, store)
const maxResults = Math.max(
1,
Math.min(args.maxResults ?? DEFAULT_SEARCH_MAX_RESULTS, DEFAULT_SEARCH_MAX_RESULTS)
)
const searchKey = `${event.sender.id}:${rootPath}`
// Why: checking rg availability upfront avoids a race condition where
// spawn('rg') emits 'close' before 'error' on some platforms, causing
// the handler to resolve with empty results before the git-grep
// fallback can run. The result is cached after the first check.
const rgAvailable = await checkRgAvailable(rootPath)
if (!rgAvailable) {
return searchWithGitGrep(rootPath, args, maxResults)
}
return new Promise((resolvePromise) => {
const rgArgs: string[] = [
'--json',
'--hidden',
'--glob',
'!.git',
'--max-count',
String(MAX_MATCHES_PER_FILE),
'--max-filesize',
`${Math.floor(MAX_FILE_SIZE / 1024 / 1024)}M`
]
if (!args.caseSensitive) {
rgArgs.push('--ignore-case')
}
if (args.wholeWord) {
rgArgs.push('--word-regexp')
}
if (!args.useRegex) {
rgArgs.push('--fixed-strings')
}
if (args.includePattern) {
for (const pat of args.includePattern
.split(',')
.map((s) => s.trim())
.filter(Boolean)) {
rgArgs.push('--glob', pat)
ipcMain.handle(
'fs:search',
async (event, args: SearchOptions & { connectionId?: string }): Promise<SearchResult> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.search(args)
}
if (args.excludePattern) {
for (const pat of args.excludePattern
.split(',')
.map((s) => s.trim())
.filter(Boolean)) {
rgArgs.push('--glob', `!${pat}`)
}
const rootPath = await resolveAuthorizedPath(args.rootPath, store)
const maxResults = Math.max(
1,
Math.min(args.maxResults ?? DEFAULT_SEARCH_MAX_RESULTS, DEFAULT_SEARCH_MAX_RESULTS)
)
const searchKey = `${event.sender.id}:${rootPath}`
// Why: checking rg availability upfront avoids a race condition where
// spawn('rg') emits 'close' before 'error' on some platforms, causing
// the handler to resolve with empty results before the git-grep
// fallback can run. The result is cached after the first check.
const rgAvailable = await checkRgAvailable(rootPath)
if (!rgAvailable) {
return searchWithGitGrep(rootPath, args, maxResults)
}
rgArgs.push('--', args.query, rootPath)
return new Promise((resolvePromise) => {
const rgArgs: string[] = [
'--json',
'--hidden',
'--glob',
'!.git',
'--max-count',
String(MAX_MATCHES_PER_FILE),
'--max-filesize',
`${Math.floor(MAX_FILE_SIZE / 1024 / 1024)}M`
]
// Why: search requests are fired on each query/options change. If the
// previous ripgrep process keeps running, it can continue streaming and
// parsing thousands of matches on the Electron main thread after the UI
// no longer cares about that result, which is exactly the freeze users
// experience in large repos.
activeTextSearches.get(searchKey)?.kill()
const fileMap = new Map<string, SearchFileResult>()
let totalMatches = 0
let truncated = false
let stdoutBuffer = ''
let resolved = false
let child: ChildProcess | null = null
const resolveOnce = (): void => {
if (resolved) {
return
if (!args.caseSensitive) {
rgArgs.push('--ignore-case')
}
resolved = true
if (activeTextSearches.get(searchKey) === child) {
activeTextSearches.delete(searchKey)
if (args.wholeWord) {
rgArgs.push('--word-regexp')
}
clearTimeout(killTimeout)
resolvePromise({
files: Array.from(fileMap.values()),
totalMatches,
truncated
})
}
const processLine = (line: string): void => {
if (!line || totalMatches >= maxResults) {
return
if (!args.useRegex) {
rgArgs.push('--fixed-strings')
}
if (args.includePattern) {
for (const pat of args.includePattern
.split(',')
.map((s) => s.trim())
.filter(Boolean)) {
rgArgs.push('--glob', pat)
}
}
if (args.excludePattern) {
for (const pat of args.excludePattern
.split(',')
.map((s) => s.trim())
.filter(Boolean)) {
rgArgs.push('--glob', `!${pat}`)
}
}
try {
const msg = JSON.parse(line)
if (msg.type !== 'match') {
rgArgs.push('--', args.query, rootPath)
// Why: search requests are fired on each query/options change. If the
// previous ripgrep process keeps running, it can continue streaming and
// parsing thousands of matches on the Electron main thread after the UI
// no longer cares about that result, which is exactly the freeze users
// experience in large repos.
activeTextSearches.get(searchKey)?.kill()
const fileMap = new Map<string, SearchFileResult>()
let totalMatches = 0
let truncated = false
let stdoutBuffer = ''
let resolved = false
let child: ChildProcess | null = null
const resolveOnce = (): void => {
if (resolved) {
return
}
resolved = true
if (activeTextSearches.get(searchKey) === child) {
activeTextSearches.delete(searchKey)
}
clearTimeout(killTimeout)
resolvePromise({
files: Array.from(fileMap.values()),
totalMatches,
truncated
})
}
const processLine = (line: string): void => {
if (!line || totalMatches >= maxResults) {
return
}
const data = msg.data
// Why: when rg runs inside WSL, output paths are Linux-native
// (e.g. /home/user/repo/src/file.ts). Translate them back to
// Windows UNC paths so path.relative() and Node fs APIs work.
const wslInfo = parseWslPath(rootPath)
const absPath: string = wslInfo
? toWindowsWslPath(data.path.text, wslInfo.distro)
: data.path.text
const relPath = normalizeRelativePath(relative(rootPath, absPath))
let fileResult = fileMap.get(absPath)
if (!fileResult) {
fileResult = { filePath: absPath, relativePath: relPath, matches: [] }
fileMap.set(absPath, fileResult)
}
for (const sub of data.submatches) {
fileResult.matches.push({
line: data.line_number,
column: sub.start + 1,
matchLength: sub.end - sub.start,
lineContent: data.lines.text.replace(/\n$/, '')
})
totalMatches++
if (totalMatches >= maxResults) {
truncated = true
child?.kill()
break
try {
const msg = JSON.parse(line)
if (msg.type !== 'match') {
return
}
const data = msg.data
// Why: when rg runs inside WSL, output paths are Linux-native
// (e.g. /home/user/repo/src/file.ts). Translate them back to
// Windows UNC paths so path.relative() and Node fs APIs work.
const wslInfo = parseWslPath(rootPath)
const absPath: string = wslInfo
? toWindowsWslPath(data.path.text, wslInfo.distro)
: data.path.text
const relPath = normalizeRelativePath(relative(rootPath, absPath))
let fileResult = fileMap.get(absPath)
if (!fileResult) {
fileResult = { filePath: absPath, relativePath: relPath, matches: [] }
fileMap.set(absPath, fileResult)
}
for (const sub of data.submatches) {
fileResult.matches.push({
line: data.line_number,
column: sub.start + 1,
matchLength: sub.end - sub.start,
lineContent: data.lines.text.replace(/\n$/, '')
})
totalMatches++
if (totalMatches >= maxResults) {
truncated = true
child?.kill()
break
}
}
} catch {
// skip malformed JSON lines
}
} catch {
// skip malformed JSON lines
}
}
const nextChild = wslAwareSpawn('rg', rgArgs, {
cwd: rootPath,
stdio: ['ignore', 'pipe', 'pipe']
})
child = nextChild
activeTextSearches.set(searchKey, nextChild)
const nextChild = wslAwareSpawn('rg', rgArgs, {
cwd: rootPath,
stdio: ['ignore', 'pipe', 'pipe']
})
child = nextChild
activeTextSearches.set(searchKey, nextChild)
nextChild.stdout!.setEncoding('utf-8')
nextChild.stdout!.on('data', (chunk: string) => {
stdoutBuffer += chunk
const lines = stdoutBuffer.split('\n')
stdoutBuffer = lines.pop() ?? ''
for (const line of lines) {
processLine(line)
}
})
nextChild.stderr!.on('data', () => {
// Drain stderr so rg cannot block on a full pipe.
})
nextChild.stdout!.setEncoding('utf-8')
nextChild.stdout!.on('data', (chunk: string) => {
stdoutBuffer += chunk
const lines = stdoutBuffer.split('\n')
stdoutBuffer = lines.pop() ?? ''
for (const line of lines) {
processLine(line)
}
})
nextChild.stderr!.on('data', () => {
// Drain stderr so rg cannot block on a full pipe.
})
nextChild.once('error', () => {
resolveOnce()
})
nextChild.once('error', () => {
resolveOnce()
})
nextChild.once('close', () => {
if (stdoutBuffer) {
processLine(stdoutBuffer)
}
resolveOnce()
})
nextChild.once('close', () => {
if (stdoutBuffer) {
processLine(stdoutBuffer)
}
resolveOnce()
})
// Why: if the timeout fires, the child is killed and results are partial.
// We must mark them as truncated so the UI can indicate incomplete results.
const killTimeout = setTimeout(() => {
truncated = true
child?.kill()
}, SEARCH_TIMEOUT_MS)
})
})
// Why: if the timeout fires, the child is killed and results are partial.
// We must mark them as truncated so the UI can indicate incomplete results.
const killTimeout = setTimeout(() => {
truncated = true
child?.kill()
}, SEARCH_TIMEOUT_MS)
})
}
)
// ─── List all files (for quick-open) ─────────────────────
ipcMain.handle(
'fs:listFiles',
async (_event, args: { rootPath: string }): Promise<string[]> =>
listQuickOpenFiles(args.rootPath, store)
async (_event, args: { rootPath: string; connectionId?: string }): Promise<string[]> => {
if (args.connectionId) {
const provider = getSshFilesystemProvider(args.connectionId)
if (!provider) {
throw new Error(`No filesystem provider for connection "${args.connectionId}"`)
}
return provider.listFiles(args.rootPath)
}
return listQuickOpenFiles(args.rootPath, store)
}
)
// ─── Git operations ─────────────────────────────────────
ipcMain.handle(
'git:status',
async (_event, args: { worktreePath: string }): Promise<GitStatusResult> => {
async (
_event,
args: { worktreePath: string; connectionId?: string }
): Promise<GitStatusResult> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.getStatus(args.worktreePath)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
return getStatus(worktreePath)
}
@ -387,7 +461,17 @@ export function registerFilesystemHandlers(store: Store): void {
// operation finishes, without running a full `git status`.
ipcMain.handle(
'git:conflictOperation',
async (_event, args: { worktreePath: string }): Promise<GitConflictOperation> => {
async (
_event,
args: { worktreePath: string; connectionId?: string }
): Promise<GitConflictOperation> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.detectConflictOperation(args.worktreePath)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
return detectConflictOperation(worktreePath)
}
@ -397,8 +481,15 @@ export function registerFilesystemHandlers(store: Store): void {
'git:diff',
async (
_event,
args: { worktreePath: string; filePath: string; staged: boolean }
args: { worktreePath: string; filePath: string; staged: boolean; connectionId?: string }
): Promise<GitDiffResult> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.getDiff(args.worktreePath, args.filePath, args.staged)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
const filePath = validateGitRelativeFilePath(worktreePath, args.filePath)
return getDiff(worktreePath, filePath, args.staged)
@ -409,8 +500,15 @@ export function registerFilesystemHandlers(store: Store): void {
'git:branchCompare',
async (
_event,
args: { worktreePath: string; baseRef: string }
args: { worktreePath: string; baseRef: string; connectionId?: string }
): Promise<GitBranchCompareResult> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.getBranchCompare(args.worktreePath, args.baseRef)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
return getBranchCompare(worktreePath, args.baseRef)
}
@ -430,8 +528,29 @@ export function registerFilesystemHandlers(store: Store): void {
}
filePath: string
oldPath?: string
connectionId?: string
}
): Promise<GitDiffResult> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
const results = await provider.getBranchDiff(args.worktreePath, args.compare.mergeBase, {
includePatch: true,
filePath: args.filePath,
oldPath: args.oldPath
})
return (
results[0] ?? {
kind: 'text',
originalContent: '',
modifiedContent: '',
originalIsBinary: false,
modifiedIsBinary: false
}
)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
const filePath = validateGitRelativeFilePath(worktreePath, args.filePath)
const oldPath = args.oldPath
@ -448,7 +567,17 @@ export function registerFilesystemHandlers(store: Store): void {
ipcMain.handle(
'git:stage',
async (_event, args: { worktreePath: string; filePath: string }): Promise<void> => {
async (
_event,
args: { worktreePath: string; filePath: string; connectionId?: string }
): Promise<void> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.stageFile(args.worktreePath, args.filePath)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
const filePath = validateGitRelativeFilePath(worktreePath, args.filePath)
await stageFile(worktreePath, filePath)
@ -457,7 +586,17 @@ export function registerFilesystemHandlers(store: Store): void {
ipcMain.handle(
'git:unstage',
async (_event, args: { worktreePath: string; filePath: string }): Promise<void> => {
async (
_event,
args: { worktreePath: string; filePath: string; connectionId?: string }
): Promise<void> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.unstageFile(args.worktreePath, args.filePath)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
const filePath = validateGitRelativeFilePath(worktreePath, args.filePath)
await unstageFile(worktreePath, filePath)
@ -466,7 +605,17 @@ export function registerFilesystemHandlers(store: Store): void {
ipcMain.handle(
'git:discard',
async (_event, args: { worktreePath: string; filePath: string }): Promise<void> => {
async (
_event,
args: { worktreePath: string; filePath: string; connectionId?: string }
): Promise<void> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.discardChanges(args.worktreePath, args.filePath)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
const filePath = validateGitRelativeFilePath(worktreePath, args.filePath)
await discardChanges(worktreePath, filePath)
@ -475,7 +624,17 @@ export function registerFilesystemHandlers(store: Store): void {
ipcMain.handle(
'git:bulkStage',
async (_event, args: { worktreePath: string; filePaths: string[] }): Promise<void> => {
async (
_event,
args: { worktreePath: string; filePaths: string[]; connectionId?: string }
): Promise<void> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.bulkStageFiles(args.worktreePath, args.filePaths)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
const filePaths = args.filePaths.map((p) => validateGitRelativeFilePath(worktreePath, p))
await bulkStageFiles(worktreePath, filePaths)
@ -484,7 +643,17 @@ export function registerFilesystemHandlers(store: Store): void {
ipcMain.handle(
'git:bulkUnstage',
async (_event, args: { worktreePath: string; filePaths: string[] }): Promise<void> => {
async (
_event,
args: { worktreePath: string; filePaths: string[]; connectionId?: string }
): Promise<void> => {
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.bulkUnstageFiles(args.worktreePath, args.filePaths)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
const filePaths = args.filePaths.map((p) => validateGitRelativeFilePath(worktreePath, p))
await bulkUnstageFiles(worktreePath, filePaths)
@ -495,8 +664,18 @@ export function registerFilesystemHandlers(store: Store): void {
'git:remoteFileUrl',
async (
_event,
args: { worktreePath: string; relativePath: string; line: number }
args: { worktreePath: string; relativePath: string; line: number; connectionId?: string }
): Promise<string | null> => {
// Why: remote repos can't use the local hosted-git-info approach because
// the .git/config lives on the remote. Route through the relay's git.exec
// to fetch the remote URL and build the file link server-side.
if (args.connectionId) {
const provider = getSshGitProvider(args.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${args.connectionId}"`)
}
return provider.getRemoteFileUrl(args.worktreePath, args.relativePath, args.line)
}
const worktreePath = await resolveRegisteredWorktreePath(args.worktreePath, store)
return getRemoteFileUrl(worktreePath, args.relativePath, args.line)
}

View file

@ -92,7 +92,8 @@ describe('registerPtyHandlers', () => {
isDestroyed: () => false,
webContents: {
on: vi.fn(),
send: vi.fn()
send: vi.fn(),
removeListener: vi.fn()
}
}
@ -118,12 +119,12 @@ describe('registerPtyHandlers', () => {
mainWindow.webContents.on.mockReset()
mainWindow.webContents.send.mockReset()
handleMock.mockImplementation((channel, handler) => {
handleMock.mockImplementation((channel: string, handler: (...a: unknown[]) => unknown) => {
handlers.set(channel, handler)
})
getPathMock.mockReturnValue('/tmp/orca-user-data')
existsSyncMock.mockReturnValue(true)
statSyncMock.mockReturnValue({ isDirectory: () => true })
statSyncMock.mockReturnValue({ isDirectory: () => true, mode: 0o755 })
openCodeBuildPtyEnvMock.mockReturnValue({
ORCA_OPENCODE_HOOK_PORT: '4567',
ORCA_OPENCODE_HOOK_TOKEN: 'opencode-token',
@ -140,7 +141,9 @@ describe('registerPtyHandlers', () => {
onExit: vi.fn(() => makeDisposable()),
write: vi.fn(),
resize: vi.fn(),
kill: vi.fn()
kill: vi.fn(),
process: 'zsh',
pid: 12345
})
})
@ -172,11 +175,11 @@ describe('registerPtyHandlers', () => {
}
/** Helper: trigger pty:spawn and return the env passed to node-pty. */
function spawnAndGetEnv(
async function spawnAndGetEnv(
argsEnv?: Record<string, string>,
processEnvOverrides?: Record<string, string | undefined>,
getSelectedCodexHomePath?: () => string | null
): Record<string, string> {
): Promise<Record<string, string>> {
const savedEnv: Record<string, string | undefined> = {}
if (processEnvOverrides) {
for (const [k, v] of Object.entries(processEnvOverrides)) {
@ -194,7 +197,7 @@ describe('registerPtyHandlers', () => {
// accumulate stale state across calls within one test.
handlers.clear()
registerPtyHandlers(mainWindow as never, undefined, getSelectedCodexHomePath)
handlers.get('pty:spawn')!(null, {
await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24,
...(argsEnv ? { env: argsEnv } : {})
@ -232,35 +235,36 @@ describe('registerPtyHandlers', () => {
}
describe('spawn environment', () => {
it('defaults LANG to en_US.UTF-8 when not inherited from process.env', () => {
const env = spawnAndGetEnv(undefined, { LANG: undefined })
it('defaults LANG to en_US.UTF-8 when not inherited from process.env', async () => {
const env = await spawnAndGetEnv(undefined, { LANG: undefined })
expect(env.LANG).toBe('en_US.UTF-8')
})
it('inherits LANG from process.env when already set', () => {
const env = spawnAndGetEnv(undefined, { LANG: 'ja_JP.UTF-8' })
it('inherits LANG from process.env when already set', async () => {
const env = await spawnAndGetEnv(undefined, { LANG: 'ja_JP.UTF-8' })
expect(env.LANG).toBe('ja_JP.UTF-8')
})
it('lets caller-provided env override LANG', () => {
const env = spawnAndGetEnv({ LANG: 'fr_FR.UTF-8' })
it('lets caller-provided env override LANG', async () => {
const env = await spawnAndGetEnv({ LANG: 'fr_FR.UTF-8' })
expect(env.LANG).toBe('fr_FR.UTF-8')
})
it('always sets TERM and COLORTERM regardless of env', () => {
const env = spawnAndGetEnv()
it('always sets TERM and COLORTERM regardless of env', async () => {
const env = await spawnAndGetEnv()
expect(env.TERM).toBe('xterm-256color')
expect(env.COLORTERM).toBe('truecolor')
expect(env.TERM_PROGRAM).toBe('Orca')
})
it('injects the selected Codex home into Orca terminal PTYs', () => {
const env = spawnAndGetEnv(undefined, undefined, () => '/tmp/orca-codex-home')
it('injects the selected Codex home into Orca terminal PTYs', async () => {
const env = await spawnAndGetEnv(undefined, undefined, () => '/tmp/orca-codex-home')
expect(env.CODEX_HOME).toBe('/tmp/orca-codex-home')
})
it('injects the OpenCode hook env into Orca terminal PTYs', () => {
const env = spawnAndGetEnv()
it('injects the OpenCode hook env into Orca terminal PTYs', async () => {
// Why: clear any ambient OPENCODE_CONFIG_DIR so the mock's value is used
const env = await spawnAndGetEnv(undefined, { OPENCODE_CONFIG_DIR: undefined })
expect(openCodeBuildPtyEnvMock).toHaveBeenCalledTimes(1)
expect(openCodeBuildPtyEnvMock.mock.calls[0]?.[0]).toEqual(expect.any(String))
expect(env.ORCA_OPENCODE_HOOK_PORT).toBe('4567')
@ -269,13 +273,17 @@ describe('registerPtyHandlers', () => {
expect(env.OPENCODE_CONFIG_DIR).toBe('/tmp/orca-opencode-config')
})
it('injects the Pi agent overlay env into Orca terminal PTYs', () => {
const env = spawnAndGetEnv(undefined, { PI_CODING_AGENT_DIR: '/tmp/user-pi-agent' })
it('injects the Pi agent overlay env into Orca terminal PTYs', async () => {
const env = await spawnAndGetEnv(undefined, { PI_CODING_AGENT_DIR: '/tmp/user-pi-agent' })
expect(piBuildPtyEnvMock).toHaveBeenCalledWith(expect.any(String), '/tmp/user-pi-agent')
expect(env.PI_CODING_AGENT_DIR).toBe('/tmp/orca-pi-agent-overlay')
})
it('leaves ambient CODEX_HOME untouched when system default is selected', () => {
const env = spawnAndGetEnv(undefined, { CODEX_HOME: '/tmp/system-codex-home' }, () => null)
it('leaves ambient CODEX_HOME untouched when system default is selected', async () => {
const env = await spawnAndGetEnv(
undefined,
{ CODEX_HOME: '/tmp/system-codex-home' },
() => null
)
expect(env.CODEX_HOME).toBe('/tmp/system-codex-home')
})
})
@ -391,7 +399,7 @@ describe('registerPtyHandlers', () => {
})
})
it('rejects missing WSL worktree cwd instead of validating only the fallback Windows cwd', () => {
it('rejects missing WSL worktree cwd instead of validating only the fallback Windows cwd', async () => {
const originalPlatform = process.platform
const originalUserProfile = process.env.USERPROFILE
@ -411,13 +419,15 @@ describe('registerPtyHandlers', () => {
try {
registerPtyHandlers(mainWindow as never)
expect(() =>
await expect(
handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24,
cwd: '\\\\wsl.localhost\\Ubuntu\\home\\jin\\missing'
})
).toThrow('Working directory "\\\\wsl.localhost\\Ubuntu\\home\\jin\\missing" does not exist.')
).rejects.toThrow(
'Working directory "\\\\wsl.localhost\\Ubuntu\\home\\jin\\missing" does not exist.'
)
expect(spawnMock).not.toHaveBeenCalled()
} finally {
Object.defineProperty(process, 'platform', {
@ -543,7 +553,7 @@ describe('registerPtyHandlers', () => {
}
})
it('falls back to a system shell when SHELL points to a missing binary', () => {
it('falls back to a system shell when SHELL points to a missing binary', async () => {
const originalShell = process.env.SHELL
const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {})
@ -555,7 +565,7 @@ describe('registerPtyHandlers', () => {
process.env.SHELL = '/opt/homebrew/bin/bash'
registerPtyHandlers(mainWindow as never)
const result = handlers.get('pty:spawn')!(null, {
const result = await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24,
cwd: '/tmp'
@ -581,7 +591,7 @@ describe('registerPtyHandlers', () => {
}
})
it('falls back when SHELL points to a non-executable binary', () => {
it('falls back when SHELL points to a non-executable binary', async () => {
const originalShell = process.env.SHELL
const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {})
@ -595,7 +605,7 @@ describe('registerPtyHandlers', () => {
process.env.SHELL = '/opt/homebrew/bin/bash'
registerPtyHandlers(mainWindow as never)
handlers.get('pty:spawn')!(null, {
await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24,
cwd: '/tmp'
@ -620,7 +630,7 @@ describe('registerPtyHandlers', () => {
}
})
it('prefers args.env.SHELL and normalizes the child env after fallback', () => {
it('prefers args.env.SHELL and normalizes the child env after fallback', async () => {
const originalShell = process.env.SHELL
const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {})
@ -632,7 +642,7 @@ describe('registerPtyHandlers', () => {
process.env.SHELL = '/bin/bash'
registerPtyHandlers(mainWindow as never)
handlers.get('pty:spawn')!(null, {
await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24,
cwd: '/tmp',
@ -661,29 +671,38 @@ describe('registerPtyHandlers', () => {
}
})
it('cleans up provider-specific PTY overlays when a PTY is killed', () => {
it('cleans up provider-specific PTY overlays when a PTY is killed', async () => {
let exitCb: ((info: { exitCode: number }) => void) | undefined
const proc = {
onData: vi.fn(() => makeDisposable()),
onExit: vi.fn(() => makeDisposable()),
onExit: vi.fn((cb: (info: { exitCode: number }) => void) => {
exitCb = cb
return makeDisposable()
}),
write: vi.fn(),
resize: vi.fn(),
kill: vi.fn()
kill: vi.fn(() => {
// Simulate node-pty behavior: kill triggers onExit callback
exitCb?.({ exitCode: -1 })
}),
process: 'zsh',
pid: 12345
}
spawnMock.mockReturnValue(proc)
registerPtyHandlers(mainWindow as never)
const spawnResult = handlers.get('pty:spawn')!(null, {
const spawnResult = (await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24
}) as { id: string }
})) as { id: string }
handlers.get('pty:kill')!(null, { id: spawnResult.id })
await handlers.get('pty:kill')!(null, { id: spawnResult.id })
expect(openCodeClearPtyMock).toHaveBeenCalledWith(spawnResult.id)
expect(piClearPtyMock).toHaveBeenCalledWith(spawnResult.id)
})
it('disposes PTY listeners before manual kill IPC', () => {
it('disposes PTY listeners before manual kill IPC', async () => {
const onDataDisposable = makeDisposable()
const onExitDisposable = makeDisposable()
const proc = {
@ -691,14 +710,19 @@ describe('registerPtyHandlers', () => {
onExit: vi.fn(() => onExitDisposable),
write: vi.fn(),
resize: vi.fn(),
kill: vi.fn()
kill: vi.fn(),
process: 'zsh',
pid: 12345
}
spawnMock.mockReturnValue(proc)
registerPtyHandlers(mainWindow as never)
const spawnResult = handlers.get('pty:spawn')!(null, { cols: 80, rows: 24 }) as { id: string }
const spawnResult = (await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24
})) as { id: string }
handlers.get('pty:kill')!(null, { id: spawnResult.id })
await handlers.get('pty:kill')!(null, { id: spawnResult.id })
expect(onDataDisposable.dispose.mock.invocationCallOrder[0]).toBeLessThan(
proc.kill.mock.invocationCallOrder[0]
@ -708,7 +732,7 @@ describe('registerPtyHandlers', () => {
)
})
it('disposes PTY listeners before runtime controller kill', () => {
it('disposes PTY listeners before runtime controller kill', async () => {
const onDataDisposable = makeDisposable()
const onExitDisposable = makeDisposable()
const proc = {
@ -716,7 +740,9 @@ describe('registerPtyHandlers', () => {
onExit: vi.fn(() => onExitDisposable),
write: vi.fn(),
resize: vi.fn(),
kill: vi.fn()
kill: vi.fn(),
process: 'zsh',
pid: 12345
}
const runtime = {
setPtyController: vi.fn(),
@ -727,7 +753,10 @@ describe('registerPtyHandlers', () => {
spawnMock.mockReturnValue(proc)
registerPtyHandlers(mainWindow as never, runtime as never)
const spawnResult = handlers.get('pty:spawn')!(null, { cols: 80, rows: 24 }) as { id: string }
const spawnResult = (await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24
})) as { id: string }
const runtimeController = runtime.setPtyController.mock.calls[0]?.[0] as {
kill: (ptyId: string) => boolean
}
@ -741,7 +770,7 @@ describe('registerPtyHandlers', () => {
)
})
it('disposes PTY listeners before did-finish-load orphan cleanup', () => {
it('disposes PTY listeners before did-finish-load orphan cleanup', async () => {
const onDataDisposable = makeDisposable()
const onExitDisposable = makeDisposable()
const proc = {
@ -749,7 +778,9 @@ describe('registerPtyHandlers', () => {
onExit: vi.fn(() => onExitDisposable),
write: vi.fn(),
resize: vi.fn(),
kill: vi.fn()
kill: vi.fn(),
process: 'zsh',
pid: 12345
}
const runtime = {
setPtyController: vi.fn(),
@ -764,7 +795,7 @@ describe('registerPtyHandlers', () => {
([eventName]) => eventName === 'did-finish-load'
)?.[1] as (() => void) | undefined
expect(didFinishLoad).toBeTypeOf('function')
handlers.get('pty:spawn')!(null, { cols: 80, rows: 24 })
await handlers.get('pty:spawn')!(null, { cols: 80, rows: 24 })
// The first load after spawn only advances generation. The second one sees
// this PTY as belonging to a prior page load and kills it as orphaned.
@ -779,7 +810,7 @@ describe('registerPtyHandlers', () => {
)
})
it('clears PTY state even when kill reports the process is already gone', () => {
it('clears PTY state even when kill reports the process is already gone', async () => {
const proc = {
onData: vi.fn(() => makeDisposable()),
onExit: vi.fn(() => makeDisposable()),
@ -787,16 +818,21 @@ describe('registerPtyHandlers', () => {
resize: vi.fn(),
kill: vi.fn(() => {
throw new Error('already dead')
})
}),
process: 'zsh',
pid: 12345
}
spawnMock.mockReturnValue(proc)
registerPtyHandlers(mainWindow as never)
const spawnResult = handlers.get('pty:spawn')!(null, { cols: 80, rows: 24 }) as { id: string }
const spawnResult = (await handlers.get('pty:spawn')!(null, {
cols: 80,
rows: 24
})) as { id: string }
handlers.get('pty:kill')!(null, { id: spawnResult.id })
await handlers.get('pty:kill')!(null, { id: spawnResult.id })
expect(handlers.get('pty:hasChildProcesses')!(null, { id: spawnResult.id })).toBe(false)
expect(await handlers.get('pty:hasChildProcesses')!(null, { id: spawnResult.id })).toBe(false)
expect(openCodeClearPtyMock).toHaveBeenCalledWith(spawnResult.id)
expect(piClearPtyMock).toHaveBeenCalledWith(spawnResult.id)
})

View file

@ -3,298 +3,94 @@ main-process module so spawn-time environment scoping, lifecycle cleanup,
foreground-process inspection, and renderer IPC stay behind a single audited
boundary. Splitting it by line count would scatter tightly coupled terminal
process behavior across files without a cleaner ownership seam. */
import { basename, win32 as pathWin32 } from 'path'
import {
existsSync,
accessSync,
statSync,
chmodSync,
mkdirSync,
writeFileSync,
constants as fsConstants
} from 'fs'
import { app, type BrowserWindow, ipcMain } from 'electron'
import * as pty from 'node-pty'
import { type BrowserWindow, ipcMain } from 'electron'
export { getBashShellReadyRcfileContent } from '../providers/local-pty-shell-ready'
import type { OrcaRuntimeService } from '../runtime/orca-runtime'
import { parseWslPath } from '../wsl'
import { openCodeHookService } from '../opencode/hook-service'
import { piTitlebarExtensionService } from '../pi/titlebar-extension-service'
import { LocalPtyProvider } from '../providers/local-pty-provider'
import type { IPtyProvider } from '../providers/types'
let ptyCounter = 0
const ptyProcesses = new Map<string, pty.IPty>()
/** Basename of the shell binary each PTY was spawned with (e.g. "zsh"). */
const ptyShellName = new Map<string, string>()
// Why: node-pty's onData/onExit register native NAPI ThreadSafeFunction
// callbacks. If the PTY is killed without disposing these listeners, the
// stale callbacks survive into node::FreeEnvironment() where NAPI attempts
// to invoke/clean them up on a destroyed environment, triggering a SIGABRT
// via Napi::Error::ThrowAsJavaScriptException. Storing and calling the
// disposables before proc.kill() prevents the use-after-free crash.
const ptyDisposables = new Map<string, { dispose: () => void }[]>()
// ─── Provider Registry ──────────────────────────────────────────────
// Routes PTY operations by connectionId. null = local provider.
// SSH providers will be registered here in Phase 1.
// Track which "page load generation" each PTY belongs to.
// When the renderer reloads, we only kill PTYs from previous generations,
// not ones spawned during the current page load. This prevents a race
// condition where did-finish-load fires after PTYs have already been
// created by the new page, killing them and leaving blank terminals.
let loadGeneration = 0
const ptyLoadGeneration = new Map<string, number>()
let didEnsureSpawnHelperExecutable = false
let didEnsureShellReadyWrappers = false
const localProvider = new LocalPtyProvider()
const sshProviders = new Map<string, IPtyProvider>()
// Why: PTY IDs are assigned at spawn time with a connectionId, but subsequent
// write/resize/kill calls only carry the PTY ID. This map lets us route
// post-spawn operations to the correct provider without the renderer needing
// to track connectionId per-PTY.
const ptyOwnership = new Map<string, string | null>()
function quotePosixSingle(value: string): string {
return `'${value.replace(/'/g, `'\\''`)}'`
function getProvider(connectionId: string | null | undefined): IPtyProvider {
if (!connectionId) {
return localProvider
}
const provider = sshProviders.get(connectionId)
if (!provider) {
throw new Error(`No PTY provider for connection "${connectionId}"`)
}
return provider
}
const STARTUP_COMMAND_READY_MAX_WAIT_MS = 1500
const OSC_133_A = '\x1b]133;A'
type ShellReadyScanState = {
matchPos: number
heldBytes: string
/**
 * Resolve the provider that owns an already-spawned PTY.
 * PTY ids with no ownership record are treated as local, so write/resize/kill
 * calls for unknown ids route to the local provider instead of throwing.
 */
function getProviderForPty(ptyId: string): IPtyProvider {
  const owner = ptyOwnership.get(ptyId)
  return owner === undefined ? localProvider : getProvider(owner)
}
function createShellReadyScanState(): ShellReadyScanState {
return { matchPos: 0, heldBytes: '' }
/**
 * Register an SSH PTY provider for a connection.
 *
 * @param connectionId - Identifier of the SSH connection the provider serves.
 * @param provider - Provider that will own PTYs spawned for this connection.
 */
export function registerSshPtyProvider(connectionId: string, provider: IPtyProvider): void {
  sshProviders.set(connectionId, provider)
}
function scanForShellReady(
state: ShellReadyScanState,
data: string
): { output: string; matched: boolean } {
let output = ''
/**
 * Remove an SSH PTY provider when a connection is closed.
 * Note: this only drops the registry entry; disposing the provider itself is
 * the caller's responsibility (fetch it first via getSshPtyProvider).
 */
export function unregisterSshPtyProvider(connectionId: string): void {
  sshProviders.delete(connectionId)
}
for (let i = 0; i < data.length; i += 1) {
const ch = data[i] as string
if (state.matchPos < OSC_133_A.length) {
if (ch === OSC_133_A[state.matchPos]) {
state.heldBytes += ch
state.matchPos += 1
} else {
output += state.heldBytes
state.heldBytes = ''
state.matchPos = 0
if (ch === OSC_133_A[0]) {
state.heldBytes = ch
state.matchPos = 1
} else {
output += ch
}
}
} else if (ch === '\x07') {
const remaining = data.slice(i + 1)
state.heldBytes = ''
state.matchPos = 0
return { output: output + remaining, matched: true }
} else {
state.heldBytes += ch
/**
 * Get the SSH PTY provider for a connection (for dispose on cleanup).
 *
 * @returns The registered provider, or undefined when none is registered.
 */
export function getSshPtyProvider(connectionId: string): IPtyProvider | undefined {
  return sshProviders.get(connectionId)
}
/**
 * Get the local PTY provider (for direct access in tests/runtime).
 * Always returns the single module-level LocalPtyProvider instance.
 */
export function getLocalPtyProvider(): LocalPtyProvider {
  return localProvider
}
/**
 * Get all PTY IDs owned by a given connectionId (for reconnection reattach).
 *
 * @param connectionId - SSH connection whose PTYs should be collected.
 * @returns PTY ids whose ownership record points at this connection; empty
 *   array when the connection owns none.
 */
export function getPtyIdsForConnection(connectionId: string): string[] {
  const ids: string[] = []
  for (const [ptyId, connId] of ptyOwnership) {
    if (connId === connectionId) {
      ids.push(ptyId)
    }
  }
  return ids
}
/** Root directory (inside Electron's per-user data dir) holding the shell-ready wrapper rc files. */
function getShellReadyWrapperRoot(): string {
  const userData = app.getPath('userData')
  return userData + '/shell-ready'
}
/**
 * Build the contents of the bash rcfile wrapper.
 * The rcfile sources the user's normal login startup files (/etc/profile,
 * then the first of ~/.bash_profile, ~/.bash_login, ~/.profile) and appends
 * an OSC 133;A "prompt ready" marker through PROMPT_COMMAND. The template is
 * a runtime string written to disk verbatim — do not reformat it.
 */
export function getBashShellReadyRcfileContent(): string {
  return `# Orca bash shell-ready wrapper
[[ -f /etc/profile ]] && source /etc/profile
if [[ -f "$HOME/.bash_profile" ]]; then
source "$HOME/.bash_profile"
elif [[ -f "$HOME/.bash_login" ]]; then
source "$HOME/.bash_login"
elif [[ -f "$HOME/.profile" ]]; then
source "$HOME/.profile"
fi
# Why: preserve bash's normal login-shell contract. Many users already source
# ~/.bashrc from ~/.bash_profile; forcing ~/.bashrc again here would duplicate
# PATH edits, hooks, and prompt init in Orca startup-command shells.
# Why: append the marker through PROMPT_COMMAND so it fires after the login
# startup files have rebuilt the prompt, matching Superset's "shell ready"
# contract without re-running user rc files.
__orca_prompt_mark() {
printf "\\033]133;A\\007"
}
if [[ "$(declare -p PROMPT_COMMAND 2>/dev/null)" == "declare -a"* ]]; then
PROMPT_COMMAND=("\${PROMPT_COMMAND[@]}" "__orca_prompt_mark")
else
_orca_prev_prompt_command="\${PROMPT_COMMAND}"
if [[ -n "\${_orca_prev_prompt_command}" ]]; then
PROMPT_COMMAND="\${_orca_prev_prompt_command};__orca_prompt_mark"
else
PROMPT_COMMAND="__orca_prompt_mark"
fi
fi
`
}
/**
 * Write the zsh/bash "shell ready" wrapper rc files under the user-data dir.
 * The wrappers source the user's normal startup files and then emit an
 * OSC 133;A marker so Orca can detect when the prompt is ready. Runs at most
 * once per process and is skipped entirely on Windows. The template literals
 * are runtime file contents written verbatim — do not reformat them.
 */
function ensureShellReadyWrappers(): void {
  if (didEnsureShellReadyWrappers || process.platform === 'win32') {
    return
  }
  // The flag is set up front, so a failed write is not retried this session.
  didEnsureShellReadyWrappers = true
  const root = getShellReadyWrapperRoot()
  const zshDir = `${root}/zsh`
  const bashDir = `${root}/bash`
  // .zshenv: chain the user's original .zshenv, then redirect ZDOTDIR into
  // our wrapper directory so zsh picks up the wrapper rc files below.
  const zshEnv = `# Orca zsh shell-ready wrapper
export ORCA_ORIG_ZDOTDIR="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
[[ -f "$ORCA_ORIG_ZDOTDIR/.zshenv" ]] && source "$ORCA_ORIG_ZDOTDIR/.zshenv"
export ZDOTDIR=${quotePosixSingle(zshDir)}
`
  const zshProfile = `# Orca zsh shell-ready wrapper
_orca_home="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
[[ -f "$_orca_home/.zprofile" ]] && source "$_orca_home/.zprofile"
`
  const zshRc = `# Orca zsh shell-ready wrapper
_orca_home="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
if [[ -o interactive && -f "$_orca_home/.zshrc" ]]; then
source "$_orca_home/.zshrc"
fi
`
  const zshLogin = `# Orca zsh shell-ready wrapper
_orca_home="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
if [[ -o interactive && -f "$_orca_home/.zlogin" ]]; then
source "$_orca_home/.zlogin"
fi
# Why: emit OSC 133;A only after the user's startup hooks finish so Orca knows
# the prompt is actually ready for a long startup command paste.
__orca_prompt_mark() {
printf "\\033]133;A\\007"
}
precmd_functions=(\${precmd_functions[@]} __orca_prompt_mark)
`
  const bashRc = getBashShellReadyRcfileContent()
  const files = [
    [`${zshDir}/.zshenv`, zshEnv],
    [`${zshDir}/.zprofile`, zshProfile],
    [`${zshDir}/.zshrc`, zshRc],
    [`${zshDir}/.zlogin`, zshLogin],
    [`${bashDir}/rcfile`, bashRc]
  ] as const
  for (const [path, content] of files) {
    // Derive the parent directory from the file path (paths always use '/').
    const dir = path.slice(0, path.lastIndexOf('/'))
    mkdirSync(dir, { recursive: true })
    writeFileSync(path, content, 'utf8')
    // 0o644: the rc files are sourced, not executed, so no execute bit.
    chmodSync(path, 0o644)
  }
}
function getShellReadyLaunchConfig(shellPath: string): {
args: string[] | null
env: Record<string, string>
supportsReadyMarker: boolean
} {
const shellName = basename(shellPath).toLowerCase()
if (shellName === 'zsh') {
ensureShellReadyWrappers()
return {
args: ['-l'],
env: {
ORCA_ORIG_ZDOTDIR: process.env.ZDOTDIR || process.env.HOME || '',
ZDOTDIR: `${getShellReadyWrapperRoot()}/zsh`
},
supportsReadyMarker: true
/**
* Remove all PTY ownership entries for a given connectionId.
* Why: when an SSH connection is closed, the remote PTYs are gone but their
* ownership entries linger. Without cleanup, subsequent spawn calls could
* look up a stale provider for those PTY IDs, and the map grows unboundedly.
*/
export function clearPtyOwnershipForConnection(connectionId: string): void {
for (const [ptyId, connId] of ptyOwnership) {
if (connId === connectionId) {
ptyOwnership.delete(ptyId)
}
}
if (shellName === 'bash') {
ensureShellReadyWrappers()
return {
args: ['--rcfile', `${getShellReadyWrapperRoot()}/bash/rcfile`],
env: {},
supportsReadyMarker: true
}
}
return {
args: null,
env: {},
supportsReadyMarker: false
}
}
/**
 * Write `startupCommand` into the PTY exactly once, after the shell signals
 * readiness (when `readyPromise` resolves), unless the PTY exits first.
 *
 * @param readyPromise - Resolves once the shell has emitted its ready marker.
 * @param proc - PTY to write the command into.
 * @param startupCommand - Command text; a trailing newline is appended if missing.
 * @param onExit - Registers a callback invoked on PTY exit; used to cancel the
 *   pending write so we never write into a dead PTY.
 */
function writeStartupCommandWhenShellReady(
  readyPromise: Promise<void>,
  proc: pty.IPty,
  startupCommand: string,
  onExit: (cleanup: () => void) => void
): void {
  // `sent` doubles as a cancellation flag: the exit cleanup sets it so a late
  // readyPromise resolution becomes a no-op.
  let sent = false
  const cleanup = (): void => {
    sent = true
  }
  const flush = (): void => {
    if (sent) {
      return
    }
    sent = true
    // Why: run startup commands inside the same interactive shell Orca keeps
    // open for the pane. Spawning `shell -c <command>; exec shell -l` would
    // avoid the race, but it would also replace the session after the agent
    // exits and break "stay in this terminal" workflows.
    const payload = startupCommand.endsWith('\n') ? startupCommand : `${startupCommand}\n`
    // Why: startup commands are usually long, quoted agent launches. Writing
    // them in one PTY call after the shell-ready barrier avoids the incremental
    // paste behavior that still dropped characters in practice.
    proc.write(payload)
  }
  readyPromise.then(flush)
  onExit(cleanup)
}
function disposePtyListeners(id: string): void {
const disposables = ptyDisposables.get(id)
if (disposables) {
for (const d of disposables) {
d.dispose()
}
ptyDisposables.delete(id)
}
}
/**
 * Teardown for one PTY id: dispose its node-pty listeners, then drop its
 * registry bookkeeping (process handle, shell name, load generation).
 */
function clearPtyState(id: string): void {
  disposePtyListeners(id)
  clearPtyRegistryState(id)
}
/**
 * Remove a PTY id from every per-PTY bookkeeping map: the live process table,
 * the shell-name cache, and the page-load generation tag.
 */
function clearPtyRegistryState(id: string): void {
  const registries = [ptyProcesses, ptyShellName, ptyLoadGeneration] as const
  for (const registry of registries) {
    registry.delete(id)
  }
}
/**
 * Explicitly terminate a local PTY and scrub all of its bookkeeping.
 *
 * Why: node-pty's listener disposables must be torn down before proc.kill()
 * on every explicit teardown path, not just app quit. Some kills happen
 * during reload/manual-close flows where waiting for later state cleanup is
 * too late to stop the stale NAPI callbacks from surviving into shutdown.
 *
 * @returns true when proc.kill() succeeded, false when node-pty threw (e.g.
 *          the process was already gone). State is cleared in either case.
 */
function killPtyProcess(id: string, proc: pty.IPty): boolean {
  disposePtyListeners(id)
  let killSucceeded: boolean
  try {
    proc.kill()
    killSucceeded = true
  } catch {
    killSucceeded = false
  }
  // Why: once an explicit kill path decides this PTY is done, the bookkeeping
  // maps must be cleared even if node-pty reports the process was already
  // gone. A stale registry entry would make later lookups treat the PTY as
  // live even though runtime teardown already ran.
  clearPtyRegistryState(id)
  clearProviderPtyState(id)
  return killSucceeded
}
function clearProviderPtyState(id: string): void {
export function clearProviderPtyState(id: string): void {
// Why: OpenCode and Pi both allocate PTY-scoped runtime state outside the
// node-pty process table. Centralizing provider cleanup avoids drift where a
// new teardown path forgets to remove one provider's overlay/hook state.
@ -302,64 +98,19 @@ function clearProviderPtyState(id: string): void {
piTitlebarExtensionService.clearPty(id)
}
function getShellValidationError(shellPath: string): string | null {
if (!existsSync(shellPath)) {
return (
`Shell "${shellPath}" does not exist. ` +
`Set a valid SHELL environment variable or install zsh/bash.`
)
}
try {
accessSync(shellPath, fsConstants.X_OK)
} catch {
return `Shell "${shellPath}" is not executable. Check file permissions.`
}
return null
/**
 * Drop the PTY-id → connection-id ownership entry for `id`, if present.
 */
export function deletePtyOwnership(id: string): void {
  ptyOwnership.delete(id)
}
/**
 * One-shot, best-effort fix-up: ensure node-pty's bundled `spawn-helper`
 * binary carries an execute bit on POSIX platforms. No-ops on win32 and on
 * every call after the first.
 *
 * Why: node-pty's Unix backend launches this helper before the requested
 * shell binary. Some package-manager/install paths strip the execute bit
 * from the prebuilt helper, which makes every PTY spawn fail with the
 * misleading "posix_spawnp failed" shell error even when /bin/zsh exists.
 */
function ensureNodePtySpawnHelperExecutable(): void {
  if (didEnsureSpawnHelperExecutable || process.platform === 'win32') {
    return
  }
  // Set the guard before doing any work so a throwing run is never retried.
  didEnsureSpawnHelperExecutable = true
  // Why: localProvider.onData/onExit return unsubscribe functions. Without
  // storing and calling these on re-registration, macOS app re-activation
  // creates a new BrowserWindow and re-calls registerPtyHandlers, leaking
  // duplicate listeners that forward every event twice.
  // NOTE(review): these three declarations are never referenced inside this
  // function — they appear to belong at module scope next to
  // registerPtyHandlers. Confirm placement before relying on them here.
  let localDataUnsub: (() => void) | null = null
  let localExitUnsub: (() => void) | null = null
  let didFinishLoadHandler: (() => void) | null = null
  try {
    // Locate node-pty's install root from one of its known lib files.
    const unixTerminalPath = require.resolve('node-pty/lib/unixTerminal.js')
    const packageRoot =
      basename(unixTerminalPath) === 'unixTerminal.js'
        ? unixTerminalPath.replace(/[/\\]lib[/\\]unixTerminal\.js$/, '')
        : unixTerminalPath
    // Candidate locations a spawn-helper build may land in; rewrite Electron
    // asar paths to their unpacked equivalents.
    const candidates = [
      `${packageRoot}/build/Release/spawn-helper`,
      `${packageRoot}/build/Debug/spawn-helper`,
      `${packageRoot}/prebuilds/${process.platform}-${process.arch}/spawn-helper`
    ].map((candidate) =>
      candidate
        .replace('app.asar/', 'app.asar.unpacked/')
        .replace('node_modules.asar/', 'node_modules.asar.unpacked/')
    )
    for (const candidate of candidates) {
      if (!existsSync(candidate)) {
        continue
      }
      const mode = statSync(candidate).mode
      // Any execute bit (user/group/other) already set — nothing to repair.
      if ((mode & 0o111) !== 0) {
        return
      }
      chmodSync(candidate, mode | 0o755)
      return
    }
  } catch (error) {
    // Best effort only: log and continue; a failure here just means the PTY
    // spawn will later surface node-pty's own error.
    console.warn(
      `[pty] Failed to ensure node-pty spawn-helper is executable: ${error instanceof Error ? error.message : String(error)}`
    )
  }
}
// ─── IPC Registration ───────────────────────────────────────────────
export function registerPtyHandlers(
mainWindow: BrowserWindow,
@ -375,180 +126,25 @@ export function registerPtyHandlers(
ipcMain.removeHandler('pty:getForegroundProcess')
ipcMain.removeAllListeners('pty:write')
// Kill orphaned PTY processes from previous page loads when the renderer reloads.
// PTYs tagged with the current loadGeneration were spawned during THIS page load
// and must be preserved — only kill PTYs from earlier generations.
mainWindow.webContents.on('did-finish-load', () => {
for (const [id, proc] of ptyProcesses) {
const gen = ptyLoadGeneration.get(id) ?? -1
if (gen < loadGeneration) {
killPtyProcess(id, proc)
// Why: notify runtime so the agent detector can close out any live
// agent sessions. Without this, killed PTYs would remain in the
// detector's liveAgents map and accumulate inflated durations.
runtime?.onPtyExit(id, -1)
}
}
// Advance generation for the next page load
loadGeneration++
})
runtime?.setPtyController({
write: (ptyId, data) => {
const proc = ptyProcesses.get(ptyId)
if (!proc) {
return false
}
proc.write(data)
return true
},
kill: (ptyId) => {
const proc = ptyProcesses.get(ptyId)
if (!proc) {
return false
}
if (!killPtyProcess(ptyId, proc)) {
return false
}
runtime?.onPtyExit(ptyId, -1)
return true
}
})
ipcMain.handle(
'pty:spawn',
(
_event,
args: {
cols: number
rows: number
cwd?: string
env?: Record<string, string>
command?: string
}
) => {
const id = String(++ptyCounter)
const defaultCwd =
process.platform === 'win32'
? process.env.USERPROFILE || process.env.HOMEPATH || 'C:\\'
: process.env.HOME || '/'
const cwd = args.cwd || defaultCwd
// Why: when the working directory is inside a WSL filesystem, spawn a
// WSL shell (wsl.exe) instead of a native Windows shell. This gives the
// user a Linux environment with access to their WSL-installed tools
// (git, node, etc.) rather than a PowerShell with no WSL toolchain.
const wslInfo = process.platform === 'win32' ? parseWslPath(cwd) : null
let shellPath: string
let shellArgs: string[]
let effectiveCwd: string
let validationCwd: string
let shellReadyLaunch: {
args: string[] | null
env: Record<string, string>
supportsReadyMarker: boolean
} | null = null
if (wslInfo) {
// Why: use `bash -c "cd ... && exec bash -l"` instead of `--cd` because
// wsl.exe's --cd flag fails with ERROR_PATH_NOT_FOUND in some Node
// spawn configurations. The exec replaces the outer bash with a login
// shell so the user gets their normal shell environment.
const escapedCwd = wslInfo.linuxPath.replace(/'/g, "'\\''")
shellPath = 'wsl.exe'
shellArgs = ['-d', wslInfo.distro, '--', 'bash', '-c', `cd '${escapedCwd}' && exec bash -l`]
// Why: set cwd to a valid Windows directory so node-pty's native
// spawn doesn't fail on the UNC path.
effectiveCwd = process.env.USERPROFILE || process.env.HOMEPATH || 'C:\\'
// Why: still validate the requested WSL UNC path, not the fallback
// Windows cwd. Otherwise a deleted/mistyped WSL worktree silently
// spawns a shell in the home directory and hides the real error.
validationCwd = cwd
} else if (process.platform === 'win32') {
shellPath = process.env.COMSPEC || 'powershell.exe'
// Why: use path.win32.basename so backslash-separated Windows paths
// are parsed correctly even when tests mock process.platform on Linux CI.
const shellBasename = pathWin32.basename(shellPath).toLowerCase()
// Why: On CJK Windows (Chinese, Japanese, Korean), the console code page
// defaults to the system ANSI code page (e.g. 936/GBK for Chinese).
// ConPTY encodes its output pipe using this code page, but node-pty
// always decodes as UTF-8. Without switching to code page 65001 (UTF-8),
// multi-byte CJK characters are garbled because the GBK/Shift-JIS/EUC-KR
// byte sequences are misinterpreted as UTF-8. This is especially visible
// with split-screen terminals where multiple ConPTY instances amplify the
// issue. Setting the code page at shell startup ensures all subsequent
// output — including from child processes — uses UTF-8.
if (shellBasename === 'cmd.exe') {
shellArgs = ['/K', 'chcp 65001 > nul']
} else if (shellBasename === 'powershell.exe' || shellBasename === 'pwsh.exe') {
// Why: `-NoExit -Command` alone skips the user's $PROFILE, breaking
// custom prompts (oh-my-posh, starship), aliases, and PSReadLine
// configuration. Dot-sourcing $PROFILE first restores the normal
// startup experience. The try/catch ensures a broken profile (e.g.
// terminating errors from strict-mode violations or failing module
// imports) cannot prevent the encoding commands from executing —
// otherwise the CJK fix would silently fail for those users.
shellArgs = [
'-NoExit',
'-Command',
'try { . $PROFILE } catch {}; [Console]::OutputEncoding = [System.Text.Encoding]::UTF8; [Console]::InputEncoding = [System.Text.Encoding]::UTF8'
]
} else {
shellArgs = []
}
effectiveCwd = cwd
validationCwd = cwd
} else {
// Why: startup commands can pass env overrides for the PTY. Prefer an
// explicit SHELL override when present, but still validate/fallback it
// exactly like the inherited process shell so stale config can't brick
// terminal creation.
shellPath = args.env?.SHELL || process.env.SHELL || '/bin/zsh'
shellReadyLaunch = args.command ? getShellReadyLaunchConfig(shellPath) : null
shellArgs = shellReadyLaunch?.args ?? ['-l']
effectiveCwd = cwd
validationCwd = cwd
}
ensureNodePtySpawnHelperExecutable()
if (!existsSync(validationCwd)) {
throw new Error(
`Working directory "${validationCwd}" does not exist. ` +
`It may have been deleted or is on an unmounted volume.`
)
}
if (!statSync(validationCwd).isDirectory()) {
throw new Error(`Working directory "${validationCwd}" is not a directory.`)
}
// Configure the local provider with app-specific hooks
localProvider.configure({
buildSpawnEnv: (id, baseEnv) => {
const selectedCodexHomePath = getSelectedCodexHomePath?.() ?? null
const spawnEnv = {
...process.env,
...args.env,
...shellReadyLaunch?.env,
TERM: 'xterm-256color',
COLORTERM: 'truecolor',
TERM_PROGRAM: 'Orca',
FORCE_HYPERLINK: '1'
} as Record<string, string>
const openCodeHookEnv = openCodeHookService.buildPtyEnv(id)
if (spawnEnv.OPENCODE_CONFIG_DIR) {
if (baseEnv.OPENCODE_CONFIG_DIR) {
// Why: OPENCODE_CONFIG_DIR is a singular extra config root. Replacing a
// user-provided directory would silently hide their custom OpenCode
// config, so preserve it and fall back to title-only detection there.
delete openCodeHookEnv.OPENCODE_CONFIG_DIR
}
Object.assign(spawnEnv, openCodeHookEnv)
Object.assign(baseEnv, openCodeHookEnv)
// Why: PI_CODING_AGENT_DIR owns Pi's full config/session root. Build a
// PTY-scoped overlay from the caller's chosen root so Pi sessions keep
// their user state without sharing a mutable overlay across terminals.
Object.assign(
spawnEnv,
piTitlebarExtensionService.buildPtyEnv(id, spawnEnv.PI_CODING_AGENT_DIR)
baseEnv,
piTitlebarExtensionService.buildPtyEnv(id, baseEnv.PI_CODING_AGENT_DIR)
)
// Why: the selected Codex account should affect Codex launched inside
@ -557,258 +153,140 @@ export function registerPtyHandlers(
// stays scoped to Orca terminals instead of mutating the app process or
// the user's external shells.
if (selectedCodexHomePath) {
spawnEnv.CODEX_HOME = selectedCodexHomePath
}
// Why: When Electron is launched from Finder (not a terminal), the process
// does not inherit the user's shell locale settings. Without an explicit
// UTF-8 locale, multi-byte characters (e.g. em dashes U+2014) are
// misinterpreted by the PTY and rendered as garbled sequences like "<22>~@~T".
// We default LANG to en_US.UTF-8 but let the inherited or caller-provided
// env override it so user locale preferences are respected.
spawnEnv.LANG ??= 'en_US.UTF-8'
// Why: On Windows, LANG alone does not control the console code page.
// Programs like Python and Node.js check their own encoding env vars
// independently. PYTHONUTF8=1 makes Python use UTF-8 for stdio regardless
// of the Windows console code page, preventing garbled CJK output from
// Python scripts run inside the terminal.
if (process.platform === 'win32') {
spawnEnv.PYTHONUTF8 ??= '1'
baseEnv.CODEX_HOME = selectedCodexHomePath
}
let ptyProcess: pty.IPty | undefined
let primaryError: string | null = null
if (process.platform !== 'win32') {
primaryError = getShellValidationError(shellPath)
}
return baseEnv
},
onSpawned: (id) => runtime?.onPtySpawned(id),
onExit: (id, code) => {
clearProviderPtyState(id)
ptyOwnership.delete(id)
runtime?.onPtyExit(id, code)
},
onData: (id, data, timestamp) => runtime?.onPtyData(id, data, timestamp)
})
if (!primaryError) {
try {
ptyProcess = pty.spawn(shellPath, shellArgs, {
name: 'xterm-256color',
cols: args.cols,
rows: args.rows,
cwd: effectiveCwd,
env: spawnEnv
})
} catch (err) {
// Why: node-pty.spawn can throw if posix_spawnp fails for reasons
// not caught by the validation above (e.g. architecture mismatch
// of the native addon, PTY allocation failure, or resource limits).
primaryError = err instanceof Error ? err.message : String(err)
}
}
// Wire up provider events → renderer IPC
localDataUnsub?.()
localExitUnsub?.()
localDataUnsub = localProvider.onData((payload) => {
if (!mainWindow.isDestroyed()) {
mainWindow.webContents.send('pty:data', payload)
}
})
localExitUnsub = localProvider.onExit((payload) => {
if (!mainWindow.isDestroyed()) {
mainWindow.webContents.send('pty:exit', payload)
}
})
if (!ptyProcess && process.platform !== 'win32') {
// Why: a stale login shell path (common after Homebrew/bash changes)
// should not brick Orca terminals. Fall back to system shells so the
// user still gets a working terminal while the bad SHELL config remains.
const configuredShellPath = shellPath
const fallbackShells = ['/bin/zsh', '/bin/bash', '/bin/sh'].filter(
(s) => s !== configuredShellPath
)
for (const fallback of fallbackShells) {
if (getShellValidationError(fallback)) {
continue
}
try {
// Why: set SHELL to the fallback *before* spawning so the child
// process inherits the correct value. Leaving the stale original
// SHELL in the env would confuse shell startup logic and any
// subprocesses that inspect $SHELL.
shellReadyLaunch = args.command ? getShellReadyLaunchConfig(fallback) : null
spawnEnv.SHELL = fallback
Object.assign(spawnEnv, shellReadyLaunch?.env ?? {})
ptyProcess = pty.spawn(fallback, shellReadyLaunch?.args ?? ['-l'], {
name: 'xterm-256color',
cols: args.cols,
rows: args.rows,
cwd: effectiveCwd,
env: spawnEnv
})
console.warn(
`[pty] Primary shell "${configuredShellPath}" failed (${primaryError ?? 'unknown error'}), fell back to "${fallback}"`
)
shellPath = fallback
break
} catch {
// Fallback also failed — try next.
}
}
}
// Kill orphaned PTY processes from previous page loads when the renderer reloads.
// Why: store the handler reference so we can remove it on re-registration,
// preventing duplicate handlers after macOS app re-activation.
if (didFinishLoadHandler) {
mainWindow.webContents.removeListener('did-finish-load', didFinishLoadHandler)
}
didFinishLoadHandler = () => {
const killed = localProvider.killOrphanedPtys(localProvider.advanceGeneration() - 1)
for (const { id } of killed) {
clearProviderPtyState(id)
ptyOwnership.delete(id)
runtime?.onPtyExit(id, -1)
}
}
mainWindow.webContents.on('did-finish-load', didFinishLoadHandler)
if (!ptyProcess) {
const diag = [
`shell: ${shellPath}`,
`cwd: ${effectiveCwd}`,
`arch: ${process.arch}`,
`platform: ${process.platform} ${process.getSystemVersion?.() ?? ''}`
].join(', ')
throw new Error(
`Failed to spawn shell "${shellPath}": ${primaryError ?? 'unknown error'} (${diag}). ` +
`If this persists, please file an issue.`
)
// Why: the runtime controller must route through getProviderForPty() so that
// CLI commands (terminal.send, terminal.stop) work for both local and remote PTYs.
// Hardcoding localProvider.getPtyProcess() would silently fail for remote PTYs.
runtime?.setPtyController({
write: (ptyId, data) => {
const provider = getProviderForPty(ptyId)
try {
provider.write(ptyId, data)
return true
} catch {
return false
}
},
kill: (ptyId) => {
const provider = getProviderForPty(ptyId)
// Why: shutdown() is async but the PtyController interface is sync.
// Swallowing the rejection prevents an unhandled promise rejection crash
// if the remote SSH session is already gone.
void provider.shutdown(ptyId, false).catch(() => {})
clearProviderPtyState(ptyId)
runtime?.onPtyExit(ptyId, -1)
return true
}
})
if (process.platform !== 'win32') {
// Why: after a successful fallback, update spawnEnv.SHELL to match what
// was actually launched. The value was already set inside the fallback loop
// before spawn, but we also need shellPath to reflect the fallback for the
// ptyShellName map below. (Primary-path spawns already have the correct
// SHELL from process.env / args.env.)
spawnEnv.SHELL = shellPath
}
const proc = ptyProcess
ptyProcesses.set(id, proc)
ptyShellName.set(id, basename(shellPath))
ptyLoadGeneration.set(id, loadGeneration)
runtime?.onPtySpawned(id)
// ─── IPC Handlers (thin dispatch layer) ─────────────────────────
let resolveShellReady: (() => void) | null = null
let shellReadyTimeout: ReturnType<typeof setTimeout> | null = null
const shellReadyScanState = shellReadyLaunch?.supportsReadyMarker
? createShellReadyScanState()
: null
const shellReadyPromise = args.command
? new Promise<void>((resolve) => {
resolveShellReady = resolve
})
: Promise.resolve()
const finishShellReady = (): void => {
if (!resolveShellReady) {
return
}
if (shellReadyTimeout) {
clearTimeout(shellReadyTimeout)
shellReadyTimeout = null
}
const resolve = resolveShellReady
resolveShellReady = null
resolve()
ipcMain.handle(
'pty:spawn',
async (
_event,
args: {
cols: number
rows: number
cwd?: string
env?: Record<string, string>
command?: string
connectionId?: string | null
}
if (args.command) {
if (shellReadyLaunch?.supportsReadyMarker) {
shellReadyTimeout = setTimeout(() => {
finishShellReady()
}, STARTUP_COMMAND_READY_MAX_WAIT_MS)
} else {
finishShellReady()
}
}
let startupCommandCleanup: (() => void) | null = null
const onDataDisposable = proc.onData((rawData) => {
let data = rawData
if (shellReadyScanState && resolveShellReady) {
const scanned = scanForShellReady(shellReadyScanState, rawData)
data = scanned.output
if (scanned.matched) {
finishShellReady()
}
}
if (data.length === 0) {
return
}
runtime?.onPtyData(id, data, Date.now())
if (!mainWindow.isDestroyed()) {
mainWindow.webContents.send('pty:data', { id, data })
}
) => {
const provider = getProvider(args.connectionId)
const result = await provider.spawn({
cols: args.cols,
rows: args.rows,
cwd: args.cwd,
env: args.env,
command: args.command
})
const onExitDisposable = proc.onExit(({ exitCode }) => {
if (shellReadyTimeout) {
clearTimeout(shellReadyTimeout)
shellReadyTimeout = null
}
startupCommandCleanup?.()
clearPtyState(id)
clearProviderPtyState(id)
runtime?.onPtyExit(id, exitCode)
if (!mainWindow.isDestroyed()) {
mainWindow.webContents.send('pty:exit', { id, code: exitCode })
}
})
ptyDisposables.set(id, [onDataDisposable, onExitDisposable])
if (args.command) {
writeStartupCommandWhenShellReady(shellReadyPromise, proc, args.command, (cleanup) => {
startupCommandCleanup = cleanup
})
}
return { id }
ptyOwnership.set(result.id, args.connectionId ?? null)
return result
}
)
ipcMain.on('pty:write', (_event, args: { id: string; data: string }) => {
const proc = ptyProcesses.get(args.id)
if (proc) {
proc.write(args.data)
}
getProviderForPty(args.id).write(args.id, args.data)
})
ipcMain.handle('pty:resize', (_event, args: { id: string; cols: number; rows: number }) => {
const proc = ptyProcesses.get(args.id)
if (proc) {
proc.resize(args.cols, args.rows)
}
getProviderForPty(args.id).resize(args.id, args.cols, args.rows)
})
ipcMain.handle('pty:kill', (_event, args: { id: string }) => {
const proc = ptyProcesses.get(args.id)
if (proc) {
killPtyProcess(args.id, proc)
runtime?.onPtyExit(args.id, -1)
}
})
// Check whether the terminal's foreground process differs from its shell
// (e.g. the user is running `node server.js`). Uses node-pty's native
// .process getter which reads the OS process table directly — no external
// tools like pgrep required.
ipcMain.handle('pty:hasChildProcesses', (_event, args: { id: string }): boolean => {
const proc = ptyProcesses.get(args.id)
if (!proc) {
return false
}
ipcMain.handle('pty:kill', async (_event, args: { id: string }) => {
// Why: try/finally ensures ptyOwnership is cleaned up even if shutdown
// throws (e.g. SSH connection already gone). Without this, the stale
// entry routes future lookups to a dead provider.
try {
const foreground = proc.process
const shell = ptyShellName.get(args.id)
// If we can't determine the shell name, err on the side of caution.
if (!shell) {
return true
}
return foreground !== shell
} catch {
// .process can throw if the PTY fd is already closed.
return false
await getProviderForPty(args.id).shutdown(args.id, true)
} finally {
ptyOwnership.delete(args.id)
}
})
ipcMain.handle('pty:getForegroundProcess', (_event, args: { id: string }): string | null => {
const proc = ptyProcesses.get(args.id)
if (!proc) {
return null
ipcMain.handle(
'pty:hasChildProcesses',
async (_event, args: { id: string }): Promise<boolean> => {
return getProviderForPty(args.id).hasChildProcesses(args.id)
}
try {
// Why: live Codex-session actions must key off the PTY foreground process,
// not the tab title. Agent CLIs do not reliably emit stable OSC titles,
// so title-based detection misses real Codex sessions that still need a
// restart after account switching.
return proc.process || null
} catch {
// .process can throw if the PTY fd is already closed.
return null
)
ipcMain.handle(
'pty:getForegroundProcess',
async (_event, args: { id: string }): Promise<string | null> => {
return getProviderForPty(args.id).getForegroundProcess(args.id)
}
})
)
}
/**
 * Kill every PTY this process owns. Call once on app quit.
 */
export function killAllPty(): void {
  // Kill and scrub each PTY tracked in the map-based local registry.
  for (const [id, proc] of ptyProcesses) {
    killPtyProcess(id, proc)
  }
  // NOTE(review): both the map-based kill above and the provider-level
  // killAll() run here. Confirm the double teardown is intentional — it looks
  // like a leftover from migrating PTY ownership into localProvider.
  localProvider.killAll()
}

View file

@ -0,0 +1,192 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
// Hoisted so the vi.mock factories below can reference these shared fakes
// (vitest lifts vi.mock calls above normal const initialization).
const { handleMock, mockStore, mockGitProvider } = vi.hoisted(() => ({
  handleMock: vi.fn(),
  mockStore: {
    getRepos: vi.fn().mockReturnValue([]),
    addRepo: vi.fn(),
    removeRepo: vi.fn(),
    getRepo: vi.fn(),
    updateRepo: vi.fn()
  },
  mockGitProvider: {
    isGitRepo: vi.fn().mockReturnValue(true),
    isGitRepoAsync: vi.fn().mockResolvedValue({ isRepo: true, rootPath: null })
  }
}))
// Capture ipcMain.handle registrations instead of touching real Electron.
vi.mock('electron', () => ({
  dialog: { showOpenDialog: vi.fn() },
  ipcMain: {
    handle: handleMock,
    removeHandler: vi.fn()
  }
}))
// Local git helpers are stubbed out; remote paths go through mockGitProvider.
vi.mock('../git/repo', () => ({
  isGitRepo: vi.fn().mockReturnValue(true),
  getGitUsername: vi.fn().mockReturnValue(''),
  getRepoName: vi.fn().mockImplementation((path: string) => path.split('/').pop()),
  getBaseRefDefault: vi.fn().mockResolvedValue('origin/main'),
  searchBaseRefs: vi.fn().mockResolvedValue([])
}))
vi.mock('./filesystem-auth', () => ({
  rebuildAuthorizedRootsCache: vi.fn().mockResolvedValue(undefined)
}))
// Only connection "conn-1" resolves to a git provider; any other id behaves
// like an unknown or disconnected SSH connection.
vi.mock('../providers/ssh-git-dispatch', () => ({
  getSshGitProvider: vi.fn().mockImplementation((id: string) => {
    if (id === 'conn-1') {
      return mockGitProvider
    }
    return undefined
  })
}))
import { registerRepoHandlers } from './repos'
// Unit tests for the repos:addRemote IPC handler: registration, remote git
// detection through the SSH git provider, dedupe, and renderer notification.
describe('repos:addRemote', () => {
  // Channel → handler map populated from ipcMain.handle via handleMock.
  const handlers = new Map<string, (_event: unknown, args: unknown) => unknown>()
  const mockWindow = {
    isDestroyed: () => false,
    webContents: { send: vi.fn() }
  }
  beforeEach(() => {
    handlers.clear()
    handleMock.mockReset()
    handleMock.mockImplementation((channel: string, handler: (...a: unknown[]) => unknown) => {
      handlers.set(channel, handler)
    })
    mockStore.getRepos.mockReset().mockReturnValue([])
    mockStore.addRepo.mockReset()
    mockWindow.webContents.send.mockReset()
    registerRepoHandlers(mockWindow as never, mockStore as never)
  })
  it('registers the repos:addRemote handler', () => {
    expect(handlers.has('repos:addRemote')).toBe(true)
  })
  it('creates a remote repo with connectionId', async () => {
    const result = await handlers.get('repos:addRemote')!(null, {
      connectionId: 'conn-1',
      remotePath: '/home/user/project'
    })
    // Default displayName is derived from the last path segment.
    expect(mockStore.addRepo).toHaveBeenCalledWith(
      expect.objectContaining({
        path: '/home/user/project',
        connectionId: 'conn-1',
        kind: 'git',
        displayName: 'project'
      })
    )
    expect(result).toHaveProperty('id')
    expect(result).toHaveProperty('connectionId', 'conn-1')
  })
  it('uses custom displayName when provided', async () => {
    const result = await handlers.get('repos:addRemote')!(null, {
      connectionId: 'conn-1',
      remotePath: '/home/user/project',
      displayName: 'My Server Repo'
    })
    expect(mockStore.addRepo).toHaveBeenCalledWith(
      expect.objectContaining({
        displayName: 'My Server Repo'
      })
    )
    expect(result).toHaveProperty('displayName', 'My Server Repo')
  })
  it('returns existing repo if same connectionId and path already added', async () => {
    const existing = {
      id: 'existing-id',
      path: '/home/user/project',
      connectionId: 'conn-1',
      displayName: 'project',
      badgeColor: '#fff',
      addedAt: 1000,
      kind: 'git'
    }
    mockStore.getRepos.mockReturnValue([existing])
    const result = await handlers.get('repos:addRemote')!(null, {
      connectionId: 'conn-1',
      remotePath: '/home/user/project'
    })
    // Dedupe: the handler must return the stored repo without re-adding it.
    expect(result).toEqual(existing)
    expect(mockStore.addRepo).not.toHaveBeenCalled()
  })
  it('throws when SSH connection is not found', async () => {
    await expect(
      handlers.get('repos:addRemote')!(null, {
        connectionId: 'unknown-conn',
        remotePath: '/home/user/project'
      })
    ).rejects.toThrow('SSH connection "unknown-conn" not found')
  })
  it('throws when remote path is not a git repo', async () => {
    mockGitProvider.isGitRepoAsync.mockResolvedValueOnce({ isRepo: false, rootPath: null })
    await expect(
      handlers.get('repos:addRemote')!(null, {
        connectionId: 'conn-1',
        remotePath: '/home/user/documents'
      })
    ).rejects.toThrow('Not a valid git repository')
    expect(mockStore.addRepo).not.toHaveBeenCalled()
  })
  it('adds as folder when kind is explicitly set', async () => {
    // kind: 'folder' must bypass the git-repo check entirely.
    const result = await handlers.get('repos:addRemote')!(null, {
      connectionId: 'conn-1',
      remotePath: '/home/user/documents',
      kind: 'folder'
    })
    expect(mockStore.addRepo).toHaveBeenCalledWith(
      expect.objectContaining({
        kind: 'folder',
        path: '/home/user/documents'
      })
    )
    expect(result).toHaveProperty('kind', 'folder')
  })
  it('uses rootPath from git detection when available', async () => {
    // When a subdirectory is given, the detected repo root wins as the path.
    mockGitProvider.isGitRepoAsync.mockResolvedValueOnce({
      isRepo: true,
      rootPath: '/home/user/project'
    })
    const result = await handlers.get('repos:addRemote')!(null, {
      connectionId: 'conn-1',
      remotePath: '/home/user/project/src'
    })
    expect(mockStore.addRepo).toHaveBeenCalledWith(
      expect.objectContaining({
        kind: 'git',
        path: '/home/user/project'
      })
    )
    expect(result).toHaveProperty('path', '/home/user/project')
  })
  it('notifies renderer when remote repo is added', async () => {
    await handlers.get('repos:addRemote')!(null, {
      connectionId: 'conn-1',
      remotePath: '/home/user/project'
    })
    expect(mockWindow.webContents.send).toHaveBeenCalledWith('repos:changed')
  })
})

View file

@ -1,3 +1,6 @@
/* eslint-disable max-lines -- Why: repo IPC is intentionally centralized so SSH
routing, clone lifecycle, and store persistence stay behind a single audited
boundary. Splitting by line count would scatter tightly coupled repo behavior. */
import type { BrowserWindow } from 'electron'
import { dialog, ipcMain } from 'electron'
import { randomUUID } from 'crypto'
@ -17,6 +20,8 @@ import {
getBaseRefDefault,
searchBaseRefs
} from '../git/repo'
import { getSshGitProvider } from '../providers/ssh-git-dispatch'
import { getActiveMultiplexer } from './ssh'
// Why: module-scoped so the abort handle survives window re-creation on macOS.
// registerRepoHandlers is called again when a new BrowserWindow is created,
@ -38,6 +43,7 @@ export function registerRepoHandlers(mainWindow: BrowserWindow, store: Store): v
ipcMain.removeHandler('repos:getGitUsername')
ipcMain.removeHandler('repos:getBaseRefDefault')
ipcMain.removeHandler('repos:searchBaseRefs')
ipcMain.removeHandler('repos:addRemote')
ipcMain.handle('repos:list', () => {
return store.getRepos()
@ -70,6 +76,83 @@ export function registerRepoHandlers(mainWindow: BrowserWindow, store: Store): v
return repo
})
ipcMain.handle(
'repos:addRemote',
async (
_event,
args: {
connectionId: string
remotePath: string
displayName?: string
kind?: 'git' | 'folder'
}
): Promise<Repo> => {
const gitProvider = getSshGitProvider(args.connectionId)
if (!gitProvider) {
throw new Error(`SSH connection "${args.connectionId}" not found or not connected`)
}
const existing = store
.getRepos()
.find((r) => r.connectionId === args.connectionId && r.path === args.remotePath)
if (existing) {
return existing
}
const pathSegments = args.remotePath.replace(/\/+$/, '').split('/')
const folderName = pathSegments.at(-1) || args.remotePath
let repoKind: 'git' | 'folder' = args.kind ?? 'git'
let resolvedPath = args.remotePath
if (args.kind !== 'folder') {
// Why: when kind is not explicitly 'folder', verify the remote path is
// a git repo. Throw on failure so the renderer can show the "Open as
// Folder" confirmation dialog — matching the local add-repo behavior
// where non-git directories require explicit user consent.
try {
const check = await gitProvider.isGitRepoAsync(args.remotePath)
if (check.isRepo) {
repoKind = 'git'
if (check.rootPath) {
resolvedPath = check.rootPath
}
} else {
throw new Error(`Not a valid git repository: ${args.remotePath}`)
}
} catch (err) {
if (err instanceof Error && err.message.includes('Not a valid git repository')) {
throw err
}
throw new Error(`Not a valid git repository: ${args.remotePath}`)
}
}
const repo: Repo = {
id: randomUUID(),
path: resolvedPath,
displayName: args.displayName || folderName,
badgeColor: REPO_COLORS[store.getRepos().length % REPO_COLORS.length],
addedAt: Date.now(),
kind: repoKind,
connectionId: args.connectionId
}
store.addRepo(repo)
notifyReposChanged(mainWindow)
// Why: register the workspace root with the relay so mutating FS operations
// are scoped to this repo's path. Without this, the relay's path ACL would
// reject writes to the workspace after the first root is registered.
const mux = getActiveMultiplexer(args.connectionId)
if (mux) {
mux.notify('session.registerRoot', { rootPath: resolvedPath })
}
return repo
}
)
ipcMain.handle('repos:remove', async (_event, args: { repoId: string }) => {
store.removeRepo(args.repoId)
await rebuildAuthorizedRootsCache(store)
@ -239,11 +322,25 @@ export function registerRepoHandlers(mainWindow: BrowserWindow, store: Store): v
}
)
ipcMain.handle('repos:getGitUsername', (_event, args: { repoId: string }) => {
ipcMain.handle('repos:getGitUsername', async (_event, args: { repoId: string }) => {
const repo = store.getRepo(args.repoId)
if (!repo || isFolderRepo(repo)) {
return ''
}
// Why: remote repos have their git config on the remote host, so we
// must route through the relay's git.exec to read user.name.
if (repo.connectionId) {
const provider = getSshGitProvider(repo.connectionId)
if (!provider) {
return ''
}
try {
const result = await provider.exec(['config', 'user.name'], repo.path)
return result.stdout.trim()
} catch {
return ''
}
}
return getGitUsername(repo.path)
})
@ -252,6 +349,27 @@ export function registerRepoHandlers(mainWindow: BrowserWindow, store: Store): v
if (!repo || isFolderRepo(repo)) {
return 'origin/main'
}
// Why: remote repos need the relay to resolve symbolic-ref on the
// remote host where the git data lives.
if (repo.connectionId) {
const provider = getSshGitProvider(repo.connectionId)
if (!provider) {
return 'origin/main'
}
try {
const result = await provider.exec(
['symbolic-ref', '--quiet', 'refs/remotes/origin/HEAD'],
repo.path
)
const ref = result.stdout.trim()
if (ref) {
return ref.replace(/^refs\/remotes\//, '')
}
} catch {
// Fall through to default
}
return 'origin/main'
}
return getBaseRefDefault(repo.path)
})
@ -262,7 +380,34 @@ export function registerRepoHandlers(mainWindow: BrowserWindow, store: Store): v
if (!repo || isFolderRepo(repo)) {
return []
}
return searchBaseRefs(repo.path, args.query, args.limit ?? 25)
const limit = args.limit ?? 25
// Why: remote repos need the relay to list branches on the remote host.
if (repo.connectionId) {
const provider = getSshGitProvider(repo.connectionId)
if (!provider) {
return []
}
try {
const result = await provider.exec(
[
'for-each-ref',
'--format=%(refname:short)',
'--sort=-committerdate',
`refs/remotes/origin/*${args.query}*`,
`refs/heads/*${args.query}*`
],
repo.path
)
return result.stdout
.split('\n')
.map((s) => s.trim())
.filter(Boolean)
.slice(0, limit)
} catch {
return []
}
}
return searchBaseRefs(repo.path, args.query, limit)
}
)
}

View file

@ -0,0 +1,68 @@
import { randomUUID } from 'crypto'
import { ipcMain, type BrowserWindow } from 'electron'
import type { SshConnectionCallbacks } from '../ssh/ssh-connection'
// Why: all three SSH auth callbacks (host-key-verify, auth-challenge, password)
// share the same IPC round-trip pattern: send a prompt event to the renderer,
// wait for a single response on a unique channel, clean up on timeout/close.
// Extracting the pattern into a generic helper avoids triplicating the cleanup
// logic and keeps ssh.ts under the max-lines threshold.
/**
 * Sends `sendPayload` to the renderer on `sendChannel` (augmented with a
 * unique `responseChannel`) and resolves with the renderer's single reply on
 * that channel. Resolves with `fallback` instead when the window closes or no
 * reply arrives within `timeoutMs`.
 *
 * Why: all three SSH auth prompts share this round-trip; centralizing it keeps
 * the listener / timer / window-close cleanup in one place.
 *
 * @param win            target window; must not already be destroyed
 * @param sendChannel    IPC channel the prompt event is sent on
 * @param sendPayload    prompt data forwarded to the renderer
 * @param fallback       value resolved on timeout or window close
 * @param timeoutMs      how long to wait for a human answer
 *                       (was hard-coded; parameterized for tunability,
 *                       default preserves the previous 120 s behavior)
 */
function promptRenderer<T>(
  win: BrowserWindow,
  sendChannel: string,
  sendPayload: Record<string, unknown>,
  fallback: T,
  timeoutMs = 120_000
): Promise<T> {
  return new Promise<T>((resolve) => {
    const responseChannel = `${sendChannel}-response-${randomUUID()}`
    const onClosed = () => {
      cleanup()
      resolve(fallback)
    }
    const cleanup = () => {
      // Why: removeAllListeners (not removeListener) also clears the pending
      // `once` registration when cleanup runs via timeout or window close.
      ipcMain.removeAllListeners(responseChannel)
      clearTimeout(timer)
      win.removeListener('closed', onClosed)
    }
    const timer = setTimeout(() => {
      cleanup()
      resolve(fallback)
    }, timeoutMs)
    win.webContents.send(sendChannel, { ...sendPayload, responseChannel })
    ipcMain.once(responseChannel, (_event, value: T) => {
      cleanup()
      resolve(value)
    })
    win.once('closed', onClosed)
  })
}
// Why: all three SSH auth prompts need a live, non-destroyed window before
// asking the user anything; resolving it through one shared guard keeps the
// callbacks uniform and falls back safely when no window is available.
export function buildSshAuthCallbacks(
  getMainWindow: () => BrowserWindow | null
): Pick<SshConnectionCallbacks, 'onHostKeyVerify' | 'onAuthChallenge' | 'onPasswordPrompt'> {
  const liveWindow = (): BrowserWindow | null => {
    const win = getMainWindow()
    return win && !win.isDestroyed() ? win : null
  }
  return {
    // Host-key verification: reject (false) when there is no window or answer.
    onHostKeyVerify: async (req) => {
      const win = liveWindow()
      return win ? promptRenderer<boolean>(win, 'ssh:host-key-verify', req, false) : false
    },
    // Keyboard-interactive challenge: empty answer list is the safe default.
    onAuthChallenge: async (req) => {
      const win = liveWindow()
      return win ? promptRenderer<string[]>(win, 'ssh:auth-challenge', req, []) : []
    },
    // Password prompt: null signals "no password provided".
    onPasswordPrompt: async (targetId: string) => {
      const win = liveWindow()
      return win
        ? promptRenderer<string | null>(win, 'ssh:password-prompt', { targetId }, null)
        : null
    }
  }
}

108
src/main/ipc/ssh-browse.ts Normal file
View file

@ -0,0 +1,108 @@
import { ipcMain } from 'electron'
import type { SshConnectionManager } from '../ssh/ssh-connection'
// A single directory entry returned by the ssh:browseDir IPC handler.
export type RemoteDirEntry = {
  name: string
  // True when the remote `ls -p` listing marked the entry with a trailing /.
  isDirectory: boolean
}
// Why: the relay's fs.readDir enforces workspace root ACLs, which aren't
// registered until a repo is added. This handler uses a raw SSH exec channel
// to list directories, allowing the user to browse the remote filesystem
// during the "add remote repo" flow before any roots exist.
export function registerSshBrowseHandler(
  getConnectionManager: () => SshConnectionManager | null
): void {
  ipcMain.removeHandler('ssh:browseDir')
  ipcMain.handle(
    'ssh:browseDir',
    async (
      _event,
      args: { targetId: string; dirPath: string }
    ): Promise<{ entries: RemoteDirEntry[]; resolvedPath: string }> => {
      const mgr = getConnectionManager()
      if (!mgr) {
        throw new Error('SSH connection manager not initialized')
      }
      const conn = mgr.getConnection(args.targetId)
      if (!conn) {
        throw new Error(`SSH connection "${args.targetId}" not found`)
      }
      // Why: `ls -1ap` prints one entry per line (-1), includes dotfiles (-a)
      // and appends / to directories (-p), letting us classify entries without
      // a per-entry stat round-trip. `cd <path> && pwd` makes the remote shell
      // resolve ~ and relative segments to an absolute path.
      const command = `cd ${shellEscape(args.dirPath)} && pwd && ls -1ap`
      const channel = await conn.exec(command)
      return new Promise((resolve, reject) => {
        let stdout = ''
        let stderr = ''
        channel.on('data', (data: Buffer) => {
          stdout += data.toString()
        })
        channel.stderr.on('data', (data: Buffer) => {
          stderr += data.toString()
        })
        // Why: without an error listener the promise would never settle if the
        // SSH channel fails mid-exec, leaving the renderer's request hanging.
        channel.on('error', (err: Error) => {
          reject(err)
        })
        channel.on('close', () => {
          if (stderr.trim() && !stdout.trim()) {
            reject(new Error(stderr.trim()))
            return
          }
          // Why: ''.split('\n') yields [''] (length 1), so a length check can
          // never detect an empty response — test the trimmed output directly.
          if (!stdout.trim()) {
            reject(new Error('Empty response from remote'))
            return
          }
          // First line is the resolved absolute path from `pwd`; the rest are
          // `ls` entries.
          const lines = stdout.trim().split('\n')
          const resolvedPath = lines[0]
          const entries: RemoteDirEntry[] = []
          for (let i = 1; i < lines.length; i++) {
            const line = lines[i]
            if (!line || line === './' || line === '../') {
              continue
            }
            if (line.endsWith('/')) {
              entries.push({ name: line.slice(0, -1), isDirectory: true })
            } else {
              entries.push({ name: line, isDirectory: false })
            }
          }
          // Sort: directories first, then alphabetical within each group.
          entries.sort((a, b) => {
            if (a.isDirectory !== b.isDirectory) {
              return a.isDirectory ? -1 : 1
            }
            return a.name.localeCompare(b.name)
          })
          resolve({ entries, resolvedPath })
        })
      })
    }
  )
}
// Why: prevent shell injection in the directory path. Wrapping in single
// quotes (with embedded single quotes rewritten as '\'' ) is the safest
// quoting for sh/bash. Single quotes also suppress tilde expansion, so a
// leading ~ is rewritten to a double-quoted "$HOME" reference instead of
// being quoted literally.
function shellEscape(path: string): string {
  if (path === '~') {
    return '"$HOME"'
  }
  return path.startsWith('~/')
    ? `"$HOME"/${shellEscapeRaw(path.slice(2))}`
    : shellEscapeRaw(path)
}

// Single-quote the string, turning each embedded ' into the sh-safe '\''.
function shellEscapeRaw(raw: string): string {
  const escaped = raw.split("'").join("'\\''")
  return `'${escaped}'`
}

View file

@ -0,0 +1,187 @@
// Why: extracted from ssh.ts to keep the main IPC module under the max-lines
// threshold. These helpers manage relay lifecycle (cleanup, event wiring,
// reconnection) and are called from both initial connect and reconnection paths.
import type { BrowserWindow } from 'electron'
import { deployAndLaunchRelay } from '../ssh/ssh-relay-deploy'
import { SshChannelMultiplexer } from '../ssh/ssh-channel-multiplexer'
import { SshPtyProvider } from '../providers/ssh-pty-provider'
import { SshFilesystemProvider } from '../providers/ssh-filesystem-provider'
import { SshGitProvider } from '../providers/ssh-git-provider'
import {
registerSshPtyProvider,
unregisterSshPtyProvider,
getSshPtyProvider,
getPtyIdsForConnection,
clearPtyOwnershipForConnection,
clearProviderPtyState,
deletePtyOwnership
} from './pty'
import {
registerSshFilesystemProvider,
unregisterSshFilesystemProvider,
getSshFilesystemProvider
} from '../providers/ssh-filesystem-dispatch'
import { registerSshGitProvider, unregisterSshGitProvider } from '../providers/ssh-git-dispatch'
import type { SshPortForwardManager } from '../ssh/ssh-port-forward'
import type { SshConnectionManager } from '../ssh/ssh-connection'
/**
 * Tears down everything associated with one SSH connection: port forwards,
 * the channel multiplexer, PTY ownership, and the per-connection PTY /
 * filesystem / git providers. Safe to call when pieces are already gone
 * (each lookup is null-checked). The teardown order is deliberate — see the
 * inline comments.
 */
export function cleanupConnection(
  targetId: string,
  activeMultiplexers: Map<string, SshChannelMultiplexer>,
  initializedConnections: Set<string>,
  portForwardManager: SshPortForwardManager | null
): void {
  portForwardManager?.removeAllForwards(targetId)
  const mux = activeMultiplexers.get(targetId)
  if (mux) {
    mux.dispose()
    activeMultiplexers.delete(targetId)
  }
  // Why: clear PTY ownership entries before unregistering the provider so
  // stale ownership entries don't route future lookups to a dead provider.
  clearPtyOwnershipForConnection(targetId)
  // Why: dispose notification subscriptions before unregistering so the
  // multiplexer's handler list doesn't retain stale callbacks that fire
  // into a torn-down provider after disconnect.
  // NOTE(review): the provider types apparently don't declare dispose();
  // the `in` check + structural cast works around that — confirm dispose is
  // intended public API on both providers.
  const ptyProvider = getSshPtyProvider(targetId)
  if (ptyProvider && 'dispose' in ptyProvider) {
    ;(ptyProvider as { dispose: () => void }).dispose()
  }
  const fsProvider = getSshFilesystemProvider(targetId)
  if (fsProvider && 'dispose' in fsProvider) {
    ;(fsProvider as { dispose: () => void }).dispose()
  }
  unregisterSshPtyProvider(targetId)
  unregisterSshFilesystemProvider(targetId)
  unregisterSshGitProvider(targetId)
  // Forget first-connect state so a future connect runs initial setup again.
  initializedConnections.delete(targetId)
}
// Why: extracted so both initial connect and reconnection use the same wiring.
// Forgetting to wire PTY events on reconnect would cause silent terminal death.
export function wireUpSshPtyEvents(
  ptyProvider: SshPtyProvider,
  getMainWindow: () => BrowserWindow | null
): void {
  // Why: resolving the window lazily on every event (instead of capturing it
  // once) ensures events reach the current window even if macOS app
  // re-activation creates a new BrowserWindow after the initial wiring.
  const forward = (channel: string, payload: unknown): void => {
    const win = getMainWindow()
    if (win && !win.isDestroyed()) {
      win.webContents.send(channel, payload)
    }
  }
  ptyProvider.onData((payload) => forward('pty:data', payload))
  ptyProvider.onExit((payload) => {
    clearProviderPtyState(payload.id)
    // Why: drop the ownership entry for the exited remote PTY, otherwise
    // future lookups would route to the SSH provider for a PTY that no
    // longer exists.
    deletePtyOwnership(payload.id)
    forward('pty:exit', payload)
  })
}
// Why: overlapping reconnection attempts (e.g. SSH connection flaps twice
// quickly) would cause two concurrent reestablishRelayStack calls, leaking
// relay processes and multiplexers from the first call. This map lets us
// cancel the stale attempt before starting a new one.
const reestablishAbortControllers = new Map<string, AbortController>()

/**
 * Rebuilds the full relay stack after an SSH reconnect: tears down the old
 * multiplexer/providers, re-deploys the relay, recreates PTY / filesystem /
 * git providers, rewires PTY events, and re-attaches surviving PTYs.
 * No-ops when the connection is gone; failures are logged, not thrown.
 */
export async function reestablishRelayStack(
  targetId: string,
  getMainWindow: () => BrowserWindow | null,
  connectionManager: SshConnectionManager | null,
  activeMultiplexers: Map<string, SshChannelMultiplexer>,
  portForwardManager?: SshPortForwardManager | null
): Promise<void> {
  const conn = connectionManager?.getConnection(targetId)
  if (!conn) {
    return
  }
  // Why: port forwards hold open local TCP servers backed by SSH channels that
  // are now dead. Without cleanup, clients connecting to forwarded ports hang.
  portForwardManager?.removeAllForwards(targetId)
  // Cancel any in-flight reattempt for this target before starting a new one.
  const prevAbort = reestablishAbortControllers.get(targetId)
  if (prevAbort) {
    prevAbort.abort()
  }
  const abortController = new AbortController()
  reestablishAbortControllers.set(targetId, abortController)
  // Dispose old multiplexer with connection_lost reason
  const oldMux = activeMultiplexers.get(targetId)
  if (oldMux && !oldMux.isDisposed()) {
    oldMux.dispose('connection_lost')
  }
  activeMultiplexers.delete(targetId)
  // Why: dispose notification subscriptions before unregistering so stale
  // callbacks from the old multiplexer don't fire into a torn-down provider.
  const oldPtyProvider = getSshPtyProvider(targetId)
  if (oldPtyProvider && 'dispose' in oldPtyProvider) {
    ;(oldPtyProvider as { dispose: () => void }).dispose()
  }
  const oldFsProvider = getSshFilesystemProvider(targetId)
  if (oldFsProvider && 'dispose' in oldFsProvider) {
    ;(oldFsProvider as { dispose: () => void }).dispose()
  }
  unregisterSshPtyProvider(targetId)
  unregisterSshFilesystemProvider(targetId)
  unregisterSshGitProvider(targetId)
  try {
    const { transport } = await deployAndLaunchRelay(conn)
    if (abortController.signal.aborted) {
      // Why: the relay is already running on the remote. Creating a temporary
      // multiplexer and immediately disposing it sends a clean shutdown to the
      // relay process. Without this, the orphaned relay runs until its grace
      // timer expires.
      const orphanMux = new SshChannelMultiplexer(transport)
      orphanMux.dispose()
      return
    }
    const mux = new SshChannelMultiplexer(transport)
    activeMultiplexers.set(targetId, mux)
    const ptyProvider = new SshPtyProvider(targetId, mux)
    registerSshPtyProvider(targetId, ptyProvider)
    const fsProvider = new SshFilesystemProvider(targetId, mux)
    registerSshFilesystemProvider(targetId, fsProvider)
    const gitProvider = new SshGitProvider(targetId, mux)
    registerSshGitProvider(targetId, gitProvider)
    wireUpSshPtyEvents(ptyProvider, getMainWindow)
    // Re-attach to any PTYs that were alive before the disconnect.
    // The relay keeps them running during its grace period.
    const ptyIds = getPtyIdsForConnection(targetId)
    for (const ptyId of ptyIds) {
      try {
        await ptyProvider.attach(ptyId)
      } catch {
        // PTY may have exited during the disconnect — ignore
      }
    }
  } catch (err) {
    console.warn(
      `[ssh] Failed to re-establish relay for ${targetId}: ${err instanceof Error ? err.message : String(err)}`
    )
  } finally {
    // Only clear the map entry if it still belongs to this attempt — a newer
    // attempt may have replaced it already.
    if (reestablishAbortControllers.get(targetId) === abortController) {
      reestablishAbortControllers.delete(targetId)
    }
  }
}

287
src/main/ipc/ssh.test.ts Normal file
View file

@ -0,0 +1,287 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
// Hoisted mock singletons. vi.mock factories are hoisted above the module
// body, so the shared mocks they close over must be created via vi.hoisted.
const {
  handleMock,
  mockSshStore,
  mockConnectionManager,
  mockDeployAndLaunchRelay,
  mockMux,
  mockPtyProvider,
  mockFsProvider,
  mockGitProvider,
  mockPortForwardManager
} = vi.hoisted(() => ({
  handleMock: vi.fn(),
  mockSshStore: {
    listTargets: vi.fn().mockReturnValue([]),
    getTarget: vi.fn(),
    addTarget: vi.fn(),
    updateTarget: vi.fn(),
    removeTarget: vi.fn(),
    importFromSshConfig: vi.fn().mockReturnValue([])
  },
  mockConnectionManager: {
    connect: vi.fn(),
    disconnect: vi.fn(),
    getState: vi.fn(),
    disconnectAll: vi.fn()
  },
  mockDeployAndLaunchRelay: vi.fn(),
  mockMux: {
    dispose: vi.fn(),
    isDisposed: vi.fn().mockReturnValue(false),
    onNotification: vi.fn()
  },
  mockPtyProvider: {
    onData: vi.fn(),
    onExit: vi.fn(),
    onReplay: vi.fn()
  },
  mockFsProvider: {},
  mockGitProvider: {},
  mockPortForwardManager: {
    addForward: vi.fn(),
    removeForward: vi.fn(),
    listForwards: vi.fn().mockReturnValue([]),
    removeAllForwards: vi.fn(),
    dispose: vi.fn()
  }
}))

// Module mocks: each mocked class constructor returns the matching shared
// singleton above, so tests can assert on calls regardless of how many times
// the module under test instantiates the class.
vi.mock('electron', () => ({
  ipcMain: {
    handle: handleMock,
    on: vi.fn(),
    once: vi.fn(),
    removeHandler: vi.fn(),
    removeAllListeners: vi.fn()
  }
}))

vi.mock('../ssh/ssh-connection-store', () => ({
  SshConnectionStore: class MockSshConnectionStore {
    constructor() {
      return mockSshStore
    }
  }
}))

vi.mock('../ssh/ssh-connection', () => ({
  SshConnectionManager: class MockSshConnectionManager {
    constructor() {
      return mockConnectionManager
    }
  }
}))

vi.mock('../ssh/ssh-relay-deploy', () => ({
  deployAndLaunchRelay: mockDeployAndLaunchRelay
}))

vi.mock('../ssh/ssh-channel-multiplexer', () => ({
  SshChannelMultiplexer: class MockSshChannelMultiplexer {
    constructor() {
      return mockMux
    }
  }
}))

vi.mock('../providers/ssh-pty-provider', () => ({
  SshPtyProvider: class MockSshPtyProvider {
    constructor() {
      return mockPtyProvider
    }
  }
}))

vi.mock('../providers/ssh-filesystem-provider', () => ({
  SshFilesystemProvider: class MockSshFilesystemProvider {
    constructor() {
      return mockFsProvider
    }
  }
}))

vi.mock('./pty', () => ({
  registerSshPtyProvider: vi.fn(),
  unregisterSshPtyProvider: vi.fn(),
  clearPtyOwnershipForConnection: vi.fn(),
  getSshPtyProvider: vi.fn(),
  getPtyIdsForConnection: vi.fn().mockReturnValue([])
}))

vi.mock('../providers/ssh-filesystem-dispatch', () => ({
  registerSshFilesystemProvider: vi.fn(),
  unregisterSshFilesystemProvider: vi.fn(),
  getSshFilesystemProvider: vi.fn()
}))

vi.mock('../providers/ssh-git-provider', () => ({
  SshGitProvider: class MockSshGitProvider {
    constructor() {
      return mockGitProvider
    }
  }
}))

vi.mock('../providers/ssh-git-dispatch', () => ({
  registerSshGitProvider: vi.fn(),
  unregisterSshGitProvider: vi.fn()
}))

vi.mock('../ssh/ssh-port-forward', () => ({
  SshPortForwardManager: class MockPortForwardManager {
    constructor() {
      return mockPortForwardManager
    }
  }
}))
import { registerSshHandlers } from './ssh'
import type { SshTarget } from '../../shared/ssh-types'
// Unit tests for registerSshHandlers: handlers are captured from the mocked
// ipcMain.handle into a map, then invoked directly with (null, args).
describe('SSH IPC handlers', () => {
  const handlers = new Map<string, (_event: unknown, args: unknown) => unknown>()
  const mockStore = {} as never
  const mockWindow = {
    isDestroyed: () => false,
    webContents: { send: vi.fn() }
  }

  beforeEach(() => {
    // Reset captured handlers and every shared mock so each test starts clean,
    // then re-register to repopulate the handler map.
    handlers.clear()
    handleMock.mockReset()
    handleMock.mockImplementation((channel: string, handler: (...a: unknown[]) => unknown) => {
      handlers.set(channel, handler)
    })
    mockSshStore.listTargets.mockReset().mockReturnValue([])
    mockSshStore.getTarget.mockReset()
    mockSshStore.addTarget.mockReset()
    mockSshStore.updateTarget.mockReset()
    mockSshStore.removeTarget.mockReset()
    mockSshStore.importFromSshConfig.mockReset().mockReturnValue([])
    mockConnectionManager.connect.mockReset()
    mockConnectionManager.disconnect.mockReset()
    mockConnectionManager.getState.mockReset()
    mockConnectionManager.disconnectAll.mockReset()
    mockDeployAndLaunchRelay.mockReset().mockResolvedValue({
      transport: { write: vi.fn(), onData: vi.fn(), onClose: vi.fn() },
      platform: 'linux-x64'
    })
    mockMux.dispose.mockReset()
    mockMux.isDisposed.mockReset().mockReturnValue(false)
    mockMux.onNotification.mockReset()
    mockPtyProvider.onData.mockReset()
    mockPtyProvider.onExit.mockReset()
    mockPtyProvider.onReplay.mockReset()
    registerSshHandlers(mockStore, () => mockWindow as never)
  })

  it('registers all expected IPC channels', () => {
    const channels = Array.from(handlers.keys())
    expect(channels).toContain('ssh:listTargets')
    expect(channels).toContain('ssh:addTarget')
    expect(channels).toContain('ssh:updateTarget')
    expect(channels).toContain('ssh:removeTarget')
    expect(channels).toContain('ssh:importConfig')
    expect(channels).toContain('ssh:connect')
    expect(channels).toContain('ssh:disconnect')
    expect(channels).toContain('ssh:getState')
    expect(channels).toContain('ssh:testConnection')
  })

  it('ssh:listTargets returns targets from store', async () => {
    const mockTargets: SshTarget[] = [
      { id: 'ssh-1', label: 'Server 1', host: 'srv1.com', port: 22, username: 'admin' }
    ]
    mockSshStore.listTargets.mockReturnValue(mockTargets)
    const result = await handlers.get('ssh:listTargets')!(null, {})
    expect(result).toEqual(mockTargets)
  })

  it('ssh:addTarget calls store.addTarget', async () => {
    const newTarget = {
      label: 'New Server',
      host: 'new.example.com',
      port: 22,
      username: 'deploy'
    }
    const withId = { ...newTarget, id: 'ssh-new' }
    mockSshStore.addTarget.mockReturnValue(withId)
    const result = await handlers.get('ssh:addTarget')!(null, { target: newTarget })
    expect(mockSshStore.addTarget).toHaveBeenCalledWith(newTarget)
    expect(result).toEqual(withId)
  })

  it('ssh:removeTarget calls store.removeTarget', async () => {
    await handlers.get('ssh:removeTarget')!(null, { id: 'ssh-1' })
    expect(mockSshStore.removeTarget).toHaveBeenCalledWith('ssh-1')
  })

  it('ssh:importConfig returns imported targets', async () => {
    const imported: SshTarget[] = [
      { id: 'ssh-imp', label: 'staging', host: 'staging.com', port: 22, username: '' }
    ]
    mockSshStore.importFromSshConfig.mockReturnValue(imported)
    const result = await handlers.get('ssh:importConfig')!(null, {})
    expect(result).toEqual(imported)
  })

  it('ssh:connect throws for unknown targetId', async () => {
    mockSshStore.getTarget.mockReturnValue(undefined)
    await expect(handlers.get('ssh:connect')!(null, { targetId: 'unknown' })).rejects.toThrow(
      'SSH target "unknown" not found'
    )
  })

  it('ssh:connect calls connection manager', async () => {
    const target: SshTarget = {
      id: 'ssh-1',
      label: 'Server',
      host: 'example.com',
      port: 22,
      username: 'deploy'
    }
    mockSshStore.getTarget.mockReturnValue(target)
    mockConnectionManager.connect.mockResolvedValue({})
    mockConnectionManager.getState.mockReturnValue({
      targetId: 'ssh-1',
      status: 'connected',
      error: null,
      reconnectAttempt: 0
    })
    await handlers.get('ssh:connect')!(null, { targetId: 'ssh-1' })
    expect(mockConnectionManager.connect).toHaveBeenCalledWith(target)
  })

  it('ssh:disconnect calls connection manager', async () => {
    mockConnectionManager.disconnect.mockResolvedValue(undefined)
    await handlers.get('ssh:disconnect')!(null, { targetId: 'ssh-1' })
    expect(mockConnectionManager.disconnect).toHaveBeenCalledWith('ssh-1')
  })

  it('ssh:getState returns connection state', async () => {
    const state = {
      targetId: 'ssh-1',
      status: 'connected',
      error: null,
      reconnectAttempt: 0
    }
    mockConnectionManager.getState.mockReturnValue(state)
    const result = await handlers.get('ssh:getState')!(null, { targetId: 'ssh-1' })
    expect(result).toEqual(state)
  })
})

299
src/main/ipc/ssh.ts Normal file
View file

@ -0,0 +1,299 @@
import { ipcMain, type BrowserWindow } from 'electron'
import type { Store } from '../persistence'
import { SshConnectionStore } from '../ssh/ssh-connection-store'
import { SshConnectionManager, type SshConnectionCallbacks } from '../ssh/ssh-connection'
import { deployAndLaunchRelay } from '../ssh/ssh-relay-deploy'
import { SshChannelMultiplexer } from '../ssh/ssh-channel-multiplexer'
import { SshPtyProvider } from '../providers/ssh-pty-provider'
import { SshFilesystemProvider } from '../providers/ssh-filesystem-provider'
import { SshGitProvider } from '../providers/ssh-git-provider'
import { registerSshPtyProvider } from './pty'
import { registerSshFilesystemProvider } from '../providers/ssh-filesystem-dispatch'
import { registerSshGitProvider } from '../providers/ssh-git-dispatch'
import { SshPortForwardManager } from '../ssh/ssh-port-forward'
import type { SshTarget, SshConnectionState } from '../../shared/ssh-types'
import { cleanupConnection, wireUpSshPtyEvents, reestablishRelayStack } from './ssh-relay-helpers'
import { buildSshAuthCallbacks } from './ssh-auth-helpers'
import { registerSshBrowseHandler } from './ssh-browse'
// Why: this state lives at module scope (not inside registerSshHandlers) so
// it survives handler re-registration when a new BrowserWindow is created.
let sshStore: SshConnectionStore | null = null
let connectionManager: SshConnectionManager | null = null
let portForwardManager: SshPortForwardManager | null = null
// Track multiplexers and providers per connection for cleanup
const activeMultiplexers = new Map<string, SshChannelMultiplexer>()
// Why: tracks which connections have completed initial relay setup, so
// onStateChange can distinguish "reconnected after drop" from "first connect".
const initializedConnections = new Set<string>()
// Why: ssh:testConnection calls connect() then disconnect(), which fires
// state-change events to the renderer. This causes worktree cards to briefly
// flash "connected" then "disconnected". Suppressing broadcasts during tests
// avoids that visual glitch.
const testingTargets = new Set<string>()
/**
 * Registers all SSH-related IPC handlers (target CRUD, connection lifecycle,
 * port forwarding) and returns the live manager/store singletons for use by
 * other main-process modules.
 *
 * Safe to call more than once: prior handlers are removed before registering.
 */
export function registerSshHandlers(
  store: Store,
  getMainWindow: () => BrowserWindow | null
): { connectionManager: SshConnectionManager; sshStore: SshConnectionStore } {
  // Why: on macOS, app re-activation creates a new BrowserWindow and re-calls
  // this function. ipcMain.handle() throws if a handler is already registered,
  // so we must remove any prior handlers before re-registering.
  for (const ch of [
    'ssh:listTargets',
    'ssh:addTarget',
    'ssh:updateTarget',
    'ssh:removeTarget',
    'ssh:importConfig',
    'ssh:connect',
    'ssh:disconnect',
    'ssh:getState',
    'ssh:testConnection',
    'ssh:addPortForward',
    'ssh:removePortForward',
    'ssh:listPortForwards'
  ]) {
    ipcMain.removeHandler(ch)
  }
  sshStore = new SshConnectionStore(store)
  const callbacks: SshConnectionCallbacks = {
    onStateChange: (targetId: string, state: SshConnectionState) => {
      // Suppress broadcasts while a test-connection is in flight (see
      // testingTargets above).
      if (testingTargets.has(targetId)) {
        return
      }
      const win = getMainWindow()
      if (win && !win.isDestroyed()) {
        win.webContents.send('ssh:state-changed', { targetId, state })
      }
      // Why: when SSH reconnects after a network blip, we must re-deploy the
      // relay and rebuild the full provider stack. The old multiplexer's pending
      // requests are already rejected with CONNECTION_LOST by dispose().
      if (
        state.status === 'connected' &&
        state.reconnectAttempt === 0 &&
        initializedConnections.has(targetId)
      ) {
        void reestablishRelayStack(
          targetId,
          getMainWindow,
          connectionManager,
          activeMultiplexers,
          portForwardManager
        )
      }
    },
    ...buildSshAuthCallbacks(getMainWindow)
  }
  // NOTE(review): re-registration replaces the previous manager instances
  // without disposing them — forwards/connections held by the old instances
  // become unreachable through the new ones. Confirm whether the previous
  // portForwardManager should be dispose()d here.
  connectionManager = new SshConnectionManager(callbacks)
  portForwardManager = new SshPortForwardManager()
  registerSshBrowseHandler(() => connectionManager)

  // ── Target CRUD ────────────────────────────────────────────────────
  ipcMain.handle('ssh:listTargets', () => {
    return sshStore!.listTargets()
  })

  ipcMain.handle('ssh:addTarget', (_event, args: { target: Omit<SshTarget, 'id'> }) => {
    return sshStore!.addTarget(args.target)
  })

  ipcMain.handle(
    'ssh:updateTarget',
    (_event, args: { id: string; updates: Partial<Omit<SshTarget, 'id'>> }) => {
      return sshStore!.updateTarget(args.id, args.updates)
    }
  )

  ipcMain.handle('ssh:removeTarget', (_event, args: { id: string }) => {
    sshStore!.removeTarget(args.id)
  })

  ipcMain.handle('ssh:importConfig', () => {
    return sshStore!.importFromSshConfig()
  })

  // ── Connection lifecycle ───────────────────────────────────────────
  ipcMain.handle('ssh:connect', async (_event, args: { targetId: string }) => {
    const target = sshStore!.getTarget(args.targetId)
    if (!target) {
      throw new Error(`SSH target "${args.targetId}" not found`)
    }
    let conn
    try {
      conn = await connectionManager!.connect(target)
    } catch (err) {
      // Why: SshConnection.connect() sets its internal state to 'error', but
      // the onStateChange callback may have been suppressed or the state may
      // not have propagated to the renderer. Explicitly broadcast the error
      // so the UI leaves 'connecting'/'host-key-verification'.
      const win = getMainWindow()
      if (win && !win.isDestroyed()) {
        win.webContents.send('ssh:state-changed', {
          targetId: args.targetId,
          state: {
            targetId: args.targetId,
            status: 'error',
            error: err instanceof Error ? err.message : String(err),
            reconnectAttempt: 0
          }
        })
      }
      throw err
    }
    // Deploy relay and establish multiplexer
    callbacks.onStateChange(args.targetId, {
      targetId: args.targetId,
      status: 'deploying-relay',
      error: null,
      reconnectAttempt: 0
    })
    try {
      const { transport } = await deployAndLaunchRelay(conn, (status) => {
        const win = getMainWindow()
        if (win && !win.isDestroyed()) {
          win.webContents.send('ssh:deploy-progress', { targetId: args.targetId, status })
        }
      })
      const mux = new SshChannelMultiplexer(transport)
      activeMultiplexers.set(args.targetId, mux)
      const ptyProvider = new SshPtyProvider(args.targetId, mux)
      registerSshPtyProvider(args.targetId, ptyProvider)
      const fsProvider = new SshFilesystemProvider(args.targetId, mux)
      registerSshFilesystemProvider(args.targetId, fsProvider)
      const gitProvider = new SshGitProvider(args.targetId, mux)
      registerSshGitProvider(args.targetId, gitProvider)
      wireUpSshPtyEvents(ptyProvider, getMainWindow)
      initializedConnections.add(args.targetId)
      // Why: we manually pushed `deploying-relay` above, so the renderer's
      // state is stuck there. Send `connected` directly to the renderer
      // instead of going through callbacks.onStateChange, which would
      // trigger the reconnection logic (reestablishRelayStack).
      const win = getMainWindow()
      if (win && !win.isDestroyed()) {
        win.webContents.send('ssh:state-changed', {
          targetId: args.targetId,
          state: {
            targetId: args.targetId,
            status: 'connected',
            error: null,
            reconnectAttempt: 0
          }
        })
      }
    } catch (err) {
      // Relay deployment failed — disconnect SSH
      await connectionManager!.disconnect(args.targetId)
      throw err
    }
    return connectionManager!.getState(args.targetId)
  })

  ipcMain.handle('ssh:disconnect', async (_event, args: { targetId: string }) => {
    cleanupConnection(args.targetId, activeMultiplexers, initializedConnections, portForwardManager)
    await connectionManager!.disconnect(args.targetId)
  })

  ipcMain.handle('ssh:getState', (_event, args: { targetId: string }) => {
    return connectionManager!.getState(args.targetId)
  })

  ipcMain.handle('ssh:testConnection', async (_event, args: { targetId: string }) => {
    const target = sshStore!.getTarget(args.targetId)
    if (!target) {
      throw new Error(`SSH target "${args.targetId}" not found`)
    }
    // Why: testConnection calls connect() then disconnect(). If the target
    // already has an active relay session, connect() would reuse the connection
    // but disconnect() would tear down the entire relay stack — killing all
    // active PTYs and file watchers for a "test" that was supposed to be safe.
    if (initializedConnections.has(args.targetId)) {
      return { success: true, state: connectionManager!.getState(args.targetId) }
    }
    testingTargets.add(args.targetId)
    try {
      const conn = await connectionManager!.connect(target)
      const state = conn.getState()
      await connectionManager!.disconnect(args.targetId)
      return { success: true, state }
    } catch (err) {
      return {
        success: false,
        error: err instanceof Error ? err.message : String(err)
      }
    } finally {
      testingTargets.delete(args.targetId)
    }
  })

  // ── Port forwarding ─────────────────────────────────────────────────
  ipcMain.handle(
    'ssh:addPortForward',
    async (
      _event,
      args: {
        targetId: string
        localPort: number
        remoteHost: string
        remotePort: number
        label?: string
      }
    ) => {
      const conn = connectionManager!.getConnection(args.targetId)
      if (!conn) {
        throw new Error(`SSH connection "${args.targetId}" not found`)
      }
      return portForwardManager!.addForward(
        args.targetId,
        conn,
        args.localPort,
        args.remoteHost,
        args.remotePort,
        args.label
      )
    }
  )

  ipcMain.handle('ssh:removePortForward', (_event, args: { id: string }) => {
    return portForwardManager!.removeForward(args.id)
  })

  ipcMain.handle('ssh:listPortForwards', (_event, args?: { targetId?: string }) => {
    return portForwardManager!.listForwards(args?.targetId)
  })

  return { connectionManager, sshStore }
}
// Module-level accessors: other IPC modules (repos, hooks, worktrees) reach
// the live SSH singletons through these instead of the closure state above.

/** The active connection manager, or null before registerSshHandlers runs. */
export function getSshConnectionManager(): SshConnectionManager | null {
  return connectionManager
}

/** The SSH target store, or null before registerSshHandlers runs. */
export function getSshConnectionStore(): SshConnectionStore | null {
  return sshStore
}

/** The live relay multiplexer for a connection, if one is established. */
export function getActiveMultiplexer(connectionId: string): SshChannelMultiplexer | undefined {
  return activeMultiplexers.get(connectionId)
}

View file

@ -0,0 +1,131 @@
// Why: extracted from worktrees.ts to keep the main IPC module under the
// max-lines threshold. Hooks IPC handlers (check, readIssueCommand,
// writeIssueCommand) are self-contained and don't interact with worktree
// creation or removal state.
import { ipcMain } from 'electron'
import { join } from 'path'
import type { Store } from '../persistence'
import { isFolderRepo } from '../../shared/repo-kind'
import { getSshFilesystemProvider } from '../providers/ssh-filesystem-dispatch'
import {
hasHooksFile,
hasUnrecognizedOrcaYamlKeys,
loadHooks,
readIssueCommand,
writeIssueCommand
} from '../hooks'
/**
 * Registers the hooks-related IPC handlers: hooks:check,
 * hooks:readIssueCommand, hooks:writeIssueCommand. Remote repos (those with a
 * connectionId) are routed through the SSH filesystem provider; local repos
 * use the filesystem helpers from ../hooks.
 */
export function registerHooksHandlers(store: Store): void {
  ipcMain.removeHandler('hooks:check')
  ipcMain.removeHandler('hooks:readIssueCommand')
  ipcMain.removeHandler('hooks:writeIssueCommand')

  // Why: the "no hooks" / "no issue command" shapes are returned from several
  // guard branches; building them in one place keeps those branches in sync
  // and returns a fresh object per call (IPC results should not share state).
  const noHooks = () => ({ hasHooks: false, hooks: null, mayNeedUpdate: false })
  const noIssueCommand = () => ({
    localContent: null,
    sharedContent: null,
    effectiveContent: null,
    localFilePath: '',
    source: 'none' as const
  })

  ipcMain.handle('hooks:check', async (_event, args: { repoId: string }) => {
    const repo = store.getRepo(args.repoId)
    if (!repo || isFolderRepo(repo)) {
      return noHooks()
    }
    // Why: remote repos read orca.yaml via the SSH filesystem provider.
    // Parsing happens in the main process since it's CPU-cheap and avoids
    // adding YAML parsing to the relay.
    if (repo.connectionId) {
      const fsProvider = getSshFilesystemProvider(repo.connectionId)
      if (!fsProvider) {
        return noHooks()
      }
      try {
        const result = await fsProvider.readFile(join(repo.path, '.orca.yaml'))
        if (result.isBinary) {
          return noHooks()
        }
        const { parse } = await import('yaml')
        const parsed = parse(result.content)
        // NOTE(review): unlike the local path, mayNeedUpdate is never computed
        // for remote repos — confirm that is intentional.
        return { hasHooks: true, hooks: parsed, mayNeedUpdate: false }
      } catch {
        return noHooks()
      }
    }
    const has = hasHooksFile(repo.path)
    const hooks = has ? loadHooks(repo.path) : null
    // Why: when a newer Orca version adds a top-level key to `orca.yaml`, older
    // versions that don't recognise it return null and show "could not be parsed".
    // Detecting well-formed but unrecognised keys lets the UI suggest updating
    // instead of implying the file is broken.
    const mayNeedUpdate = has && !hooks && hasUnrecognizedOrcaYamlKeys(repo.path)
    return {
      hasHooks: has,
      hooks,
      mayNeedUpdate
    }
  })

  ipcMain.handle('hooks:readIssueCommand', async (_event, args: { repoId: string }) => {
    const repo = store.getRepo(args.repoId)
    if (!repo || isFolderRepo(repo)) {
      return noIssueCommand()
    }
    if (repo.connectionId) {
      const fsProvider = getSshFilesystemProvider(repo.connectionId)
      if (!fsProvider) {
        return noIssueCommand()
      }
      // Remote repos only support the local (per-repo) issue command file.
      const issuePath = join(repo.path, '.orca', 'issue-command')
      try {
        const result = await fsProvider.readFile(issuePath)
        const content = result.isBinary ? null : result.content
        return {
          localContent: content,
          sharedContent: null,
          effectiveContent: content,
          localFilePath: issuePath,
          source: 'local' as const
        }
      } catch {
        // Missing or unreadable file — treated the same as "not configured".
        return noIssueCommand()
      }
    }
    return readIssueCommand(repo.path)
  })

  ipcMain.handle(
    'hooks:writeIssueCommand',
    async (_event, args: { repoId: string; content: string }) => {
      const repo = store.getRepo(args.repoId)
      if (!repo || isFolderRepo(repo)) {
        return
      }
      if (repo.connectionId) {
        const fsProvider = getSshFilesystemProvider(repo.connectionId)
        if (!fsProvider) {
          // No live provider — silently drop, matching the read-side behavior.
          return
        }
        await fsProvider.writeFile(join(repo.path, '.orca', 'issue-command'), args.content)
        return
      }
      writeIssueCommand(repo.path, args.content)
    }
  )
}

View file

@ -0,0 +1,248 @@
// Why: extracted from worktrees.ts to keep the main IPC module under the
// max-lines threshold. Worktree creation helpers (local and remote) live
// here so the IPC dispatch file stays focused on handler wiring.
import type { BrowserWindow } from 'electron'
import { join } from 'path'
import type { Store } from '../persistence'
import type {
CreateWorktreeArgs,
CreateWorktreeResult,
Repo,
WorktreeMeta
} from '../../shared/types'
import { getPRForBranch } from '../github/client'
import { listWorktrees, addWorktree } from '../git/worktree'
import { getGitUsername, getDefaultBaseRef, getBranchConflictKind } from '../git/repo'
import { gitExecFileSync } from '../git/runner'
import { isWslPath, parseWslPath, getWslHome } from '../wsl'
import { createSetupRunnerScript, getEffectiveHooks, shouldRunSetupForCreate } from '../hooks'
import { getSshGitProvider } from '../providers/ssh-git-dispatch'
import type { SshGitProvider } from '../providers/ssh-git-provider'
import {
sanitizeWorktreeName,
computeBranchName,
computeWorktreePath,
ensurePathWithinWorkspace,
shouldSetDisplayName,
mergeWorktree,
areWorktreePathsEqual
} from './worktree-logic'
import { rebuildAuthorizedRootsCache } from './filesystem-auth'
/** Broadcast that a repo's worktree list changed so the renderer can refetch. */
export function notifyWorktreesChanged(mainWindow: BrowserWindow, repoId: string): void {
  if (mainWindow.isDestroyed()) {
    return
  }
  mainWindow.webContents.send('worktrees:changed', { repoId })
}
/**
 * Create a worktree for a remote (SSH) repo, routing every git operation
 * through the connection's git provider / relay.
 *
 * Mirrors `createLocalWorktree` but skips local-only concerns (GitHub PR
 * lookup, WSL path mapping, setup-hook launch) that do not apply to remote
 * repos in this beta.
 *
 * @throws when the repo has no connection, the provider is unavailable, the
 *   branch already exists on the remote, or the freshly created worktree
 *   cannot be found in the follow-up listing.
 */
export async function createRemoteWorktree(
  args: CreateWorktreeArgs,
  repo: Repo,
  store: Store,
  mainWindow: BrowserWindow
): Promise<CreateWorktreeResult> {
  // Why: guard explicitly instead of a non-null assertion so a caller that
  // accidentally routes a local repo here fails with a clear error rather
  // than looking up a provider for connection "undefined".
  if (!repo.connectionId) {
    throw new Error(`Repo "${repo.id}" has no SSH connection`)
  }
  const provider = getSshGitProvider(repo.connectionId) as SshGitProvider | undefined
  if (!provider) {
    throw new Error(`No git provider for connection "${repo.connectionId}"`)
  }
  const settings = store.getSettings()
  const requestedName = args.name
  const sanitizedName = sanitizeWorktreeName(args.name)
  // Get git username from the remote; it feeds the branch-name prefix and an
  // empty value is handled by computeBranchName.
  let username = ''
  try {
    const { stdout } = await provider.exec(['config', 'user.name'], repo.path)
    username = stdout.trim()
  } catch {
    /* no username configured */
  }
  const branchName = computeBranchName(sanitizedName, settings, username)
  // Check branch conflict on the remote. The listing itself is best-effort:
  // if exec fails we proceed and let `git worktree add` surface any real
  // conflict. (Checking outside the try avoids the fragile pattern of
  // throwing inside the try and re-matching the message in the catch.)
  let branchListing = ''
  try {
    const { stdout } = await provider.exec(['branch', '--list', '--all', branchName], repo.path)
    branchListing = stdout
  } catch {
    /* best-effort */
  }
  if (branchListing.trim()) {
    throw new Error(`Branch "${branchName}" already exists. Pick a different worktree name.`)
  }
  // Compute worktree path relative to the repo's parent on the remote
  const remotePath = `${repo.path}/../${sanitizedName}`
  // Determine base branch: explicit arg > repo setting > remote HEAD > origin/main
  let baseBranch = args.baseBranch || repo.worktreeBaseRef
  if (!baseBranch) {
    try {
      const { stdout } = await provider.exec(
        ['symbolic-ref', 'refs/remotes/origin/HEAD', '--short'],
        repo.path
      )
      baseBranch = stdout.trim()
    } catch {
      baseBranch = 'origin/main'
    }
  }
  // Fetch latest so the worktree starts from up-to-date content (best-effort)
  const remote = baseBranch.includes('/') ? baseBranch.split('/')[0] : 'origin'
  try {
    await provider.exec(['fetch', remote], repo.path)
  } catch {
    /* best-effort */
  }
  // Create worktree via relay; track only when the base is a remote ref
  await provider.addWorktree(repo.path, branchName, remotePath, {
    base: baseBranch,
    track: baseBranch.includes('/')
  })
  // Re-list to get the created worktree info
  const gitWorktrees = await provider.listWorktrees(repo.path)
  const created = gitWorktrees.find(
    (gw) => gw.branch?.endsWith(branchName) || gw.path.endsWith(sanitizedName)
  )
  if (!created) {
    throw new Error('Worktree created but not found in listing')
  }
  const worktreeId = `${repo.id}::${created.path}`
  const metaUpdates: Partial<WorktreeMeta> = {
    // Stamp activity so the worktree sorts into its final position immediately.
    lastActivityAt: Date.now(),
    ...(shouldSetDisplayName(requestedName, branchName, sanitizedName)
      ? { displayName: requestedName }
      : {})
  }
  const meta = store.setWorktreeMeta(worktreeId, metaUpdates)
  const worktree = mergeWorktree(repo.id, created, meta)
  notifyWorktreesChanged(mainWindow, repo.id)
  return { worktree }
}
/**
 * Create a git worktree for a local (or WSL-mapped) repo.
 *
 * Flow: resolve branch name → reject branch/PR name conflicts → compute and
 * validate the worktree path → fetch the base remote (best-effort) → add the
 * worktree → persist metadata → optionally prepare (not run) a setup script.
 *
 * @throws when the branch already exists, the branch already has a PR, the
 *   path escapes the workspace root, or the created worktree is missing from
 *   the follow-up listing.
 */
export async function createLocalWorktree(
  args: CreateWorktreeArgs,
  repo: Repo,
  store: Store,
  mainWindow: BrowserWindow
): Promise<CreateWorktreeResult> {
  const settings = store.getSettings()
  const requestedName = args.name
  const sanitizedName = sanitizeWorktreeName(args.name)
  // Compute branch name with prefix
  const username = getGitUsername(repo.path)
  const branchName = computeBranchName(sanitizedName, settings, username)
  const branchConflictKind = await getBranchConflictKind(repo.path, branchName)
  if (branchConflictKind) {
    throw new Error(
      `Branch "${branchName}" already exists ${branchConflictKind === 'local' ? 'locally' : 'on a remote'}. Pick a different worktree name.`
    )
  }
  // Why: the UI resolves PR status by branch name alone. Reusing a historical
  // PR head name would make a fresh worktree inherit that old merged/closed PR
  // immediately, so we reject the name instead of silently suffixing it.
  // The lookup is best-effort — don't block creation if GitHub is unreachable.
  let existingPR: Awaited<ReturnType<typeof getPRForBranch>> | null = null
  try {
    existingPR = await getPRForBranch(repo.path, branchName)
  } catch {
    // GitHub API may be unreachable, rate-limited, or token missing
  }
  if (existingPR) {
    throw new Error(
      `Branch "${branchName}" already has PR #${existingPR.number}. Pick a different worktree name.`
    )
  }
  // Compute worktree path
  let worktreePath = computeWorktreePath(sanitizedName, repo.path, settings)
  // Why: WSL worktrees live under ~/orca/workspaces inside the WSL
  // filesystem. Validate against that root, not the Windows workspace dir.
  // If WSL home lookup fails, keep using the configured workspace root so
  // the path traversal guard still runs on the fallback path.
  const wslInfo = isWslPath(repo.path) ? parseWslPath(repo.path) : null
  const wslHome = wslInfo ? getWslHome(wslInfo.distro) : null
  const workspaceRoot = wslHome ? join(wslHome, 'orca', 'workspaces') : settings.workspaceDir
  worktreePath = ensurePathWithinWorkspace(worktreePath, workspaceRoot)
  // Determine base branch
  const baseBranch = args.baseBranch || repo.worktreeBaseRef || getDefaultBaseRef(repo.path)
  const setupScript = getEffectiveHooks(repo)?.scripts.setup
  // Why: `ask` is a pre-create choice gate, not a post-create side effect.
  // Resolve it before mutating git state so missing UI input cannot strand
  // a real worktree on disk while the renderer reports "create failed".
  const shouldLaunchSetup = setupScript ? shouldRunSetupForCreate(repo, args.setupDecision) : false
  // Fetch latest from remote so the worktree starts with up-to-date content
  const remote = baseBranch.includes('/') ? baseBranch.split('/')[0] : 'origin'
  try {
    gitExecFileSync(['fetch', remote], { cwd: repo.path })
  } catch {
    // Fetch is best-effort — don't block worktree creation if offline
  }
  addWorktree(
    repo.path,
    worktreePath,
    branchName,
    baseBranch,
    settings.refreshLocalBaseRefOnWorktreeCreate
  )
  // Re-list to get the freshly created worktree info
  const gitWorktrees = await listWorktrees(repo.path)
  const created = gitWorktrees.find((gw) => areWorktreePathsEqual(gw.path, worktreePath))
  if (!created) {
    throw new Error('Worktree created but not found in listing')
  }
  const worktreeId = `${repo.id}::${created.path}`
  const metaUpdates: Partial<WorktreeMeta> = {
    // Stamp activity so the worktree sorts into its final position
    // immediately — prevents scroll-to-reveal racing with a later
    // bumpWorktreeActivity that would re-sort the list.
    lastActivityAt: Date.now(),
    ...(shouldSetDisplayName(requestedName, branchName, sanitizedName)
      ? { displayName: requestedName }
      : {})
  }
  const meta = store.setWorktreeMeta(worktreeId, metaUpdates)
  const worktree = mergeWorktree(repo.id, created, meta)
  await rebuildAuthorizedRootsCache(store)
  let setup: CreateWorktreeResult['setup']
  if (setupScript && shouldLaunchSetup) {
    try {
      // Why: setup now runs in a visible terminal owned by the renderer so users
      // can inspect failures, answer prompts, and rerun it. The main process only
      // resolves policy and writes the runner script; it must not execute setup
      // itself anymore or we would reintroduce the hidden background-hook behavior.
      //
      // Why: the git worktree already exists at this point. If runner generation
      // fails, surfacing the error as a hard create failure would lie to the UI
      // about the underlying git state and strand a real worktree on disk.
      // Degrade to "created without setup launch" instead.
      setup = createSetupRunnerScript(repo, worktreePath, setupScript)
    } catch (error) {
      console.error(`[hooks] Failed to prepare setup runner for ${worktreePath}:`, error)
    }
  }
  notifyWorktreesChanged(mainWindow, repo.id)
  return {
    worktree,
    ...(setup ? { setup } : {})
  }
}

View file

@ -9,37 +9,31 @@ import type {
Worktree,
WorktreeMeta
} from '../../shared/types'
import { getPRForBranch } from '../github/client'
import { listWorktrees, addWorktree, removeWorktree } from '../git/worktree'
import { getGitUsername, getDefaultBaseRef, getBranchConflictKind } from '../git/repo'
import { gitExecFileAsync, gitExecFileSync } from '../git/runner'
import { isWslPath, parseWslPath, getWslHome } from '../wsl'
import { join } from 'path'
import { listRepoWorktrees } from '../repo-worktrees'
import { removeWorktree } from '../git/worktree'
import { gitExecFileAsync } from '../git/runner'
import { listRepoWorktrees, createFolderWorktree } from '../repo-worktrees'
import { getSshGitProvider } from '../providers/ssh-git-dispatch'
import {
createIssueCommandRunnerScript,
createSetupRunnerScript,
getEffectiveHooks,
loadHooks,
readIssueCommand,
runHook,
hasHooksFile,
hasUnrecognizedOrcaYamlKeys,
shouldRunSetupForCreate,
writeIssueCommand
} from '../hooks'
import {
sanitizeWorktreeName,
computeBranchName,
computeWorktreePath,
ensurePathWithinWorkspace,
shouldSetDisplayName,
mergeWorktree,
parseWorktreeId,
areWorktreePathsEqual,
formatWorktreeRemovalError,
isOrphanedWorktreeError
} from './worktree-logic'
import {
createLocalWorktree,
createRemoteWorktree,
notifyWorktreesChanged
} from './worktree-remote'
import { rebuildAuthorizedRootsCache, ensureAuthorizedRootsCache } from './filesystem-auth'
export function registerWorktreeHandlers(mainWindow: BrowserWindow, store: Store): void {
@ -65,7 +59,20 @@ export function registerWorktreeHandlers(mainWindow: BrowserWindow, store: Store
const allWorktrees: Worktree[] = []
for (const repo of repos) {
const gitWorktrees = await listRepoWorktrees(repo)
let gitWorktrees
if (isFolderRepo(repo)) {
gitWorktrees = [createFolderWorktree(repo)]
} else if (repo.connectionId) {
const provider = getSshGitProvider(repo.connectionId)
// Why: when SSH is disconnected the provider is null. Skip this repo
// so the renderer keeps its cached worktree list instead of clearing it.
if (!provider) {
continue
}
gitWorktrees = await provider.listWorktrees(repo.path)
} else {
gitWorktrees = await listRepoWorktrees(repo)
}
for (const gw of gitWorktrees) {
const worktreeId = `${repo.id}::${gw.path}`
const meta = store.getWorktreeMeta(worktreeId)
@ -86,7 +93,21 @@ export function registerWorktreeHandlers(mainWindow: BrowserWindow, store: Store
return []
}
const gitWorktrees = await listRepoWorktrees(repo)
let gitWorktrees
if (isFolderRepo(repo)) {
gitWorktrees = [createFolderWorktree(repo)]
} else if (repo.connectionId) {
const provider = getSshGitProvider(repo.connectionId)
// Why: when SSH is disconnected the provider is null. Throwing here
// makes the renderer's fetchWorktrees catch block preserve its cached
// worktree list instead of replacing it with an empty array.
if (!provider) {
throw new Error(`SSH connection "${repo.connectionId}" is not active`)
}
gitWorktrees = await provider.listWorktrees(repo.path)
} else {
gitWorktrees = await listRepoWorktrees(repo)
}
return gitWorktrees.map((gw) => {
const worktreeId = `${repo.id}::${gw.path}`
const meta = store.getWorktreeMeta(worktreeId)
@ -105,119 +126,12 @@ export function registerWorktreeHandlers(mainWindow: BrowserWindow, store: Store
throw new Error('Folder mode does not support creating worktrees.')
}
const settings = store.getSettings()
const requestedName = args.name
const sanitizedName = sanitizeWorktreeName(args.name)
// Compute branch name with prefix
const username = getGitUsername(repo.path)
const branchName = computeBranchName(sanitizedName, settings, username)
const branchConflictKind = await getBranchConflictKind(repo.path, branchName)
if (branchConflictKind) {
throw new Error(
`Branch "${branchName}" already exists ${branchConflictKind === 'local' ? 'locally' : 'on a remote'}. Pick a different worktree name.`
)
// Remote repos route all git operations through the relay
if (repo.connectionId) {
return createRemoteWorktree(args, repo, store, mainWindow)
}
// Why: the UI resolves PR status by branch name alone. Reusing a historical
// PR head name would make a fresh worktree inherit that old merged/closed PR
// immediately, so we reject the name instead of silently suffixing it.
// The lookup is best-effort — don't block creation if GitHub is unreachable.
let existingPR: Awaited<ReturnType<typeof getPRForBranch>> | null = null
try {
existingPR = await getPRForBranch(repo.path, branchName)
} catch {
// GitHub API may be unreachable, rate-limited, or token missing
}
if (existingPR) {
throw new Error(
`Branch "${branchName}" already has PR #${existingPR.number}. Pick a different worktree name.`
)
}
// Compute worktree path
let worktreePath = computeWorktreePath(sanitizedName, repo.path, settings)
// Why: WSL worktrees live under ~/orca/workspaces inside the WSL
// filesystem. Validate against that root, not the Windows workspace dir.
// If WSL home lookup fails, keep using the configured workspace root so
// the path traversal guard still runs on the fallback path.
const wslInfo = isWslPath(repo.path) ? parseWslPath(repo.path) : null
const wslHome = wslInfo ? getWslHome(wslInfo.distro) : null
const workspaceRoot = wslHome ? join(wslHome, 'orca', 'workspaces') : settings.workspaceDir
worktreePath = ensurePathWithinWorkspace(worktreePath, workspaceRoot)
// Determine base branch
const baseBranch = args.baseBranch || repo.worktreeBaseRef || getDefaultBaseRef(repo.path)
const setupScript = getEffectiveHooks(repo)?.scripts.setup
// Why: `ask` is a pre-create choice gate, not a post-create side effect.
// Resolve it before mutating git state so missing UI input cannot strand
// a real worktree on disk while the renderer reports "create failed".
const shouldLaunchSetup = setupScript
? shouldRunSetupForCreate(repo, args.setupDecision)
: false
// Fetch latest from remote so the worktree starts with up-to-date content
const remote = baseBranch.includes('/') ? baseBranch.split('/')[0] : 'origin'
try {
gitExecFileSync(['fetch', remote], { cwd: repo.path })
} catch {
// Fetch is best-effort — don't block worktree creation if offline
}
addWorktree(
repo.path,
worktreePath,
branchName,
baseBranch,
settings.refreshLocalBaseRefOnWorktreeCreate
)
// Re-list to get the freshly created worktree info
const gitWorktrees = await listWorktrees(repo.path)
const created = gitWorktrees.find((gw) => areWorktreePathsEqual(gw.path, worktreePath))
if (!created) {
throw new Error('Worktree created but not found in listing')
}
const worktreeId = `${repo.id}::${created.path}`
const metaUpdates: Partial<WorktreeMeta> = {
// Stamp activity so the worktree sorts into its final position
// immediately — prevents scroll-to-reveal racing with a later
// bumpWorktreeActivity that would re-sort the list.
lastActivityAt: Date.now(),
...(shouldSetDisplayName(requestedName, branchName, sanitizedName)
? { displayName: requestedName }
: {})
}
const meta = store.setWorktreeMeta(worktreeId, metaUpdates)
const worktree = mergeWorktree(repo.id, created, meta)
await rebuildAuthorizedRootsCache(store)
let setup: CreateWorktreeResult['setup']
if (setupScript && shouldLaunchSetup) {
try {
// Why: setup now runs in a visible terminal owned by the renderer so users
// can inspect failures, answer prompts, and rerun it. The main process only
// resolves policy and writes the runner script; it must not execute setup
// itself anymore or we would reintroduce the hidden background-hook behavior.
//
// Why: the git worktree already exists at this point. If runner generation
// fails, surfacing the error as a hard create failure would lie to the UI
// about the underlying git state and strand a real worktree on disk.
// Degrade to "created without setup launch" instead.
setup = createSetupRunnerScript(repo, worktreePath, setupScript)
} catch (error) {
console.error(`[hooks] Failed to prepare setup runner for ${worktreePath}:`, error)
}
}
notifyWorktreesChanged(mainWindow, repo.id)
return {
worktree,
...(setup ? { setup } : {})
}
return createLocalWorktree(args, repo, store, mainWindow)
}
)
@ -233,6 +147,17 @@ export function registerWorktreeHandlers(mainWindow: BrowserWindow, store: Store
throw new Error('Folder mode does not support deleting worktrees.')
}
if (repo.connectionId) {
const provider = getSshGitProvider(repo.connectionId)
if (!provider) {
throw new Error(`No git provider for connection "${repo.connectionId}"`)
}
await provider.removeWorktree(worktreePath, args.force)
store.removeWorktreeMeta(args.worktreeId)
notifyWorktreesChanged(mainWindow, repoId)
return
}
// Run archive hook before removal
const hooks = getEffectiveHooks(repo)
if (hooks?.scripts.archive) {
@ -355,9 +280,3 @@ export function registerWorktreeHandlers(mainWindow: BrowserWindow, store: Store
writeIssueCommand(repo.path, args.content)
})
}
function notifyWorktreesChanged(mainWindow: BrowserWindow, repoId: string): void {
if (!mainWindow.isDestroyed()) {
mainWindow.webContents.send('worktrees:changed', { repoId })
}
}

View file

@ -3,6 +3,7 @@ import { readFileSync, writeFileSync, mkdirSync, existsSync, renameSync } from '
import { join, dirname } from 'path'
import { homedir } from 'os'
import type { PersistedState, Repo, WorktreeMeta, GlobalSettings } from '../shared/types'
import type { SshTarget } from '../shared/ssh-types'
import { isFolderRepo } from '../shared/repo-kind'
import { getGitUsername } from './git/repo'
import {
@ -83,7 +84,8 @@ export class Store {
...parsed.ui,
sortBy: normalizeSortBy(parsed.ui?.sortBy)
},
workspaceSession: { ...defaults.workspaceSession, ...parsed.workspaceSession }
workspaceSession: { ...defaults.workspaceSession, ...parsed.workspaceSession },
sshTargets: parsed.sshTargets ?? []
}
}
} catch (err) {
@ -278,6 +280,42 @@ export class Store {
this.scheduleSave()
}
// ── SSH Targets ────────────────────────────────────────────────────
/** All configured SSH targets; empty array when none exist yet. */
getSshTargets(): SshTarget[] {
  const targets = this.state.sshTargets
  return targets == null ? [] : targets
}
/** Look up one SSH target by id (undefined when absent). */
getSshTarget(id: string): SshTarget | undefined {
  return (this.state.sshTargets ?? []).find((target) => target.id === id)
}
addSshTarget(target: SshTarget): void {
if (!this.state.sshTargets) {
this.state.sshTargets = []
}
this.state.sshTargets.push(target)
this.scheduleSave()
}
/**
 * Patch an existing SSH target and schedule a save.
 * Returns a shallow copy of the updated target, or null when no target with
 * the given id exists.
 */
updateSshTarget(id: string, updates: Partial<Omit<SshTarget, 'id'>>): SshTarget | null {
  const existing = this.state.sshTargets?.find((candidate) => candidate.id === id)
  if (existing == null) {
    return null
  }
  // Mutate in place so other references to this element observe the update.
  Object.assign(existing, updates)
  this.scheduleSave()
  return { ...existing }
}
removeSshTarget(id: string): void {
if (!this.state.sshTargets) {
return
}
this.state.sshTargets = this.state.sshTargets.filter((t) => t.id !== id)
this.scheduleSave()
}
// ── Flush (for shutdown) ───────────────────────────────────────────
flush(): void {

View file

@ -0,0 +1,272 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
// Why: vi.mock factories are hoisted above all imports, so any mock fns they
// reference must be created via vi.hoisted to exist before the factories run.
const { existsSyncMock, statSyncMock, accessSyncMock, spawnMock } = vi.hoisted(() => ({
  existsSyncMock: vi.fn(),
  statSyncMock: vi.fn(),
  accessSyncMock: vi.fn(),
  spawnMock: vi.fn()
}))
vi.mock('fs', () => ({
  existsSync: existsSyncMock,
  statSync: statSyncMock,
  accessSync: accessSyncMock,
  chmodSync: vi.fn(),
  constants: { X_OK: 1 }
}))
vi.mock('node-pty', () => ({
  spawn: spawnMock
}))
// Force the non-WSL code path for every test in this file.
vi.mock('../wsl', () => ({
  parseWslPath: () => null
}))
import { LocalPtyProvider } from './local-pty-provider'
// Unit tests for LocalPtyProvider with fs, node-pty, and WSL detection mocked
// (see the vi.mock setup at the top of this file).
describe('LocalPtyProvider', () => {
  let provider: LocalPtyProvider
  // Hand-rolled stand-in for node-pty's IPty interface.
  let mockProc: {
    onData: ReturnType<typeof vi.fn>
    onExit: ReturnType<typeof vi.fn>
    write: ReturnType<typeof vi.fn>
    resize: ReturnType<typeof vi.fn>
    kill: ReturnType<typeof vi.fn>
    process: string
    pid: number
  }
  // Captured node-pty exit callback so tests can simulate process exit.
  let exitCb: ((info: { exitCode: number }) => void) | undefined
  let origShell: string | undefined
  beforeEach(() => {
    // Pin SHELL so shell-resolution is deterministic across CI environments.
    origShell = process.env.SHELL
    process.env.SHELL = '/bin/zsh'
    existsSyncMock.mockReturnValue(true)
    statSyncMock.mockReturnValue({ isDirectory: () => true, mode: 0o755 })
    accessSyncMock.mockReturnValue(undefined)
    exitCb = undefined
    mockProc = {
      onData: vi.fn(),
      onExit: vi.fn((cb: (info: { exitCode: number }) => void) => {
        exitCb = cb
      }),
      // kill synchronously fires the captured exit callback with -1,
      // mimicking node-pty's exit notification on kill.
      write: vi.fn(),
      resize: vi.fn(),
      kill: vi.fn(() => {
        exitCb?.({ exitCode: -1 })
      }),
      process: 'zsh',
      pid: 12345
    }
    spawnMock.mockReturnValue(mockProc)
    provider = new LocalPtyProvider()
  })
  afterEach(() => {
    // Restore SHELL exactly — delete it when it was originally unset.
    if (origShell === undefined) {
      delete process.env.SHELL
    } else {
      process.env.SHELL = origShell
    }
  })
  describe('spawn', () => {
    it('returns a unique PTY id', async () => {
      const result = await provider.spawn({ cols: 80, rows: 24 })
      expect(result.id).toBeTruthy()
      expect(typeof result.id).toBe('string')
    })
    it('calls node-pty spawn with correct args', async () => {
      await provider.spawn({ cols: 120, rows: 40, cwd: '/tmp' })
      expect(spawnMock).toHaveBeenCalledWith(
        expect.any(String),
        expect.any(Array),
        expect.objectContaining({
          cols: 120,
          rows: 40,
          cwd: '/tmp'
        })
      )
    })
    it('throws when cwd does not exist', async () => {
      existsSyncMock.mockImplementation((p: string) => p !== '/nonexistent')
      await expect(provider.spawn({ cols: 80, rows: 24, cwd: '/nonexistent' })).rejects.toThrow(
        'does not exist'
      )
    })
    it('invokes onSpawned callback', async () => {
      const onSpawned = vi.fn()
      provider.configure({ onSpawned })
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      expect(onSpawned).toHaveBeenCalledWith(id)
    })
    it('invokes buildSpawnEnv callback to customize environment', async () => {
      const buildSpawnEnv = vi.fn((_id: string, env: Record<string, string>) => {
        env.CUSTOM_VAR = 'custom-value'
        return env
      })
      provider.configure({ buildSpawnEnv })
      await provider.spawn({ cols: 80, rows: 24 })
      const spawnCall = spawnMock.mock.calls.at(-1)!
      expect(spawnCall[2].env.CUSTOM_VAR).toBe('custom-value')
    })
  })
  describe('write', () => {
    it('writes data to the PTY process', async () => {
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      provider.write(id, 'hello')
      expect(mockProc.write).toHaveBeenCalledWith('hello')
    })
    it('is a no-op for unknown PTY ids', () => {
      provider.write('nonexistent', 'hello')
      expect(mockProc.write).not.toHaveBeenCalled()
    })
  })
  describe('resize', () => {
    it('resizes the PTY process', async () => {
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      provider.resize(id, 120, 40)
      expect(mockProc.resize).toHaveBeenCalledWith(120, 40)
    })
  })
  describe('shutdown', () => {
    it('kills the PTY process', async () => {
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      await provider.shutdown(id, true)
      expect(mockProc.kill).toHaveBeenCalled()
    })
    it('invokes onExit callback via the node-pty exit handler', async () => {
      const onExit = vi.fn()
      provider.configure({ onExit })
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      await provider.shutdown(id, true)
      // -1 comes from the mock kill() firing the exit callback.
      expect(onExit).toHaveBeenCalledWith(id, -1)
    })
    it('is a no-op for unknown PTY ids', async () => {
      await provider.shutdown('nonexistent', true)
      expect(mockProc.kill).not.toHaveBeenCalled()
    })
  })
  describe('hasChildProcesses', () => {
    it('returns false when foreground process matches shell', async () => {
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      expect(await provider.hasChildProcesses(id)).toBe(false)
    })
    it('returns true when foreground process differs from shell', async () => {
      mockProc.process = 'node'
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      expect(await provider.hasChildProcesses(id)).toBe(true)
    })
    it('returns false for unknown PTY ids', async () => {
      expect(await provider.hasChildProcesses('nonexistent')).toBe(false)
    })
  })
  describe('getForegroundProcess', () => {
    it('returns the process name', async () => {
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      expect(await provider.getForegroundProcess(id)).toBe('zsh')
    })
    it('returns null for unknown PTY ids', async () => {
      expect(await provider.getForegroundProcess('nonexistent')).toBeNull()
    })
  })
  describe('event listeners', () => {
    it('notifies data listeners when PTY produces output', async () => {
      const dataHandler = vi.fn()
      provider.onData(dataHandler)
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      // Simulate node-pty data event
      const onDataCb = mockProc.onData.mock.calls[0][0]
      onDataCb('hello world')
      expect(dataHandler).toHaveBeenCalledWith({ id, data: 'hello world' })
    })
    it('notifies exit listeners when PTY exits', async () => {
      const exitHandler = vi.fn()
      provider.onExit(exitHandler)
      const { id } = await provider.spawn({ cols: 80, rows: 24 })
      // Simulate node-pty exit event
      exitCb?.({ exitCode: 0 })
      expect(exitHandler).toHaveBeenCalledWith({ id, code: 0 })
    })
    it('allows unsubscribing from events', async () => {
      const dataHandler = vi.fn()
      const unsub = provider.onData(dataHandler)
      const { id: _id } = await provider.spawn({ cols: 80, rows: 24 })
      unsub()
      const onDataCb = mockProc.onData.mock.calls[0][0]
      onDataCb('hello')
      expect(dataHandler).not.toHaveBeenCalled()
    })
  })
  describe('listProcesses', () => {
    // Why: PTY bookkeeping is module-level state, so earlier tests in this
    // file may have left entries behind — compare against a baseline count.
    it('returns spawned PTYs', async () => {
      const before = await provider.listProcesses()
      await provider.spawn({ cols: 80, rows: 24 })
      await provider.spawn({ cols: 80, rows: 24 })
      const after = await provider.listProcesses()
      expect(after.length - before.length).toBe(2)
      const newEntries = after.slice(before.length)
      expect(newEntries[0]).toHaveProperty('id')
      expect(newEntries[0]).toHaveProperty('title', 'zsh')
    })
  })
  describe('getDefaultShell', () => {
    it('returns SHELL env var on Unix', async () => {
      const originalShell = process.env.SHELL
      try {
        process.env.SHELL = '/bin/bash'
        expect(await provider.getDefaultShell()).toBe('/bin/bash')
      } finally {
        if (originalShell === undefined) {
          delete process.env.SHELL
        } else {
          process.env.SHELL = originalShell
        }
      }
    })
  })
  describe('killAll', () => {
    it('kills all PTY processes', async () => {
      await provider.spawn({ cols: 80, rows: 24 })
      await provider.spawn({ cols: 80, rows: 24 })
      provider.killAll()
      expect(mockProc.kill).toHaveBeenCalled()
      const list = await provider.listProcesses()
      expect(list).toHaveLength(0)
    })
  })
})

View file

@ -0,0 +1,441 @@
/* eslint-disable max-lines -- Why: shell-ready startup command integration adds
~70 lines of scanner/promise wiring to spawn(). Splitting the method would scatter
tightly coupled PTY lifecycle logic (scan ready write exit cleanup) across
files without a cleaner ownership seam. */
import { basename, win32 as pathWin32 } from 'path'
import { existsSync } from 'fs'
import * as pty from 'node-pty'
import { parseWslPath } from '../wsl'
import type { IPtyProvider, PtySpawnOptions, PtySpawnResult } from './types'
import {
ensureNodePtySpawnHelperExecutable,
validateWorkingDirectory,
spawnShellWithFallback
} from './local-pty-utils'
import {
getShellReadyLaunchConfig,
createShellReadyScanState,
scanForShellReady,
writeStartupCommandWhenShellReady,
STARTUP_COMMAND_READY_MAX_WAIT_MS
} from './local-pty-shell-ready'
// Monotonic counter backing the provider-assigned PTY ids.
let ptyCounter = 0
// Live node-pty processes keyed by PTY id.
const ptyProcesses = new Map<string, pty.IPty>()
// Shell name recorded per PTY id.
const ptyShellName = new Map<string, string>()
// Why: node-pty's onData/onExit register native NAPI ThreadSafeFunction
// callbacks. If the PTY is killed without disposing these listeners, the
// stale callbacks survive into node::FreeEnvironment() where NAPI attempts
// to invoke/clean them up on a destroyed environment, triggering a SIGABRT.
const ptyDisposables = new Map<string, { dispose: () => void }[]>()
// NOTE(review): the consumers of these generation counters are outside this
// chunk — presumably they let stale async results be ignored after a reload.
// Confirm against the rest of the file.
let loadGeneration = 0
const ptyLoadGeneration = new Map<string, number>()
type DataCallback = (payload: { id: string; data: string }) => void
type ExitCallback = (payload: { id: string; code: number }) => void
// Subscribers fanned out to for every PTY's data/exit events.
const dataListeners = new Set<DataCallback>()
const exitListeners = new Set<ExitCallback>()
function disposePtyListeners(id: string): void {
const disposables = ptyDisposables.get(id)
if (disposables) {
for (const d of disposables) {
d.dispose()
}
ptyDisposables.delete(id)
}
}
// Drop all per-PTY bookkeeping (listeners included) for the given id.
function clearPtyState(id: string): void {
  disposePtyListeners(id)
  ptyProcesses.delete(id)
  ptyShellName.delete(id)
  ptyLoadGeneration.delete(id)
}
// Kill a PTY and drop all of its bookkeeping, tolerating an already-dead
// process. Listeners are disposed before kill (see the NAPI SIGABRT note on
// ptyDisposables above).
function safeKillAndClean(id: string, proc: pty.IPty): void {
  disposePtyListeners(id)
  try {
    proc.kill()
  } catch {
    /* Process may already be dead */
  }
  clearPtyState(id)
}
/**
 * Optional lifecycle hooks for LocalPtyProvider. Every callback receives the
 * provider-assigned PTY id.
 */
export type LocalPtyProviderOptions = {
  // Amend or replace the environment a PTY is about to be spawned with;
  // the returned record is what gets passed to node-pty.
  buildSpawnEnv?: (id: string, baseEnv: Record<string, string>) => Record<string, string>
  // Invoked after a PTY has been spawned.
  onSpawned?: (id: string) => void
  // Invoked when a PTY exits, with its exit code.
  onExit?: (id: string, code: number) => void
  // NOTE(review): timestamp unit is not visible in this chunk — presumably
  // a ms epoch; confirm against the caller before relying on it.
  onData?: (id: string, data: string, timestamp: number) => void
}
export class LocalPtyProvider implements IPtyProvider {
private opts: LocalPtyProviderOptions
/** @param opts Lifecycle hooks; defaults to none installed. */
constructor(opts: LocalPtyProviderOptions = {}) {
  this.opts = opts
}
/**
 * Reconfigure the provider with new hooks (e.g. after window re-creation).
 * Note: the entire options object is replaced, not merged.
 */
configure(opts: LocalPtyProviderOptions): void {
  this.opts = opts
}
async spawn(args: PtySpawnOptions): Promise<PtySpawnResult> {
const id = String(++ptyCounter)
const defaultCwd =
process.platform === 'win32'
? process.env.USERPROFILE || process.env.HOMEPATH || 'C:\\'
: process.env.HOME || '/'
const cwd = args.cwd || defaultCwd
const wslInfo = process.platform === 'win32' ? parseWslPath(cwd) : null
let shellPath: string
let shellArgs: string[]
let effectiveCwd: string
let validationCwd: string
let shellReadyLaunch: ReturnType<typeof getShellReadyLaunchConfig> | null = null
if (wslInfo) {
const escapedCwd = wslInfo.linuxPath.replace(/'/g, "'\\''")
shellPath = 'wsl.exe'
shellArgs = ['-d', wslInfo.distro, '--', 'bash', '-c', `cd '${escapedCwd}' && exec bash -l`]
effectiveCwd = process.env.USERPROFILE || process.env.HOMEPATH || 'C:\\'
validationCwd = cwd
} else if (process.platform === 'win32') {
shellPath = process.env.COMSPEC || 'powershell.exe'
// Why: use path.win32.basename so backslash-separated Windows paths
// are parsed correctly even when tests mock process.platform on Linux CI.
const shellBasename = pathWin32.basename(shellPath).toLowerCase()
// Why: On CJK Windows (Chinese, Japanese, Korean), the console code page
// defaults to the system ANSI code page (e.g. 936/GBK for Chinese).
// ConPTY encodes its output pipe using this code page, but node-pty
// always decodes as UTF-8. Without switching to code page 65001 (UTF-8),
// multi-byte CJK characters are garbled because the GBK/Shift-JIS/EUC-KR
// byte sequences are misinterpreted as UTF-8.
if (shellBasename === 'cmd.exe') {
shellArgs = ['/K', 'chcp 65001 > nul']
} else if (shellBasename === 'powershell.exe' || shellBasename === 'pwsh.exe') {
// Why: `-NoExit -Command` alone skips the user's $PROFILE, breaking
// custom prompts (oh-my-posh, starship), aliases, and PSReadLine
// configuration. Dot-sourcing $PROFILE first restores the normal
// startup experience.
shellArgs = [
'-NoExit',
'-Command',
'try { . $PROFILE } catch {}; [Console]::OutputEncoding = [System.Text.Encoding]::UTF8; [Console]::InputEncoding = [System.Text.Encoding]::UTF8'
]
} else {
shellArgs = []
}
effectiveCwd = cwd
validationCwd = cwd
} else {
shellPath = args.env?.SHELL || process.env.SHELL || '/bin/zsh'
shellReadyLaunch = args.command ? getShellReadyLaunchConfig(shellPath) : null
shellArgs = shellReadyLaunch?.args ?? ['-l']
effectiveCwd = cwd
validationCwd = cwd
}
ensureNodePtySpawnHelperExecutable()
validateWorkingDirectory(validationCwd)
const spawnEnv: Record<string, string> = {
...process.env,
...args.env,
...shellReadyLaunch?.env,
TERM: 'xterm-256color',
COLORTERM: 'truecolor',
TERM_PROGRAM: 'Orca',
FORCE_HYPERLINK: '1'
} as Record<string, string>
spawnEnv.LANG ??= 'en_US.UTF-8'
// Why: On Windows, LANG alone does not control the console code page.
// Programs like Python and Node.js check their own encoding env vars
// independently. PYTHONUTF8=1 makes Python use UTF-8 for stdio regardless
// of the Windows console code page, preventing garbled CJK output from
// Python scripts run inside the terminal.
if (process.platform === 'win32') {
spawnEnv.PYTHONUTF8 ??= '1'
}
const finalEnv = this.opts.buildSpawnEnv ? this.opts.buildSpawnEnv(id, spawnEnv) : spawnEnv
const spawnResult = spawnShellWithFallback({
shellPath,
shellArgs,
cols: args.cols,
rows: args.rows,
cwd: effectiveCwd,
env: finalEnv,
ptySpawn: pty.spawn,
getShellReadyConfig: args.command ? (shell) => getShellReadyLaunchConfig(shell) : undefined
})
shellPath = spawnResult.shellPath
if (process.platform !== 'win32') {
finalEnv.SHELL = shellPath
}
const proc = spawnResult.process
ptyProcesses.set(id, proc)
ptyShellName.set(id, basename(shellPath))
ptyLoadGeneration.set(id, loadGeneration)
this.opts.onSpawned?.(id)
// Shell-ready startup command support
let resolveShellReady: (() => void) | null = null
let shellReadyTimeout: ReturnType<typeof setTimeout> | null = null
const shellReadyScanState = shellReadyLaunch?.supportsReadyMarker
? createShellReadyScanState()
: null
const shellReadyPromise = args.command
? new Promise<void>((resolve) => {
resolveShellReady = resolve
})
: Promise.resolve()
const finishShellReady = (): void => {
if (!resolveShellReady) {
return
}
if (shellReadyTimeout) {
clearTimeout(shellReadyTimeout)
shellReadyTimeout = null
}
const resolve = resolveShellReady
resolveShellReady = null
resolve()
}
if (args.command) {
if (shellReadyLaunch?.supportsReadyMarker) {
shellReadyTimeout = setTimeout(() => {
finishShellReady()
}, STARTUP_COMMAND_READY_MAX_WAIT_MS)
} else {
finishShellReady()
}
}
let startupCommandCleanup: (() => void) | null = null
const disposables: { dispose: () => void }[] = []
const onDataDisposable = proc.onData((rawData) => {
let data = rawData
if (shellReadyScanState && resolveShellReady) {
const scanned = scanForShellReady(shellReadyScanState, rawData)
data = scanned.output
if (scanned.matched) {
finishShellReady()
}
}
if (data.length === 0) {
return
}
this.opts.onData?.(id, data, Date.now())
for (const cb of dataListeners) {
cb({ id, data })
}
})
if (onDataDisposable) {
disposables.push(onDataDisposable)
}
const onExitDisposable = proc.onExit(({ exitCode }) => {
if (shellReadyTimeout) {
clearTimeout(shellReadyTimeout)
shellReadyTimeout = null
}
startupCommandCleanup?.()
clearPtyState(id)
this.opts.onExit?.(id, exitCode)
for (const cb of exitListeners) {
cb({ id, code: exitCode })
}
})
if (onExitDisposable) {
disposables.push(onExitDisposable)
}
ptyDisposables.set(id, disposables)
if (args.command) {
writeStartupCommandWhenShellReady(shellReadyPromise, proc, args.command, (cleanup) => {
startupCommandCleanup = cleanup
})
}
return { id }
}
// Local PTYs are always attached -- no-op. Remote providers use this to resubscribe.
async attach(_id: string): Promise<void> {}
write(id: string, data: string): void {
ptyProcesses.get(id)?.write(data)
}
resize(id: string, cols: number, rows: number): void {
ptyProcesses.get(id)?.resize(cols, rows)
}
async shutdown(id: string, _immediate: boolean): Promise<void> {
const proc = ptyProcesses.get(id)
if (!proc) {
return
}
// Why: disposePtyListeners removes the onExit callback, so the natural
// exit cleanup path from node-pty won't fire. Cleanup and notification
// must happen unconditionally after the try/catch.
disposePtyListeners(id)
try {
proc.kill()
} catch {
/* Process may already be dead */
}
clearPtyState(id)
this.opts.onExit?.(id, -1)
for (const cb of exitListeners) {
cb({ id, code: -1 })
}
}
async sendSignal(id: string, signal: string): Promise<void> {
const proc = ptyProcesses.get(id)
if (!proc) {
return
}
try {
process.kill(proc.pid, signal)
} catch {
/* Process may already be dead */
}
}
async getCwd(id: string): Promise<string> {
if (!ptyProcesses.has(id)) {
throw new Error(`PTY ${id} not found`)
}
// node-pty doesn't expose cwd; would need /proc on Linux or lsof on macOS
return ''
}
async getInitialCwd(_id: string): Promise<string> {
return ''
}
async clearBuffer(_id: string): Promise<void> {
/* handled client-side in xterm.js */
}
acknowledgeDataEvent(_id: string, _charCount: number): void {
/* no flow control for local */
}
async hasChildProcesses(id: string): Promise<boolean> {
const proc = ptyProcesses.get(id)
if (!proc) {
return false
}
try {
const foreground = proc.process
const shell = ptyShellName.get(id)
if (!shell) {
return true
}
return foreground !== shell
} catch {
return false
}
}
async getForegroundProcess(id: string): Promise<string | null> {
const proc = ptyProcesses.get(id)
if (!proc) {
return null
}
try {
return proc.process || null
} catch {
return null
}
}
async serialize(_ids: string[]): Promise<string> {
return '{}'
}
async revive(_state: string): Promise<void> {
/* re-spawning handles local revival */
}
async listProcesses(): Promise<{ id: string; cwd: string; title: string }[]> {
return Array.from(ptyProcesses.entries()).map(([id, proc]) => ({
id,
cwd: '',
title: proc.process || ptyShellName.get(id) || 'shell'
}))
}
async getDefaultShell(): Promise<string> {
if (process.platform === 'win32') {
return process.env.COMSPEC || 'powershell.exe'
}
return process.env.SHELL || '/bin/zsh'
}
async getProfiles(): Promise<{ name: string; path: string }[]> {
if (process.platform === 'win32') {
return [
{ name: 'PowerShell', path: 'powershell.exe' },
{ name: 'Command Prompt', path: 'cmd.exe' }
]
}
const shells = ['/bin/zsh', '/bin/bash', '/bin/sh']
return shells.filter((s) => existsSync(s)).map((s) => ({ name: basename(s), path: s }))
}
onData(callback: DataCallback): () => void {
dataListeners.add(callback)
return () => dataListeners.delete(callback)
}
// Local PTYs don't replay -- this is for remote reconnection
onReplay(_callback: (payload: { id: string; data: string }) => void): () => void {
return () => {}
}
onExit(callback: ExitCallback): () => void {
exitListeners.add(callback)
return () => exitListeners.delete(callback)
}
// ─── Local-only helpers (not part of IPtyProvider interface) ───────
/** Kill orphaned PTYs from previous page loads. */
killOrphanedPtys(currentGeneration: number): { id: string }[] {
const killed: { id: string }[] = []
for (const [id, proc] of ptyProcesses) {
if ((ptyLoadGeneration.get(id) ?? -1) < currentGeneration) {
safeKillAndClean(id, proc)
killed.push({ id })
}
}
return killed
}
/** Advance the load generation counter (called on renderer reload). */
advanceGeneration(): number {
return ++loadGeneration
}
/** Get a writable reference to a PTY (for runtime controller). */
getPtyProcess(id: string): pty.IPty | undefined {
return ptyProcesses.get(id)
}
/** Kill all PTYs. Call on app quit. */
killAll(): void {
for (const [id, proc] of ptyProcesses) {
safeKillAndClean(id, proc)
}
}
}

View file

@ -0,0 +1,237 @@
/**
* Shell-ready startup command support for local PTYs.
*
* Why: when Orca needs to inject a startup command (e.g. issue command runner),
* it must wait until the shell has fully initialized before writing. This module
* provides shell wrapper rcfiles that emit an OSC 133;A marker after startup,
* and a data scanner that detects that marker so the command can be written at
* the right time.
*/
import { basename } from 'path'
import { mkdirSync, writeFileSync, chmodSync } from 'fs'
import { app } from 'electron'
import type * as pty from 'node-pty'
// One-shot guard: the wrapper rcfiles are written at most once per process.
let didEnsureShellReadyWrappers = false
/** Wrap a value in POSIX single quotes, escaping embedded quotes as '\''. */
function quotePosixSingle(value: string): string {
  const escaped = value.split(`'`).join(`'\\''`)
  return `'` + escaped + `'`
}
// Upper bound on waiting for the OSC 133;A prompt marker before giving up.
const STARTUP_COMMAND_READY_MAX_WAIT_MS = 1500
// Marker prefix emitted by the wrapper rcfiles; terminated by BEL (\x07).
const OSC_133_A = '\x1b]133;A'
// ── OSC 133;A scanner ───────────────────────────────────────────────
export type ShellReadyScanState = {
  matchPos: number
  heldBytes: string
}
/** Fresh scanner state: nothing matched yet, nothing buffered. */
export function createShellReadyScanState(): ShellReadyScanState {
  const state: ShellReadyScanState = { matchPos: 0, heldBytes: '' }
  return state
}
/**
 * Incrementally scan PTY output for the OSC 133;A … BEL "shell ready" marker.
 * The marker (which may span chunk boundaries) is stripped from the returned
 * output; partial-match bytes are held in `state` until resolved.
 */
export function scanForShellReady(
  state: ShellReadyScanState,
  data: string
): { output: string; matched: boolean } {
  const pieces: string[] = []
  for (let idx = 0; idx < data.length; idx += 1) {
    const char = data[idx] as string
    if (state.matchPos >= OSC_133_A.length) {
      // Full marker prefix matched: swallow bytes until the BEL terminator.
      if (char === '\x07') {
        state.heldBytes = ''
        state.matchPos = 0
        return { output: pieces.join('') + data.slice(idx + 1), matched: true }
      }
      state.heldBytes += char
      continue
    }
    if (char === OSC_133_A[state.matchPos]) {
      state.heldBytes += char
      state.matchPos += 1
      continue
    }
    // Mismatch: flush what was held, then retry this char as a fresh start.
    pieces.push(state.heldBytes)
    state.heldBytes = ''
    state.matchPos = 0
    if (char === OSC_133_A[0]) {
      state.heldBytes = char
      state.matchPos = 1
    } else {
      pieces.push(char)
    }
  }
  return { output: pieces.join(''), matched: false }
}
// ── Shell wrapper files ─────────────────────────────────────────────
/** Directory under Electron userData where the wrapper rcfiles are written. */
function getShellReadyWrapperRoot(): string {
  return `${app.getPath('userData')}/shell-ready`
}
/**
 * Content of the bash rcfile used with `bash --rcfile`: replays the normal
 * login-shell startup files, then hooks PROMPT_COMMAND to print the OSC 133;A
 * ready marker after every prompt. Exported so tests can assert the content.
 */
export function getBashShellReadyRcfileContent(): string {
  return `# Orca bash shell-ready wrapper
[[ -f /etc/profile ]] && source /etc/profile
if [[ -f "$HOME/.bash_profile" ]]; then
source "$HOME/.bash_profile"
elif [[ -f "$HOME/.bash_login" ]]; then
source "$HOME/.bash_login"
elif [[ -f "$HOME/.profile" ]]; then
source "$HOME/.profile"
fi
# Why: preserve bash's normal login-shell contract. Many users already source
# ~/.bashrc from ~/.bash_profile; forcing ~/.bashrc again here would duplicate
# PATH edits, hooks, and prompt init in Orca startup-command shells.
# Why: append the marker through PROMPT_COMMAND so it fires after the login
# startup files have rebuilt the prompt, matching Superset's "shell ready"
# contract without re-running user rc files.
__orca_prompt_mark() {
printf "\\033]133;A\\007"
}
if [[ "$(declare -p PROMPT_COMMAND 2>/dev/null)" == "declare -a"* ]]; then
PROMPT_COMMAND=("\${PROMPT_COMMAND[@]}" "__orca_prompt_mark")
else
_orca_prev_prompt_command="\${PROMPT_COMMAND}"
if [[ -n "\${_orca_prev_prompt_command}" ]]; then
PROMPT_COMMAND="\${_orca_prev_prompt_command};__orca_prompt_mark"
else
PROMPT_COMMAND="__orca_prompt_mark"
fi
fi
`
}
/**
 * Write the zsh/bash shell-ready wrapper rcfiles under userData, once per
 * process. No-op on Windows (the wrappers are POSIX shell scripts).
 */
function ensureShellReadyWrappers(): void {
  if (didEnsureShellReadyWrappers || process.platform === 'win32') {
    return
  }
  didEnsureShellReadyWrappers = true
  const root = getShellReadyWrapperRoot()
  const zshDir = `${root}/zsh`
  const bashDir = `${root}/bash`
  // .zshenv: remember the user's original ZDOTDIR, chain their .zshenv, and
  // keep ZDOTDIR pointed at the wrapper dir so our other files load next.
  const zshEnv = `# Orca zsh shell-ready wrapper
export ORCA_ORIG_ZDOTDIR="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
[[ -f "$ORCA_ORIG_ZDOTDIR/.zshenv" ]] && source "$ORCA_ORIG_ZDOTDIR/.zshenv"
export ZDOTDIR=${quotePosixSingle(zshDir)}
`
  // .zprofile/.zshrc/.zlogin: delegate to the user's real startup files.
  const zshProfile = `# Orca zsh shell-ready wrapper
_orca_home="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
[[ -f "$_orca_home/.zprofile" ]] && source "$_orca_home/.zprofile"
`
  const zshRc = `# Orca zsh shell-ready wrapper
_orca_home="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
if [[ -o interactive && -f "$_orca_home/.zshrc" ]]; then
source "$_orca_home/.zshrc"
fi
`
  const zshLogin = `# Orca zsh shell-ready wrapper
_orca_home="\${ORCA_ORIG_ZDOTDIR:-$HOME}"
if [[ -o interactive && -f "$_orca_home/.zlogin" ]]; then
source "$_orca_home/.zlogin"
fi
# Why: emit OSC 133;A only after the user's startup hooks finish so Orca knows
# the prompt is actually ready for a long startup command paste.
__orca_prompt_mark() {
printf "\\033]133;A\\007"
}
precmd_functions=(\${precmd_functions[@]} __orca_prompt_mark)
`
  const bashRc = getBashShellReadyRcfileContent()
  const files = [
    [`${zshDir}/.zshenv`, zshEnv],
    [`${zshDir}/.zprofile`, zshProfile],
    [`${zshDir}/.zshrc`, zshRc],
    [`${zshDir}/.zlogin`, zshLogin],
    [`${bashDir}/rcfile`, bashRc]
  ] as const
  for (const [path, content] of files) {
    const dir = path.slice(0, path.lastIndexOf('/'))
    mkdirSync(dir, { recursive: true })
    writeFileSync(path, content, 'utf8')
    // Rcfiles are sourced, not executed: read permission is sufficient.
    chmodSync(path, 0o644)
  }
}
// ── Shell launch config ─────────────────────────────────────────────
/** How to launch a shell so Orca can detect when it is ready for input. */
export type ShellReadyLaunchConfig = {
  // argv for the shell; null means "caller uses its own default args".
  args: string[] | null
  // Extra environment variables needed by the wrapper rcfiles.
  env: Record<string, string>
  // True when the shell will emit the OSC 133;A ready marker.
  supportsReadyMarker: boolean
}
/**
 * Resolve the launch config for a shell binary. zsh and bash get wrapper
 * rcfiles that emit the OSC 133;A ready marker; every other shell falls back
 * to "no marker support" with the caller's default args.
 */
export function getShellReadyLaunchConfig(shellPath: string): ShellReadyLaunchConfig {
  switch (basename(shellPath).toLowerCase()) {
    case 'zsh': {
      ensureShellReadyWrappers()
      return {
        args: ['-l'],
        env: {
          // Remember the user's real ZDOTDIR so the wrapper can chain to it.
          ORCA_ORIG_ZDOTDIR: process.env.ZDOTDIR || process.env.HOME || '',
          ZDOTDIR: `${getShellReadyWrapperRoot()}/zsh`
        },
        supportsReadyMarker: true
      }
    }
    case 'bash': {
      ensureShellReadyWrappers()
      return {
        args: ['--rcfile', `${getShellReadyWrapperRoot()}/bash/rcfile`],
        env: {},
        supportsReadyMarker: true
      }
    }
    default:
      return {
        args: null,
        env: {},
        supportsReadyMarker: false
      }
  }
}
// ── Startup command writer ──────────────────────────────────────────
/**
 * Queue `startupCommand` to be written into `proc` once `readyPromise`
 * resolves. `onExit` receives a cleanup callback that cancels the pending
 * write if the PTY exits before the shell ever became ready.
 */
export function writeStartupCommandWhenShellReady(
  readyPromise: Promise<void>,
  proc: pty.IPty,
  startupCommand: string,
  onExit: (cleanup: () => void) => void
): void {
  let alreadyHandled = false
  const writeOnce = (): void => {
    if (alreadyHandled) {
      return
    }
    alreadyHandled = true
    // Why: run startup commands inside the same interactive shell Orca keeps
    // open for the pane. Spawning `shell -c <command>; exec shell -l` would
    // avoid the race, but it would also replace the session after the agent
    // exits and break "stay in this terminal" workflows.
    // Why: startup commands are usually long, quoted agent launches. Writing
    // them in one PTY call after the shell-ready barrier avoids the incremental
    // paste behavior that still dropped characters in practice.
    const newlineTerminated = startupCommand.endsWith('\n')
    proc.write(newlineTerminated ? startupCommand : startupCommand + '\n')
  }
  readyPromise.then(writeOnce)
  onExit(() => {
    alreadyHandled = true
  })
}
export { STARTUP_COMMAND_READY_MAX_WAIT_MS }

View file

@ -0,0 +1,168 @@
import { basename } from 'path'
import { existsSync, accessSync, statSync, chmodSync, constants as fsConstants } from 'fs'
import type * as pty from 'node-pty'
// One-shot guard: the spawn-helper permission repair runs at most once per process.
let didEnsureSpawnHelperExecutable = false
/**
 * Validate that a shell binary exists and is executable.
 * Returns an error message string if invalid, null if valid.
 */
export function getShellValidationError(shellPath: string): string | null {
  const exists = existsSync(shellPath)
  if (!exists) {
    return (
      `Shell "${shellPath}" does not exist. ` +
      `Set a valid SHELL environment variable or install zsh/bash.`
    )
  }
  try {
    accessSync(shellPath, fsConstants.X_OK)
    return null
  } catch {
    return `Shell "${shellPath}" is not executable. Check file permissions.`
  }
}
/**
 * Ensure the node-pty spawn-helper binary has the executable bit set.
 *
 * Why: when Electron packages the app via asar, the native spawn-helper
 * binary may lose its +x permission. Detect and repair that so pty.spawn()
 * does not fail with EACCES on first launch. Runs at most once per process;
 * Windows has no spawn-helper, so it is a no-op there.
 */
export function ensureNodePtySpawnHelperExecutable(): void {
  if (didEnsureSpawnHelperExecutable || process.platform === 'win32') {
    return
  }
  didEnsureSpawnHelperExecutable = true
  try {
    const unixTerminalPath = require.resolve('node-pty/lib/unixTerminal.js')
    // Derive the node-pty package root from the resolved module path.
    let packageRoot = unixTerminalPath
    if (basename(unixTerminalPath) === 'unixTerminal.js') {
      packageRoot = unixTerminalPath.replace(/[/\\]lib[/\\]unixTerminal\.js$/, '')
    }
    const rawCandidates = [
      `${packageRoot}/build/Release/spawn-helper`,
      `${packageRoot}/build/Debug/spawn-helper`,
      `${packageRoot}/prebuilds/${process.platform}-${process.arch}/spawn-helper`
    ]
    for (const raw of rawCandidates) {
      // asar archives are read-only; chmod must target the unpacked copies.
      const candidate = raw
        .replace('app.asar/', 'app.asar.unpacked/')
        .replace('node_modules.asar/', 'node_modules.asar.unpacked/')
      if (!existsSync(candidate)) {
        continue
      }
      const mode = statSync(candidate).mode
      if ((mode & 0o111) === 0) {
        chmodSync(candidate, mode | 0o755)
      }
      return
    }
  } catch (error) {
    console.warn(
      `[pty] Failed to ensure node-pty spawn-helper is executable: ${error instanceof Error ? error.message : String(error)}`
    )
  }
}
/**
 * Validate that a working directory exists and is a directory.
 * Throws a descriptive Error if not.
 */
export function validateWorkingDirectory(cwd: string): void {
  if (!existsSync(cwd)) {
    throw new Error(
      `Working directory "${cwd}" does not exist. ` +
        `It may have been deleted or is on an unmounted volume.`
    )
  }
  const stats = statSync(cwd)
  if (!stats.isDirectory()) {
    throw new Error(`Working directory "${cwd}" is not a directory.`)
  }
}
/** Inputs to spawnShellWithFallback. */
export type ShellSpawnParams = {
  shellPath: string
  shellArgs: string[]
  cols: number
  rows: number
  cwd: string
  env: Record<string, string>
  // Why: injected so tests can stub node-pty's spawn.
  ptySpawn: typeof pty.spawn
  // Optional hook: resolves shell-ready launch args/env for a fallback shell.
  getShellReadyConfig?: (
    shell: string
  ) => { args: string[] | null; env: Record<string, string> } | null
}
/** The live PTY plus the shell that actually spawned (may be a fallback). */
export type ShellSpawnResult = {
  process: pty.IPty
  shellPath: string
}
/**
 * Attempt to spawn a PTY shell. If the primary shell fails on Unix,
 * try common fallback shells before giving up.
 *
 * NOTE(review): on the fallback path this mutates `params.env` in place
 * (SHELL plus the shell-ready env) before each spawn attempt — callers appear
 * to rely on that; keep the mutation order when editing.
 *
 * @throws Error with diagnostics when no shell could be spawned.
 */
export function spawnShellWithFallback(params: ShellSpawnParams): ShellSpawnResult {
  const { shellPath, shellArgs, cols, rows, cwd, env, ptySpawn, getShellReadyConfig } = params
  let primaryError: string | null = null
  // Why: on Windows the shell (e.g. COMSPEC) resolves via PATH, so the
  // exists/executable pre-check is skipped there.
  if (process.platform !== 'win32') {
    primaryError = getShellValidationError(shellPath)
  }
  if (!primaryError) {
    try {
      return {
        process: ptySpawn(shellPath, shellArgs, { name: 'xterm-256color', cols, rows, cwd, env }),
        shellPath
      }
    } catch (err) {
      primaryError = err instanceof Error ? err.message : String(err)
    }
  }
  // Try fallback shells on Unix
  if (process.platform !== 'win32') {
    const fallbackShells = ['/bin/zsh', '/bin/bash', '/bin/sh'].filter((s) => s !== shellPath)
    for (const fallback of fallbackShells) {
      if (getShellValidationError(fallback)) {
        continue
      }
      try {
        const fallbackReady = getShellReadyConfig?.(fallback)
        env.SHELL = fallback
        Object.assign(env, fallbackReady?.env ?? {})
        const proc = ptySpawn(fallback, fallbackReady?.args ?? ['-l'], {
          name: 'xterm-256color',
          cols,
          rows,
          cwd,
          env
        })
        console.warn(
          `[pty] Primary shell "${shellPath}" failed (${primaryError ?? 'unknown error'}), fell back to "${fallback}"`
        )
        return { process: proc, shellPath: fallback }
      } catch {
        // Fallback also failed -- try next.
      }
    }
  }
  // Nothing spawned: surface a diagnostic string to make bug reports actionable.
  const diag = [
    `shell: ${shellPath}`,
    `cwd: ${cwd}`,
    `arch: ${process.arch}`,
    `platform: ${process.platform} ${process.getSystemVersion?.() ?? ''}`
  ].join(', ')
  throw new Error(
    `Failed to spawn shell "${shellPath}": ${primaryError ?? 'unknown error'} (${diag}). ` +
      `If this persists, please file an issue.`
  )
}

View file

@ -0,0 +1,175 @@
import { describe, expect, it, vi } from 'vitest'
// Why: vi.mock factories are hoisted above imports, so shared spies must come
// from vi.hoisted to be visible inside the factories below.
const { handleMock, removeHandlerMock, removeAllListenersMock } = vi.hoisted(() => ({
  handleMock: vi.fn(),
  removeHandlerMock: vi.fn(),
  removeAllListenersMock: vi.fn()
}))
// Stub Electron's ipcMain so registerPtyHandlers can run outside Electron.
vi.mock('electron', () => ({
  ipcMain: {
    handle: handleMock,
    on: vi.fn(),
    removeHandler: removeHandlerMock,
    removeAllListeners: removeAllListenersMock
  }
}))
// Pretend every path exists and is a valid executable/directory so local
// shell/cwd validation always passes on CI.
vi.mock('fs', () => ({
  existsSync: () => true,
  statSync: () => ({ isDirectory: () => true, mode: 0o755 }),
  accessSync: () => undefined,
  chmodSync: vi.fn(),
  constants: { X_OK: 1 }
}))
// Fake node-pty: spawn returns an inert PTY handle with spy methods.
vi.mock('node-pty', () => ({
  spawn: vi.fn().mockReturnValue({
    onData: vi.fn(),
    onExit: vi.fn(),
    write: vi.fn(),
    resize: vi.fn(),
    kill: vi.fn(),
    process: 'zsh',
    pid: 12345
  })
}))
// Neutralize the env-building hooks these services contribute to spawns.
vi.mock('../opencode/hook-service', () => ({
  openCodeHookService: { buildPtyEnv: () => ({}), clearPty: vi.fn() }
}))
vi.mock('../pi/titlebar-extension-service', () => ({
  piTitlebarExtensionService: { buildPtyEnv: () => ({}), clearPty: vi.fn() }
}))
import { registerPtyHandlers, registerSshPtyProvider, unregisterSshPtyProvider } from '../ipc/pty'
import type { IPtyProvider } from './types'
// Verifies that pty:spawn IPC routes to the local provider by default and to
// a registered SSH provider when a connectionId is supplied.
describe('PTY provider dispatch', () => {
  // channel name -> registered ipcMain.handle handler, captured by setup().
  const handlers = new Map<string, (...args: unknown[]) => unknown>()
  const mainWindow = {
    isDestroyed: () => false,
    webContents: { on: vi.fn(), send: vi.fn(), removeListener: vi.fn() }
  }

  // Re-register IPC handlers with fresh spies before each scenario.
  function setup(): void {
    handlers.clear()
    handleMock.mockReset()
    handleMock.mockImplementation((channel: string, handler: (...a: unknown[]) => unknown) => {
      handlers.set(channel, handler)
    })
    registerPtyHandlers(mainWindow as never)
  }

  it('routes to local provider when connectionId is null', async () => {
    setup()
    const result = (await handlers.get('pty:spawn')!(null, {
      cols: 80,
      rows: 24,
      connectionId: null
    })) as { id: string }
    expect(result.id).toBeTruthy()
  })

  it('routes to local provider when connectionId is undefined', async () => {
    setup()
    const result = (await handlers.get('pty:spawn')!(null, {
      cols: 80,
      rows: 24
    })) as { id: string }
    expect(result.id).toBeTruthy()
  })

  it('routes to SSH provider when connectionId is set', async () => {
    setup()
    // Full IPtyProvider double; only spawn's behavior matters here.
    const mockSshProvider: IPtyProvider = {
      spawn: vi.fn().mockResolvedValue({ id: 'ssh-pty-1' }),
      attach: vi.fn(),
      write: vi.fn(),
      resize: vi.fn(),
      shutdown: vi.fn(),
      sendSignal: vi.fn(),
      getCwd: vi.fn(),
      getInitialCwd: vi.fn(),
      clearBuffer: vi.fn(),
      acknowledgeDataEvent: vi.fn(),
      hasChildProcesses: vi.fn(),
      getForegroundProcess: vi.fn(),
      serialize: vi.fn(),
      revive: vi.fn(),
      listProcesses: vi.fn(),
      getDefaultShell: vi.fn(),
      getProfiles: vi.fn(),
      onData: vi.fn().mockReturnValue(() => {}),
      onReplay: vi.fn().mockReturnValue(() => {}),
      onExit: vi.fn().mockReturnValue(() => {})
    }
    registerSshPtyProvider('conn-123', mockSshProvider)
    const result = (await handlers.get('pty:spawn')!(null, {
      cols: 80,
      rows: 24,
      connectionId: 'conn-123'
    })) as { id: string }
    expect(result.id).toBe('ssh-pty-1')
    // The connectionId is routing metadata and must not be forwarded to spawn.
    expect(mockSshProvider.spawn).toHaveBeenCalledWith({
      cols: 80,
      rows: 24,
      cwd: undefined,
      env: undefined
    })
    unregisterSshPtyProvider('conn-123')
  })

  it('throws for unknown connectionId', async () => {
    setup()
    await expect(
      handlers.get('pty:spawn')!(null, {
        cols: 80,
        rows: 24,
        connectionId: 'unknown-conn'
      })
    ).rejects.toThrow('No PTY provider for connection "unknown-conn"')
  })

  it('unregisterSshPtyProvider removes the provider', async () => {
    setup()
    const mockProvider: IPtyProvider = {
      spawn: vi.fn().mockResolvedValue({ id: 'ssh-pty-2' }),
      attach: vi.fn(),
      write: vi.fn(),
      resize: vi.fn(),
      shutdown: vi.fn(),
      sendSignal: vi.fn(),
      getCwd: vi.fn(),
      getInitialCwd: vi.fn(),
      clearBuffer: vi.fn(),
      acknowledgeDataEvent: vi.fn(),
      hasChildProcesses: vi.fn(),
      getForegroundProcess: vi.fn(),
      serialize: vi.fn(),
      revive: vi.fn(),
      listProcesses: vi.fn(),
      getDefaultShell: vi.fn(),
      getProfiles: vi.fn(),
      onData: vi.fn().mockReturnValue(() => {}),
      onReplay: vi.fn().mockReturnValue(() => {}),
      onExit: vi.fn().mockReturnValue(() => {})
    }
    registerSshPtyProvider('conn-456', mockProvider)
    unregisterSshPtyProvider('conn-456')
    await expect(
      handlers.get('pty:spawn')!(null, {
        cols: 80,
        rows: 24,
        connectionId: 'conn-456'
      })
    ).rejects.toThrow('No PTY provider for connection "conn-456"')
  })
})

View file

@ -0,0 +1,18 @@
import type { IFilesystemProvider } from './types'
// Per-connection registry of SSH filesystem providers, keyed by connection id.
const providersByConnection = new Map<string, IFilesystemProvider>()

/** Associate a filesystem provider with an SSH connection id. */
export function registerSshFilesystemProvider(
  connectionId: string,
  provider: IFilesystemProvider
): void {
  providersByConnection.set(connectionId, provider)
}

/** Drop the provider registered for the given connection id, if any. */
export function unregisterSshFilesystemProvider(connectionId: string): void {
  providersByConnection.delete(connectionId)
}

/** Look up the provider for a connection id; undefined when none is registered. */
export function getSshFilesystemProvider(connectionId: string): IFilesystemProvider | undefined {
  return providersByConnection.get(connectionId)
}

View file

@ -0,0 +1,179 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { SshFilesystemProvider } from './ssh-filesystem-provider'
// Shape of the SshChannelMultiplexer double: every method is a vitest spy.
type MockMultiplexer = {
  request: ReturnType<typeof vi.fn>
  notify: ReturnType<typeof vi.fn>
  onNotification: ReturnType<typeof vi.fn>
  dispose: ReturnType<typeof vi.fn>
  isDisposed: ReturnType<typeof vi.fn>
}
/** Build a fresh multiplexer double for each test. */
function createMockMux(): MockMultiplexer {
  return {
    request: vi.fn().mockResolvedValue(undefined),
    notify: vi.fn(),
    onNotification: vi.fn(),
    dispose: vi.fn(),
    isDisposed: vi.fn().mockReturnValue(false)
  }
}
// Verifies that SshFilesystemProvider translates each filesystem operation
// into the expected fs.* request over the SSH channel multiplexer.
describe('SshFilesystemProvider', () => {
  let mux: MockMultiplexer
  let provider: SshFilesystemProvider

  beforeEach(() => {
    mux = createMockMux()
    provider = new SshFilesystemProvider('conn-1', mux as never)
  })

  it('returns the connectionId', () => {
    expect(provider.getConnectionId()).toBe('conn-1')
  })

  describe('readDir', () => {
    it('sends fs.readDir request', async () => {
      const entries = [
        { name: 'src', isDirectory: true, isSymlink: false },
        { name: 'README.md', isDirectory: false, isSymlink: false }
      ]
      mux.request.mockResolvedValue(entries)
      const result = await provider.readDir('/home/user/project')
      expect(mux.request).toHaveBeenCalledWith('fs.readDir', { dirPath: '/home/user/project' })
      expect(result).toEqual(entries)
    })
  })

  describe('readFile', () => {
    it('sends fs.readFile request', async () => {
      const fileResult = { content: 'hello world', isBinary: false }
      mux.request.mockResolvedValue(fileResult)
      const result = await provider.readFile('/home/user/file.txt')
      expect(mux.request).toHaveBeenCalledWith('fs.readFile', { filePath: '/home/user/file.txt' })
      expect(result).toEqual(fileResult)
    })
  })

  describe('writeFile', () => {
    it('sends fs.writeFile request', async () => {
      await provider.writeFile('/home/user/file.txt', 'new content')
      expect(mux.request).toHaveBeenCalledWith('fs.writeFile', {
        filePath: '/home/user/file.txt',
        content: 'new content'
      })
    })
  })

  describe('stat', () => {
    it('sends fs.stat request', async () => {
      const statResult = { size: 1024, type: 'file', mtime: 1234567890 }
      mux.request.mockResolvedValue(statResult)
      const result = await provider.stat('/home/user/file.txt')
      expect(mux.request).toHaveBeenCalledWith('fs.stat', { filePath: '/home/user/file.txt' })
      expect(result).toEqual(statResult)
    })
  })

  it('deletePath sends fs.deletePath request', async () => {
    await provider.deletePath('/home/user/file.txt')
    expect(mux.request).toHaveBeenCalledWith('fs.deletePath', { targetPath: '/home/user/file.txt' })
  })

  it('createFile sends fs.createFile request', async () => {
    await provider.createFile('/home/user/new.txt')
    expect(mux.request).toHaveBeenCalledWith('fs.createFile', { filePath: '/home/user/new.txt' })
  })

  it('createDir sends fs.createDir request', async () => {
    await provider.createDir('/home/user/newdir')
    expect(mux.request).toHaveBeenCalledWith('fs.createDir', { dirPath: '/home/user/newdir' })
  })

  it('rename sends fs.rename request', async () => {
    await provider.rename('/home/old.txt', '/home/new.txt')
    expect(mux.request).toHaveBeenCalledWith('fs.rename', {
      oldPath: '/home/old.txt',
      newPath: '/home/new.txt'
    })
  })

  it('copy sends fs.copy request', async () => {
    await provider.copy('/home/src.txt', '/home/dst.txt')
    expect(mux.request).toHaveBeenCalledWith('fs.copy', {
      source: '/home/src.txt',
      destination: '/home/dst.txt'
    })
  })

  it('realpath sends fs.realpath request', async () => {
    mux.request.mockResolvedValue('/home/user/real/path')
    const result = await provider.realpath('/home/user/link')
    expect(result).toBe('/home/user/real/path')
  })

  it('search sends fs.search request with all options', async () => {
    const searchResult = { files: [], totalMatches: 0, truncated: false }
    mux.request.mockResolvedValue(searchResult)
    const opts = {
      query: 'TODO',
      rootPath: '/home/user/project',
      caseSensitive: true
    }
    const result = await provider.search(opts)
    expect(mux.request).toHaveBeenCalledWith('fs.search', opts)
    expect(result).toEqual(searchResult)
  })

  it('listFiles sends fs.listFiles request', async () => {
    mux.request.mockResolvedValue(['src/index.ts', 'package.json'])
    const result = await provider.listFiles('/home/user/project')
    expect(mux.request).toHaveBeenCalledWith('fs.listFiles', { rootPath: '/home/user/project' })
    expect(result).toEqual(['src/index.ts', 'package.json'])
  })

  // Watch lifecycle: request/unwatch pairing and notification fan-out.
  describe('watch', () => {
    it('sends fs.watch request and returns unsubscribe', async () => {
      const callback = vi.fn()
      const unsub = await provider.watch('/home/user/project', callback)
      expect(mux.request).toHaveBeenCalledWith('fs.watch', { rootPath: '/home/user/project' })
      expect(typeof unsub).toBe('function')
    })

    it('forwards fs.changed notifications to watch callback', async () => {
      const callback = vi.fn()
      await provider.watch('/home/user/project', callback)
      // Grab the handler the provider registered on the multiplexer.
      const notifHandler = mux.onNotification.mock.calls[0][0]
      const events = [{ kind: 'update', absolutePath: '/home/user/project/file.ts' }]
      notifHandler('fs.changed', { events })
      expect(callback).toHaveBeenCalledWith(events)
    })

    it('sends fs.unwatch when last listener unsubscribes', async () => {
      const callback = vi.fn()
      const unsub = await provider.watch('/home/user/project', callback)
      unsub()
      expect(mux.notify).toHaveBeenCalledWith('fs.unwatch', { rootPath: '/home/user/project' })
    })

    it('does not send fs.unwatch while other roots are watched', async () => {
      const cb1 = vi.fn()
      const cb2 = vi.fn()
      const unsub1 = await provider.watch('/home/user/project-a', cb1)
      await provider.watch('/home/user/project-b', cb2)
      unsub1()
      expect(mux.notify).not.toHaveBeenCalledWith('fs.unwatch', {
        rootPath: '/home/user/project-b'
      })
    })
  })
})

View file

@ -0,0 +1,106 @@
import type { SshChannelMultiplexer } from '../ssh/ssh-channel-multiplexer'
import type { IFilesystemProvider, FileStat, FileReadResult } from './types'
import type { DirEntry, FsChangeEvent, SearchOptions, SearchResult } from '../../shared/types'
/**
 * Filesystem provider that proxies every operation over an SSH channel
 * multiplexer to the remote relay using "fs.*" request/notification methods.
 */
export class SshFilesystemProvider implements IFilesystemProvider {
  private connectionId: string
  private mux: SshChannelMultiplexer
  // Why: each watch() call registers for a specific rootPath, but the relay
  // sends all fs.changed events on one notification channel. Keying by rootPath
  // prevents cross-pollination between different worktree watchers.
  // NOTE(review): this holds one callback per rootPath — a second watch() on
  // the same root replaces the first; confirm callers never double-watch a root.
  private watchListeners = new Map<string, (events: FsChangeEvent[]) => void>()
  // Why: store the unsubscribe handle so dispose() can detach from the
  // multiplexer. Without this, notification callbacks keep firing after
  // the provider is torn down on disconnect, routing events to stale state.
  private unsubscribeNotifications: (() => void) | null = null

  constructor(connectionId: string, mux: SshChannelMultiplexer) {
    this.connectionId = connectionId
    this.mux = mux
    // Route relay change notifications to every watcher whose root contains
    // the changed path.
    this.unsubscribeNotifications = mux.onNotification((method, params) => {
      if (method === 'fs.changed') {
        const events = params.events as FsChangeEvent[]
        for (const [rootPath, cb] of this.watchListeners) {
          const matching = events.filter((e) => e.absolutePath.startsWith(rootPath))
          if (matching.length > 0) {
            cb(matching)
          }
        }
      }
    })
  }

  /** Detach from the multiplexer and drop all watch callbacks. */
  dispose(): void {
    if (this.unsubscribeNotifications) {
      this.unsubscribeNotifications()
      this.unsubscribeNotifications = null
    }
    this.watchListeners.clear()
  }

  getConnectionId(): string {
    return this.connectionId
  }

  /** List a remote directory's entries. */
  async readDir(dirPath: string): Promise<DirEntry[]> {
    return (await this.mux.request('fs.readDir', { dirPath })) as DirEntry[]
  }

  /** Read a remote file's content (with a binary flag). */
  async readFile(filePath: string): Promise<FileReadResult> {
    return (await this.mux.request('fs.readFile', { filePath })) as FileReadResult
  }

  /** Write `content` to a remote file. */
  async writeFile(filePath: string, content: string): Promise<void> {
    await this.mux.request('fs.writeFile', { filePath, content })
  }

  /** Stat a remote path. */
  async stat(filePath: string): Promise<FileStat> {
    return (await this.mux.request('fs.stat', { filePath })) as FileStat
  }

  /** Delete a remote path, optionally recursively. */
  async deletePath(targetPath: string, recursive?: boolean): Promise<void> {
    await this.mux.request('fs.deletePath', { targetPath, recursive })
  }

  /** Create an empty remote file. */
  async createFile(filePath: string): Promise<void> {
    await this.mux.request('fs.createFile', { filePath })
  }

  /** Create a remote directory. */
  async createDir(dirPath: string): Promise<void> {
    await this.mux.request('fs.createDir', { dirPath })
  }

  /** Rename/move a remote path. */
  async rename(oldPath: string, newPath: string): Promise<void> {
    await this.mux.request('fs.rename', { oldPath, newPath })
  }

  /** Copy a remote file or directory. */
  async copy(source: string, destination: string): Promise<void> {
    await this.mux.request('fs.copy', { source, destination })
  }

  /** Resolve symlinks to the canonical remote path. */
  async realpath(filePath: string): Promise<string> {
    return (await this.mux.request('fs.realpath', { filePath })) as string
  }

  /** Run a content search on the remote side. */
  async search(opts: SearchOptions): Promise<SearchResult> {
    return (await this.mux.request('fs.search', opts)) as SearchResult
  }

  /** List files under a remote root (relative paths). */
  async listFiles(rootPath: string): Promise<string[]> {
    return (await this.mux.request('fs.listFiles', { rootPath })) as string[]
  }

  /**
   * Start watching a remote root; `callback` receives change events scoped to
   * that root. Returns an unsubscribe function.
   */
  async watch(rootPath: string, callback: (events: FsChangeEvent[]) => void): Promise<() => void> {
    this.watchListeners.set(rootPath, callback)
    await this.mux.request('fs.watch', { rootPath })
    return () => {
      this.watchListeners.delete(rootPath)
      // Why: each watch() starts a @parcel/watcher on the relay for this specific
      // rootPath. We must always notify the relay to stop it, not only when all
      // watchers are gone — otherwise the remote watcher leaks inotify descriptors.
      this.mux.notify('fs.unwatch', { rootPath })
    }
  }
}

View file

@ -0,0 +1,15 @@
import type { IGitProvider } from './types'
// Why: remote git providers are registered per SSH connection so the dispatch
// layer can resolve the right backend by connectionId.
const providersByConnection = new Map<string, IGitProvider>()

/** Associates a remote git provider with an SSH connection id. */
export function registerSshGitProvider(connectionId: string, provider: IGitProvider): void {
  providersByConnection.set(connectionId, provider)
}

/** Drops the provider registered for connectionId, if any. */
export function unregisterSshGitProvider(connectionId: string): void {
  providersByConnection.delete(connectionId)
}

/** Looks up the provider for connectionId; undefined when not registered. */
export function getSshGitProvider(connectionId: string): IGitProvider | undefined {
  return providersByConnection.get(connectionId)
}

View file

@ -0,0 +1,168 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { SshGitProvider } from './ssh-git-provider'
// Structural stand-in for SshChannelMultiplexer with every method mocked.
type MockMultiplexer = {
  request: ReturnType<typeof vi.fn>
  notify: ReturnType<typeof vi.fn>
  onNotification: ReturnType<typeof vi.fn>
  dispose: ReturnType<typeof vi.fn>
  isDisposed: ReturnType<typeof vi.fn>
}

// Builds a fresh mock per test: request resolves to undefined and the
// connection reports itself as live unless a test overrides the mocks.
function createMockMux(): MockMultiplexer {
  const mock: MockMultiplexer = {
    request: vi.fn(),
    notify: vi.fn(),
    onNotification: vi.fn(),
    dispose: vi.fn(),
    isDisposed: vi.fn()
  }
  mock.request.mockResolvedValue(undefined)
  mock.isDisposed.mockReturnValue(false)
  return mock
}
// Unit tests for SshGitProvider. Each case verifies that a provider method
// forwards the expected JSON-RPC method name and params through the mock
// multiplexer, and returns the relay's reply unchanged.
describe('SshGitProvider', () => {
  let mux: MockMultiplexer
  let provider: SshGitProvider
  // Fresh mock + provider per test so recorded calls do not leak across cases.
  beforeEach(() => {
    mux = createMockMux()
    provider = new SshGitProvider('conn-1', mux as never)
  })
  it('returns the connectionId', () => {
    expect(provider.getConnectionId()).toBe('conn-1')
  })
  it('getStatus sends git.status request', async () => {
    const statusResult = { entries: [], conflictOperation: 'unknown' }
    mux.request.mockResolvedValue(statusResult)
    const result = await provider.getStatus('/home/user/repo')
    expect(mux.request).toHaveBeenCalledWith('git.status', { worktreePath: '/home/user/repo' })
    expect(result).toEqual(statusResult)
  })
  it('getDiff sends git.diff request', async () => {
    const diffResult = { kind: 'text', originalContent: '', modifiedContent: 'hello' }
    mux.request.mockResolvedValue(diffResult)
    const result = await provider.getDiff('/home/user/repo', 'src/index.ts', true)
    expect(mux.request).toHaveBeenCalledWith('git.diff', {
      worktreePath: '/home/user/repo',
      filePath: 'src/index.ts',
      staged: true
    })
    expect(result).toEqual(diffResult)
  })
  it('stageFile sends git.stage request', async () => {
    await provider.stageFile('/home/user/repo', 'src/file.ts')
    expect(mux.request).toHaveBeenCalledWith('git.stage', {
      worktreePath: '/home/user/repo',
      filePath: 'src/file.ts'
    })
  })
  it('unstageFile sends git.unstage request', async () => {
    await provider.unstageFile('/home/user/repo', 'src/file.ts')
    expect(mux.request).toHaveBeenCalledWith('git.unstage', {
      worktreePath: '/home/user/repo',
      filePath: 'src/file.ts'
    })
  })
  // Bulk operations carry filePaths (plural) rather than a single filePath.
  it('bulkStageFiles sends git.bulkStage request', async () => {
    await provider.bulkStageFiles('/home/user/repo', ['a.ts', 'b.ts'])
    expect(mux.request).toHaveBeenCalledWith('git.bulkStage', {
      worktreePath: '/home/user/repo',
      filePaths: ['a.ts', 'b.ts']
    })
  })
  it('bulkUnstageFiles sends git.bulkUnstage request', async () => {
    await provider.bulkUnstageFiles('/home/user/repo', ['a.ts', 'b.ts'])
    expect(mux.request).toHaveBeenCalledWith('git.bulkUnstage', {
      worktreePath: '/home/user/repo',
      filePaths: ['a.ts', 'b.ts']
    })
  })
  it('discardChanges sends git.discard request', async () => {
    await provider.discardChanges('/home/user/repo', 'src/file.ts')
    expect(mux.request).toHaveBeenCalledWith('git.discard', {
      worktreePath: '/home/user/repo',
      filePath: 'src/file.ts'
    })
  })
  it('detectConflictOperation sends git.conflictOperation request', async () => {
    mux.request.mockResolvedValue('rebase')
    const result = await provider.detectConflictOperation('/home/user/repo')
    expect(mux.request).toHaveBeenCalledWith('git.conflictOperation', {
      worktreePath: '/home/user/repo'
    })
    expect(result).toBe('rebase')
  })
  it('getBranchCompare sends git.branchCompare request', async () => {
    const compareResult = { summary: { ahead: 2, behind: 0 }, entries: [] }
    mux.request.mockResolvedValue(compareResult)
    const result = await provider.getBranchCompare('/home/user/repo', 'main')
    expect(mux.request).toHaveBeenCalledWith('git.branchCompare', {
      worktreePath: '/home/user/repo',
      baseRef: 'main'
    })
    expect(result).toEqual(compareResult)
  })
  it('getBranchDiff sends git.branchDiff request', async () => {
    const diffs = [{ kind: 'text', originalContent: '', modifiedContent: 'new' }]
    mux.request.mockResolvedValue(diffs)
    const result = await provider.getBranchDiff('/home/user/repo', 'main')
    // No options given, so only worktreePath/baseRef appear in the payload.
    expect(mux.request).toHaveBeenCalledWith('git.branchDiff', {
      worktreePath: '/home/user/repo',
      baseRef: 'main'
    })
    expect(result).toEqual(diffs)
  })
  it('listWorktrees sends git.listWorktrees request', async () => {
    const worktrees = [
      {
        path: '/home/user/repo',
        head: 'abc123',
        branch: 'main',
        isBare: false,
        isMainWorktree: true
      }
    ]
    mux.request.mockResolvedValue(worktrees)
    const result = await provider.listWorktrees('/home/user/repo')
    expect(mux.request).toHaveBeenCalledWith('git.listWorktrees', { repoPath: '/home/user/repo' })
    expect(result).toEqual(worktrees)
  })
  it('addWorktree sends git.addWorktree request', async () => {
    await provider.addWorktree('/home/user/repo', 'feature', '/home/user/feat', { base: 'main' })
    // Options are spread into the payload, so `base` appears at the top level.
    expect(mux.request).toHaveBeenCalledWith('git.addWorktree', {
      repoPath: '/home/user/repo',
      branchName: 'feature',
      targetDir: '/home/user/feat',
      base: 'main'
    })
  })
  it('removeWorktree sends git.removeWorktree request', async () => {
    await provider.removeWorktree('/home/user/feat', true)
    expect(mux.request).toHaveBeenCalledWith('git.removeWorktree', {
      worktreePath: '/home/user/feat',
      force: true
    })
  })
  // Why: isGitRepo is synchronous in the interface, so the SSH provider
  // answers true unconditionally (see the comment in ssh-git-provider.ts).
  it('isGitRepo always returns true for remote paths', () => {
    expect(provider.isGitRepo('/any/path')).toBe(true)
  })
})

View file

@ -0,0 +1,175 @@
import type { SshChannelMultiplexer } from '../ssh/ssh-channel-multiplexer'
import type { IGitProvider } from './types'
import hostedGitInfo from 'hosted-git-info'
import type {
GitStatusResult,
GitDiffResult,
GitBranchCompareResult,
GitConflictOperation,
GitWorktreeInfo
} from '../../shared/types'
/**
 * Remote git provider: every operation is forwarded to the relay over the
 * JSON-RPC multiplexer. Mirrors the local provider's IGitProvider surface so
 * the dispatch layer can route transparently by connectionId.
 */
export class SshGitProvider implements IGitProvider {
  constructor(
    private readonly connectionId: string,
    private readonly mux: SshChannelMultiplexer
  ) {}

  /** Forwards one JSON-RPC call to the relay and casts the reply. */
  private rpc<T>(method: string, params: Record<string, unknown>): Promise<T> {
    return this.mux.request(method, params) as Promise<T>
  }

  getConnectionId(): string {
    return this.connectionId
  }

  async getStatus(worktreePath: string): Promise<GitStatusResult> {
    return this.rpc<GitStatusResult>('git.status', { worktreePath })
  }

  async getDiff(worktreePath: string, filePath: string, staged: boolean): Promise<GitDiffResult> {
    return this.rpc<GitDiffResult>('git.diff', { worktreePath, filePath, staged })
  }

  async stageFile(worktreePath: string, filePath: string): Promise<void> {
    await this.rpc('git.stage', { worktreePath, filePath })
  }

  async unstageFile(worktreePath: string, filePath: string): Promise<void> {
    await this.rpc('git.unstage', { worktreePath, filePath })
  }

  async bulkStageFiles(worktreePath: string, filePaths: string[]): Promise<void> {
    await this.rpc('git.bulkStage', { worktreePath, filePaths })
  }

  async bulkUnstageFiles(worktreePath: string, filePaths: string[]): Promise<void> {
    await this.rpc('git.bulkUnstage', { worktreePath, filePaths })
  }

  async discardChanges(worktreePath: string, filePath: string): Promise<void> {
    await this.rpc('git.discard', { worktreePath, filePath })
  }

  async detectConflictOperation(worktreePath: string): Promise<GitConflictOperation> {
    return this.rpc<GitConflictOperation>('git.conflictOperation', { worktreePath })
  }

  async getBranchCompare(worktreePath: string, baseRef: string): Promise<GitBranchCompareResult> {
    return this.rpc<GitBranchCompareResult>('git.branchCompare', { worktreePath, baseRef })
  }

  async getBranchDiff(
    worktreePath: string,
    baseRef: string,
    options?: { includePatch?: boolean; filePath?: string; oldPath?: string }
  ): Promise<GitDiffResult[]> {
    // Why: options are spread so absent fields never appear in the payload.
    return this.rpc<GitDiffResult[]>('git.branchDiff', { worktreePath, baseRef, ...options })
  }

  async listWorktrees(repoPath: string): Promise<GitWorktreeInfo[]> {
    return this.rpc<GitWorktreeInfo[]>('git.listWorktrees', { repoPath })
  }

  async addWorktree(
    repoPath: string,
    branchName: string,
    targetDir: string,
    options?: { base?: string; track?: boolean }
  ): Promise<void> {
    await this.rpc('git.addWorktree', { repoPath, branchName, targetDir, ...options })
  }

  async removeWorktree(worktreePath: string, force?: boolean): Promise<void> {
    await this.rpc('git.removeWorktree', { worktreePath, force })
  }

  /** Runs an arbitrary `git <args>` command in `cwd` on the remote host. */
  async exec(args: string[], cwd: string): Promise<{ stdout: string; stderr: string }> {
    return this.rpc<{ stdout: string; stderr: string }>('git.exec', { args, cwd })
  }

  async isGitRepoAsync(dirPath: string): Promise<{ isRepo: boolean; rootPath: string | null }> {
    return this.rpc<{ isRepo: boolean; rootPath: string | null }>('git.isGitRepo', { dirPath })
  }

  // Why: isGitRepo requires synchronous return in the interface, but remote
  // operations are async. We always return true for remote paths since the
  // relay validates git repos on its side. The renderer already guards git
  // operations behind worktree registration which validates the path.
  isGitRepo(_path: string): boolean {
    return true
  }

  // Why: the local getRemoteFileUrl uses hosted-git-info which requires the
  // remote URL from .git/config. For SSH connections we must fetch the remote
  // URL from the relay, then apply the same hosted-git-info logic locally.
  async getRemoteFileUrl(
    worktreePath: string,
    relativePath: string,
    line: number
  ): Promise<string | null> {
    const remoteUrl = await this.resolveOriginUrl(worktreePath)
    if (!remoteUrl) {
      return null
    }
    const info = hostedGitInfo.fromUrl(remoteUrl)
    if (!info) {
      return null
    }
    const defaultBranch = await this.resolveDefaultBranch(worktreePath)
    // NOTE(review): browseFile is assumed to exist on the hosted-git-info
    // result — confirm against the installed hosted-git-info version (the
    // upstream README documents browse()).
    const browseUrl = info.browseFile(relativePath, { committish: defaultBranch })
    if (!browseUrl) {
      return null
    }
    // Why: hosted-git-info lowercases the fragment, but GitHub convention
    // uses uppercase L for line links (e.g. #L42). Append manually.
    return `${browseUrl}#L${line}`
  }

  /** origin's remote URL, or null when unset or unreadable. */
  private async resolveOriginUrl(worktreePath: string): Promise<string | null> {
    try {
      const result = await this.exec(['remote', 'get-url', 'origin'], worktreePath)
      return result.stdout.trim() || null
    } catch {
      return null
    }
  }

  /** Default branch derived from origin/HEAD, falling back to 'main'. */
  private async resolveDefaultBranch(worktreePath: string): Promise<string> {
    try {
      const refResult = await this.exec(
        ['symbolic-ref', '--quiet', 'refs/remotes/origin/HEAD'],
        worktreePath
      )
      const ref = refResult.stdout.trim()
      if (ref) {
        return ref.replace(/^refs\/remotes\/origin\//, '')
      }
    } catch {
      // Fall back to 'main'
    }
    return 'main'
  }
}

View file

@ -0,0 +1,191 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { SshPtyProvider } from './ssh-pty-provider'
// Structural stand-in for SshChannelMultiplexer with every method mocked.
type MockMultiplexer = {
  request: ReturnType<typeof vi.fn>
  notify: ReturnType<typeof vi.fn>
  onNotification: ReturnType<typeof vi.fn>
  dispose: ReturnType<typeof vi.fn>
  isDisposed: ReturnType<typeof vi.fn>
}

// Builds a fresh mock per test: request resolves to undefined and the
// connection reports itself as live unless a test overrides the mocks.
function createMockMux(): MockMultiplexer {
  const mock: MockMultiplexer = {
    request: vi.fn(),
    notify: vi.fn(),
    onNotification: vi.fn(),
    dispose: vi.fn(),
    isDisposed: vi.fn()
  }
  mock.request.mockResolvedValue(undefined)
  mock.isDisposed.mockReturnValue(false)
  return mock
}
// Unit tests for SshPtyProvider. Requests/notifications are checked by method
// name and payload; the event tests drive the notification handler the
// provider registers on the mock multiplexer in its constructor.
describe('SshPtyProvider', () => {
  let mux: MockMultiplexer
  let provider: SshPtyProvider
  // Fresh mock + provider per test so recorded calls do not leak across cases.
  beforeEach(() => {
    mux = createMockMux()
    provider = new SshPtyProvider('conn-1', mux as never)
  })
  it('returns the connectionId', () => {
    expect(provider.getConnectionId()).toBe('conn-1')
  })
  describe('spawn', () => {
    it('sends pty.spawn request through multiplexer', async () => {
      mux.request.mockResolvedValue({ id: 'pty-1' })
      const result = await provider.spawn({ cols: 80, rows: 24 })
      // cwd/env are forwarded explicitly even when undefined.
      expect(mux.request).toHaveBeenCalledWith('pty.spawn', {
        cols: 80,
        rows: 24,
        cwd: undefined,
        env: undefined
      })
      expect(result).toEqual({ id: 'pty-1' })
    })
    it('passes cwd and env through', async () => {
      mux.request.mockResolvedValue({ id: 'pty-2' })
      await provider.spawn({
        cols: 120,
        rows: 40,
        cwd: '/home/user',
        env: { FOO: 'bar' }
      })
      expect(mux.request).toHaveBeenCalledWith('pty.spawn', {
        cols: 120,
        rows: 40,
        cwd: '/home/user',
        env: { FOO: 'bar' }
      })
    })
  })
  it('attach sends pty.attach request', async () => {
    await provider.attach('pty-1')
    expect(mux.request).toHaveBeenCalledWith('pty.attach', { id: 'pty-1' })
  })
  // Keystrokes and resizes are fire-and-forget notifications, not requests.
  it('write sends pty.data notification', () => {
    provider.write('pty-1', 'hello')
    expect(mux.notify).toHaveBeenCalledWith('pty.data', { id: 'pty-1', data: 'hello' })
  })
  it('resize sends pty.resize notification', () => {
    provider.resize('pty-1', 120, 40)
    expect(mux.notify).toHaveBeenCalledWith('pty.resize', { id: 'pty-1', cols: 120, rows: 40 })
  })
  it('shutdown sends pty.shutdown request', async () => {
    await provider.shutdown('pty-1', true)
    expect(mux.request).toHaveBeenCalledWith('pty.shutdown', { id: 'pty-1', immediate: true })
  })
  it('sendSignal sends pty.sendSignal request', async () => {
    await provider.sendSignal('pty-1', 'SIGINT')
    expect(mux.request).toHaveBeenCalledWith('pty.sendSignal', { id: 'pty-1', signal: 'SIGINT' })
  })
  it('getCwd sends pty.getCwd request', async () => {
    mux.request.mockResolvedValue('/home/user/project')
    const cwd = await provider.getCwd('pty-1')
    expect(cwd).toBe('/home/user/project')
  })
  it('clearBuffer sends pty.clearBuffer request', async () => {
    await provider.clearBuffer('pty-1')
    expect(mux.request).toHaveBeenCalledWith('pty.clearBuffer', { id: 'pty-1' })
  })
  it('acknowledgeDataEvent sends pty.ackData notification', () => {
    provider.acknowledgeDataEvent('pty-1', 1024)
    expect(mux.notify).toHaveBeenCalledWith('pty.ackData', { id: 'pty-1', charCount: 1024 })
  })
  it('hasChildProcesses sends request and returns result', async () => {
    mux.request.mockResolvedValue(true)
    const result = await provider.hasChildProcesses('pty-1')
    expect(result).toBe(true)
  })
  it('getForegroundProcess returns process name', async () => {
    mux.request.mockResolvedValue('node')
    const result = await provider.getForegroundProcess('pty-1')
    expect(result).toBe('node')
  })
  it('listProcesses returns process list', async () => {
    const processes = [{ id: 'pty-1', cwd: '/home', title: 'zsh' }]
    mux.request.mockResolvedValue(processes)
    const result = await provider.listProcesses()
    expect(result).toEqual(processes)
  })
  it('getDefaultShell returns shell path', async () => {
    mux.request.mockResolvedValue('/bin/bash')
    const result = await provider.getDefaultShell()
    expect(result).toBe('/bin/bash')
  })
  describe('event listeners', () => {
    // The constructor registers exactly one notification handler, so
    // mock.calls[0][0] is the handler under test in each case below.
    it('forwards pty.data notifications to data listeners', () => {
      const handler = vi.fn()
      provider.onData(handler)
      // Get the notification handler that was registered
      const notifHandler = mux.onNotification.mock.calls[0][0]
      notifHandler('pty.data', { id: 'pty-1', data: 'output' })
      expect(handler).toHaveBeenCalledWith({ id: 'pty-1', data: 'output' })
    })
    it('forwards pty.replay notifications to replay listeners', () => {
      const handler = vi.fn()
      provider.onReplay(handler)
      const notifHandler = mux.onNotification.mock.calls[0][0]
      notifHandler('pty.replay', { id: 'pty-1', data: 'buffered output' })
      expect(handler).toHaveBeenCalledWith({ id: 'pty-1', data: 'buffered output' })
    })
    it('forwards pty.exit notifications to exit listeners', () => {
      const handler = vi.fn()
      provider.onExit(handler)
      const notifHandler = mux.onNotification.mock.calls[0][0]
      notifHandler('pty.exit', { id: 'pty-1', code: 0 })
      expect(handler).toHaveBeenCalledWith({ id: 'pty-1', code: 0 })
    })
    it('allows unsubscribing from events', () => {
      const handler = vi.fn()
      const unsub = provider.onData(handler)
      unsub()
      const notifHandler = mux.onNotification.mock.calls[0][0]
      notifHandler('pty.data', { id: 'pty-1', data: 'output' })
      expect(handler).not.toHaveBeenCalled()
    })
    it('supports multiple listeners', () => {
      const handler1 = vi.fn()
      const handler2 = vi.fn()
      provider.onData(handler1)
      provider.onData(handler2)
      const notifHandler = mux.onNotification.mock.calls[0][0]
      notifHandler('pty.data', { id: 'pty-1', data: 'output' })
      expect(handler1).toHaveBeenCalled()
      expect(handler2).toHaveBeenCalled()
    })
  })
})

View file

@ -0,0 +1,162 @@
import type { SshChannelMultiplexer } from '../ssh/ssh-channel-multiplexer'
import type { IPtyProvider, PtySpawnOptions, PtySpawnResult } from './types'
type DataCallback = (payload: { id: string; data: string }) => void
type ReplayCallback = (payload: { id: string; data: string }) => void
type ExitCallback = (payload: { id: string; code: number }) => void

/**
 * Remote PTY provider that proxies all operations through the relay
 * via the JSON-RPC multiplexer. Implements the same IPtyProvider interface
 * as LocalPtyProvider so the dispatch layer can route transparently.
 */
export class SshPtyProvider implements IPtyProvider {
  private readonly dataListeners = new Set<DataCallback>()
  private readonly replayListeners = new Set<ReplayCallback>()
  private readonly exitListeners = new Set<ExitCallback>()
  // Why: keep the unsubscribe handle so dispose() can detach from the
  // multiplexer. Without it, notification callbacks keep firing after the
  // provider is torn down on disconnect, routing events into stale state.
  private detachNotifications: (() => void) | null = null

  constructor(
    private readonly connectionId: string,
    private readonly mux: SshChannelMultiplexer
  ) {
    // Fan relay PTY notifications out to the matching listener set; each
    // listener receives its own payload object.
    this.detachNotifications = mux.onNotification((method, params) => {
      if (method === 'pty.data') {
        for (const listener of this.dataListeners) {
          listener({ id: params.id as string, data: params.data as string })
        }
      } else if (method === 'pty.replay') {
        for (const listener of this.replayListeners) {
          listener({ id: params.id as string, data: params.data as string })
        }
      } else if (method === 'pty.exit') {
        for (const listener of this.exitListeners) {
          listener({ id: params.id as string, code: params.code as number })
        }
      }
    })
  }

  /**
   * Forwards one JSON-RPC request to the relay. When no params are given the
   * underlying request is issued with a single argument, matching the call
   * shape of the parameterless methods.
   */
  private rpc<T>(method: string, params?: Record<string, unknown>): Promise<T> {
    if (params === undefined) {
      return this.mux.request(method) as Promise<T>
    }
    return this.mux.request(method, params) as Promise<T>
  }

  /** Registers a listener and returns its unsubscribe function. */
  private subscribe<T>(listeners: Set<T>, listener: T): () => void {
    listeners.add(listener)
    return () => {
      listeners.delete(listener)
    }
  }

  dispose(): void {
    if (this.detachNotifications) {
      this.detachNotifications()
      this.detachNotifications = null
    }
    this.dataListeners.clear()
    this.replayListeners.clear()
    this.exitListeners.clear()
  }

  getConnectionId(): string {
    return this.connectionId
  }

  async spawn(opts: PtySpawnOptions): Promise<PtySpawnResult> {
    // Why: cwd/env are forwarded explicitly (even when undefined) so the
    // request payload always has a stable shape.
    return this.rpc<PtySpawnResult>('pty.spawn', {
      cols: opts.cols,
      rows: opts.rows,
      cwd: opts.cwd,
      env: opts.env
    })
  }

  async attach(id: string): Promise<void> {
    await this.rpc('pty.attach', { id })
  }

  // Why: keystrokes are fire-and-forget notifications, not requests.
  write(id: string, data: string): void {
    this.mux.notify('pty.data', { id, data })
  }

  resize(id: string, cols: number, rows: number): void {
    this.mux.notify('pty.resize', { id, cols, rows })
  }

  async shutdown(id: string, immediate: boolean): Promise<void> {
    await this.rpc('pty.shutdown', { id, immediate })
  }

  async sendSignal(id: string, signal: string): Promise<void> {
    await this.rpc('pty.sendSignal', { id, signal })
  }

  async getCwd(id: string): Promise<string> {
    return this.rpc<string>('pty.getCwd', { id })
  }

  async getInitialCwd(id: string): Promise<string> {
    return this.rpc<string>('pty.getInitialCwd', { id })
  }

  async clearBuffer(id: string): Promise<void> {
    await this.rpc('pty.clearBuffer', { id })
  }

  acknowledgeDataEvent(id: string, charCount: number): void {
    this.mux.notify('pty.ackData', { id, charCount })
  }

  async hasChildProcesses(id: string): Promise<boolean> {
    return this.rpc<boolean>('pty.hasChildProcesses', { id })
  }

  async getForegroundProcess(id: string): Promise<string | null> {
    return this.rpc<string | null>('pty.getForegroundProcess', { id })
  }

  async serialize(ids: string[]): Promise<string> {
    return this.rpc<string>('pty.serialize', { ids })
  }

  async revive(state: string): Promise<void> {
    await this.rpc('pty.revive', { state })
  }

  async listProcesses(): Promise<{ id: string; cwd: string; title: string }[]> {
    return this.rpc<{ id: string; cwd: string; title: string }[]>('pty.listProcesses')
  }

  async getDefaultShell(): Promise<string> {
    return this.rpc<string>('pty.getDefaultShell')
  }

  async getProfiles(): Promise<{ name: string; path: string }[]> {
    return this.rpc<{ name: string; path: string }[]>('pty.getProfiles')
  }

  onData(callback: DataCallback): () => void {
    return this.subscribe(this.dataListeners, callback)
  }

  onReplay(callback: ReplayCallback): () => void {
    return this.subscribe(this.replayListeners, callback)
  }

  onExit(callback: ExitCallback): () => void {
    return this.subscribe(this.exitListeners, callback)
  }
}

122
src/main/providers/types.ts Normal file
View file

@ -0,0 +1,122 @@
import type {
DirEntry,
FsChangeEvent,
GitStatusResult,
GitDiffResult,
GitBranchCompareResult,
GitConflictOperation,
GitWorktreeInfo,
SearchOptions,
SearchResult
} from '../../shared/types'
// ─── PTY Provider ───────────────────────────────────────────────────
/** Options for spawning a PTY (local or remote). */
export type PtySpawnOptions = {
  cols: number
  rows: number
  cwd?: string
  env?: Record<string, string>
  command?: string
}
/** Handle returned by spawn(); `id` keys all subsequent PTY operations. */
export type PtySpawnResult = {
  id: string
}
/**
 * Terminal (PTY) operations. Implemented both locally and over SSH so the
 * dispatch layer can route per connectionId.
 */
export type IPtyProvider = {
  spawn(opts: PtySpawnOptions): Promise<PtySpawnResult>
  attach(id: string): Promise<void>
  write(id: string, data: string): void
  resize(id: string, cols: number, rows: number): void
  shutdown(id: string, immediate: boolean): Promise<void>
  sendSignal(id: string, signal: string): Promise<void>
  getCwd(id: string): Promise<string>
  getInitialCwd(id: string): Promise<string>
  clearBuffer(id: string): Promise<void>
  acknowledgeDataEvent(id: string, charCount: number): void
  hasChildProcesses(id: string): Promise<boolean>
  getForegroundProcess(id: string): Promise<string | null>
  serialize(ids: string[]): Promise<string>
  revive(state: string): Promise<void>
  listProcesses(): Promise<{ id: string; cwd: string; title: string }[]>
  getDefaultShell(): Promise<string>
  getProfiles(): Promise<{ name: string; path: string }[]>
  // Event subscriptions; each returns an unsubscribe function.
  onData(callback: (payload: { id: string; data: string }) => void): () => void
  onReplay(callback: (payload: { id: string; data: string }) => void): () => void
  onExit(callback: (payload: { id: string; code: number }) => void): () => void
}
// ─── Filesystem Provider ────────────────────────────────────────────
/** Minimal stat result; `mtime` is a numeric timestamp. */
export type FileStat = {
  size: number
  type: 'file' | 'directory' | 'symlink'
  mtime: number
}
/** File contents plus content-type hints for the renderer. */
export type FileReadResult = {
  content: string
  isBinary: boolean
  isImage?: boolean
  mimeType?: string
}
/** Filesystem operations routed per connectionId by the registry. */
export type IFilesystemProvider = {
  readDir(dirPath: string): Promise<DirEntry[]>
  readFile(filePath: string): Promise<FileReadResult>
  writeFile(filePath: string, content: string): Promise<void>
  stat(filePath: string): Promise<FileStat>
  // Fix: declare the optional `recursive` flag that implementations (e.g. the
  // SSH provider) already accept and forward; without it, callers going
  // through this interface could never request recursive deletion. Optional,
  // so single-argument implementations remain assignable.
  deletePath(targetPath: string, recursive?: boolean): Promise<void>
  createFile(filePath: string): Promise<void>
  createDir(dirPath: string): Promise<void>
  rename(oldPath: string, newPath: string): Promise<void>
  copy(source: string, destination: string): Promise<void>
  realpath(filePath: string): Promise<string>
  search(opts: SearchOptions): Promise<SearchResult>
  listFiles(rootPath: string): Promise<string[]>
  // watch resolves to an unsubscribe function for this rootPath.
  watch(rootPath: string, callback: (events: FsChangeEvent[]) => void): Promise<() => void>
}
// ─── Git Provider ───────────────────────────────────────────────────
/** Git operations routed per connectionId by the registry. */
export type IGitProvider = {
  getStatus(worktreePath: string): Promise<GitStatusResult>
  getDiff(worktreePath: string, filePath: string, staged: boolean): Promise<GitDiffResult>
  stageFile(worktreePath: string, filePath: string): Promise<void>
  unstageFile(worktreePath: string, filePath: string): Promise<void>
  bulkStageFiles(worktreePath: string, filePaths: string[]): Promise<void>
  bulkUnstageFiles(worktreePath: string, filePaths: string[]): Promise<void>
  discardChanges(worktreePath: string, filePath: string): Promise<void>
  detectConflictOperation(worktreePath: string): Promise<GitConflictOperation>
  getBranchCompare(worktreePath: string, baseRef: string): Promise<GitBranchCompareResult>
  getBranchDiff(
    worktreePath: string,
    baseRef: string,
    options?: { includePatch?: boolean; filePath?: string; oldPath?: string }
  ): Promise<GitDiffResult[]>
  listWorktrees(repoPath: string): Promise<GitWorktreeInfo[]>
  addWorktree(
    repoPath: string,
    branchName: string,
    targetDir: string,
    options?: { base?: string; track?: boolean }
  ): Promise<void>
  removeWorktree(worktreePath: string, force?: boolean): Promise<void>
  // Synchronous check; remote implementations may answer optimistically
  // (see SshGitProvider.isGitRepo) — isGitRepoAsync is authoritative.
  isGitRepo(path: string): boolean
  isGitRepoAsync(dirPath: string): Promise<{ isRepo: boolean; rootPath: string | null }>
  exec(args: string[], cwd: string): Promise<{ stdout: string; stderr: string }>
  getRemoteFileUrl(worktreePath: string, relativePath: string, line: number): Promise<string | null>
}
// ─── Provider Registry ──────────────────────────────────────────────
/**
 * Routes operations to the correct provider based on connectionId.
 * null/undefined connectionId = local provider.
 */
export type IProviderRegistry = {
  getPtyProvider(connectionId: string | null | undefined): IPtyProvider
  getFilesystemProvider(connectionId: string | null | undefined): IFilesystemProvider
  getGitProvider(connectionId: string | null | undefined): IGitProvider
}

View file

@ -0,0 +1,222 @@
import { describe, expect, it } from 'vitest'
import {
HEADER_LENGTH,
MessageType,
encodeFrame,
encodeJsonRpcFrame,
encodeKeepAliveFrame,
FrameDecoder,
parseJsonRpcMessage,
parseUnameToRelayPlatform,
type JsonRpcRequest,
type DecodedFrame
} from './relay-protocol'
// Wire-format tests. Per the offsets asserted below, the 13-byte header is
// laid out big-endian as [type:1][id:4][ack:4][payload length:4].
describe('frame encoding', () => {
  it('encodes a frame with 13-byte header', () => {
    const payload = Buffer.from('hello')
    const frame = encodeFrame(MessageType.Regular, 1, 0, payload)
    expect(frame.length).toBe(HEADER_LENGTH + payload.length)
    expect(frame[0]).toBe(MessageType.Regular)
    expect(frame.readUInt32BE(1)).toBe(1) // ID
    expect(frame.readUInt32BE(5)).toBe(0) // ACK
    expect(frame.readUInt32BE(9)).toBe(5) // LENGTH
    expect(frame.subarray(HEADER_LENGTH).toString()).toBe('hello')
  })
  it('encodes keepalive frame with empty payload', () => {
    const frame = encodeKeepAliveFrame(42, 10)
    // Keepalives are header-only: LENGTH is 0 and nothing follows the header.
    expect(frame.length).toBe(HEADER_LENGTH)
    expect(frame[0]).toBe(MessageType.KeepAlive)
    expect(frame.readUInt32BE(1)).toBe(42) // ID
    expect(frame.readUInt32BE(5)).toBe(10) // ACK
    expect(frame.readUInt32BE(9)).toBe(0) // LENGTH
  })
  it('encodes JSON-RPC frame', () => {
    const msg: JsonRpcRequest = {
      jsonrpc: '2.0',
      id: 1,
      method: 'pty.spawn',
      params: { cols: 80, rows: 24 }
    }
    const frame = encodeJsonRpcFrame(msg, 5, 3)
    expect(frame[0]).toBe(MessageType.Regular)
    expect(frame.readUInt32BE(1)).toBe(5)
    expect(frame.readUInt32BE(5)).toBe(3)
    // The payload is the UTF-8 JSON serialization of the message.
    const payloadLen = frame.readUInt32BE(9)
    const payload = frame.subarray(HEADER_LENGTH, HEADER_LENGTH + payloadLen)
    const decoded = JSON.parse(payload.toString('utf-8'))
    expect(decoded.method).toBe('pty.spawn')
    expect(decoded.params.cols).toBe(80)
  })
  it('rejects messages larger than MAX_MESSAGE_SIZE', () => {
    // 17 MB of params exceeds the 16 MB MAX_MESSAGE_SIZE cap.
    const bigPayload = {
      jsonrpc: '2.0' as const,
      id: 1,
      method: 'x',
      params: { data: 'a'.repeat(17 * 1024 * 1024) }
    }
    expect(() => encodeJsonRpcFrame(bigPayload, 1, 0)).toThrow('Message too large')
  })
})
// FrameDecoder tests: the decoder is fed raw byte chunks and must emit one
// DecodedFrame per complete frame, tolerating arbitrary chunk boundaries.
describe('FrameDecoder', () => {
  it('decodes a complete frame', () => {
    const frames: DecodedFrame[] = []
    const decoder = new FrameDecoder((f) => frames.push(f))
    const payload = Buffer.from('test')
    const encoded = encodeFrame(MessageType.Regular, 1, 0, payload)
    decoder.feed(encoded)
    expect(frames).toHaveLength(1)
    expect(frames[0].type).toBe(MessageType.Regular)
    expect(frames[0].id).toBe(1)
    expect(frames[0].ack).toBe(0)
    expect(frames[0].payload.toString()).toBe('test')
  })
  it('handles partial frames across multiple feeds', () => {
    const frames: DecodedFrame[] = []
    const decoder = new FrameDecoder((f) => frames.push(f))
    const payload = Buffer.from('hello world')
    const encoded = encodeFrame(MessageType.Regular, 2, 1, payload)
    // Feed in two parts
    decoder.feed(encoded.subarray(0, 10))
    // 10 bytes is less than the 13-byte header, so nothing can be emitted yet.
    expect(frames).toHaveLength(0)
    decoder.feed(encoded.subarray(10))
    expect(frames).toHaveLength(1)
    expect(frames[0].payload.toString()).toBe('hello world')
  })
  it('decodes multiple frames from a single chunk', () => {
    const frames: DecodedFrame[] = []
    const decoder = new FrameDecoder((f) => frames.push(f))
    const frame1 = encodeFrame(MessageType.Regular, 1, 0, Buffer.from('a'))
    const frame2 = encodeFrame(MessageType.Regular, 2, 1, Buffer.from('b'))
    const combined = Buffer.concat([frame1, frame2])
    decoder.feed(combined)
    expect(frames).toHaveLength(2)
    expect(frames[0].payload.toString()).toBe('a')
    expect(frames[1].payload.toString()).toBe('b')
  })
  it('decodes keepalive frames', () => {
    const frames: DecodedFrame[] = []
    const decoder = new FrameDecoder((f) => frames.push(f))
    decoder.feed(encodeKeepAliveFrame(5, 3))
    expect(frames).toHaveLength(1)
    expect(frames[0].type).toBe(MessageType.KeepAlive)
    expect(frames[0].payload.length).toBe(0)
  })
  it('skips oversized frames and calls onError instead of throwing', () => {
    const errors: Error[] = []
    const frames: DecodedFrame[] = []
    const decoder = new FrameDecoder(
      (f) => frames.push(f),
      (err) => errors.push(err)
    )
    // Hand-build a header claiming a 17 MB payload (over the 16 MB cap).
    const oversizedLength = 17 * 1024 * 1024
    const header = Buffer.alloc(HEADER_LENGTH)
    header[0] = MessageType.Regular
    header.writeUInt32BE(1, 1)
    header.writeUInt32BE(0, 5)
    header.writeUInt32BE(oversizedLength, 9)
    const fakePayload = Buffer.alloc(oversizedLength)
    const fullFrame = Buffer.concat([header, fakePayload])
    decoder.feed(fullFrame)
    // The frame is dropped and reported via the error callback, not thrown.
    expect(frames).toHaveLength(0)
    expect(errors).toHaveLength(1)
    expect(errors[0].message).toContain('discarded')
  })
  it('reset clears internal buffer', () => {
    const frames: DecodedFrame[] = []
    const decoder = new FrameDecoder((f) => frames.push(f))
    // Feed a partial frame
    const encoded = encodeFrame(MessageType.Regular, 1, 0, Buffer.from('test'))
    decoder.feed(encoded.subarray(0, 5))
    decoder.reset()
    // Feed a new complete frame
    decoder.feed(encodeFrame(MessageType.Regular, 2, 0, Buffer.from('new')))
    // Only the post-reset frame appears; the partial bytes were discarded.
    expect(frames).toHaveLength(1)
    expect(frames[0].id).toBe(2)
  })
})
// parseJsonRpcMessage: payload bytes must decode to a JSON-RPC 2.0 envelope.
describe('parseJsonRpcMessage', () => {
  it('parses a valid JSON-RPC request', () => {
    const payload = Buffer.from(
      JSON.stringify({
        jsonrpc: '2.0',
        id: 1,
        method: 'pty.spawn',
        params: { cols: 80 }
      })
    )
    const msg = parseJsonRpcMessage(payload)
    // The result is a request/response union, so narrow on 'method' first.
    expect('method' in msg && msg.method).toBe('pty.spawn')
  })
  it('throws on invalid jsonrpc version', () => {
    const payload = Buffer.from(JSON.stringify({ jsonrpc: '1.0', id: 1, method: 'x' }))
    expect(() => parseJsonRpcMessage(payload)).toThrow('Invalid JSON-RPC version')
  })
  it('throws on malformed JSON', () => {
    const payload = Buffer.from('not json')
    expect(() => parseJsonRpcMessage(payload)).toThrow()
  })
})
// Covers the uname → relay-binary platform mapping, including the amd64/arm64
// aliases and the case-insensitivity guarantee.
describe('parseUnameToRelayPlatform', () => {
  it('maps Linux x86_64', () => {
    expect(parseUnameToRelayPlatform('Linux', 'x86_64')).toBe('linux-x64')
  })
  it('maps Linux aarch64', () => {
    expect(parseUnameToRelayPlatform('Linux', 'aarch64')).toBe('linux-arm64')
  })
  it('maps Darwin x86_64', () => {
    expect(parseUnameToRelayPlatform('Darwin', 'x86_64')).toBe('darwin-x64')
  })
  it('maps Darwin arm64', () => {
    expect(parseUnameToRelayPlatform('Darwin', 'arm64')).toBe('darwin-arm64')
  })
  it('handles amd64 alias', () => {
    expect(parseUnameToRelayPlatform('Linux', 'amd64')).toBe('linux-x64')
  })
  it('returns null for unsupported OS', () => {
    expect(parseUnameToRelayPlatform('FreeBSD', 'x86_64')).toBeNull()
  })
  it('returns null for unsupported arch', () => {
    expect(parseUnameToRelayPlatform('Linux', 'mips')).toBeNull()
  })
  it('is case-insensitive', () => {
    expect(parseUnameToRelayPlatform('LINUX', 'X86_64')).toBe('linux-x64')
  })
})

View file

@ -0,0 +1,205 @@
// ─── Relay Protocol ─────────────────────────────────────────────────
// 13-byte framing header matching VS Code's PersistentProtocol wire format.
// See design-ssh-support.md § JSON-RPC Protocol Specification.
export const RELAY_VERSION = '0.1.0'
// Handshake line; interpolates RELAY_VERSION, so bumping the version changes
// the sentinel the client must match.
export const RELAY_SENTINEL = `ORCA-RELAY v${RELAY_VERSION} READY\n`
export const RELAY_SENTINEL_TIMEOUT_MS = 10_000
// Remote-side directory name (relative; presumably under $HOME — confirm in
// the deployment code).
export const RELAY_REMOTE_DIR = '.orca-remote'
// ── Framing constants (VS Code ProtocolConstants) ───────────────────
// TYPE(1) + ID(4) + ACK(4) + LENGTH(4); see encodeFrame below.
export const HEADER_LENGTH = 13
// Frames whose LENGTH field exceeds this are refused on encode and
// discarded on decode.
export const MAX_MESSAGE_SIZE = 16 * 1024 * 1024 // 16 MB
/** Message type byte. */
export const MessageType = {
  Regular: 1, // JSON-RPC payload
  KeepAlive: 9 // empty payload; carries only id/ack
} as const
/** Keepalive/timeout (VS Code ProtocolConstants). */
export const KEEPALIVE_SEND_MS = 5_000
export const TIMEOUT_MS = 20_000
/** PTY flow control watermarks (VS Code FlowControlConstants). */
export const PTY_FLOW_HIGH_WATERMARK = 100_000
export const PTY_FLOW_LOW_WATERMARK = 5_000
/** Reconnection grace period (default, overridable by relay --grace-time). */
export const DEFAULT_GRACE_TIME_MS = 5 * 60 * 1000 // 5 minutes
// ── Relay error codes ───────────────────────────────────────────────
// Application-specific JSON-RPC error codes used by the relay.
export const RelayErrorCode = {
  CommandNotFound: -33001,
  PermissionDenied: -33002,
  PathNotFound: -33003,
  PtyAllocationFailed: -33004,
  DiskFull: -33005
} as const
// ── JSON-RPC types ──────────────────────────────────────────────────
/** A call that expects a response carrying the matching `id`. */
export type JsonRpcRequest = {
  jsonrpc: '2.0'
  id: number
  method: string
  params?: Record<string, unknown>
}
/** Reply to a request; carries `result` on success or `error` on failure. */
export type JsonRpcResponse = {
  jsonrpc: '2.0'
  id: number
  result?: unknown
  error?: { code: number; message: string; data?: unknown }
}
/** One-way message: no `id`, no response expected. */
export type JsonRpcNotification = {
  jsonrpc: '2.0'
  method: string
  params?: Record<string, unknown>
}
export type JsonRpcMessage = JsonRpcRequest | JsonRpcResponse | JsonRpcNotification
// ── Framing: encode / decode ────────────────────────────────────────
/**
 * Encode a message into a framed buffer (13-byte header + payload).
 *
 * Header layout (all integers big-endian):
 * - [0]: TYPE (1 byte)
 * - [1-4]: ID (uint32)
 * - [5-8]: ACK (uint32)
 * - [9-12]: LENGTH (uint32, payload byte count)
 */
export function encodeFrame(
  type: number,
  id: number,
  ack: number,
  payload: Buffer | Uint8Array
): Buffer {
  // Single allocation: write the header in place, then copy the payload in.
  const frame = Buffer.alloc(HEADER_LENGTH + payload.length)
  frame[0] = type
  frame.writeUInt32BE(id, 1)
  frame.writeUInt32BE(ack, 5)
  frame.writeUInt32BE(payload.length, 9)
  frame.set(payload, HEADER_LENGTH)
  return frame
}
/**
 * Serialize a JSON-RPC message and wrap it in a Regular frame.
 * @throws when the UTF-8 payload exceeds MAX_MESSAGE_SIZE.
 */
export function encodeJsonRpcFrame(msg: JsonRpcMessage, id: number, ack: number): Buffer {
  const body = Buffer.from(JSON.stringify(msg), 'utf-8')
  if (body.length > MAX_MESSAGE_SIZE) {
    throw new Error(`Message too large: ${body.length} bytes (max ${MAX_MESSAGE_SIZE})`)
  }
  return encodeFrame(MessageType.Regular, id, ack, body)
}
/** Encode an empty keepalive frame carrying only sequence/ack numbers. */
export function encodeKeepAliveFrame(id: number, ack: number): Buffer {
  const emptyPayload = Buffer.alloc(0)
  return encodeFrame(MessageType.KeepAlive, id, ack, emptyPayload)
}
/** A fully parsed wire frame as emitted by FrameDecoder. */
export type DecodedFrame = {
  type: number // MessageType byte (header byte 0)
  id: number // sender's sequence number (header bytes 1-4)
  ack: number // highest sequence the sender has received (header bytes 5-8)
  payload: Buffer // raw payload; empty for keepalive frames
}
/**
 * Incremental frame parser. Feed it chunks of data; it emits complete frames
 * via `onFrame` and reports recoverable protocol errors via `onError`
 * (when provided) instead of throwing.
 */
export class FrameDecoder {
  private buffer = Buffer.alloc(0)
  // Bytes of an oversized payload still to be discarded before the next header.
  private skipRemaining = 0
  private onFrame: (frame: DecodedFrame) => void
  private onError: ((err: Error) => void) | null
  constructor(onFrame: (frame: DecodedFrame) => void, onError?: (err: Error) => void) {
    this.onFrame = onFrame
    this.onError = onError ?? null
  }
  feed(chunk: Buffer | Uint8Array): void {
    this.buffer = Buffer.concat([this.buffer, chunk])
    for (;;) {
      // Drain any oversized payload we are currently discarding. Why: the
      // previous implementation buffered the entire oversized frame before
      // skipping it, so a single corrupt/hostile LENGTH field (uint32, up to
      // ~4 GiB) could force unbounded memory growth. Discarding incrementally
      // keeps memory bounded while still resynchronizing on the next header.
      if (this.skipRemaining > 0) {
        const drop = Math.min(this.skipRemaining, this.buffer.length)
        this.buffer = this.buffer.subarray(drop)
        this.skipRemaining -= drop
        if (this.skipRemaining > 0) {
          return
        }
      }
      if (this.buffer.length < HEADER_LENGTH) {
        return
      }
      const length = this.buffer.readUInt32BE(9)
      // Why: throwing here would leave the buffer in a partially consumed
      // state — subsequent feed() calls would try to parse leftover payload
      // bytes as a new header, corrupting every future frame. Instead we
      // skip the entire oversized frame so the decoder stays synchronized.
      if (length > MAX_MESSAGE_SIZE) {
        this.buffer = this.buffer.subarray(HEADER_LENGTH)
        this.skipRemaining = length
        if (this.onError) {
          this.onError(new Error(`Frame payload too large: ${length} bytes — discarded`))
        }
        continue
      }
      const totalLength = HEADER_LENGTH + length
      if (this.buffer.length < totalLength) {
        return
      }
      const frame: DecodedFrame = {
        type: this.buffer[0],
        id: this.buffer.readUInt32BE(1),
        ack: this.buffer.readUInt32BE(5),
        payload: this.buffer.subarray(HEADER_LENGTH, totalLength)
      }
      this.buffer = this.buffer.subarray(totalLength)
      this.onFrame(frame)
    }
  }
  /** Discard all buffered bytes and any in-progress oversized-frame skip. */
  reset(): void {
    this.buffer = Buffer.alloc(0)
    this.skipRemaining = 0
  }
}
/**
 * Parse a JSON-RPC message from a frame payload.
 * @throws when the payload is not valid JSON or is not JSON-RPC 2.0.
 */
export function parseJsonRpcMessage(payload: Buffer): JsonRpcMessage {
  const parsed = JSON.parse(payload.toString('utf-8')) as JsonRpcMessage
  if (parsed.jsonrpc === '2.0') {
    return parsed
  }
  throw new Error(`Invalid JSON-RPC version: ${(parsed as Record<string, unknown>).jsonrpc}`)
}
// ── Supported platforms ─────────────────────────────────────────────
export type RelayPlatform = 'linux-x64' | 'linux-arm64' | 'darwin-x64' | 'darwin-arm64'
/**
 * Map `uname` OS/arch strings to a relay binary platform id.
 * Comparison is case-insensitive and whitespace-tolerant; returns null for
 * any OS/arch pair without a prebuilt relay.
 */
export function parseUnameToRelayPlatform(os: string, arch: string): RelayPlatform | null {
  const osKey = os.toLowerCase().trim()
  const archKey = arch.toLowerCase().trim()
  const osTable: Record<string, string> = {
    linux: 'linux',
    darwin: 'darwin'
  }
  const archTable: Record<string, string> = {
    x86_64: 'x64',
    amd64: 'x64',
    aarch64: 'arm64',
    arm64: 'arm64'
  }
  const relayOs = osTable[osKey]
  const relayArch = archTable[archKey]
  if (!relayOs || !relayArch) {
    return null
  }
  return `${relayOs}-${relayArch}` as RelayPlatform
}

View file

@ -0,0 +1,222 @@
import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest'
import { SshChannelMultiplexer, type MultiplexerTransport } from './ssh-channel-multiplexer'
import { encodeFrame, MessageType, HEADER_LENGTH, encodeKeepAliveFrame } from './relay-protocol'
/** Fake transport that records writes and exposes its registered callbacks. */
function createMockTransport(): MultiplexerTransport & {
  dataCallbacks: ((data: Buffer) => void)[]
  closeCallbacks: (() => void)[]
  written: Buffer[]
} {
  const written: Buffer[] = []
  const dataCallbacks: ((data: Buffer) => void)[] = []
  const closeCallbacks: (() => void)[] = []
  return {
    write: (data: Buffer) => {
      written.push(data)
    },
    onData: (cb) => {
      dataCallbacks.push(cb)
    },
    onClose: (cb) => {
      closeCallbacks.push(cb)
    },
    dataCallbacks,
    closeCallbacks,
    written
  }
}
/** Build a framed JSON-RPC success response for `requestId`. */
function makeResponseFrame(requestId: number, result: unknown, seq: number): Buffer {
  const body = JSON.stringify({ jsonrpc: '2.0', id: requestId, result })
  return encodeFrame(MessageType.Regular, seq, 0, Buffer.from(body))
}
/** Build a framed JSON-RPC error response for `requestId`. */
function makeErrorResponseFrame(
  requestId: number,
  code: number,
  message: string,
  seq: number
): Buffer {
  const body = JSON.stringify({ jsonrpc: '2.0', id: requestId, error: { code, message } })
  return encodeFrame(MessageType.Regular, seq, 0, Buffer.from(body))
}
/** Build a framed JSON-RPC notification (no id, so no response is expected). */
function makeNotificationFrame(
  method: string,
  params: Record<string, unknown>,
  seq: number
): Buffer {
  const body = JSON.stringify({ jsonrpc: '2.0', method, params })
  return encodeFrame(MessageType.Regular, seq, 0, Buffer.from(body))
}
// Why fake timers: the multiplexer starts keepalive and timeout intervals in
// its constructor, and these tests drive them deterministically with
// vi.advanceTimersByTime.
describe('SshChannelMultiplexer', () => {
  let transport: ReturnType<typeof createMockTransport>
  let mux: SshChannelMultiplexer
  beforeEach(() => {
    vi.useFakeTimers()
    transport = createMockTransport()
    mux = new SshChannelMultiplexer(transport)
  })
  afterEach(() => {
    mux.dispose()
    vi.useRealTimers()
  })
  describe('request/response', () => {
    it('sends a JSON-RPC request and resolves on response', async () => {
      const promise = mux.request('pty.spawn', { cols: 80, rows: 24 })
      // Verify the request was written
      expect(transport.written.length).toBe(1)
      const frame = transport.written[0]
      expect(frame[0]).toBe(MessageType.Regular)
      const payloadLen = frame.readUInt32BE(9)
      const payload = JSON.parse(
        frame.subarray(HEADER_LENGTH, HEADER_LENGTH + payloadLen).toString()
      )
      expect(payload.method).toBe('pty.spawn')
      expect(payload.id).toBe(1)
      // Simulate response from relay
      const response = makeResponseFrame(1, { id: 'pty-1' }, 1)
      transport.dataCallbacks[0](response)
      const result = await promise
      expect(result).toEqual({ id: 'pty-1' })
    })
    it('rejects on error response', async () => {
      const promise = mux.request('pty.spawn', { cols: 80, rows: 24 })
      const response = makeErrorResponseFrame(1, -33004, 'PTY allocation failed', 1)
      transport.dataCallbacks[0](response)
      await expect(promise).rejects.toThrow('PTY allocation failed')
    })
    it('times out after 30s with no response', async () => {
      const promise = mux.request('pty.spawn')
      // Feed keepalive frames periodically to prevent the connection-level
      // timeout (20s no-data) from firing before the 30s request timeout.
      for (let i = 0; i < 6; i++) {
        vi.advanceTimersByTime(5_000)
        transport.dataCallbacks[0](encodeKeepAliveFrame(i + 1, 0))
      }
      vi.advanceTimersByTime(1_000)
      await expect(promise).rejects.toThrow('timed out')
    })
    it('assigns unique request IDs', async () => {
      // .catch(() => {}) keeps the eventual afterEach dispose() rejections
      // from surfacing as unhandled.
      void mux.request('method1').catch(() => {})
      void mux.request('method2').catch(() => {})
      expect(transport.written.length).toBe(2)
      const id1 = JSON.parse(
        transport.written[0]
          .subarray(HEADER_LENGTH, HEADER_LENGTH + transport.written[0].readUInt32BE(9))
          .toString()
      ).id
      const id2 = JSON.parse(
        transport.written[1]
          .subarray(HEADER_LENGTH, HEADER_LENGTH + transport.written[1].readUInt32BE(9))
          .toString()
      ).id
      expect(id1).not.toBe(id2)
    })
  })
  describe('notifications', () => {
    it('sends notifications without expecting a response', () => {
      mux.notify('pty.data', { id: 'pty-1', data: 'hello' })
      expect(transport.written.length).toBe(1)
      const payload = JSON.parse(
        transport.written[0]
          .subarray(HEADER_LENGTH, HEADER_LENGTH + transport.written[0].readUInt32BE(9))
          .toString()
      )
      expect(payload.method).toBe('pty.data')
      expect(payload.id).toBeUndefined()
    })
    it('dispatches incoming notifications to handler', () => {
      const handler = vi.fn()
      mux.onNotification(handler)
      const frame = makeNotificationFrame('pty.exit', { id: 'pty-1', code: 0 }, 1)
      transport.dataCallbacks[0](frame)
      expect(handler).toHaveBeenCalledWith('pty.exit', { id: 'pty-1', code: 0 })
    })
  })
  describe('keepalive', () => {
    it('sends keepalive frames periodically', () => {
      const initialWrites = transport.written.length
      vi.advanceTimersByTime(5_000)
      expect(transport.written.length).toBeGreaterThan(initialWrites)
      const lastFrame = transport.written.at(-1)!
      expect(lastFrame[0]).toBe(MessageType.KeepAlive)
    })
  })
  describe('dispose', () => {
    it('rejects all pending requests on dispose', async () => {
      const promise = mux.request('pty.spawn')
      mux.dispose()
      await expect(promise).rejects.toThrow('Multiplexer disposed')
    })
    it('throws on request after dispose', async () => {
      mux.dispose()
      await expect(mux.request('pty.spawn')).rejects.toThrow('Multiplexer disposed')
    })
    it('ignores notify after dispose', () => {
      mux.dispose()
      mux.notify('pty.data', { id: 'pty-1', data: 'x' })
      // No writes should happen after the initial keepalive writes
    })
    it('reports isDisposed correctly', () => {
      expect(mux.isDisposed()).toBe(false)
      mux.dispose()
      expect(mux.isDisposed()).toBe(true)
    })
  })
  describe('transport close', () => {
    it('disposes multiplexer when transport closes', async () => {
      const promise = mux.request('pty.spawn')
      transport.closeCallbacks[0]()
      await expect(promise).rejects.toThrow('SSH connection lost, reconnecting...')
      expect(mux.isDisposed()).toBe(true)
    })
  })
})

View file

@ -0,0 +1,284 @@
import {
FrameDecoder,
MessageType,
encodeJsonRpcFrame,
encodeKeepAliveFrame,
parseJsonRpcMessage,
KEEPALIVE_SEND_MS,
TIMEOUT_MS,
type DecodedFrame,
type JsonRpcMessage,
type JsonRpcRequest,
type JsonRpcResponse,
type JsonRpcNotification
} from './relay-protocol'
/** Minimal byte-stream transport the multiplexer runs over. */
export type MultiplexerTransport = {
  write: (data: Buffer) => void
  onData: (cb: (data: Buffer) => void) => void
  onClose: (cb: () => void) => void
}
/** Book-keeping for an in-flight request awaiting its response. */
type PendingRequest = {
  resolve: (result: unknown) => void
  reject: (error: Error) => void
  timer: ReturnType<typeof setTimeout> // per-request timeout handle
}
export type NotificationHandler = (method: string, params: Record<string, unknown>) => void
// Per-request timeout; distinct from the connection-level TIMEOUT_MS (no-data).
const REQUEST_TIMEOUT_MS = 30_000
/**
 * JSON-RPC 2.0 multiplexer over a single framed byte stream. Handles request
 * ids, frame sequence/ack numbers, keepalives, and a connection-level
 * inactivity timeout; disposes itself when the transport closes.
 */
export class SshChannelMultiplexer {
  private decoder: FrameDecoder
  private transport: MultiplexerTransport
  // JSON-RPC request ids (one per request/response pair).
  private nextRequestId = 1
  // Frame-level sequence numbers (every outgoing frame, keepalives included).
  private nextOutgoingSeq = 1
  // Highest frame seq seen from the remote; echoed back as our ACK.
  private highestReceivedSeq = 0
  // Highest of our outgoing seqs that the remote has acknowledged.
  private highestAckedBySelf = 0
  private lastReceivedAt = Date.now()
  private pendingRequests = new Map<number, PendingRequest>()
  private notificationHandlers: NotificationHandler[] = []
  private keepaliveTimer: ReturnType<typeof setInterval> | null = null
  private timeoutTimer: ReturnType<typeof setInterval> | null = null
  private disposed = false
  // Track the oldest unacked outgoing message timestamp
  private unackedTimestamps = new Map<number, number>()
  constructor(transport: MultiplexerTransport) {
    this.transport = transport
    this.decoder = new FrameDecoder(
      (frame) => this.handleFrame(frame),
      (err) => this.handleProtocolError(err)
    )
    transport.onData((data) => {
      if (this.disposed) {
        return
      }
      this.lastReceivedAt = Date.now()
      this.decoder.feed(data)
    })
    transport.onClose(() => {
      this.dispose('connection_lost')
    })
    this.startKeepalive()
    this.startTimeoutCheck()
  }
  /** Subscribe to incoming notifications; returns an unsubscribe function. */
  onNotification(handler: NotificationHandler): () => void {
    this.notificationHandlers.push(handler)
    return () => {
      const idx = this.notificationHandlers.indexOf(handler)
      if (idx !== -1) {
        this.notificationHandlers.splice(idx, 1)
      }
    }
  }
  /**
   * Send a JSON-RPC request and wait for the response.
   */
  async request(method: string, params?: Record<string, unknown>): Promise<unknown> {
    if (this.disposed) {
      throw new Error('Multiplexer disposed')
    }
    const id = this.nextRequestId++
    const msg: JsonRpcRequest = {
      jsonrpc: '2.0',
      id,
      method,
      ...(params !== undefined ? { params } : {})
    }
    return new Promise((resolve, reject) => {
      // Rejects after REQUEST_TIMEOUT_MS; the entry is removed so a late
      // response for this id is ignored by handleResponse.
      const timer = setTimeout(() => {
        this.pendingRequests.delete(id)
        reject(new Error(`Request "${method}" timed out after ${REQUEST_TIMEOUT_MS}ms`))
      }, REQUEST_TIMEOUT_MS)
      this.pendingRequests.set(id, { resolve, reject, timer })
      this.sendMessage(msg)
    })
  }
  /**
   * Send a JSON-RPC notification (no response expected).
   */
  notify(method: string, params?: Record<string, unknown>): void {
    if (this.disposed) {
      return
    }
    const msg: JsonRpcNotification = {
      jsonrpc: '2.0',
      method,
      ...(params !== undefined ? { params } : {})
    }
    this.sendMessage(msg)
  }
  /** Stop timers and reject every pending request. Idempotent. */
  dispose(reason: 'shutdown' | 'connection_lost' = 'shutdown'): void {
    if (this.disposed) {
      return
    }
    this.disposed = true
    if (this.keepaliveTimer) {
      clearInterval(this.keepaliveTimer)
      this.keepaliveTimer = null
    }
    if (this.timeoutTimer) {
      clearInterval(this.timeoutTimer)
      this.timeoutTimer = null
    }
    // Why: the renderer uses the error code to distinguish temporary disconnects
    // (show reconnection overlay) from permanent shutdown (show error toast).
    const errorMessage =
      reason === 'connection_lost' ? 'SSH connection lost, reconnecting...' : 'Multiplexer disposed'
    const errorCode = reason === 'connection_lost' ? 'CONNECTION_LOST' : 'DISPOSED'
    for (const [id, pending] of this.pendingRequests) {
      clearTimeout(pending.timer)
      const err = new Error(errorMessage) as Error & { code: string }
      err.code = errorCode
      pending.reject(err)
      this.pendingRequests.delete(id)
    }
    this.decoder.reset()
  }
  isDisposed(): boolean {
    return this.disposed
  }
  // ── Private ───────────────────────────────────────────────────────
  private sendMessage(msg: JsonRpcMessage): void {
    const seq = this.nextOutgoingSeq++
    const frame = encodeJsonRpcFrame(msg, seq, this.highestReceivedSeq)
    this.unackedTimestamps.set(seq, Date.now())
    this.transport.write(frame)
  }
  private sendKeepAlive(): void {
    if (this.disposed) {
      return
    }
    const seq = this.nextOutgoingSeq++
    const frame = encodeKeepAliveFrame(seq, this.highestReceivedSeq)
    this.unackedTimestamps.set(seq, Date.now())
    this.transport.write(frame)
  }
  private handleFrame(frame: DecodedFrame): void {
    // Update ack tracking
    if (frame.id > this.highestReceivedSeq) {
      this.highestReceivedSeq = frame.id
    }
    // Process ack from remote: discard timestamps for acked messages
    if (frame.ack > this.highestAckedBySelf) {
      for (let i = this.highestAckedBySelf + 1; i <= frame.ack; i++) {
        this.unackedTimestamps.delete(i)
      }
      this.highestAckedBySelf = frame.ack
    }
    if (frame.type === MessageType.KeepAlive) {
      return
    }
    if (frame.type === MessageType.Regular) {
      try {
        const msg = parseJsonRpcMessage(frame.payload)
        this.handleMessage(msg)
      } catch (err) {
        this.handleProtocolError(err)
      }
    }
  }
  private handleMessage(msg: JsonRpcMessage): void {
    // Responses have an id plus result/error; notifications have a method but
    // no id. Anything else falls through unhandled.
    if ('id' in msg && ('result' in msg || 'error' in msg)) {
      this.handleResponse(msg as JsonRpcResponse)
    } else if ('method' in msg && !('id' in msg)) {
      this.handleNotification(msg as JsonRpcNotification)
    }
    // Requests from relay to client are not expected in Phase 2
  }
  private handleResponse(msg: JsonRpcResponse): void {
    const pending = this.pendingRequests.get(msg.id)
    if (!pending) {
      return
    }
    clearTimeout(pending.timer)
    this.pendingRequests.delete(msg.id)
    if (msg.error) {
      // Attach code/data non-enumerably so the Error still serializes normally.
      const err = new Error(msg.error.message)
      Object.defineProperty(err, 'code', { value: msg.error.code })
      Object.defineProperty(err, 'data', { value: msg.error.data })
      pending.reject(err)
    } else {
      pending.resolve(msg.result)
    }
  }
  private handleNotification(msg: JsonRpcNotification): void {
    const params = msg.params ?? {}
    // Why: handlers may unsubscribe during iteration (via the returned disposer
    // from onNotification), which splices the live array and skips the next handler.
    // Iterating a snapshot prevents that.
    const snapshot = Array.from(this.notificationHandlers)
    for (const handler of snapshot) {
      handler(msg.method, params)
    }
  }
  private startKeepalive(): void {
    this.keepaliveTimer = setInterval(() => {
      this.sendKeepAlive()
    }, KEEPALIVE_SEND_MS)
  }
  private startTimeoutCheck(): void {
    this.timeoutTimer = setInterval(() => {
      if (this.disposed) {
        return
      }
      const now = Date.now()
      const noDataReceived = now - this.lastReceivedAt > TIMEOUT_MS
      // Check oldest unacked message
      let oldestUnacked = Infinity
      for (const ts of this.unackedTimestamps.values()) {
        if (ts < oldestUnacked) {
          oldestUnacked = ts
        }
      }
      const oldestUnackedStale = oldestUnacked !== Infinity && now - oldestUnacked > TIMEOUT_MS
      // Connection considered dead when BOTH conditions met
      if (noDataReceived && oldestUnackedStale) {
        this.handleProtocolError(new Error('Connection timed out (no ack received)'))
      }
    }, KEEPALIVE_SEND_MS)
  }
  private handleProtocolError(err: unknown): void {
    console.warn(`[ssh-mux] Protocol error: ${err instanceof Error ? err.message : String(err)}`)
    this.dispose('connection_lost')
  }
}

View file

@ -0,0 +1,211 @@
import { describe, expect, it, vi } from 'vitest'
import { parseSshConfig, sshConfigHostsToTargets } from './ssh-config-parser'
vi.mock('os', () => ({
homedir: () => '/home/testuser'
}))
// Fixtures are template literals; the parser trims each line, so indentation
// inside the backticks is irrelevant to the assertions.
describe('parseSshConfig', () => {
  it('parses a basic host block', () => {
    const config = `
Host myserver
HostName 192.168.1.100
User deploy
Port 2222
`
    const hosts = parseSshConfig(config)
    expect(hosts).toHaveLength(1)
    expect(hosts[0]).toEqual({
      host: 'myserver',
      hostname: '192.168.1.100',
      user: 'deploy',
      port: 2222
    })
  })
  it('parses multiple host blocks', () => {
    const config = `
Host staging
HostName staging.example.com
User admin
Host production
HostName prod.example.com
User deploy
Port 2222
`
    const hosts = parseSshConfig(config)
    expect(hosts).toHaveLength(2)
    expect(hosts[0].host).toBe('staging')
    expect(hosts[1].host).toBe('production')
    expect(hosts[1].port).toBe(2222)
  })
  it('skips wildcard-only Host entries', () => {
    const config = `
Host *
ServerAliveInterval 60
Host myserver
HostName 10.0.0.1
`
    const hosts = parseSshConfig(config)
    expect(hosts).toHaveLength(1)
    expect(hosts[0].host).toBe('myserver')
  })
  it('skips Host entries with only pattern characters', () => {
    const config = `
Host *.example.com
User admin
Host dev
HostName dev.example.com
`
    const hosts = parseSshConfig(config)
    expect(hosts).toHaveLength(1)
    expect(hosts[0].host).toBe('dev')
  })
  it('parses IdentityFile with ~ expansion', () => {
    // Relies on the os.homedir mock at the top of this file.
    const config = `
Host myserver
HostName example.com
IdentityFile ~/.ssh/id_ed25519
`
    const hosts = parseSshConfig(config)
    expect(hosts[0].identityFile).toBe('/home/testuser/.ssh/id_ed25519')
  })
  it('parses ProxyCommand and ProxyJump', () => {
    const config = `
Host internal
HostName 10.0.0.5
ProxyCommand ssh -W %h:%p bastion
ProxyJump bastion.example.com
`
    const hosts = parseSshConfig(config)
    expect(hosts[0].proxyCommand).toBe('ssh -W %h:%p bastion')
    expect(hosts[0].proxyJump).toBe('bastion.example.com')
  })
  it('ignores comments and blank lines', () => {
    const config = `
# This is a comment
Host myserver
# Another comment
HostName example.com
User admin
`
    const hosts = parseSshConfig(config)
    expect(hosts).toHaveLength(1)
    expect(hosts[0].user).toBe('admin')
  })
  it('handles case-insensitive keywords', () => {
    // Keywords are lowercased; values keep their original case.
    const config = `
Host myserver
hostname EXAMPLE.COM
user Admin
port 3022
`
    const hosts = parseSshConfig(config)
    expect(hosts[0].hostname).toBe('EXAMPLE.COM')
    expect(hosts[0].user).toBe('Admin')
    expect(hosts[0].port).toBe(3022)
  })
  it('stops current block on Match directive', () => {
    const config = `
Host myserver
HostName example.com
Match host *.internal
User internal-admin
Host other
HostName other.com
`
    const hosts = parseSshConfig(config)
    expect(hosts).toHaveLength(2)
    expect(hosts[0].host).toBe('myserver')
    expect(hosts[1].host).toBe('other')
  })
  it('returns empty array for empty input', () => {
    expect(parseSshConfig('')).toEqual([])
  })
  it('uses first pattern from multi-pattern Host line', () => {
    const config = `
Host staging stage
HostName staging.example.com
`
    const hosts = parseSshConfig(config)
    expect(hosts).toHaveLength(1)
    expect(hosts[0].host).toBe('staging')
  })
  it('defaults port to 22 for invalid port values', () => {
    const config = `
Host myserver
Port notanumber
`
    const hosts = parseSshConfig(config)
    expect(hosts[0].port).toBe(22)
  })
})
describe('sshConfigHostsToTargets', () => {
  it('converts hosts to SshTarget objects', () => {
    const hosts = [{ host: 'myserver', hostname: '10.0.0.1', port: 22, user: 'deploy' }]
    const targets = sshConfigHostsToTargets(hosts, new Set())
    expect(targets).toHaveLength(1)
    expect(targets[0]).toMatchObject({
      label: 'myserver',
      host: '10.0.0.1',
      port: 22,
      username: 'deploy'
    })
    // Generated ids are random but always carry the ssh- prefix.
    expect(targets[0].id).toMatch(/^ssh-/)
  })
  it('uses host alias as hostname when HostName is missing', () => {
    const hosts = [{ host: 'myserver' }]
    const targets = sshConfigHostsToTargets(hosts, new Set())
    expect(targets[0].host).toBe('myserver')
  })
  it('skips hosts that are already imported', () => {
    // Dedup is by Host alias (label), not by the resolved hostname.
    const hosts = [
      { host: 'existing', hostname: '10.0.0.1' },
      { host: 'new-host', hostname: '10.0.0.2' }
    ]
    const targets = sshConfigHostsToTargets(hosts, new Set(['existing']))
    expect(targets).toHaveLength(1)
    expect(targets[0].label).toBe('new-host')
  })
  it('defaults username to empty string when not specified', () => {
    const hosts = [{ host: 'nouser', hostname: '10.0.0.1' }]
    const targets = sshConfigHostsToTargets(hosts, new Set())
    expect(targets[0].username).toBe('')
  })
  it('carries through identityFile, proxyCommand, and jumpHost', () => {
    const hosts = [
      {
        host: 'internal',
        hostname: '10.0.0.5',
        identityFile: '/home/user/.ssh/id_rsa',
        proxyCommand: 'ssh -W %h:%p bastion',
        proxyJump: 'bastion.example.com'
      }
    ]
    const targets = sshConfigHostsToTargets(hosts, new Set())
    expect(targets[0].identityFile).toBe('/home/user/.ssh/id_rsa')
    expect(targets[0].proxyCommand).toBe('ssh -W %h:%p bastion')
    expect(targets[0].jumpHost).toBe('bastion.example.com')
  })
})

View file

@ -0,0 +1,150 @@
import { readFileSync, existsSync } from 'fs'
import { join } from 'path'
import { homedir } from 'os'
import type { SshTarget } from '../../shared/ssh-types'
export type SshConfigHost = {
  host: string // Host alias (first concrete pattern on the Host line)
  hostname?: string
  port?: number
  user?: string
  identityFile?: string // absolute path; a leading ~ is already expanded
  proxyCommand?: string
  proxyJump?: string
}
/**
 * Parse an OpenSSH config file into structured host entries.
 *
 * - Handles Host blocks with single or multiple patterns. The first
 *   *concrete* (wildcard-free) pattern becomes the entry's alias; previously
 *   patterns[0] was used unconditionally, so `Host *.internal web1` produced
 *   a wildcard alias even though a concrete one was present.
 * - Accepts both `Keyword value` and `Keyword=value` separators, as OpenSSH
 *   does (ssh_config(5)).
 * - Ignores wildcard-only patterns (e.g. "Host *") and Match blocks.
 */
export function parseSshConfig(content: string): SshConfigHost[] {
  const hosts: SshConfigHost[] = []
  let current: SshConfigHost | null = null
  // Close out the block being built, if any.
  const flush = (): void => {
    if (current) {
      hosts.push(current)
    }
    current = null
  }
  for (const rawLine of content.split('\n')) {
    const line = rawLine.trim()
    if (!line || line.startsWith('#')) {
      continue
    }
    // `Keyword value` or `Keyword=value`. The `=` alternative is tried first
    // so that `Key = value` is not parsed as keyword `Key` with value `= value`.
    const match = line.match(/^(\S+?)(?:\s*=\s*|\s+)(.+)$/)
    if (!match) {
      continue
    }
    const [, keyword, rawValue] = match
    const key = keyword.toLowerCase()
    const value = rawValue.trim()
    if (key === 'host') {
      flush()
      // Use the first concrete (wildcard-free) pattern as the alias; skip
      // blocks made up solely of patterns (e.g. "Host *" or "Host *.*").
      const patterns = value.split(/\s+/)
      const concrete = patterns.find((p) => !p.includes('*') && !p.includes('?'))
      if (concrete) {
        current = { host: concrete }
      }
      continue
    }
    if (key === 'match') {
      // Match blocks are complex conditionals — close the current block and
      // ignore directives until the next Host line.
      flush()
      continue
    }
    if (!current) {
      continue
    }
    switch (key) {
      case 'hostname':
        current.hostname = value
        break
      case 'port':
        // Fall back to the SSH default when the value is not numeric.
        current.port = parseInt(value, 10) || 22
        break
      case 'user':
        current.user = value
        break
      case 'identityfile':
        current.identityFile = resolveHomePath(value)
        break
      case 'proxycommand':
        current.proxyCommand = value
        break
      case 'proxyjump':
        current.proxyJump = value
        break
    }
  }
  flush()
  return hosts
}
/** Expand a leading `~` to the user's home directory. */
function resolveHomePath(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return join(homedir(), filepath.slice(1))
  }
  return filepath
}
/**
 * Read and parse the user's ~/.ssh/config file.
 * Returns an empty array when the file does not exist or cannot be read.
 */
export function loadUserSshConfig(): SshConfigHost[] {
  const configPath = join(homedir(), '.ssh', 'config')
  if (!existsSync(configPath)) {
    return []
  }
  try {
    const content = readFileSync(configPath, 'utf-8')
    return parseSshConfig(content)
  } catch (err) {
    // Why: log the underlying cause (permissions, encoding, …) instead of
    // silently dropping it — the empty-array fallback stays best-effort.
    console.warn(`[ssh] Failed to read SSH config at ${configPath}`, err)
    return []
  }
}
/** Convert parsed SSH config hosts into SshTarget objects for import. */
export function sshConfigHostsToTargets(
  hosts: SshConfigHost[],
  existingTargetHosts: Set<string>
): SshTarget[] {
  const imported: SshTarget[] = []
  for (const entry of hosts) {
    const label = entry.host
    // Dedup on the Host alias so re-running the import never duplicates entries.
    if (existingTargetHosts.has(label)) {
      continue
    }
    imported.push({
      id: `ssh-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
      label,
      // Fall back to the alias itself when no HostName directive was present.
      host: entry.hostname || entry.host,
      port: entry.port ?? 22,
      username: entry.user ?? '',
      identityFile: entry.identityFile,
      proxyCommand: entry.proxyCommand,
      jumpHost: entry.proxyJump
    })
  }
  return imported
}

View file

@ -0,0 +1,84 @@
import type { SshTarget, SshConnectionState } from '../../shared/ssh-types'
import { SshConnection, type SshConnectionCallbacks } from './ssh-connection'
// ── Connection Manager ──────────────────────────────────────────────
// Why: extracted from ssh-connection.ts to keep each file under the
// 300-line oxlint max-lines threshold while preserving a clear
// single-responsibility boundary (connection lifecycle vs. pool management).
/**
 * Pool of SshConnections keyed by target id. Guarantees at most one live
 * connection (and at most one in-flight connect) per target.
 */
export class SshConnectionManager {
  private connections = new Map<string, SshConnection>()
  private callbacks: SshConnectionCallbacks
  // Why: two concurrent connect() calls for the same target would both pass
  // the "existing" check, create two SshConnections, and orphan the first.
  // This set prevents a second call from racing with an in-progress one.
  private connectingTargets = new Set<string>()
  constructor(callbacks: SshConnectionCallbacks) {
    this.callbacks = callbacks
  }
  /**
   * Connect to `target`, reusing an already-connected SshConnection if one
   * exists. Throws when a connect for the same target is still in flight.
   */
  async connect(target: SshTarget): Promise<SshConnection> {
    const existing = this.connections.get(target.id)
    if (existing?.getState().status === 'connected') {
      return existing
    }
    if (this.connectingTargets.has(target.id)) {
      throw new Error(`Connection to ${target.label} is already in progress`)
    }
    this.connectingTargets.add(target.id)
    try {
      // Tear down a stale (non-connected) connection before replacing it.
      if (existing) {
        await existing.disconnect()
      }
      const conn = new SshConnection(target, this.callbacks)
      this.connections.set(target.id, conn)
      try {
        await conn.connect()
      } catch (err) {
        // Why: a failed connection must not linger in the pool, or the next
        // connect() would find it and skip creating a fresh one.
        this.connections.delete(target.id)
        throw err
      }
      return conn
    } finally {
      this.connectingTargets.delete(target.id)
    }
  }
  /** Disconnect and forget the connection for `targetId` (no-op if absent). */
  async disconnect(targetId: string): Promise<void> {
    const conn = this.connections.get(targetId)
    if (!conn) {
      return
    }
    try {
      await conn.disconnect()
    } finally {
      // Why: previously a rejected disconnect() left the dead connection in
      // the pool, so later connect()/getState() calls operated on a zombie.
      this.connections.delete(targetId)
    }
  }
  getConnection(targetId: string): SshConnection | undefined {
    return this.connections.get(targetId)
  }
  /** Current state for `targetId`, or null when the target is untracked. */
  getState(targetId: string): SshConnectionState | null {
    return this.connections.get(targetId)?.getState() ?? null
  }
  /** Snapshot of every tracked connection's state, keyed by target id. */
  getAllStates(): Map<string, SshConnectionState> {
    const states = new Map<string, SshConnectionState>()
    for (const [id, conn] of this.connections) {
      states.set(id, conn.getState())
    }
    return states
  }
  /** Disconnect every connection, tolerating individual failures. */
  async disconnectAll(): Promise<void> {
    const disconnects = Array.from(this.connections.values()).map((c) => c.disconnect())
    await Promise.allSettled(disconnects)
    this.connections.clear()
  }
}

View file

@ -0,0 +1,141 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { SshConnectionStore } from './ssh-connection-store'
import type { SshTarget } from '../../shared/ssh-types'
// vi.mock factories are hoisted above imports, so the mock fns they reference
// must come from vi.hoisted rather than plain module-level consts.
const { loadUserSshConfigMock, sshConfigHostsToTargetsMock } = vi.hoisted(() => ({
  loadUserSshConfigMock: vi.fn(),
  sshConfigHostsToTargetsMock: vi.fn()
}))
vi.mock('./ssh-config-parser', () => ({
  loadUserSshConfig: loadUserSshConfigMock,
  sshConfigHostsToTargets: sshConfigHostsToTargetsMock
}))
/** In-memory stand-in for the persistence Store's SSH-target surface. */
function createMockStore() {
  const targets: SshTarget[] = []
  const findById = (id: string): SshTarget | undefined => targets.find((t) => t.id === id)
  return {
    getSshTargets: vi.fn(() => [...targets]),
    getSshTarget: vi.fn((id: string) => findById(id)),
    addSshTarget: vi.fn((target: SshTarget) => targets.push(target)),
    updateSshTarget: vi.fn((id: string, updates: Partial<Omit<SshTarget, 'id'>>) => {
      const found = findById(id)
      if (!found) {
        return null
      }
      Object.assign(found, updates)
      return { ...found }
    }),
    removeSshTarget: vi.fn((id: string) => {
      const idx = targets.findIndex((t) => t.id === id)
      if (idx !== -1) {
        targets.splice(idx, 1)
      }
    })
  }
}
describe('SshConnectionStore', () => {
  let mockStore: ReturnType<typeof createMockStore>
  let sshStore: SshConnectionStore
  beforeEach(() => {
    mockStore = createMockStore()
    // `as never` sidesteps the full Store interface; only the SSH-target
    // methods are exercised here.
    sshStore = new SshConnectionStore(mockStore as never)
    loadUserSshConfigMock.mockReset()
    sshConfigHostsToTargetsMock.mockReset()
  })
  it('listTargets delegates to store', () => {
    sshStore.listTargets()
    expect(mockStore.getSshTargets).toHaveBeenCalled()
  })
  it('getTarget delegates to store', () => {
    sshStore.getTarget('test-id')
    expect(mockStore.getSshTarget).toHaveBeenCalledWith('test-id')
  })
  it('addTarget generates an id and persists', () => {
    const target = sshStore.addTarget({
      label: 'My Server',
      host: 'example.com',
      port: 22,
      username: 'deploy'
    })
    expect(target.id).toMatch(/^ssh-/)
    expect(target.label).toBe('My Server')
    expect(mockStore.addSshTarget).toHaveBeenCalledWith(target)
  })
  it('updateTarget delegates to store', () => {
    const original: SshTarget = {
      id: 'ssh-1',
      label: 'Old Name',
      host: 'example.com',
      port: 22,
      username: 'user'
    }
    mockStore.addSshTarget(original)
    const result = sshStore.updateTarget('ssh-1', { label: 'New Name' })
    expect(result).toBeTruthy()
    expect(mockStore.updateSshTarget).toHaveBeenCalledWith('ssh-1', { label: 'New Name' })
  })
  it('removeTarget delegates to store', () => {
    sshStore.removeTarget('ssh-1')
    expect(mockStore.removeSshTarget).toHaveBeenCalledWith('ssh-1')
  })
  describe('importFromSshConfig', () => {
    it('imports new hosts from SSH config', () => {
      const configHosts = [{ host: 'staging', hostname: 'staging.example.com' }]
      const newTargets: SshTarget[] = [
        {
          id: 'ssh-new-1',
          label: 'staging',
          host: 'staging.example.com',
          port: 22,
          username: ''
        }
      ]
      loadUserSshConfigMock.mockReturnValue(configHosts)
      sshConfigHostsToTargetsMock.mockReturnValue(newTargets)
      const result = sshStore.importFromSshConfig()
      expect(result).toEqual(newTargets)
      expect(mockStore.addSshTarget).toHaveBeenCalledWith(newTargets[0])
    })
    it('passes existing target labels to avoid duplicates', () => {
      const existing: SshTarget = {
        id: 'ssh-existing',
        label: 'production',
        host: 'prod.example.com',
        port: 22,
        username: 'deploy'
      }
      mockStore.addSshTarget(existing)
      loadUserSshConfigMock.mockReturnValue([])
      sshConfigHostsToTargetsMock.mockReturnValue([])
      sshStore.importFromSshConfig()
      expect(sshConfigHostsToTargetsMock).toHaveBeenCalledWith([], new Set(['production']))
    })
    it('returns empty array when no new hosts found', () => {
      loadUserSshConfigMock.mockReturnValue([])
      sshConfigHostsToTargetsMock.mockReturnValue([])
      const result = sshStore.importFromSshConfig()
      expect(result).toEqual([])
    })
  })
})

View file

@ -0,0 +1,48 @@
import type { Store } from '../persistence'
import type { SshTarget } from '../../shared/ssh-types'
import { loadUserSshConfig, sshConfigHostsToTargets } from './ssh-config-parser'
/**
 * Thin facade over the persistence Store for SSH target CRUD, plus a
 * one-shot import of hosts from the user's ~/.ssh/config.
 */
export class SshConnectionStore {
  constructor(private store: Store) {}

  /** All persisted SSH targets. */
  listTargets(): SshTarget[] {
    return this.store.getSshTargets()
  }

  /** Look up a single target by id, or undefined when unknown. */
  getTarget(id: string): SshTarget | undefined {
    return this.store.getSshTarget(id)
  }

  /** Persist a new target, assigning it a generated id. */
  addTarget(target: Omit<SshTarget, 'id'>): SshTarget {
    // Timestamp plus a short random suffix keeps ids unique for a local store.
    const suffix = Math.random().toString(36).slice(2, 8)
    const created: SshTarget = { ...target, id: `ssh-${Date.now()}-${suffix}` }
    this.store.addSshTarget(created)
    return created
  }

  /** Apply partial updates to a target; null when the id is unknown. */
  updateTarget(id: string, updates: Partial<Omit<SshTarget, 'id'>>): SshTarget | null {
    return this.store.updateSshTarget(id, updates)
  }

  /** Delete a target by id. */
  removeTarget(id: string): void {
    this.store.removeSshTarget(id)
  }

  /**
   * Import hosts from ~/.ssh/config that don't already exist as targets
   * (matched by label). Returns the newly imported targets.
   */
  importFromSshConfig(): SshTarget[] {
    const knownLabels = new Set(this.store.getSshTargets().map((t) => t.label))
    const imported = sshConfigHostsToTargets(loadUserSshConfig(), knownLabels)
    imported.forEach((target) => this.store.addSshTarget(target))
    return imported
  }
}

View file

@ -0,0 +1,299 @@
import { Client as SshClient } from 'ssh2'
import type { ConnectConfig, ClientChannel } from 'ssh2'
import { type ChildProcess, execFileSync } from 'child_process'
import { readFileSync } from 'fs'
import { createHash } from 'crypto'
import type { Socket as NetSocket } from 'net'
import type { SshTarget, SshConnectionState } from '../../shared/ssh-types'
// Why: types live here (not ssh-connection.ts) to break a circular import.

/** Payload for prompting the user to accept an unknown host key. */
export type HostKeyVerifyRequest = {
  host: string
  ip: string
  fingerprint: string
  keyType: string
}

/** Keyboard-interactive auth prompts forwarded to the UI for answering. */
export type AuthChallengeRequest = {
  targetId: string
  name: string
  instructions: string
  prompts: { prompt: string; echo: boolean }[]
}

/** Hooks a connection uses to surface state changes and interactive auth. */
export type SshConnectionCallbacks = {
  onStateChange: (targetId: string, state: SshConnectionState) => void
  // Resolves true to accept the unknown host key, false to reject.
  onHostKeyVerify: (req: HostKeyVerifyRequest) => Promise<boolean>
  // Resolves with one response string per prompt.
  onAuthChallenge: (req: AuthChallengeRequest) => Promise<string[]>
  // Resolves null when the user cancels the password prompt.
  onPasswordPrompt: (targetId: string) => Promise<string | null>
}

// Retry/backoff tuning shared by the initial-connect and reconnect loops.
export const INITIAL_RETRY_ATTEMPTS = 5
export const INITIAL_RETRY_DELAY_MS = 2000
// Escalating delays; reconnection gives up after the table is exhausted.
export const RECONNECT_BACKOFF_MS = [1000, 2000, 5000, 5000, 10000, 10000, 10000, 30000, 30000]
export const AUTH_CHALLENGE_TIMEOUT_MS = 60_000
export const CONNECT_TIMEOUT_MS = 15_000
// errno codes that indicate a retryable network failure.
const TRANSIENT_ERROR_CODES = new Set([
  'ETIMEDOUT',
  'ECONNREFUSED',
  'ECONNRESET',
  'EHOSTUNREACH',
  'ENETUNREACH',
  'EAI_AGAIN'
])

/**
 * True when the error looks like a transient network failure worth retrying:
 * either its errno `code` is in TRANSIENT_ERROR_CODES, or its message
 * contains one of the common timeout/refused/reset markers.
 */
export function isTransientError(err: Error): boolean {
  const { code } = err as NodeJS.ErrnoException
  if (code && TRANSIENT_ERROR_CODES.has(code)) {
    return true
  }
  // Some libraries embed the errno in the message instead of setting `code`.
  return ['ETIMEDOUT', 'ECONNREFUSED', 'ECONNRESET'].some((marker) =>
    err.message.includes(marker)
  )
}
/** Resolves after `ms` milliseconds. */
export function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms)
  })
}
// Why: prevents shell injection when interpolating into ProxyCommand.
// Wraps the value in single quotes; each embedded single quote becomes
// the POSIX close-quote/escaped-quote/reopen sequence '\''.
export function shellEscape(s: string): string {
  const escaped = s.split("'").join("'\\''")
  return "'" + escaped + "'"
}
// Why: ssh2 doesn't check known_hosts. Without this, every connection blocks
// on a UI prompt that isn't wired up yet, causing a silent timeout.
function isHostKnown(host: string, port: number): boolean {
  // ssh-keygen -F uses "[host]:port" syntax for non-default ports.
  const lookup = port !== 22 ? `[${host}]:${port}` : host
  try {
    // Non-zero exit (host not found) or a missing ssh-keygen binary throws.
    execFileSync('ssh-keygen', ['-F', lookup], { stdio: 'pipe', timeout: 3000 })
    return true
  } catch {
    return false
  }
}
// ── Auth handler state (passed in by the connection) ────────────────

/** Mutable flags shared with SshConnection so each auth method is tried once
 * per connect attempt, plus a channel for reporting status transitions. */
export type AuthHandlerState = {
  agentAttempted: boolean
  keyAttempted: boolean
  // Reports intermediate statuses (e.g. 'auth-challenge') to the connection.
  setState: (status: string, error?: string) => void
}

/** Result of buildConnectConfig: the ssh2 config plus any transport
 * resources the caller is responsible for cleaning up on disconnect. */
export type ConnectConfigResult = {
  config: ConnectConfig
  // Non-null when a JumpHost connection was established; caller must end it.
  jumpClient: SshClient | null
  // Non-null when a ProxyCommand child was spawned; caller must kill it.
  proxyProcess: ChildProcess | null
}
/**
 * Build the ssh2 ConnectConfig for a target.
 *
 * Wires up host-key verification (known_hosts check first, then a UI
 * prompt), a custom auth sequence (agent → publickey →
 * keyboard-interactive → password), and optional ProxyCommand / JumpHost
 * transports.
 *
 * @param target    the SSH target to connect to
 * @param callbacks UI hooks for host-key prompts, auth challenges, passwords
 * @param authState per-attempt flags so each auth method is tried only once
 * @returns the config plus any jump client / proxy process the caller must
 *          clean up on disconnect
 */
export async function buildConnectConfig(
  target: SshTarget,
  callbacks: SshConnectionCallbacks,
  authState: AuthHandlerState
): Promise<ConnectConfigResult> {
  const config: ConnectConfig = {
    host: target.host,
    port: target.port,
    username: target.username,
    readyTimeout: CONNECT_TIMEOUT_MS,
    keepaliveInterval: 5000,
    keepaliveCountMax: 4,
    // Why: ssh2's hostVerifier callback form `(key, verify) => void` blocks
    // the handshake until `verify(true/false)` is called. We check
    // known_hosts first so trusted hosts connect without a UI prompt.
    hostVerifier: (key: Buffer, verify: (accept: boolean) => void) => {
      if (isHostKnown(target.host, target.port)) {
        verify(true)
        return
      }
      // Unknown host: surface the key fingerprint to the UI and wait for a
      // verdict. NOTE(review): keyType is hard-coded 'unknown' because only
      // the raw key buffer is available here — confirm whether the UI needs
      // the real algorithm name.
      const fingerprint = createHash('sha256').update(key).digest('base64')
      const keyType = 'unknown'
      authState.setState('host-key-verification')
      callbacks
        .onHostKeyVerify({
          host: target.host,
          ip: target.host,
          fingerprint,
          keyType
        })
        .then((accepted) => {
          verify(accepted)
        })
        .catch(() => {
          // Any failure in the prompt path is treated as rejection.
          verify(false)
        })
    },
    authHandler: (methodsLeft, _partialSuccess, callback) => {
      // ssh2 passes null on the first call, meaning "try whatever you want".
      // Treat it as all methods available.
      const methods = methodsLeft ?? ['publickey', 'keyboard-interactive', 'password']
      // Try auth methods in order: agent -> publickey -> keyboard-interactive -> password
      // The custom authHandler overrides ssh2's built-in sequence, so we must
      // explicitly try agent auth here -- the config.agent field alone is not enough.
      if (methods.includes('publickey') && process.env.SSH_AUTH_SOCK && !authState.agentAttempted) {
        authState.agentAttempted = true
        callback({
          type: 'agent' as const,
          agent: process.env.SSH_AUTH_SOCK,
          username: target.username
        } as never)
        return
      }
      if (methods.includes('publickey') && target.identityFile && !authState.keyAttempted) {
        authState.keyAttempted = true
        try {
          callback({
            type: 'publickey' as const,
            username: target.username,
            key: readFileSync(target.identityFile)
          } as never)
          return
        } catch {
          // Key file unreadable -- fall through to next method
        }
      }
      if (methods.includes('keyboard-interactive')) {
        callback({
          type: 'keyboard-interactive' as const,
          username: target.username,
          prompt: async (
            _name: string,
            instructions: string,
            _lang: string,
            prompts: { prompt: string; echo: boolean }[],
            finish: (responses: string[]) => void
          ) => {
            authState.setState('auth-challenge')
            // Why: a UI that never answers would hang the handshake forever;
            // race the challenge against a timeout and fail with empty
            // responses instead.
            const timeoutPromise = sleep(AUTH_CHALLENGE_TIMEOUT_MS).then(() => null)
            const responsePromise = callbacks.onAuthChallenge({
              targetId: target.id,
              name: _name,
              instructions,
              prompts
            })
            const responses = await Promise.race([responsePromise, timeoutPromise])
            if (!responses) {
              finish([])
              return
            }
            finish(responses)
          }
        } as never)
        return
      }
      if (methods.includes('password')) {
        callbacks
          .onPasswordPrompt(target.id)
          .then((password) => {
            // null means the user cancelled the prompt.
            if (password === null) {
              authState.setState('auth-failed', 'Authentication cancelled')
              callback(false as never)
              return
            }
            callback({
              type: 'password' as const,
              username: target.username,
              password
            } as never)
          })
          .catch(() => {
            callback(false as never)
          })
        return
      }
      authState.setState('auth-failed', 'No supported authentication methods')
      callback(false as never)
    }
  }
  // If an identity file is specified, try it for the initial attempt
  if (target.identityFile) {
    try {
      config.privateKey = readFileSync(target.identityFile)
    } catch {
      // Will fall through to other auth methods
    }
  }
  // Try SSH agent by default
  if (process.env.SSH_AUTH_SOCK) {
    config.agent = process.env.SSH_AUTH_SOCK
  }
  let proxyProcess: ChildProcess | null = null
  if (target.proxyCommand) {
    const { spawn } = await import('child_process')
    // Expand OpenSSH-style %h/%p/%r tokens, shell-escaped to prevent injection.
    const expanded = target.proxyCommand
      .replace(/%h/g, shellEscape(target.host))
      .replace(/%p/g, shellEscape(String(target.port)))
      .replace(/%r/g, shellEscape(target.username))
    proxyProcess = spawn('/bin/sh', ['-c', expanded], { stdio: ['pipe', 'pipe', 'pipe'] })
    // Why: a single PassThrough used for both directions creates a feedback loop —
    // proxy stdout data flows through the PassThrough and gets piped right back to
    // proxy stdin. Use a Duplex wrapper where reads come from stdout and writes
    // go to stdin independently.
    const { Duplex } = await import('stream')
    const stream = new Duplex({
      read() {},
      write(chunk, _encoding, cb) {
        proxyProcess!.stdin!.write(chunk, cb)
      }
    })
    proxyProcess.stdout!.on('data', (data) => stream.push(data))
    proxyProcess.stdout!.on('end', () => stream.push(null))
    config.sock = stream as unknown as NetSocket
  }
  // Wire JumpHost: establish an intermediate SSH connection and forward a channel.
  // Why: the jump client is returned to the caller so it can be destroyed on
  // disconnect — otherwise the intermediate TCP connection leaks.
  // NOTE(review): the jump hop reuses the target's username and assumes
  // agent auth on port 22 — confirm that matches the intended config surface.
  let jumpClient: SshClient | null = null
  if (target.jumpHost && !target.proxyCommand) {
    jumpClient = new SshClient()
    const jumpConn = jumpClient
    await new Promise<void>((resolve, reject) => {
      jumpConn.on('ready', () => resolve())
      jumpConn.on('error', (err) => reject(err))
      jumpConn.connect({
        host: target.jumpHost!,
        port: 22,
        username: target.username,
        agent: process.env.SSH_AUTH_SOCK ?? undefined,
        readyTimeout: CONNECT_TIMEOUT_MS
      })
    })
    const forwardedChannel = await new Promise<ClientChannel>((resolve, reject) => {
      jumpConn.forwardOut('127.0.0.1', 0, target.host, target.port, (err, channel) => {
        if (err) {
          reject(err)
        } else {
          resolve(channel)
        }
      })
    })
    config.sock = forwardedChannel as unknown as NetSocket
  }
  return { config, jumpClient, proxyProcess }
}

View file

@ -0,0 +1,221 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
// Shared mutable state driving the ssh2 Client mock below. Tests reset these
// in beforeEach and flip connectBehavior to simulate success vs failure.
let eventHandlers: Map<string, (...args: unknown[]) => void>
let connectBehavior: 'ready' | 'error' = 'ready'
let connectErrorMessage = ''

// Mock ssh2: connect() asynchronously fires either the 'ready' or 'error'
// handler previously registered via on(), mimicking the real client's
// event-driven handshake.
vi.mock('ssh2', () => ({
  Client: class MockSshClient {
    on(event: string, handler: (...args: unknown[]) => void) {
      eventHandlers?.set(event, handler)
    }
    connect() {
      // setTimeout(…, 0) keeps delivery asynchronous, like a real socket.
      setTimeout(() => {
        if (connectBehavior === 'error') {
          eventHandlers?.get('error')?.(new Error(connectErrorMessage))
        } else {
          eventHandlers?.get('ready')?.()
        }
      }, 0)
    }
    end() {}
    destroy() {}
    exec() {}
    sftp() {}
  }
}))

// Stub the system-ssh fallback so no real process is ever spawned.
vi.mock('./ssh-system-fallback', () => ({
  spawnSystemSsh: vi.fn().mockReturnValue({
    stdin: {},
    stdout: {},
    stderr: {},
    kill: vi.fn(),
    onExit: vi.fn(),
    pid: 99999
  })
}))
import { SshConnection, SshConnectionManager, type SshConnectionCallbacks } from './ssh-connection'
import type { SshTarget } from '../../shared/ssh-types'
/** Builds a minimal SshTarget fixture, with optional field overrides. */
function createTarget(overrides?: Partial<SshTarget>): SshTarget {
  const base: SshTarget = {
    id: 'target-1',
    label: 'Test Server',
    host: 'example.com',
    port: 22,
    username: 'deploy'
  }
  return { ...base, ...overrides }
}
// Builds SshConnectionCallbacks with happy-path mocks (host key accepted,
// challenge answered, password supplied); individual hooks can be overridden.
function createCallbacks(overrides?: Partial<SshConnectionCallbacks>): SshConnectionCallbacks {
  return {
    onStateChange: vi.fn(),
    onHostKeyVerify: vi.fn().mockResolvedValue(true),
    onAuthChallenge: vi.fn().mockResolvedValue(['123456']),
    onPasswordPrompt: vi.fn().mockResolvedValue('password'),
    ...overrides
  }
}
// Lifecycle tests for a single SshConnection against the mocked ssh2 client.
describe('SshConnection', () => {
  beforeEach(() => {
    // Fresh handler map and default "successful connect" behavior per test.
    eventHandlers = new Map()
    connectBehavior = 'ready'
    connectErrorMessage = ''
  })

  it('transitions to connected on successful connect', async () => {
    const callbacks = createCallbacks()
    const conn = new SshConnection(createTarget(), callbacks)
    await conn.connect()
    expect(conn.getState().status).toBe('connected')
    expect(callbacks.onStateChange).toHaveBeenCalledWith(
      'target-1',
      expect.objectContaining({ status: 'connected' })
    )
  })

  it('transitions through connecting → connected states', async () => {
    const states: string[] = []
    const callbacks = createCallbacks({
      onStateChange: vi.fn((_id, state) => states.push(state.status))
    })
    const conn = new SshConnection(createTarget(), callbacks)
    await conn.connect()
    expect(states).toContain('connecting')
    expect(states).toContain('connected')
  })

  it('reports error state on connection failure', async () => {
    // 'Connection refused' is non-transient (no errno code, no ECONNREFUSED
    // substring), so connect() fails without retrying.
    connectBehavior = 'error'
    connectErrorMessage = 'Connection refused'
    const callbacks = createCallbacks()
    const conn = new SshConnection(createTarget(), callbacks)
    await expect(conn.connect()).rejects.toThrow('Connection refused')
    expect(conn.getState().status).toBe('error')
  })

  it('disconnect cleans up and sets state to disconnected', async () => {
    const callbacks = createCallbacks()
    const conn = new SshConnection(createTarget(), callbacks)
    await conn.connect()
    await conn.disconnect()
    expect(conn.getState().status).toBe('disconnected')
  })

  it('getTarget returns a copy of the target', () => {
    const target = createTarget()
    const conn = new SshConnection(target, createCallbacks())
    const returned = conn.getTarget()
    expect(returned).toEqual(target)
    expect(returned).not.toBe(target)
  })

  it('getState returns a copy of the state', () => {
    const conn = new SshConnection(createTarget(), createCallbacks())
    const state1 = conn.getState()
    const state2 = conn.getState()
    expect(state1).toEqual(state2)
    expect(state1).not.toBe(state2)
  })

  it('throws when connecting a disposed connection', async () => {
    // disconnect() permanently disposes the instance.
    const conn = new SshConnection(createTarget(), createCallbacks())
    await conn.disconnect()
    await expect(conn.connect()).rejects.toThrow('Connection disposed')
  })
})
// Tests for SshConnectionManager: a pool of SshConnections keyed by target id.
describe('SshConnectionManager', () => {
  beforeEach(() => {
    eventHandlers = new Map()
    connectBehavior = 'ready'
    connectErrorMessage = ''
  })

  it('connect creates and stores a connection', async () => {
    const mgr = new SshConnectionManager(createCallbacks())
    const target = createTarget()
    const conn = await mgr.connect(target)
    expect(conn.getState().status).toBe('connected')
    expect(mgr.getConnection(target.id)).toBe(conn)
  })

  it('getState returns connection state', async () => {
    const mgr = new SshConnectionManager(createCallbacks())
    const target = createTarget()
    await mgr.connect(target)
    const state = mgr.getState(target.id)
    expect(state).toBeTruthy()
    expect(state!.status).toBe('connected')
  })

  it('getState returns null for unknown targets', () => {
    const mgr = new SshConnectionManager(createCallbacks())
    expect(mgr.getState('unknown')).toBeNull()
  })

  it('disconnect removes the connection', async () => {
    const mgr = new SshConnectionManager(createCallbacks())
    const target = createTarget()
    await mgr.connect(target)
    await mgr.disconnect(target.id)
    expect(mgr.getConnection(target.id)).toBeUndefined()
  })

  it('disconnect is a no-op for unknown targets', async () => {
    // Must not throw.
    const mgr = new SshConnectionManager(createCallbacks())
    await mgr.disconnect('unknown')
  })

  it('reuses existing connected connection for same target', async () => {
    const mgr = new SshConnectionManager(createCallbacks())
    const target = createTarget()
    const conn1 = await mgr.connect(target)
    const conn2 = await mgr.connect(target)
    expect(conn2).toBe(conn1)
  })

  it('getAllStates returns all connection states', async () => {
    const mgr = new SshConnectionManager(createCallbacks())
    await mgr.connect(createTarget({ id: 'a' }))
    await mgr.connect(createTarget({ id: 'b' }))
    const states = mgr.getAllStates()
    expect(states.size).toBe(2)
    expect(states.get('a')?.status).toBe('connected')
    expect(states.get('b')?.status).toBe('connected')
  })

  it('disconnectAll disconnects all connections', async () => {
    const mgr = new SshConnectionManager(createCallbacks())
    await mgr.connect(createTarget({ id: 'a' }))
    await mgr.connect(createTarget({ id: 'b' }))
    await mgr.disconnectAll()
    expect(mgr.getConnection('a')).toBeUndefined()
    expect(mgr.getConnection('b')).toBeUndefined()
  })
})

View file

@ -0,0 +1,371 @@
import { Client as SshClient } from 'ssh2'
import type { ChildProcess } from 'child_process'
import type { ClientChannel, SFTPWrapper } from 'ssh2'
import type { SshTarget, SshConnectionState, SshConnectionStatus } from '../../shared/ssh-types'
import { spawnSystemSsh, type SystemSshProcess } from './ssh-system-fallback'
import {
INITIAL_RETRY_ATTEMPTS,
INITIAL_RETRY_DELAY_MS,
RECONNECT_BACKOFF_MS,
CONNECT_TIMEOUT_MS,
isTransientError,
sleep,
buildConnectConfig,
type SshConnectionCallbacks
} from './ssh-connection-utils'
// Why: type definitions live in ssh-connection-utils.ts to break a circular
// import. Re-exported here so existing import sites keep working.
export type {
HostKeyVerifyRequest,
AuthChallengeRequest,
SshConnectionCallbacks
} from './ssh-connection-utils'
/**
 * A single managed SSH connection to one target.
 *
 * Owns the ssh2 client plus any auxiliary transports (jump-host client,
 * ProxyCommand child process, or the system-ssh fallback process), retries
 * transient failures during the initial connect, and auto-reconnects with
 * backoff after an established connection drops. All state transitions are
 * broadcast through callbacks.onStateChange.
 */
export class SshConnection {
  private client: SshClient | null = null
  /** Why: the jump host client must be tracked so it can be torn down on
   * disconnect otherwise the intermediate TCP connection leaks. */
  private jumpClient: SshClient | null = null
  // Child process backing a ProxyCommand transport, if any.
  private proxyProcess: ChildProcess | null = null
  // Fallback transport used when ssh2 cannot handle the auth method.
  private systemSsh: SystemSshProcess | null = null
  private state: SshConnectionState
  private callbacks: SshConnectionCallbacks
  private target: SshTarget
  private reconnectTimer: ReturnType<typeof setTimeout> | null = null
  // Set by disconnect(); once true the instance can never be used again.
  private disposed = false
  // Per-attempt flags so the authHandler tries agent/key auth only once each.
  private agentAttempted = false
  private keyAttempted = false

  constructor(target: SshTarget, callbacks: SshConnectionCallbacks) {
    this.target = target
    this.callbacks = callbacks
    this.state = {
      targetId: target.id,
      status: 'disconnected',
      error: null,
      reconnectAttempt: 0
    }
  }

  /** Snapshot of the current state (a copy — safe for callers to hold). */
  getState(): SshConnectionState {
    return { ...this.state }
  }

  /** The underlying ssh2 client, or null when not connected. */
  getClient(): SshClient | null {
    return this.client
  }

  /** Copy of the target this connection was created for. */
  getTarget(): SshTarget {
    return { ...this.target }
  }

  /** Open an exec channel. Used by relay deployment to run commands on the remote. */
  async exec(command: string): Promise<ClientChannel> {
    const client = this.client
    if (!client) {
      throw new Error('Not connected')
    }
    return new Promise((resolve, reject) => {
      client.exec(command, (err, channel) => {
        if (err) {
          reject(err)
        } else {
          resolve(channel)
        }
      })
    })
  }

  /** Open an SFTP session for file transfers (relay deployment). */
  async sftp(): Promise<SFTPWrapper> {
    const client = this.client
    if (!client) {
      throw new Error('Not connected')
    }
    return new Promise((resolve, reject) => {
      client.sftp((err, sftp) => {
        if (err) {
          reject(err)
        } else {
          resolve(sftp)
        }
      })
    })
  }

  /**
   * Establish the connection, retrying transient network errors up to
   * INITIAL_RETRY_ATTEMPTS times with a fixed delay. Non-transient errors
   * (e.g. auth failures) are rethrown immediately.
   * @throws if the connection was already disposed, or all attempts fail.
   */
  async connect(): Promise<void> {
    if (this.disposed) {
      throw new Error('Connection disposed')
    }
    let lastError: Error | null = null
    for (let attempt = 0; attempt < INITIAL_RETRY_ATTEMPTS; attempt++) {
      try {
        await this.attemptConnect()
        return
      } catch (err) {
        lastError = err instanceof Error ? err : new Error(String(err))
        if (!isTransientError(lastError)) {
          throw lastError
        }
        // No sleep after the final attempt — fail fast instead.
        if (attempt < INITIAL_RETRY_ATTEMPTS - 1) {
          await sleep(INITIAL_RETRY_DELAY_MS)
        }
      }
    }
    const finalError = lastError ?? new Error('Connection failed')
    this.setState('error', finalError.message)
    throw finalError
  }

  // One connection attempt: build the config, connect, race a hard timeout.
  private async attemptConnect(): Promise<void> {
    this.setState('connecting')
    this.agentAttempted = false
    this.keyAttempted = false
    // Why: clean up resources from a prior failed attempt before overwriting.
    // Without this, a retry after timeout/auth-failure orphans the old jump
    // host TCP connection and proxy child process.
    if (this.jumpClient) {
      this.jumpClient.end()
      this.jumpClient = null
    }
    if (this.proxyProcess) {
      this.proxyProcess.kill()
      this.proxyProcess = null
    }
    const { config, jumpClient, proxyProcess } = await this.buildConfig()
    this.jumpClient = jumpClient
    this.proxyProcess = proxyProcess
    return new Promise<void>((resolve, reject) => {
      const client = new SshClient()
      // Guards against double-settling when the timeout races ready/error.
      let settled = false
      const timeout = setTimeout(() => {
        if (!settled) {
          settled = true
          client.destroy()
          const msg = `Connection timed out after ${CONNECT_TIMEOUT_MS}ms`
          this.setState('error', msg)
          reject(new Error(msg))
        }
      }, CONNECT_TIMEOUT_MS)
      // Why: host key verification is now handled inside the hostVerifier
      // callback in buildConnectConfig (ssh-connection-utils.ts). The
      // callback form `(key, verify) => void` blocks the handshake until
      // the user accepts/rejects, so no separate 'handshake' listener is
      // needed here.
      client.on('ready', () => {
        if (settled) {
          return
        }
        settled = true
        clearTimeout(timeout)
        this.client = client
        this.setState('connected')
        this.setupDisconnectHandler(client)
        resolve()
      })
      client.on('error', (err) => {
        if (settled) {
          return
        }
        settled = true
        clearTimeout(timeout)
        this.setState('error', err.message)
        reject(err)
      })
      client.connect(config)
    })
  }

  private async buildConfig() {
    // Why: config-building logic extracted to ssh-connection-utils.ts (max-lines).
    return buildConnectConfig(this.target, this.callbacks, {
      agentAttempted: this.agentAttempted,
      keyAttempted: this.keyAttempted,
      setState: (status: string, error?: string) => {
        this.setState(status as SshConnectionStatus, error)
      }
    })
  }

  // Why: both `end` and `close` fire on disconnect. If reconnect succeeds
  // between the two events, the second handler would null out the *new*
  // connection. Guarding on `this.client === client` prevents that.
  private setupDisconnectHandler(client: SshClient): void {
    const handleDisconnect = () => {
      if (this.disposed || this.client !== client) {
        return
      }
      this.client = null
      this.scheduleReconnect()
    }
    client.on('end', handleDisconnect)
    client.on('close', handleDisconnect)
    client.on('error', (err) => {
      if (this.disposed || this.client !== client) {
        return
      }
      console.warn(`[ssh] Connection error for ${this.target.label}: ${err.message}`)
      this.client = null
      this.scheduleReconnect()
    })
  }

  // Schedule one reconnect attempt using RECONNECT_BACKOFF_MS; gives up
  // (status 'reconnection-failed') once the backoff table is exhausted.
  // No-op if a timer is already pending or the connection is disposed.
  private scheduleReconnect(): void {
    if (this.disposed || this.reconnectTimer) {
      return
    }
    const attempt = this.state.reconnectAttempt
    if (attempt >= RECONNECT_BACKOFF_MS.length) {
      this.setState('reconnection-failed', 'Max reconnection attempts reached')
      return
    }
    this.setState('reconnecting')
    const delay = RECONNECT_BACKOFF_MS[attempt]
    this.reconnectTimer = setTimeout(async () => {
      this.reconnectTimer = null
      if (this.disposed) {
        return
      }
      try {
        await this.attemptConnect()
        // Why: reset the counter and re-broadcast so the UI shows attempt 0.
        // attemptConnect already calls setState('connected'), but the attempt
        // counter must be zeroed *before* so the broadcast carries the right value.
        this.state.reconnectAttempt = 0
        this.setState('connected')
      } catch {
        // Why: increment before scheduleReconnect so the setState('reconnecting')
        // call inside it broadcasts the updated attempt number to the UI.
        this.state.reconnectAttempt++
        this.scheduleReconnect()
      }
    }, delay)
  }

  /** Fall back to system SSH binary when ssh2 cannot handle auth (FIDO2, ControlMaster). */
  async connectViaSystemSsh(): Promise<SystemSshProcess> {
    if (this.disposed) {
      throw new Error('Connection disposed')
    }
    // Why: if connectViaSystemSsh is called again after a prior failed attempt,
    // the old process may still be running. Without cleanup, overwriting
    // this.systemSsh below would orphan the old process.
    if (this.systemSsh) {
      this.systemSsh.kill()
      this.systemSsh = null
    }
    this.setState('connecting')
    try {
      const proc = spawnSystemSsh(this.target)
      this.systemSsh = proc
      // Why: two onExit handlers are registered — one for the initial handshake
      // (reject the promise on early exit) and one for post-connect reconnection.
      // Without a settled flag, an early exit during handshake would fire both,
      // causing the reconnection handler to schedule a reconnect for a connection
      // that was never established.
      let settled = false
      // Why: verify the SSH connection succeeded before reporting connected.
      // Wait for relay sentinel output or a non-zero exit.
      // NOTE(review): a clean exit (code 0) before any stdout output neither
      // resolves nor rejects here — it only fails via the timeout. Confirm
      // whether an early code-0 exit should reject immediately.
      await new Promise<void>((resolve, reject) => {
        const timeout = setTimeout(() => {
          settled = true
          reject(new Error('System SSH connection timed out'))
        }, CONNECT_TIMEOUT_MS)
        proc.stdout.once('data', () => {
          settled = true
          clearTimeout(timeout)
          resolve()
        })
        proc.onExit((code) => {
          if (settled) {
            return
          }
          settled = true
          clearTimeout(timeout)
          if (code !== 0) {
            reject(new Error(`System SSH exited with code ${code}`))
          }
        })
      })
      this.setState('connected')
      // Why: unlike ssh2 Client which emits end/close, the system SSH process
      // only signals disconnection through its exit event. Without this handler
      // an unexpected exit would leave the connection in 'connected' state with
      // no underlying transport.
      proc.onExit((_code) => {
        if (!this.disposed && this.systemSsh === proc) {
          this.systemSsh = null
          this.scheduleReconnect()
        }
      })
      return proc
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err)
      this.setState('error', msg)
      throw err
    }
  }

  /** Tear down every transport, cancel pending reconnects, and mark the
   * connection permanently disposed. Safe to call more than once. */
  async disconnect(): Promise<void> {
    this.disposed = true
    if (this.reconnectTimer) {
      clearTimeout(this.reconnectTimer)
      this.reconnectTimer = null
    }
    if (this.client) {
      this.client.end()
      this.client = null
    }
    // Why: the jump host client holds an open TCP connection to the
    // intermediate host. Failing to close it would leak the socket.
    if (this.jumpClient) {
      this.jumpClient.end()
      this.jumpClient = null
    }
    if (this.proxyProcess) {
      this.proxyProcess.kill()
      this.proxyProcess = null
    }
    if (this.systemSsh) {
      this.systemSsh.kill()
      this.systemSsh = null
    }
    this.setState('disconnected')
  }

  // Central state mutation: replaces the state object and broadcasts a copy.
  // Note: `error` is reset to null unless explicitly provided.
  private setState(status: SshConnectionStatus, error?: string): void {
    this.state = {
      ...this.state,
      status,
      error: error ?? null
    }
    this.callbacks.onStateChange(this.target.id, { ...this.state })
  }
}
// Why: extracted to ssh-connection-manager.ts to stay under 300-line max-lines.
export { SshConnectionManager } from './ssh-connection-manager'

View file

@ -0,0 +1,129 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
import { SshPortForwardManager } from './ssh-port-forward'
// Builds a fake SshConnection exposing just getClient(). The client's
// forwardOut either fails with the supplied error or yields a mock channel
// whose pipe() chains, letting socket.pipe(channel).pipe(socket) run.
function createMockConn(forwardOutErr?: Error) {
  const mockChannel = {
    pipe: vi.fn().mockReturnThis(),
    on: vi.fn(),
    close: vi.fn()
  }
  const mockClient = {
    forwardOut: vi.fn().mockImplementation((_bindAddr, _bindPort, _destHost, _destPort, cb) => {
      if (forwardOutErr) {
        cb(forwardOutErr, null)
      } else {
        cb(null, mockChannel)
      }
    })
  }
  return {
    getClient: vi.fn().mockReturnValue(mockClient),
    // Exposed so tests can assert on the inner mocks directly.
    mockClient,
    mockChannel
  }
}
// Mock the net module to avoid real TCP listeners. listen() invokes its
// callback immediately so addForward resolves without binding a port.
vi.mock('net', () => {
  const listeners = new Map<string, (...args: unknown[]) => void>()
  return {
    createServer: vi.fn().mockImplementation((connectionHandler) => {
      const server = {
        listen: vi.fn().mockImplementation((_port, _host, cb) => cb()),
        close: vi.fn(),
        on: vi.fn().mockImplementation((event: string, handler: (...args: unknown[]) => void) => {
          listeners.set(event, handler)
        }),
        removeListener: vi.fn(),
        // Exposed so tests can drive the connection/error handlers directly.
        _connectionHandler: connectionHandler,
        _listeners: listeners
      }
      return server
    })
  }
})
// Tests for SshPortForwardManager bookkeeping: add/list/remove/dispose of
// local port forwards (TCP layer mocked above).
describe('SshPortForwardManager', () => {
  let manager: SshPortForwardManager

  beforeEach(() => {
    manager = new SshPortForwardManager()
  })

  it('adds a port forward and returns entry', async () => {
    const conn = createMockConn()
    const entry = await manager.addForward('conn-1', conn as never, 3000, 'localhost', 8080)
    expect(entry).toMatchObject({
      connectionId: 'conn-1',
      localPort: 3000,
      remoteHost: 'localhost',
      remotePort: 8080
    })
    expect(entry.id).toBeDefined()
  })

  it('throws when SSH client is not connected', async () => {
    // getClient() returning null must reject before any listener is created.
    const conn = { getClient: vi.fn().mockReturnValue(null) }
    await expect(
      manager.addForward('conn-1', conn as never, 3000, 'localhost', 8080)
    ).rejects.toThrow('SSH connection is not established')
  })

  it('lists forwards filtered by connectionId', async () => {
    const conn = createMockConn()
    await manager.addForward('conn-1', conn as never, 3000, 'localhost', 8080)
    await manager.addForward('conn-2', conn as never, 3001, 'localhost', 8081)
    await manager.addForward('conn-1', conn as never, 3002, 'localhost', 8082)
    expect(manager.listForwards('conn-1')).toHaveLength(2)
    expect(manager.listForwards('conn-2')).toHaveLength(1)
    // No filter → every forward.
    expect(manager.listForwards()).toHaveLength(3)
  })

  it('removes a forward by id', async () => {
    const conn = createMockConn()
    const entry = await manager.addForward('conn-1', conn as never, 3000, 'localhost', 8080)
    expect(manager.removeForward(entry.id)).toBe(true)
    expect(manager.listForwards()).toHaveLength(0)
  })

  it('returns false when removing nonexistent forward', () => {
    expect(manager.removeForward('nonexistent')).toBe(false)
  })

  it('removes all forwards for a connection', async () => {
    const conn = createMockConn()
    await manager.addForward('conn-1', conn as never, 3000, 'localhost', 8080)
    await manager.addForward('conn-1', conn as never, 3001, 'localhost', 8081)
    await manager.addForward('conn-2', conn as never, 3002, 'localhost', 8082)
    manager.removeAllForwards('conn-1')
    expect(manager.listForwards()).toHaveLength(1)
    expect(manager.listForwards('conn-2')).toHaveLength(1)
  })

  it('dispose removes all forwards', async () => {
    const conn = createMockConn()
    await manager.addForward('conn-1', conn as never, 3000, 'localhost', 8080)
    await manager.addForward('conn-2', conn as never, 3001, 'localhost', 8081)
    manager.dispose()
    expect(manager.listForwards()).toHaveLength(0)
  })

  it('stores label in the entry', async () => {
    const conn = createMockConn()
    const entry = await manager.addForward(
      'conn-1',
      conn as never,
      3000,
      'localhost',
      8080,
      'Web Server'
    )
    expect(entry.label).toBe('Web Server')
  })
})

View file

@ -0,0 +1,117 @@
import { createServer, type Server, type Socket } from 'net'
import type { SshConnection } from './ssh-connection'
/** Public description of one active local→remote port forward. */
export type PortForwardEntry = {
  id: string
  connectionId: string
  localPort: number
  remoteHost: string
  remotePort: number
  // Optional human-readable name shown in the UI.
  label?: string
}

// Internal bookkeeping: the TCP listener plus every live forwarded socket,
// so teardown can destroy both.
type ActiveForward = {
  entry: PortForwardEntry
  server: Server
  activeSockets: Set<Socket>
}
/**
 * Manages local TCP listeners that tunnel incoming connections over an SSH
 * client to a remote host/port (local port forwarding, like `ssh -L`).
 */
export class SshPortForwardManager {
  private forwards = new Map<string, ActiveForward>()
  private nextId = 1

  /**
   * Start forwarding 127.0.0.1:localPort to remoteHost:remotePort over the
   * given connection.
   * @throws if the SSH client is not established or the port cannot be bound.
   */
  async addForward(
    connectionId: string,
    conn: SshConnection,
    localPort: number,
    remoteHost: string,
    remotePort: number,
    label?: string
  ): Promise<PortForwardEntry> {
    const id = `pf-${this.nextId++}`
    const entry: PortForwardEntry = {
      id,
      connectionId,
      localPort,
      remoteHost,
      remotePort,
      label
    }
    const client = conn.getClient()
    if (!client) {
      throw new Error('SSH connection is not established')
    }
    const activeSockets = new Set<Socket>()
    const server = createServer((socket) => {
      activeSockets.add(socket)
      socket.on('close', () => activeSockets.delete(socket))
      // Why: a socket without an 'error' listener turns an ECONNRESET from
      // the local client into an uncaught 'error' event that crashes the app.
      socket.on('error', () => socket.destroy())
      client.forwardOut('127.0.0.1', localPort, remoteHost, remotePort, (err, channel) => {
        if (err) {
          socket.destroy()
          return
        }
        socket.pipe(channel).pipe(socket)
        channel.on('close', () => socket.destroy())
        socket.on('close', () => channel.close())
      })
    })
    await new Promise<void>((resolve, reject) => {
      server.on('error', reject)
      server.listen(localPort, '127.0.0.1', () => {
        server.removeListener('error', reject)
        // Why: once the one-shot reject listener is removed, the server has
        // no 'error' listener at all — any later error would be an uncaught
        // 'error' event and crash the process. Keep a persistent handler.
        server.on('error', (err) => {
          console.warn(`[ssh] port forward ${id} server error: ${err.message}`)
        })
        resolve()
      })
    })
    this.forwards.set(id, { entry, server, activeSockets })
    return entry
  }

  /** Tear down one forward: destroy live sockets, close the listener.
   * Returns false when the id is unknown. */
  removeForward(id: string): boolean {
    const forward = this.forwards.get(id)
    if (!forward) {
      return false
    }
    // Destroy live sockets first so server.close() can complete.
    for (const socket of forward.activeSockets) {
      socket.destroy()
    }
    forward.server.close()
    this.forwards.delete(id)
    return true
  }

  /** List forwards, optionally filtered to a single connection. */
  listForwards(connectionId?: string): PortForwardEntry[] {
    const entries: PortForwardEntry[] = []
    for (const { entry } of this.forwards.values()) {
      if (!connectionId || entry.connectionId === connectionId) {
        entries.push(entry)
      }
    }
    return entries
  }

  /** Remove every forward belonging to one connection (used on disconnect). */
  removeAllForwards(connectionId: string): void {
    // Why: removeForward deletes from this.forwards. Collecting IDs first
    // avoids mutating the map during iteration, which is fragile if
    // removeForward ever gains cascading cleanup.
    const toRemove = [...this.forwards.entries()]
      .filter(([, { entry }]) => entry.connectionId === connectionId)
      .map(([id]) => id)
    for (const id of toRemove) {
      this.removeForward(id)
    }
  }

  /** Tear down every forward (app shutdown). */
  dispose(): void {
    const ids = [...this.forwards.keys()]
    for (const id of ids) {
      this.removeForward(id)
    }
  }
}

View file

@ -0,0 +1,244 @@
import { createReadStream } from 'fs'
import type { SFTPWrapper, ClientChannel } from 'ssh2'
import type { SshConnection } from './ssh-connection'
import { RELAY_SENTINEL, RELAY_SENTINEL_TIMEOUT_MS } from './relay-protocol'
import type { MultiplexerTransport } from './ssh-channel-multiplexer'
// ── SFTP upload helpers ───────────────────────────────────────────────
/**
 * Recursively mirror a local directory tree into `remoteDir` over SFTP,
 * depth-first. Directories are created via mkdirSftp; files are streamed
 * via uploadFile.
 */
export async function uploadDirectory(
  sftp: SFTPWrapper,
  localDir: string,
  remoteDir: string
): Promise<void> {
  // fs/path are imported lazily here, matching the module's pattern.
  const fs = await import('fs')
  const path = await import('path')
  for (const name of fs.readdirSync(localDir)) {
    const source = path.join(localDir, name)
    // Remote paths always use forward slashes regardless of host OS.
    const destination = `${remoteDir}/${name}`
    if (fs.statSync(source).isDirectory()) {
      await mkdirSftp(sftp, destination)
      await uploadDirectory(sftp, source, destination)
    } else {
      await uploadFile(sftp, source, destination)
    }
  }
}
/** Create a remote directory, treating "already exists" as success. */
export function mkdirSftp(sftp: SFTPWrapper, path: string): Promise<void> {
  return new Promise((resolve, reject) => {
    sftp.mkdir(path, (err) => {
      if (!err) {
        resolve()
        return
      }
      // SFTP status 4 (SSH_FX_FAILURE) is what servers report for an
      // existing directory — tolerate it so re-deploys are idempotent.
      if ((err as { code?: number }).code === 4) {
        resolve()
      } else {
        reject(err)
      }
    })
  })
}
/**
 * Stream one local file to the remote over SFTP. Resolves when the SFTP
 * write stream closes; rejects if either stream errors.
 */
export function uploadFile(
  sftp: SFTPWrapper,
  localPath: string,
  remotePath: string
): Promise<void> {
  return new Promise((resolve, reject) => {
    const source = createReadStream(localPath)
    const sink = sftp.createWriteStream(remotePath)
    source.on('error', reject)
    sink.on('error', reject)
    sink.on('close', resolve)
    source.pipe(sink)
  })
}
// ── Sentinel detection ────────────────────────────────────────────────
/**
 * Wait for the relay's readiness sentinel on a freshly-launched exec channel.
 *
 * Buffers stdout until RELAY_SENTINEL appears, then resolves with a
 * MultiplexerTransport wired to the channel's stdin/stdout. Rejects if the
 * channel closes first, or if the sentinel does not arrive within
 * RELAY_SENTINEL_TIMEOUT_MS (captured stderr is included for diagnostics).
 */
export function waitForSentinel(channel: ClientChannel): Promise<MultiplexerTransport> {
  return new Promise<MultiplexerTransport>((resolve, reject) => {
    let sentinelReceived = false
    let stderrOutput = ''
    let bufferedStdout = Buffer.alloc(0)
    // Why: bytes that arrive in the same chunk as the sentinel are already
    // relay-protocol data, but no transport subscriber can exist until after
    // resolve() — promise reactions run in a later microtask. The previous
    // synchronous dispatch therefore always ran with zero subscribers and
    // silently dropped those bytes. Hold them here and flush on first onData.
    let pendingAfterSentinel: Buffer | null = null
    const timeout = setTimeout(() => {
      if (!sentinelReceived) {
        channel.close()
        reject(
          new Error(
            `Relay failed to start within ${RELAY_SENTINEL_TIMEOUT_MS / 1000}s.${stderrOutput ? ` stderr: ${stderrOutput.trim()}` : ''}`
          )
        )
      }
    }, RELAY_SENTINEL_TIMEOUT_MS)
    // Cap retained stderr (keep the tail — the most recent output is the most
    // diagnostic) so a chatty process cannot grow memory without bound.
    const MAX_BUFFER_CAP = 64 * 1024
    channel.stderr.on('data', (data: Buffer) => {
      stderrOutput += data.toString('utf-8')
      if (stderrOutput.length > MAX_BUFFER_CAP) {
        stderrOutput = stderrOutput.slice(-MAX_BUFFER_CAP)
      }
    })
    // Channel closing before the sentinel means the relay died on startup.
    channel.on('close', () => {
      if (!sentinelReceived) {
        clearTimeout(timeout)
        reject(
          new Error(
            `Relay process exited before ready.${stderrOutput ? ` stderr: ${stderrOutput.trim()}` : ''}`
          )
        )
      }
    })
    const dataCallbacks: ((data: Buffer) => void)[] = []
    const closeCallbacks: (() => void)[] = []
    channel.on('data', (data: Buffer) => {
      // Post-sentinel: forward every chunk straight to transport subscribers.
      if (sentinelReceived) {
        for (const cb of dataCallbacks) {
          cb(data)
        }
        return
      }
      // Pre-sentinel: accumulate stdout until the sentinel string appears.
      bufferedStdout = Buffer.concat([bufferedStdout, data])
      const text = bufferedStdout.toString('utf-8')
      const sentinelIdx = text.indexOf(RELAY_SENTINEL)
      if (sentinelIdx === -1) {
        return
      }
      sentinelReceived = true
      clearTimeout(timeout)
      // Why: slice by byte offset, not character offset — the buffered bytes
      // before the sentinel may contain multi-byte UTF-8 sequences.
      const afterSentinel = bufferedStdout.subarray(
        Buffer.byteLength(text.substring(0, sentinelIdx + RELAY_SENTINEL.length), 'utf-8')
      )
      if (afterSentinel.length > 0) {
        pendingAfterSentinel = afterSentinel
      }
      const transport: MultiplexerTransport = {
        write: (buf: Buffer) => channel.stdin.write(buf),
        onData: (cb) => {
          dataCallbacks.push(cb)
          // Fix: deliver sentinel-adjacent protocol bytes to the first
          // subscriber instead of dispatching them before any subscriber
          // could possibly have registered.
          if (pendingAfterSentinel) {
            const chunk = pendingAfterSentinel
            pendingAfterSentinel = null
            cb(chunk)
          }
        },
        onClose: (cb) => {
          closeCallbacks.push(cb)
        }
      }
      channel.on('close', () => {
        for (const cb of closeCallbacks) {
          cb()
        }
      })
      resolve(transport)
    })
  })
}
// ── Remote command execution ──────────────────────────────────────────
const EXEC_TIMEOUT_MS = 30_000

/**
 * Run a command on the remote host and resolve with its full stdout.
 * Rejects on non-zero exit (including captured stderr in the message) or
 * when no close event arrives within EXEC_TIMEOUT_MS.
 */
export async function execCommand(conn: SshConnection, command: string): Promise<string> {
  const channel = await conn.exec(command)
  return new Promise((resolve, reject) => {
    const out: string[] = []
    const errOut: string[] = []
    let finished = false
    const timer = setTimeout(() => {
      if (finished) {
        return
      }
      finished = true
      channel.close()
      reject(new Error(`Command "${command}" timed out after ${EXEC_TIMEOUT_MS / 1000}s`))
    }, EXEC_TIMEOUT_MS)
    channel.on('data', (chunk: Buffer) => {
      out.push(chunk.toString('utf-8'))
    })
    channel.stderr.on('data', (chunk: Buffer) => {
      errOut.push(chunk.toString('utf-8'))
    })
    channel.on('close', (code: number) => {
      if (finished) {
        return
      }
      finished = true
      clearTimeout(timer)
      if (code === 0) {
        resolve(out.join(''))
      } else {
        reject(new Error(`Command "${command}" failed (exit ${code}): ${errOut.join('').trim()}`))
      }
    })
  })
}
// ── Remote Node.js resolution ─────────────────────────────────────────
// Why: non-login SSH shells (the default for `exec`) don't source
// .bashrc/.zshrc, so node installed via nvm/fnm/Homebrew isn't in PATH.
// We try common locations and fall back to a login-shell `which`.
/**
 * Find a usable Node.js binary on the remote host.
 *
 * Non-login SSH exec channels skip .bashrc/.zshrc, so node installed via
 * nvm/fnm/Homebrew is often absent from PATH. Probe well-known locations
 * first, then fall back to a full login shell. Throws if nothing is found.
 */
export async function resolveRemoteNodePath(conn: SshConnection): Promise<string> {
  // The nvm glob is expanded by the remote shell before `ls` runs.
  const probes = [
    'command -v node 2>/dev/null',
    'command -v /usr/local/bin/node 2>/dev/null',
    'command -v /opt/homebrew/bin/node 2>/dev/null',
    // `sort -V` is version sort; plain alphabetical order would rank
    // v9 above v18, picking the wrong nvm install.
    'ls -1 $HOME/.nvm/versions/node/*/bin/node 2>/dev/null | sort -V | tail -1',
    'command -v $HOME/.local/bin/node 2>/dev/null',
    'command -v $HOME/.fnm/aliases/default/bin/node 2>/dev/null'
  ]
  const firstLine = (raw: string): string => raw.trim().split('\n')[0]
  try {
    const found = firstLine(await execCommand(conn, probes.join(' || ')))
    if (found) {
      console.log(`[ssh-relay] Found node at: ${found}`)
      return found
    }
  } catch {
    // Probe command failed — try the login shell below.
  }
  // Last resort: source the full login profile. Kept as a separate exec
  // because `bash -lc` can hang on interactive-heavy shell configs (conda
  // prompts, etc.); a timeout here is then clearly attributable to the
  // login-shell attempt.
  try {
    console.log('[ssh-relay] Trying login shell to find node...')
    const found = firstLine(await execCommand(conn, "bash -lc 'command -v node' 2>/dev/null"))
    if (found) {
      console.log(`[ssh-relay] Found node via login shell: ${found}`)
      return found
    }
  } catch {
    // Give up and report below.
  }
  throw new Error(
    'Node.js not found on remote host. Orca relay requires Node.js 18+. ' +
      'Install Node.js on the remote and try again.'
  )
}

View file

@ -0,0 +1,255 @@
import { join } from 'path'
import { existsSync } from 'fs'
import { app } from 'electron'
import type { SshConnection } from './ssh-connection'
import {
RELAY_VERSION,
RELAY_REMOTE_DIR,
parseUnameToRelayPlatform,
type RelayPlatform
} from './relay-protocol'
import type { MultiplexerTransport } from './ssh-channel-multiplexer'
import {
uploadDirectory,
waitForSentinel,
execCommand,
resolveRemoteNodePath
} from './ssh-relay-deploy-helpers'
import { shellEscape } from './ssh-connection-utils'
export type RelayDeployResult = {
  transport: MultiplexerTransport
  platform: RelayPlatform
}

/**
 * Deploy the relay package to the remote host and launch it.
 *
 * Flow: detect OS/arch via `uname -sm` → skip the upload when the deployed
 * version marker already matches → otherwise SFTP the package and build
 * native deps → exec the relay and wait for its ready sentinel. Resolves
 * with the stdio transport for the channel multiplexer plus the platform.
 */
export async function deployAndLaunchRelay(
  conn: SshConnection,
  onProgress?: (status: string) => void
): Promise<RelayDeployResult> {
  onProgress?.('Detecting remote platform...')
  console.log('[ssh-relay] Detecting remote platform...')
  const platform = await detectRemotePlatform(conn)
  if (platform === null) {
    throw new Error(
      'Unsupported remote platform. Orca relay supports: linux-x64, linux-arm64, darwin-x64, darwin-arm64.'
    )
  }
  console.log(`[ssh-relay] Platform: ${platform}`)
  // SFTP never expands `~`, so resolve the remote home directory explicitly.
  const home = (await execCommand(conn, 'echo $HOME')).trim()
  // Defensive: a malicious or misconfigured remote could answer with shell
  // metacharacters; require a plain absolute path.
  if (!home || !/^\/[a-zA-Z0-9/_.-]+$/.test(home)) {
    throw new Error(`Remote $HOME is not a valid path: ${home.slice(0, 100)}`)
  }
  const relayDir = `${home}/${RELAY_REMOTE_DIR}/relay-v${RELAY_VERSION}`
  console.log(`[ssh-relay] Remote dir: ${relayDir}`)
  onProgress?.('Checking existing relay...')
  const localDir = getLocalRelayPath(platform)
  const upToDate = await checkRelayExists(conn, relayDir, localDir)
  console.log(`[ssh-relay] Already deployed: ${upToDate}`)
  if (!upToDate) {
    onProgress?.('Uploading relay...')
    console.log('[ssh-relay] Uploading relay...')
    await uploadRelay(conn, platform, relayDir)
    console.log('[ssh-relay] Upload complete')
    onProgress?.('Installing native dependencies...')
    console.log('[ssh-relay] Installing node-pty...')
    await installNativeDeps(conn, relayDir)
    console.log('[ssh-relay] Native deps installed')
  }
  onProgress?.('Starting relay...')
  console.log('[ssh-relay] Launching relay...')
  const transport = await launchRelay(conn, relayDir)
  console.log('[ssh-relay] Relay started successfully')
  return { transport, platform }
}
/** Map the remote's `uname -sm` output (e.g. "Linux x86_64") to a relay platform. */
async function detectRemotePlatform(conn: SshConnection): Promise<RelayPlatform | null> {
  const raw = await execCommand(conn, 'uname -sm')
  const [os, arch] = raw.trim().split(/\s+/)
  if (!os || !arch) {
    return null
  }
  return parseUnameToRelayPlatform(os, arch)
}
/**
 * True when the remote already has relay.js and its `.version` marker
 * matches what we would deploy. Any SSH/exec failure counts as "not
 * deployed" so the caller simply re-uploads.
 */
async function checkRelayExists(
  conn: SshConnection,
  remoteDir: string,
  localRelayDir: string | null
): Promise<boolean> {
  try {
    const probe = await execCommand(
      conn,
      `test -f ${shellEscape(`${remoteDir}/relay.js`)} && echo OK || echo MISSING`
    )
    if (probe.trim() !== 'OK') {
      return false
    }
    // The local .version file embeds a content hash, so any code change
    // forces a re-deploy even without bumping RELAY_VERSION. When it cannot
    // be read, compare against the bare RELAY_VERSION instead.
    let expectedVersion = RELAY_VERSION
    if (localRelayDir) {
      try {
        const { readFileSync } = await import('fs')
        expectedVersion = readFileSync(join(localRelayDir, '.version'), 'utf-8').trim()
      } catch {
        /* fall back to RELAY_VERSION */
      }
    }
    const deployedVersion = await execCommand(
      conn,
      `cat ${shellEscape(`${remoteDir}/.version`)} 2>/dev/null || echo MISSING`
    )
    return deployedVersion.trim() === expectedVersion
  } catch {
    return false
  }
}
/**
 * SFTP the local relay package for `platform` into `remoteDir`, restore
 * the execute bit on any bundled node binary, and write the `.version`
 * marker that checkRelayExists compares against.
 */
async function uploadRelay(
  conn: SshConnection,
  platform: RelayPlatform,
  remoteDir: string
): Promise<void> {
  const localRelayDir = getLocalRelayPath(platform)
  if (!localRelayDir || !existsSync(localRelayDir)) {
    throw new Error(
      `Relay package for ${platform} not found at ${localRelayDir}. ` +
        `This may be a packaging issue — try reinstalling Orca.`
    )
  }
  // Ensure the destination exists before streaming files into it.
  await execCommand(conn, `mkdir -p ${shellEscape(remoteDir)}`)
  const sftp = await conn.sftp()
  try {
    await uploadDirectory(sftp, localRelayDir, remoteDir)
  } finally {
    sftp.end()
  }
  // SFTP uploads do not preserve execute bits; restore it on the node
  // binary when one is present (the trailing `true` keeps this best-effort).
  await execCommand(conn, `chmod +x ${shellEscape(`${remoteDir}/node`)} 2>/dev/null; true`)
  // The marker carries a content hash when the local build produced one, so
  // code changes trigger re-deploys even without a RELAY_VERSION bump.
  let versionString = RELAY_VERSION
  const localVersionFile = join(localRelayDir, '.version')
  if (existsSync(localVersionFile)) {
    const { readFileSync } = await import('fs')
    versionString = readFileSync(localVersionFile, 'utf-8').trim()
  }
  // Write the marker via SFTP rather than a shell command: the version
  // string never passes through a shell, so it cannot inject or escape
  // single-quoted interpolation.
  const markerSftp = await conn.sftp()
  try {
    await new Promise<void>((resolve, reject) => {
      const stream = markerSftp.createWriteStream(`${remoteDir}/.version`)
      stream.on('close', resolve)
      stream.on('error', reject)
      stream.end(versionString)
    })
  } finally {
    markerSftp.end()
  }
}
// Why: node-pty is a native addon that can't be bundled by esbuild. It must
// be compiled on the remote host against its Node.js version and OS. We run
// `npm init -y && npm install node-pty` in the relay directory so
// `require('node-pty')` resolves to the local node_modules.
/**
 * Build node-pty in the remote relay directory. node-pty is a native addon
 * that must be compiled against the remote's Node.js and OS, so it cannot
 * be bundled. Failures are logged but never fatal.
 */
async function installNativeDeps(conn: SshConnection, remoteDir: string): Promise<void> {
  const resolvedNode = await resolveRemoteNodePath(conn)
  // npm's child processes (node-pty's prebuild script spawns `node`) need
  // node's bin directory on PATH even though npm itself is invoked by its
  // full path — otherwise that child exits 127.
  const binDir = resolvedNode.replace(/\/node$/, '')
  const installCmd = `export PATH=${shellEscape(binDir)}:$PATH && cd ${shellEscape(remoteDir)} && npm init -y --silent 2>/dev/null && npm install node-pty 2>&1`
  // SFTP uploads drop execute bits; node-pty's prebuilt spawn-helper must be
  // executable for posix_spawnp to fork the PTY process.
  const fixPermsCmd = `find ${shellEscape(`${remoteDir}/node_modules/node-pty/prebuilds`)} -name spawn-helper -exec chmod +x {} + 2>/dev/null; true`
  try {
    await execCommand(conn, installCmd)
    await execCommand(conn, fixPermsCmd)
  } catch (err) {
    // Best effort: the install fails when build tools (python, make, g++)
    // are missing on the remote. The relay still starts; pty.spawn surfaces
    // the error instead of blocking startup here.
    console.warn('[ssh-relay] Failed to install node-pty:', (err as Error).message)
  }
}
/**
 * Locate the local relay package for `platform`.
 * Resolution order: ORCA_RELAY_PATH override → packaged app resources →
 * dev build output (`pnpm build:relay` → out/relay/{platform}). Null when
 * none of the candidates exist on disk.
 */
function getLocalRelayPath(platform: RelayPlatform): string | null {
  const candidates: string[] = []
  if (process.env.ORCA_RELAY_PATH) {
    candidates.push(join(process.env.ORCA_RELAY_PATH, platform))
  }
  candidates.push(join(app.getAppPath(), 'resources', 'relay', platform))
  candidates.push(join(app.getAppPath(), 'out', 'relay', platform))
  for (const candidate of candidates) {
    if (existsSync(candidate)) {
      return candidate
    }
  }
  return null
}
/**
 * Exec the relay on the remote's own Node.js and wait for its ready
 * sentinel. Using the system node (rather than bundling a binary) keeps the
 * uploaded package tiny; resolveRemoteNodePath handles the PATH quirks of
 * non-login SSH shells.
 */
async function launchRelay(conn: SshConnection, remoteDir: string): Promise<MultiplexerTransport> {
  const nodePath = await resolveRemoteNodePath(conn)
  // Both paths originate on the remote host and could carry shell
  // metacharacters; single-quote escaping blocks injection.
  const command = `cd ${shellEscape(remoteDir)} && ${shellEscape(nodePath)} relay.js --grace-time 60`
  const channel = await conn.exec(command)
  return waitForSentinel(channel)
}

View file

@ -0,0 +1,133 @@
import { describe, expect, it, vi, beforeEach } from 'vitest'
// Why: vi.mock factories are hoisted above imports, so the mock functions
// they close over must come from vi.hoisted to exist when the factories run.
const { existsSyncMock, spawnMock } = vi.hoisted(() => ({
  existsSyncMock: vi.fn(),
  spawnMock: vi.fn()
}))
// Stub fs/child_process so the module under test never touches the real
// filesystem or spawns a real process.
vi.mock('fs', () => ({
  existsSync: existsSyncMock
}))
vi.mock('child_process', () => ({
  spawn: spawnMock
}))
import { findSystemSsh, spawnSystemSsh } from './ssh-system-fallback'
import type { SshTarget } from '../../shared/ssh-types'
/** Build a minimal SshTarget fixture, with optional field overrides. */
function createTarget(overrides?: Partial<SshTarget>): SshTarget {
  const base: SshTarget = {
    id: 'target-1',
    label: 'Test Server',
    host: 'example.com',
    port: 22,
    username: 'deploy'
  }
  return { ...base, ...overrides }
}
// Unit tests for findSystemSsh with the mocked fs.existsSync above.
describe('findSystemSsh', () => {
  beforeEach(() => {
    // Each test installs its own existsSync behavior.
    existsSyncMock.mockReset()
  })
  it('returns the first existing ssh path', () => {
    // Only /usr/bin/ssh "exists"; the candidate scan should stop there.
    existsSyncMock.mockImplementation((p: string) => p === '/usr/bin/ssh')
    expect(findSystemSsh()).toBe('/usr/bin/ssh')
  })
  it('returns null when no ssh binary is found', () => {
    existsSyncMock.mockReturnValue(false)
    expect(findSystemSsh()).toBeNull()
  })
})
// Unit tests for spawnSystemSsh: argument construction, binary discovery
// failure, and the returned process wrapper shape.
describe('spawnSystemSsh', () => {
  // Hand-rolled ChildProcess stand-in returned by the mocked spawn().
  let mockProc: {
    stdin: object
    stdout: object
    stderr: object
    pid: number
    on: ReturnType<typeof vi.fn>
    kill: ReturnType<typeof vi.fn>
  }
  beforeEach(() => {
    existsSyncMock.mockReset()
    spawnMock.mockReset()
    mockProc = {
      stdin: { write: vi.fn(), end: vi.fn() },
      stdout: { on: vi.fn() },
      stderr: { on: vi.fn() },
      pid: 12345,
      on: vi.fn(),
      kill: vi.fn()
    }
    spawnMock.mockReturnValue(mockProc)
    // Default: only /usr/bin/ssh exists, so spawnSystemSsh finds a binary.
    existsSyncMock.mockImplementation((p: string) => p === '/usr/bin/ssh')
  })
  it('spawns ssh with correct arguments for basic target', () => {
    spawnSystemSsh(createTarget())
    expect(spawnMock).toHaveBeenCalledWith(
      '/usr/bin/ssh',
      expect.arrayContaining(['-T', 'deploy@example.com']),
      expect.objectContaining({ stdio: ['pipe', 'pipe', 'pipe'] })
    )
  })
  it('includes port flag when not 22', () => {
    spawnSystemSsh(createTarget({ port: 2222 }))
    // spawnMock.mock.calls[0][1] is the args array passed to spawn().
    const args = spawnMock.mock.calls[0][1] as string[]
    expect(args).toContain('-p')
    expect(args).toContain('2222')
  })
  it('does not include port flag when port is 22', () => {
    spawnSystemSsh(createTarget({ port: 22 }))
    const args = spawnMock.mock.calls[0][1] as string[]
    expect(args).not.toContain('-p')
  })
  it('includes identity file flag', () => {
    spawnSystemSsh(createTarget({ identityFile: '/home/user/.ssh/id_ed25519' }))
    const args = spawnMock.mock.calls[0][1] as string[]
    expect(args).toContain('-i')
    expect(args).toContain('/home/user/.ssh/id_ed25519')
  })
  it('includes jump host flag', () => {
    spawnSystemSsh(createTarget({ jumpHost: 'bastion.example.com' }))
    const args = spawnMock.mock.calls[0][1] as string[]
    expect(args).toContain('-J')
    expect(args).toContain('bastion.example.com')
  })
  it('includes proxy command flag', () => {
    spawnSystemSsh(createTarget({ proxyCommand: 'ssh -W %h:%p bastion' }))
    const args = spawnMock.mock.calls[0][1] as string[]
    expect(args).toContain('-o')
    expect(args).toContain('ProxyCommand=ssh -W %h:%p bastion')
  })
  it('throws when no system ssh is found', () => {
    existsSyncMock.mockReturnValue(false)
    expect(() => spawnSystemSsh(createTarget())).toThrow('No system ssh binary found')
  })
  it('returns a process wrapper with kill and onExit', () => {
    const result = spawnSystemSsh(createTarget())
    expect(result.pid).toBe(12345)
    expect(typeof result.kill).toBe('function')
    expect(typeof result.onExit).toBe('function')
  })
})

View file

@ -0,0 +1,101 @@
import { spawn, type ChildProcess } from 'child_process'
import { existsSync } from 'fs'
import type { SshTarget } from '../../shared/ssh-types'
// Candidate locations for the OpenSSH client binary, probed in order.
// NOTE(review): the bare 'ssh.exe' entry is checked by existsSync relative
// to process.cwd(), not PATH — confirm that is intended on Windows.
const SYSTEM_SSH_PATHS =
  process.platform === 'win32'
    ? ['C:\\Windows\\System32\\OpenSSH\\ssh.exe', 'ssh.exe']
    : ['/usr/bin/ssh', '/usr/local/bin/ssh', '/opt/homebrew/bin/ssh']
// Minimal process handle exposed to callers: stdio streams carrying the
// relay protocol plus lifecycle controls (kill / exit notification / pid).
export type SystemSshProcess = {
  stdin: NodeJS.WritableStream
  stdout: NodeJS.ReadableStream
  stderr: NodeJS.ReadableStream
  kill: () => void
  onExit: (cb: (code: number | null) => void) => void
  pid: number | undefined
}
/**
 * Locate a usable system ssh binary by probing the well-known install
 * paths in order. Returns the first path that exists, or null if none do.
 */
export function findSystemSsh(): string | null {
  const found = SYSTEM_SSH_PATHS.find((candidate) => existsSync(candidate))
  return found ?? null
}
/**
 * Spawn a system ssh process connected to `target`.
 * Used when ssh2 cannot handle the auth method (FIDO2, ControlMaster).
 *
 * The returned process's stdin/stdout carry the relay's JSON-RPC protocol,
 * exactly like an ssh2 channel would.
 *
 * @throws when no system ssh binary can be located.
 */
export function spawnSystemSsh(target: SshTarget): SystemSshProcess {
  const sshPath = findSystemSsh()
  if (sshPath === null) {
    throw new Error(
      'No system ssh binary found. Install OpenSSH to use FIDO2 keys or ControlMaster.'
    )
  }
  const child = spawn(sshPath, buildSshArgs(target), {
    stdio: ['pipe', 'pipe', 'pipe'],
    windowsHide: true
  })
  return wrapChildProcess(child)
}
/** Translate an SshTarget into an OpenSSH argument vector. */
function buildSshArgs(target: SshTarget): string[] {
  // -T disables PTY allocation: stdin/stdout carry the relay protocol,
  // not an interactive shell.
  const args: string[] = ['-o', 'BatchMode=no', '-T']
  if (target.port !== 22) {
    args.push('-p', `${target.port}`)
  }
  if (target.identityFile) {
    args.push('-i', target.identityFile)
  }
  if (target.jumpHost) {
    args.push('-J', target.jumpHost)
  }
  if (target.proxyCommand) {
    args.push('-o', `ProxyCommand=${target.proxyCommand}`)
  }
  args.push(target.username ? `${target.username}@${target.host}` : target.host)
  return args
}
/** Adapt a raw ChildProcess into the SystemSshProcess transport shape. */
function wrapChildProcess(proc: ChildProcess): SystemSshProcess {
  const kill = (): void => {
    try {
      proc.kill('SIGTERM')
    } catch {
      // Already exited — nothing to do.
    }
  }
  return {
    stdin: proc.stdin!,
    stdout: proc.stdout!,
    stderr: proc.stderr!,
    pid: proc.pid,
    kill,
    onExit: (cb) => {
      proc.on('exit', (code) => cb(code))
    }
  }
}

View file

@ -9,6 +9,7 @@ import { ORCA_BROWSER_PARTITION } from '../../shared/constants'
import { registerRepoHandlers } from '../ipc/repos'
import { registerWorktreeHandlers } from '../ipc/worktrees'
import { registerPtyHandlers } from '../ipc/pty'
import { registerSshHandlers } from '../ipc/ssh'
import { browserManager } from '../browser/browser-manager'
import type { OrcaRuntimeService } from '../runtime/orca-runtime'
import {
@ -29,6 +30,7 @@ export function attachMainWindowServices(
registerRepoHandlers(mainWindow, store)
registerWorktreeHandlers(mainWindow, store)
registerPtyHandlers(mainWindow, runtime, getSelectedCodexHomePath)
registerSshHandlers(store, () => mainWindow)
registerFileDropRelay(mainWindow)
setupAutoUpdater(mainWindow, {
getLastUpdateCheckAt: () => store.getUI().lastUpdateCheckAt,

View file

@ -216,6 +216,12 @@ export type PreloadApi = {
pickDirectory: () => Promise<string | null>
clone: (args: { url: string; destination: string }) => Promise<Repo>
cloneAbort: () => Promise<void>
addRemote: (args: {
connectionId: string
remotePath: string
displayName?: string
kind?: 'git' | 'folder'
}) => Promise<Repo>
onCloneProgress: (callback: (data: { phase: string; percent: number }) => void) => () => void
getGitUsername: (args: { repoId: string }) => Promise<string>
getBaseRefDefault: (args: { repoId: string }) => Promise<string>
@ -243,6 +249,7 @@ export type PreloadApi = {
cwd?: string
env?: Record<string, string>
command?: string
connectionId?: string | null
}) => Promise<{ id: string }>
write: (id: string, data: string) => void
resize: (id: string, cols: number, rows: number) => void
@ -364,36 +371,46 @@ export type PreloadApi = {
claudeUsage: ClaudeUsageApi
codexUsage: CodexUsageApi
fs: {
readDir: (args: { dirPath: string }) => Promise<DirEntry[]>
readDir: (args: { dirPath: string; connectionId?: string }) => Promise<DirEntry[]>
readFile: (args: {
filePath: string
connectionId?: string
}) => Promise<{ content: string; isBinary: boolean; isImage?: boolean; mimeType?: string }>
writeFile: (args: { filePath: string; content: string }) => Promise<void>
createFile: (args: { filePath: string }) => Promise<void>
createDir: (args: { dirPath: string }) => Promise<void>
rename: (args: { oldPath: string; newPath: string }) => Promise<void>
deletePath: (args: { targetPath: string }) => Promise<void>
writeFile: (args: { filePath: string; content: string; connectionId?: string }) => Promise<void>
createFile: (args: { filePath: string; connectionId?: string }) => Promise<void>
createDir: (args: { dirPath: string; connectionId?: string }) => Promise<void>
rename: (args: { oldPath: string; newPath: string; connectionId?: string }) => Promise<void>
deletePath: (args: { targetPath: string; connectionId?: string }) => Promise<void>
authorizeExternalPath: (args: { targetPath: string }) => Promise<void>
stat: (args: {
filePath: string
connectionId?: string
}) => Promise<{ size: number; isDirectory: boolean; mtime: number }>
listFiles: (args: { rootPath: string }) => Promise<string[]>
search: (args: SearchOptions) => Promise<SearchResult>
watchWorktree: (args: { worktreePath: string }) => Promise<void>
unwatchWorktree: (args: { worktreePath: string }) => Promise<void>
listFiles: (args: { rootPath: string; connectionId?: string }) => Promise<string[]>
search: (args: SearchOptions & { connectionId?: string }) => Promise<SearchResult>
watchWorktree: (args: { worktreePath: string; connectionId?: string }) => Promise<void>
unwatchWorktree: (args: { worktreePath: string; connectionId?: string }) => Promise<void>
onFsChanged: (callback: (payload: FsChangedPayload) => void) => () => void
}
git: {
status: (args: { worktreePath: string }) => Promise<{ entries: GitStatusEntry[] }>
conflictOperation: (args: { worktreePath: string }) => Promise<GitConflictOperation>
status: (args: {
worktreePath: string
connectionId?: string
}) => Promise<{ entries: GitStatusEntry[] }>
conflictOperation: (args: {
worktreePath: string
connectionId?: string
}) => Promise<GitConflictOperation>
diff: (args: {
worktreePath: string
filePath: string
staged: boolean
connectionId?: string
}) => Promise<GitDiffResult>
branchCompare: (args: {
worktreePath: string
baseRef: string
connectionId?: string
}) => Promise<GitBranchCompareResult>
branchDiff: (args: {
worktreePath: string
@ -405,16 +422,38 @@ export type PreloadApi = {
}
filePath: string
oldPath?: string
connectionId?: string
}) => Promise<GitDiffResult>
stage: (args: { worktreePath: string; filePath: string }) => Promise<void>
bulkStage: (args: { worktreePath: string; filePaths: string[] }) => Promise<void>
unstage: (args: { worktreePath: string; filePath: string }) => Promise<void>
bulkUnstage: (args: { worktreePath: string; filePaths: string[] }) => Promise<void>
discard: (args: { worktreePath: string; filePath: string }) => Promise<void>
stage: (args: {
worktreePath: string
filePath: string
connectionId?: string
}) => Promise<void>
bulkStage: (args: {
worktreePath: string
filePaths: string[]
connectionId?: string
}) => Promise<void>
unstage: (args: {
worktreePath: string
filePath: string
connectionId?: string
}) => Promise<void>
bulkUnstage: (args: {
worktreePath: string
filePaths: string[]
connectionId?: string
}) => Promise<void>
discard: (args: {
worktreePath: string
filePath: string
connectionId?: string
}) => Promise<void>
remoteFileUrl: (args: {
worktreePath: string
relativePath: string
line: number
connectionId?: string
}) => Promise<string | null>
}
ui: {
@ -460,4 +499,55 @@ export type PreloadApi = {
setPollingInterval: (ms: number) => Promise<void>
onUpdate: (callback: (state: RateLimitState) => void) => () => void
}
ssh: {
listTargets: () => Promise<unknown[]>
addTarget: (args: { target: Record<string, unknown> }) => Promise<unknown>
updateTarget: (args: { id: string; updates: Record<string, unknown> }) => Promise<unknown>
removeTarget: (args: { id: string }) => Promise<void>
importConfig: () => Promise<unknown[]>
connect: (args: { targetId: string }) => Promise<unknown>
disconnect: (args: { targetId: string }) => Promise<void>
getState: (args: { targetId: string }) => Promise<unknown>
testConnection: (args: {
targetId: string
}) => Promise<{ success: boolean; error?: string; state?: unknown }>
onStateChanged: (callback: (data: { targetId: string; state: unknown }) => void) => () => void
onHostKeyVerify: (
callback: (data: {
host: string
ip: string
fingerprint: string
keyType: string
responseChannel: string
}) => void
) => () => void
respondHostKeyVerify: (args: { channel: string; accepted: boolean }) => void
onAuthChallenge: (
callback: (data: {
targetId: string
name: string
instructions: string
prompts: { prompt: string; echo: boolean }[]
responseChannel: string
}) => void
) => () => void
respondAuthChallenge: (args: { channel: string; responses: string[] }) => void
onPasswordPrompt: (
callback: (data: { targetId: string; responseChannel: string }) => void
) => () => void
respondPassword: (args: { channel: string; password: string | null }) => void
addPortForward: (args: {
targetId: string
localPort: number
remoteHost: string
remotePort: number
label?: string
}) => Promise<unknown>
removePortForward: (args: { id: string }) => Promise<boolean>
listPortForwards: (args?: { targetId?: string }) => Promise<unknown[]>
browseDir: (args: { targetId: string; dirPath: string }) => Promise<{
entries: { name: string; isDirectory: boolean }[]
resolvedPath: string
}>
}
}

View file

@ -5,11 +5,18 @@ import type {
CreateWorktreeArgs,
OpenCodeStatusEvent
} from '../../shared/types'
import type { SshTarget, SshConnectionState } from '../../shared/ssh-types'
import type { PreloadApi } from './api-types'
type ReposApi = {
list: () => Promise<Repo[]>
add: (args: { path: string; kind?: 'git' | 'folder' }) => Promise<Repo>
addRemote: (args: {
connectionId: string
remotePath: string
displayName?: string
kind?: 'git' | 'folder'
}) => Promise<Repo>
remove: (args: { repoId: string }) => Promise<void>
update: (args: {
repoId: string
@ -44,6 +51,7 @@ type PtyApi = {
rows: number
cwd?: string
env?: Record<string, string>
connectionId?: string | null
}) => Promise<{ id: string }>
write: (id: string, data: string) => void
resize: (id: string, cols: number, rows: number) => void
@ -112,10 +120,33 @@ type ShellApi = {
copyFile: (args: { srcPath: string; destPath: string }) => Promise<void>
}
/** SSH surface of the preload bridge: target CRUD plus connection lifecycle. */
type SshApi = {
  listTargets: () => Promise<SshTarget[]>
  addTarget: (args: { target: Omit<SshTarget, 'id'> }) => Promise<SshTarget>
  updateTarget: (args: {
    id: string
    updates: Partial<Omit<SshTarget, 'id'>>
  }) => Promise<SshTarget>
  removeTarget: (args: { id: string }) => Promise<void>
  // NOTE(review): presumably imports targets from the user's SSH config —
  // confirm the source in the main-process handler.
  importConfig: () => Promise<SshTarget[]>
  connect: (args: { targetId: string }) => Promise<SshConnectionState>
  disconnect: (args: { targetId: string }) => Promise<void>
  getState: (args: { targetId: string }) => Promise<SshConnectionState | null>
  testConnection: (args: { targetId: string }) => Promise<{ success: boolean; error?: string }>
  // Subscribes to connection-state updates; returns an unsubscribe function.
  onStateChanged: (
    callback: (data: { targetId: string; state: SshConnectionState }) => void
  ) => () => void
  browseDir: (args: { targetId: string; dirPath: string }) => Promise<{
    entries: { name: string; isDirectory: boolean }[]
    resolvedPath: string
  }>
}
// Full window.api shape: the base PreloadApi with the richer typed
// overrides declared in this file layered on top.
type Api = PreloadApi & {
  repos: ReposApi
  worktrees: WorktreesApi
  pty: PtyApi
  ssh: SshApi
}
declare global {

View file

@ -106,6 +106,13 @@ const api = {
add: (args: { path: string; kind?: 'git' | 'folder' }): Promise<unknown> =>
ipcRenderer.invoke('repos:add', args),
addRemote: (args: {
connectionId: string
remotePath: string
displayName?: string
kind?: 'git' | 'folder'
}): Promise<unknown> => ipcRenderer.invoke('repos:addRemote', args),
remove: (args: { repoId: string }): Promise<void> => ipcRenderer.invoke('repos:remove', args),
update: (args: { repoId: string; updates: Record<string, unknown> }): Promise<unknown> =>
@ -186,6 +193,7 @@ const api = {
cwd?: string
env?: Record<string, string>
command?: string
connectionId?: string | null
}): Promise<{ id: string }> => ipcRenderer.invoke('pty:spawn', opts),
write: (id: string, data: string): void => {
@ -703,29 +711,35 @@ const api = {
fs: {
readDir: (args: {
dirPath: string
connectionId?: string
}): Promise<{ name: string; isDirectory: boolean; isSymlink: boolean }[]> =>
ipcRenderer.invoke('fs:readDir', args),
readFile: (args: {
filePath: string
connectionId?: string
}): Promise<{ content: string; isBinary: boolean; isImage?: boolean; mimeType?: string }> =>
ipcRenderer.invoke('fs:readFile', args),
writeFile: (args: { filePath: string; content: string }): Promise<void> =>
ipcRenderer.invoke('fs:writeFile', args),
createFile: (args: { filePath: string }): Promise<void> =>
writeFile: (args: {
filePath: string
content: string
connectionId?: string
}): Promise<void> => ipcRenderer.invoke('fs:writeFile', args),
createFile: (args: { filePath: string; connectionId?: string }): Promise<void> =>
ipcRenderer.invoke('fs:createFile', args),
createDir: (args: { dirPath: string }): Promise<void> =>
createDir: (args: { dirPath: string; connectionId?: string }): Promise<void> =>
ipcRenderer.invoke('fs:createDir', args),
rename: (args: { oldPath: string; newPath: string }): Promise<void> =>
rename: (args: { oldPath: string; newPath: string; connectionId?: string }): Promise<void> =>
ipcRenderer.invoke('fs:rename', args),
deletePath: (args: { targetPath: string }): Promise<void> =>
deletePath: (args: { targetPath: string; connectionId?: string }): Promise<void> =>
ipcRenderer.invoke('fs:deletePath', args),
authorizeExternalPath: (args: { targetPath: string }): Promise<void> =>
ipcRenderer.invoke('fs:authorizeExternalPath', args),
stat: (args: {
filePath: string
connectionId?: string
}): Promise<{ size: number; isDirectory: boolean; mtime: number }> =>
ipcRenderer.invoke('fs:stat', args),
listFiles: (args: { rootPath: string }): Promise<string[]> =>
listFiles: (args: { rootPath: string; connectionId?: string }): Promise<string[]> =>
ipcRenderer.invoke('fs:listFiles', args),
search: (args: {
query: string
@ -736,6 +750,7 @@ const api = {
includePattern?: string
excludePattern?: string
maxResults?: number
connectionId?: string
}): Promise<{
files: {
filePath: string
@ -745,9 +760,9 @@ const api = {
totalMatches: number
truncated: boolean
}> => ipcRenderer.invoke('fs:search', args),
watchWorktree: (args: { worktreePath: string }): Promise<void> =>
watchWorktree: (args: { worktreePath: string; connectionId?: string }): Promise<void> =>
ipcRenderer.invoke('fs:watchWorktree', args),
unwatchWorktree: (args: { worktreePath: string }): Promise<void> =>
unwatchWorktree: (args: { worktreePath: string; connectionId?: string }): Promise<void> =>
ipcRenderer.invoke('fs:unwatchWorktree', args),
onFsChanged: (callback: (payload: FsChangedPayload) => void): (() => void) => {
const listener = (_event: Electron.IpcRendererEvent, payload: FsChangedPayload) =>
@ -758,34 +773,58 @@ const api = {
},
git: {
status: (args: { worktreePath: string }): Promise<unknown> =>
status: (args: { worktreePath: string; connectionId?: string }): Promise<unknown> =>
ipcRenderer.invoke('git:status', args),
conflictOperation: (args: { worktreePath: string }): Promise<unknown> =>
conflictOperation: (args: { worktreePath: string; connectionId?: string }): Promise<unknown> =>
ipcRenderer.invoke('git:conflictOperation', args),
diff: (args: { worktreePath: string; filePath: string; staged: boolean }): Promise<unknown> =>
ipcRenderer.invoke('git:diff', args),
branchCompare: (args: { worktreePath: string; baseRef: string }): Promise<unknown> =>
ipcRenderer.invoke('git:branchCompare', args),
diff: (args: {
worktreePath: string
filePath: string
staged: boolean
connectionId?: string
}): Promise<unknown> => ipcRenderer.invoke('git:diff', args),
branchCompare: (args: {
worktreePath: string
baseRef: string
connectionId?: string
}): Promise<unknown> => ipcRenderer.invoke('git:branchCompare', args),
branchDiff: (args: {
worktreePath: string
compare: { baseRef: string; baseOid: string; headOid: string; mergeBase: string }
filePath: string
oldPath?: string
connectionId?: string
}): Promise<unknown> => ipcRenderer.invoke('git:branchDiff', args),
stage: (args: { worktreePath: string; filePath: string }): Promise<void> =>
ipcRenderer.invoke('git:stage', args),
bulkStage: (args: { worktreePath: string; filePaths: string[] }): Promise<void> =>
ipcRenderer.invoke('git:bulkStage', args),
unstage: (args: { worktreePath: string; filePath: string }): Promise<void> =>
ipcRenderer.invoke('git:unstage', args),
bulkUnstage: (args: { worktreePath: string; filePaths: string[] }): Promise<void> =>
ipcRenderer.invoke('git:bulkUnstage', args),
discard: (args: { worktreePath: string; filePath: string }): Promise<void> =>
ipcRenderer.invoke('git:discard', args),
stage: (args: {
worktreePath: string
filePath: string
connectionId?: string
}): Promise<void> => ipcRenderer.invoke('git:stage', args),
bulkStage: (args: {
worktreePath: string
filePaths: string[]
connectionId?: string
}): Promise<void> => ipcRenderer.invoke('git:bulkStage', args),
unstage: (args: {
worktreePath: string
filePath: string
connectionId?: string
}): Promise<void> => ipcRenderer.invoke('git:unstage', args),
bulkUnstage: (args: {
worktreePath: string
filePaths: string[]
connectionId?: string
}): Promise<void> => ipcRenderer.invoke('git:bulkUnstage', args),
discard: (args: {
worktreePath: string
filePath: string
connectionId?: string
}): Promise<void> => ipcRenderer.invoke('git:discard', args),
remoteFileUrl: (args: {
worktreePath: string
relativePath: string
line: number
connectionId?: string
}): Promise<string | null> => ipcRenderer.invoke('git:remoteFileUrl', args)
},
@ -977,6 +1016,137 @@ const api = {
ipcRenderer.on('rateLimits:update', listener)
return () => ipcRenderer.removeListener('rateLimits:update', listener)
}
},
ssh: {
listTargets: (): Promise<unknown[]> => ipcRenderer.invoke('ssh:listTargets'),
addTarget: (args: { target: Record<string, unknown> }): Promise<unknown> =>
ipcRenderer.invoke('ssh:addTarget', args),
updateTarget: (args: { id: string; updates: Record<string, unknown> }): Promise<unknown> =>
ipcRenderer.invoke('ssh:updateTarget', args),
removeTarget: (args: { id: string }): Promise<void> =>
ipcRenderer.invoke('ssh:removeTarget', args),
importConfig: (): Promise<unknown[]> => ipcRenderer.invoke('ssh:importConfig'),
connect: (args: { targetId: string }): Promise<unknown> =>
ipcRenderer.invoke('ssh:connect', args),
disconnect: (args: { targetId: string }): Promise<void> =>
ipcRenderer.invoke('ssh:disconnect', args),
getState: (args: { targetId: string }): Promise<unknown> =>
ipcRenderer.invoke('ssh:getState', args),
testConnection: (args: {
targetId: string
}): Promise<{ success: boolean; error?: string; state?: unknown }> =>
ipcRenderer.invoke('ssh:testConnection', args),
onStateChanged: (
callback: (data: { targetId: string; state: unknown }) => void
): (() => void) => {
const listener = (
_event: Electron.IpcRendererEvent,
data: { targetId: string; state: unknown }
) => callback(data)
ipcRenderer.on('ssh:state-changed', listener)
return () => ipcRenderer.removeListener('ssh:state-changed', listener)
},
onHostKeyVerify: (
callback: (data: {
host: string
ip: string
fingerprint: string
keyType: string
responseChannel: string
}) => void
): (() => void) => {
const listener = (
_event: Electron.IpcRendererEvent,
data: {
host: string
ip: string
fingerprint: string
keyType: string
responseChannel: string
}
) => callback(data)
ipcRenderer.on('ssh:host-key-verify', listener)
return () => ipcRenderer.removeListener('ssh:host-key-verify', listener)
},
respondHostKeyVerify: (args: { channel: string; accepted: boolean }): void => {
ipcRenderer.send(args.channel, args.accepted)
},
onAuthChallenge: (
callback: (data: {
targetId: string
name: string
instructions: string
prompts: { prompt: string; echo: boolean }[]
responseChannel: string
}) => void
): (() => void) => {
const listener = (
_event: Electron.IpcRendererEvent,
data: {
targetId: string
name: string
instructions: string
prompts: { prompt: string; echo: boolean }[]
responseChannel: string
}
) => callback(data)
ipcRenderer.on('ssh:auth-challenge', listener)
return () => ipcRenderer.removeListener('ssh:auth-challenge', listener)
},
respondAuthChallenge: (args: { channel: string; responses: string[] }): void => {
ipcRenderer.send(args.channel, args.responses)
},
onPasswordPrompt: (
callback: (data: { targetId: string; responseChannel: string }) => void
): (() => void) => {
const listener = (
_event: Electron.IpcRendererEvent,
data: { targetId: string; responseChannel: string }
) => callback(data)
ipcRenderer.on('ssh:password-prompt', listener)
return () => ipcRenderer.removeListener('ssh:password-prompt', listener)
},
respondPassword: (args: { channel: string; password: string | null }): void => {
ipcRenderer.send(args.channel, args.password)
},
addPortForward: (args: {
targetId: string
localPort: number
remoteHost: string
remotePort: number
label?: string
}): Promise<unknown> => ipcRenderer.invoke('ssh:addPortForward', args),
removePortForward: (args: { id: string }): Promise<boolean> =>
ipcRenderer.invoke('ssh:removePortForward', args),
listPortForwards: (args?: { targetId?: string }): Promise<unknown[]> =>
ipcRenderer.invoke('ssh:listPortForwards', args),
browseDir: (args: {
targetId: string
dirPath: string
}): Promise<{
entries: { name: string; isDirectory: boolean }[]
resolvedPath: string
}> => ipcRenderer.invoke('ssh:browseDir', args)
}
}

70
src/relay/context.ts Normal file
View file

@ -0,0 +1,70 @@
import { resolve, relative, isAbsolute } from 'path'
import { realpathSync } from 'fs'
import { realpath } from 'fs/promises'
// Why: every mutating FS operation on the remote must stay inside workspace
// roots registered by the main process. Without this boundary a buggy or
// compromised client could read or delete arbitrary files on the remote host.
export class RelayContext {
  readonly authorizedRoots = new Set<string>()

  // Why: until the first root arrives there is a race window in which
  // authorizedRoots is empty. Allowing all paths during that window would let
  // a compromised client touch arbitrary files before registration completes,
  // so validatePath rejects everything until at least one root exists.
  private hasRegisteredRoot = false

  /** Record a workspace root (and its symlink-resolved form) as authorized. */
  registerRoot(rootPath: string): void {
    const normalized = resolve(rootPath)
    this.authorizedRoots.add(normalized)
    // Why: on macOS /tmp is a symlink to /private/tmp. A root registered as
    // /tmp/workspace would fail the textual check once realpath turns a target
    // into /private/tmp/workspace, so the canonical form is registered too.
    try {
      const canonical = realpathSync(normalized)
      if (canonical !== normalized) {
        this.authorizedRoots.add(canonical)
      }
    } catch {
      // Root doesn't exist yet — textual form is sufficient
    }
    this.hasRegisteredRoot = true
  }

  /** Throw unless targetPath falls under some authorized root (textual check). */
  validatePath(targetPath: string): void {
    if (!this.hasRegisteredRoot) {
      throw new Error('No workspace roots registered yet — path validation denied')
    }
    const candidate = resolve(targetPath)
    const contained = [...this.authorizedRoots].some((root) => {
      const rel = relative(root, candidate)
      return !rel.startsWith('..') && !isAbsolute(rel)
    })
    if (!contained) {
      throw new Error(`Path outside authorized workspace: ${targetPath}`)
    }
  }

  // Why: validatePath only normalizes `..` textually. A symlink inside the
  // workspace that points outside it (workspace/evil -> /etc/) would pass the
  // textual check, so this variant re-validates the realpath-resolved target,
  // closing the symlink-traversal vector.
  async validatePathResolved(targetPath: string): Promise<void> {
    this.validatePath(targetPath)
    let canonical: string
    try {
      canonical = await realpath(targetPath)
    } catch (err) {
      // Why: ENOENT/ENOTDIR means the path doesn't exist yet (e.g. createFile),
      // so the textual check above suffices. Other errors (EACCES, EIO) are
      // real problems and must propagate.
      const code = (err as NodeJS.ErrnoException).code
      if (code === 'ENOENT' || code === 'ENOTDIR') {
        return
      }
      throw err
    }
    this.validatePath(canonical)
  }
}

View file

@ -0,0 +1,219 @@
import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest'
import { RelayDispatcher } from './dispatcher'
import {
encodeJsonRpcFrame,
encodeKeepAliveFrame,
MessageType,
type JsonRpcRequest,
type JsonRpcNotification
} from './protocol'
/**
 * Decode the first wire frame in `buf`: a 1-byte message type, two big-endian
 * uint32s (sequence id, ack), a uint32 payload length, then the payload bytes.
 */
function decodeFirstFrame(buf: Buffer): { type: number; id: number; ack: number; payload: Buffer } {
  const HEADER_BYTES = 13
  const payloadLength = buf.readUInt32BE(9)
  return {
    type: buf[0],
    id: buf.readUInt32BE(1),
    ack: buf.readUInt32BE(5),
    payload: buf.subarray(HEADER_BYTES, HEADER_BYTES + payloadLength)
  }
}
// Unit tests for RelayDispatcher. The dispatcher's write callback is captured
// into `written` so each test can decode the frames it emitted; fake timers
// control both the keepalive interval and handler-promise resolution.
describe('RelayDispatcher', () => {
  let dispatcher: RelayDispatcher
  // Every frame the dispatcher writes, in emission order.
  let written: Buffer[]

  beforeEach(() => {
    vi.useFakeTimers()
    written = []
    dispatcher = new RelayDispatcher((data) => {
      written.push(Buffer.from(data))
    })
  })

  afterEach(() => {
    dispatcher.dispose()
    vi.useRealTimers()
  })

  it('sends keepalive frames on interval', () => {
    expect(written.length).toBe(0)
    vi.advanceTimersByTime(5_000)
    expect(written.length).toBe(1)
    const frame = decodeFirstFrame(written[0])
    expect(frame.type).toBe(MessageType.KeepAlive)
    // First outgoing frame carries sequence id 1.
    expect(frame.id).toBe(1)
  })

  it('dispatches JSON-RPC requests to registered handlers', async () => {
    const handler = vi.fn().mockResolvedValue({ result: 42 })
    dispatcher.onRequest('test.method', handler)
    const req: JsonRpcRequest = {
      jsonrpc: '2.0',
      id: 1,
      method: 'test.method',
      params: { foo: 'bar' }
    }
    const frame = encodeJsonRpcFrame(req, 1, 0)
    dispatcher.feed(frame)
    // Let the handler promise resolve
    await vi.advanceTimersByTimeAsync(0)
    expect(handler).toHaveBeenCalledWith({ foo: 'bar' })
    // Should have sent a response (after keepalive timer writes)
    const responses = written.filter((buf) => {
      const f = decodeFirstFrame(buf)
      if (f.type !== MessageType.Regular) {
        return false
      }
      try {
        const msg = JSON.parse(f.payload.toString('utf-8'))
        return 'id' in msg && 'result' in msg
      } catch {
        return false
      }
    })
    expect(responses.length).toBe(1)
    const resp = JSON.parse(decodeFirstFrame(responses[0]).payload.toString('utf-8'))
    expect(resp.result).toEqual({ result: 42 })
    expect(resp.id).toBe(1)
  })

  it('sends error response when handler throws', async () => {
    dispatcher.onRequest('fail.method', async () => {
      throw new Error('boom')
    })
    const req: JsonRpcRequest = {
      jsonrpc: '2.0',
      id: 5,
      method: 'fail.method'
    }
    dispatcher.feed(encodeJsonRpcFrame(req, 1, 0))
    await vi.advanceTimersByTimeAsync(0)
    // Pick out frames whose payload is a JSON-RPC error response.
    const errors = written.filter((buf) => {
      const f = decodeFirstFrame(buf)
      if (f.type !== MessageType.Regular) {
        return false
      }
      try {
        const msg = JSON.parse(f.payload.toString('utf-8'))
        return 'error' in msg
      } catch {
        return false
      }
    })
    expect(errors.length).toBe(1)
    const resp = JSON.parse(decodeFirstFrame(errors[0]).payload.toString('utf-8'))
    expect(resp.error.message).toBe('boom')
    // Error responses echo the request id.
    expect(resp.id).toBe(5)
  })

  it('sends method-not-found for unknown methods', async () => {
    const req: JsonRpcRequest = {
      jsonrpc: '2.0',
      id: 10,
      method: 'unknown.method'
    }
    dispatcher.feed(encodeJsonRpcFrame(req, 1, 0))
    await vi.advanceTimersByTimeAsync(0)
    // -32601 is the JSON-RPC "method not found" code.
    const errors = written.filter((buf) => {
      const f = decodeFirstFrame(buf)
      if (f.type !== MessageType.Regular) {
        return false
      }
      try {
        const msg = JSON.parse(f.payload.toString('utf-8'))
        return msg.error?.code === -32601
      } catch {
        return false
      }
    })
    expect(errors.length).toBe(1)
  })

  it('dispatches notifications to registered handlers', () => {
    const handler = vi.fn()
    dispatcher.onNotification('event.happened', handler)
    const notif: JsonRpcNotification = {
      jsonrpc: '2.0',
      method: 'event.happened',
      params: { x: 1 }
    }
    dispatcher.feed(encodeJsonRpcFrame(notif, 1, 0))
    expect(handler).toHaveBeenCalledWith({ x: 1 })
  })

  it('sends notifications via notify()', () => {
    dispatcher.notify('my.event', { data: 'hello' })
    // A notification frame has a method but no id.
    const notifs = written.filter((buf) => {
      const f = decodeFirstFrame(buf)
      if (f.type !== MessageType.Regular) {
        return false
      }
      try {
        const msg = JSON.parse(f.payload.toString('utf-8'))
        return 'method' in msg && !('id' in msg)
      } catch {
        return false
      }
    })
    expect(notifs.length).toBe(1)
    const msg = JSON.parse(decodeFirstFrame(notifs[0]).payload.toString('utf-8'))
    expect(msg.method).toBe('my.event')
    expect(msg.params).toEqual({ data: 'hello' })
  })

  it('tracks highest received seq in ack field', async () => {
    const handler = vi.fn().mockResolvedValue('ok')
    dispatcher.onRequest('ping', handler)
    // Send request with seq=50
    const req: JsonRpcRequest = { jsonrpc: '2.0', id: 1, method: 'ping' }
    dispatcher.feed(encodeJsonRpcFrame(req, 50, 0))
    await vi.advanceTimersByTimeAsync(0)
    // The response frame should have ack=50
    const responseFrames = written.filter((buf) => {
      const f = decodeFirstFrame(buf)
      if (f.type !== MessageType.Regular) {
        return false
      }
      try {
        const msg = JSON.parse(f.payload.toString('utf-8'))
        return 'result' in msg
      } catch {
        return false
      }
    })
    expect(responseFrames.length).toBe(1)
    expect(decodeFirstFrame(responseFrames[0]).ack).toBe(50)
  })

  it('silently handles keepalive frames', () => {
    const frame = encodeKeepAliveFrame(1, 0)
    // Should not throw
    dispatcher.feed(frame)
  })

  it('stops sending after dispose', () => {
    dispatcher.dispose()
    const before = written.length
    dispatcher.notify('test', {})
    expect(written.length).toBe(before)
    // The keepalive timer must also be silenced by dispose().
    vi.advanceTimersByTime(10_000)
    expect(written.length).toBe(before)
  })
})

170
src/relay/dispatcher.ts Normal file
View file

@ -0,0 +1,170 @@
import {
FrameDecoder,
MessageType,
encodeJsonRpcFrame,
encodeKeepAliveFrame,
parseJsonRpcMessage,
KEEPALIVE_SEND_MS,
type DecodedFrame,
type JsonRpcRequest,
type JsonRpcNotification,
type JsonRpcResponse
} from './protocol'
/** Handler for a JSON-RPC request; the resolved value becomes the response result. */
export type MethodHandler = (params: Record<string, unknown>) => Promise<unknown>
/** Handler for a JSON-RPC notification (fire-and-forget, no response). */
export type NotificationHandler = (params: Record<string, unknown>) => void

/**
 * JSON-RPC dispatcher over the relay's framed byte stream.
 *
 * Incoming bytes are fed to a FrameDecoder. Keepalive frames only advance the
 * received-sequence counter; Regular frames carry JSON-RPC requests or
 * notifications that are routed to registered handlers. Every outgoing frame
 * carries a monotonically increasing sequence number and piggybacks the
 * highest sequence number received so far in its ack field.
 *
 * This class never issues requests of its own — it only sends responses and
 * notifications — so incoming response messages are ignored.
 */
export class RelayDispatcher {
  private decoder: FrameDecoder
  private write: (data: Buffer) => void
  private requestHandlers = new Map<string, MethodHandler>()
  private notificationHandlers = new Map<string, NotificationHandler>()
  private nextOutgoingSeq = 1
  private highestReceivedSeq = 0
  private keepaliveTimer: ReturnType<typeof setInterval> | null = null
  private disposed = false

  /** @param write Sink for encoded outgoing frames (e.g. the SSH channel). */
  constructor(write: (data: Buffer) => void) {
    this.write = write
    this.decoder = new FrameDecoder((frame) => this.handleFrame(frame))
    this.startKeepalive()
  }

  /** Register the handler invoked for requests with the given method name. */
  onRequest(method: string, handler: MethodHandler): void {
    this.requestHandlers.set(method, handler)
  }

  /** Register the handler invoked for notifications with the given method name. */
  onNotification(method: string, handler: NotificationHandler): void {
    this.notificationHandlers.set(method, handler)
  }

  /** Feed raw bytes from the transport into the frame decoder. */
  feed(data: Buffer): void {
    if (this.disposed) {
      return
    }
    try {
      this.decoder.feed(data)
    } catch (err) {
      // Why: a framing error from a corrupt stream must not crash the relay;
      // log and keep serving whatever still decodes.
      process.stderr.write(
        `[relay] Protocol error: ${err instanceof Error ? err.message : String(err)}\n`
      )
    }
  }

  /** Send a JSON-RPC notification (no response expected). */
  notify(method: string, params?: Record<string, unknown>): void {
    if (this.disposed) {
      return
    }
    const msg: JsonRpcNotification = {
      jsonrpc: '2.0',
      method,
      ...(params !== undefined ? { params } : {})
    }
    this.sendFrame(msg)
  }

  /** Stop the keepalive timer and drop all further sends/feeds. Idempotent. */
  dispose(): void {
    if (this.disposed) {
      return
    }
    this.disposed = true
    if (this.keepaliveTimer) {
      clearInterval(this.keepaliveTimer)
      this.keepaliveTimer = null
    }
  }

  private handleFrame(frame: DecodedFrame): void {
    // Every frame — including keepalives — advances the ack watermark.
    if (frame.id > this.highestReceivedSeq) {
      this.highestReceivedSeq = frame.id
    }
    if (frame.type === MessageType.KeepAlive) {
      return
    }
    if (frame.type === MessageType.Regular) {
      try {
        const msg = parseJsonRpcMessage(frame.payload)
        this.handleMessage(msg)
      } catch (err) {
        process.stderr.write(
          `[relay] Parse error: ${err instanceof Error ? err.message : String(err)}\n`
        )
      }
    }
  }

  private handleMessage(msg: JsonRpcRequest | JsonRpcNotification | JsonRpcResponse): void {
    if ('id' in msg && 'method' in msg) {
      // Request: id + method. Fire-and-forget; handleRequest sends the reply.
      void this.handleRequest(msg as JsonRpcRequest)
    } else if ('method' in msg && !('id' in msg)) {
      this.handleNotification(msg as JsonRpcNotification)
    }
    // Responses (id without method) are ignored — this side never sends requests.
  }

  private async handleRequest(req: JsonRpcRequest): Promise<void> {
    const handler = this.requestHandlers.get(req.method)
    if (!handler) {
      this.sendResponse(req.id, undefined, {
        code: -32601,
        message: `Method not found: ${req.method}`
      })
      return
    }
    try {
      const result = await handler(req.params ?? {})
      this.sendResponse(req.id, result)
    } catch (err) {
      const message = err instanceof Error ? err.message : String(err)
      // Why: JSON-RPC error codes must be numbers, but Node system errors
      // carry string codes ('ENOENT', 'EACCES', …). Only pass through codes
      // that are already numeric; everything else maps to generic -32000.
      const rawCode = (err as { code?: unknown }).code
      const code = typeof rawCode === 'number' ? rawCode : -32000
      this.sendResponse(req.id, undefined, { code, message })
    }
  }

  private handleNotification(notif: JsonRpcNotification): void {
    const handler = this.notificationHandlers.get(notif.method)
    if (handler) {
      handler(notif.params ?? {})
    }
  }

  private sendResponse(
    id: number,
    result?: unknown,
    error?: { code: number; message: string; data?: unknown }
  ): void {
    const msg: JsonRpcResponse = {
      jsonrpc: '2.0',
      id,
      // A response carries exactly one of error/result; null stands in for
      // handlers that resolve undefined so `result` is always present.
      ...(error ? { error } : { result: result ?? null })
    }
    this.sendFrame(msg)
  }

  private sendFrame(msg: JsonRpcRequest | JsonRpcResponse | JsonRpcNotification): void {
    if (this.disposed) {
      return
    }
    const seq = this.nextOutgoingSeq++
    const frame = encodeJsonRpcFrame(msg, seq, this.highestReceivedSeq)
    this.write(frame)
  }

  private startKeepalive(): void {
    this.keepaliveTimer = setInterval(() => {
      if (this.disposed) {
        return
      }
      const seq = this.nextOutgoingSeq++
      const frame = encodeKeepAliveFrame(seq, this.highestReceivedSeq)
      this.write(frame)
    }, KEEPALIVE_SEND_MS)
    // Why: without unref, the keepalive interval keeps the event loop alive
    // even when the relay should be winding down (e.g. after stdin ends and
    // all PTYs have exited). unref lets the process exit naturally.
    this.keepaliveTimer.unref()
  }
}

View file

@ -0,0 +1,288 @@
/**
* Pure helpers and child-process search utilities extracted from fs-handler.ts.
*
* Why: oxlint max-lines requires .ts files to stay under 300 lines.
* These functions depend only on their arguments (plus `rg` being on PATH),
* so they are straightforward to test independently.
*/
import { relative } from 'path'
import { execFile, type ChildProcess } from 'child_process'
// ─── Constants ───────────────────────────────────────────────────────

// Largest file the helpers will touch: used below as rg's --max-filesize cap.
// Presumably also the read-size ceiling enforced by fs-handler ("File too
// large") — confirm there.
export const MAX_FILE_SIZE = 5 * 1024 * 1024

// Wall-clock budget for one rg search; on expiry the child is killed and the
// partial result is returned marked `truncated`.
export const SEARCH_TIMEOUT_MS = 15_000

// Per-file match ceiling, passed to rg as --max-count.
export const MAX_MATCHES_PER_FILE = 100

// NOTE(review): not referenced in this module — presumably the fallback for
// SearchOptions.maxResults when the client omits it; verify against fs-handler.
export const DEFAULT_MAX_RESULTS = 2000

// Extension → MIME type for files returned to the client as base64 blobs.
// Note '.pdf' is included here despite the "IMAGE" name.
export const IMAGE_MIME_TYPES: Record<string, string> = {
  '.png': 'image/png',
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.gif': 'image/gif',
  '.svg': 'image/svg+xml',
  '.webp': 'image/webp',
  '.bmp': 'image/bmp',
  '.ico': 'image/x-icon',
  '.pdf': 'application/pdf'
}
// ─── Binary detection ────────────────────────────────────────────────
/**
 * Heuristic binary detection: a buffer is considered binary when any of its
 * first 8 KiB bytes is NUL (the same cheap heuristic grep-family tools use).
 */
export function isBinaryBuffer(buffer: Buffer): boolean {
  const sniffWindow = buffer.subarray(0, 8192)
  return sniffWindow.includes(0)
}
// ─── Search types ────────────────────────────────────────────────────

// Caller-supplied knobs for searchWithRg; each maps directly onto an rg flag.
export type SearchOptions = {
  caseSensitive?: boolean // default false → --ignore-case is added
  wholeWord?: boolean // --word-regexp
  useRegex?: boolean // default false → --fixed-strings (literal match)
  includePattern?: string // comma-separated globs, each passed as --glob
  excludePattern?: string // comma-separated globs, each passed negated (!glob)
  maxResults: number // hard cap on total matches; hitting it sets `truncated`
}

// Matches collected for one file.
type FileResult = {
  filePath: string // absolute path as reported by rg
  relativePath: string // POSIX-style path relative to the search root
  matches: {
    line: number // line number from rg's JSON output (line_number)
    column: number // 1-based column of the match start
    matchLength: number
    lineContent: string // matched line with its trailing newline stripped
  }[]
}

export type SearchResult = {
  files: FileResult[]
  totalMatches: number
  truncated: boolean // true when maxResults or the search timeout was hit
}
// ─── rg-based search ─────────────────────────────────────────────────
/** Build the argv for one rg search over `rootPath` with the given options. */
function buildRgArgs(rootPath: string, query: string, opts: SearchOptions): string[] {
  const rgArgs = [
    '--json',
    '--hidden',
    '--glob',
    '!.git',
    '--max-count',
    String(MAX_MATCHES_PER_FILE),
    '--max-filesize',
    `${Math.floor(MAX_FILE_SIZE / 1024 / 1024)}M`
  ]
  if (!opts.caseSensitive) {
    rgArgs.push('--ignore-case')
  }
  if (opts.wholeWord) {
    rgArgs.push('--word-regexp')
  }
  if (!opts.useRegex) {
    rgArgs.push('--fixed-strings')
  }
  if (opts.includePattern) {
    for (const p of opts.includePattern
      .split(',')
      .map((s) => s.trim())
      .filter(Boolean)) {
      rgArgs.push('--glob', p)
    }
  }
  if (opts.excludePattern) {
    for (const p of opts.excludePattern
      .split(',')
      .map((s) => s.trim())
      .filter(Boolean)) {
      rgArgs.push('--glob', `!${p}`)
    }
  }
  // Why: `--` stops flag parsing so a query starting with '-' is not
  // misread as an rg option.
  rgArgs.push('--', query, rootPath)
  return rgArgs
}

/**
 * Run ripgrep (`rg`) with JSON output to collect text matches.
 * Returns a structured result that the relay can send to the client.
 *
 * Never rejects: a spawn failure resolves to an empty result, and a search
 * exceeding SEARCH_TIMEOUT_MS resolves with whatever was collected so far,
 * marked `truncated`.
 */
export function searchWithRg(
  rootPath: string,
  query: string,
  opts: SearchOptions
): Promise<SearchResult> {
  return new Promise((resolve) => {
    const fileMap = new Map<string, FileResult>()
    let totalMatches = 0
    let truncated = false
    let pending = ''
    let resolved = false
    let child: ChildProcess | null = null

    // Why: a single ingestion path for both streamed lines and the final
    // unterminated buffer chunk replaces the previously duplicated JSON
    // handling in the 'data' and 'close' handlers.
    const ingestLine = (line: string): void => {
      if (!line || totalMatches >= opts.maxResults) {
        return
      }
      try {
        const msg = JSON.parse(line)
        if (msg.type !== 'match') {
          return
        }
        const data = msg.data
        const absPath = data.path.text as string
        const relPath = relative(rootPath, absPath).replace(/\\/g, '/')
        let fileResult = fileMap.get(absPath)
        if (!fileResult) {
          fileResult = { filePath: absPath, relativePath: relPath, matches: [] }
          fileMap.set(absPath, fileResult)
        }
        for (const sub of data.submatches) {
          fileResult.matches.push({
            line: data.line_number,
            column: sub.start + 1,
            matchLength: sub.end - sub.start,
            lineContent: data.lines.text.replace(/\n$/, '')
          })
          totalMatches++
          if (totalMatches >= opts.maxResults) {
            truncated = true
            child?.kill()
            break
          }
        }
      } catch {
        /* skip malformed */
      }
    }

    const resolveOnce = () => {
      if (resolved) {
        return
      }
      resolved = true
      clearTimeout(killTimeout)
      resolve({ files: Array.from(fileMap.values()), totalMatches, truncated })
    }

    try {
      child = execFile('rg', buildRgArgs(rootPath, query, opts), {
        maxBuffer: 50 * 1024 * 1024
      })
    } catch {
      // Why: treat a synchronous spawn failure as "no results" rather than
      // failing the whole search RPC.
      resolve({ files: [], totalMatches: 0, truncated: false })
      return
    }

    child.stdout!.setEncoding('utf-8')
    child.stdout!.on('data', (chunk: string) => {
      pending += chunk
      const lines = pending.split('\n')
      // The last element may be a partial line; hold it for the next chunk.
      pending = lines.pop() ?? ''
      for (const line of lines) {
        ingestLine(line)
      }
    })
    child.stderr!.on('data', () => {
      /* drain so the child never blocks on a full stderr pipe */
    })
    child.once('error', () => resolveOnce())
    child.once('close', () => {
      if (pending) {
        ingestLine(pending)
      }
      resolveOnce()
    })
    // Why: bound the search so a pathological pattern or a huge tree cannot
    // hang the relay; partial results are returned marked truncated.
    const killTimeout = setTimeout(() => {
      truncated = true
      child?.kill()
    }, SEARCH_TIMEOUT_MS)
  })
}
// ─── rg-based file listing ───────────────────────────────────────────
/**
 * List all non-ignored files under `rootPath` using ripgrep's `--files` mode.
 * Returns relative POSIX paths. Never rejects: spawn failures or a timeout
 * resolve with whatever was collected so far.
 */
export function listFilesWithRg(rootPath: string): Promise<string[]> {
  return new Promise((resolve) => {
    const collected: string[] = []
    let pending = ''
    let settled = false

    const settle = (): void => {
      if (settled) {
        return
      }
      settled = true
      clearTimeout(deadline)
      resolve(collected)
    }

    // Convert one absolute path from rg into a relative POSIX path,
    // dropping anything that escapes the root.
    const record = (rawLine: string): void => {
      if (!rawLine) {
        return
      }
      const relPath = relative(rootPath, rawLine).replace(/\\/g, '/')
      if (!relPath.startsWith('..')) {
        collected.push(relPath)
      }
    }

    const child = execFile(
      'rg',
      ['--files', '--hidden', '--glob', '!**/node_modules', '--glob', '!**/.git', rootPath],
      { maxBuffer: 50 * 1024 * 1024 }
    )
    child.stdout!.setEncoding('utf-8')
    child.stdout!.on('data', (chunk: string) => {
      pending += chunk
      const pieces = pending.split('\n')
      pending = pieces.pop() ?? ''
      for (const piece of pieces) {
        record(piece)
      }
    })
    child.stderr!.on('data', () => {
      /* drain */
    })
    child.once('error', () => settle())
    child.once('close', () => {
      // Flush a possible final line that arrived without a trailing newline.
      if (pending) {
        const relPath = relative(rootPath, pending.trim()).replace(/\\/g, '/')
        if (relPath && !relPath.startsWith('..')) {
          collected.push(relPath)
        }
      }
      settle()
    })
    const deadline = setTimeout(() => child.kill(), 10_000)
  })
}

View file

@ -0,0 +1,225 @@
import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest'
import { FsHandler } from './fs-handler'
import { RelayContext } from './context'
import type { RelayDispatcher } from './dispatcher'
import * as fs from 'fs/promises'
import * as path from 'path'
import { mkdtempSync, writeFileSync, mkdirSync, symlinkSync } from 'fs'
import { tmpdir } from 'os'
/**
 * Build a stand-in for RelayDispatcher: records handler registrations and
 * outgoing notifications, and exposes helpers so tests can invoke registered
 * handlers directly without a wire protocol.
 */
function createMockDispatcher() {
  type RequestHandler = (params: Record<string, unknown>) => Promise<unknown>
  type NotifyHandler = (params: Record<string, unknown>) => void
  const rpcHandlers = new Map<string, RequestHandler>()
  const eventHandlers = new Map<string, NotifyHandler>()
  const sent: { method: string; params?: Record<string, unknown> }[] = []
  return {
    onRequest: vi.fn((method: string, handler: RequestHandler) => {
      rpcHandlers.set(method, handler)
    }),
    onNotification: vi.fn((method: string, handler: NotifyHandler) => {
      eventHandlers.set(method, handler)
    }),
    notify: vi.fn((method: string, params?: Record<string, unknown>) => {
      sent.push({ method, params })
    }),
    // Internal state exposed for assertions.
    _requestHandlers: rpcHandlers,
    _notificationHandlers: eventHandlers,
    _notifications: sent,
    // Invoke a registered request handler as if a frame had arrived.
    async callRequest(method: string, params: Record<string, unknown> = {}) {
      const handler = rpcHandlers.get(method)
      if (!handler) {
        throw new Error(`No handler for ${method}`)
      }
      return handler(params)
    },
    // Invoke a registered notification handler as if a frame had arrived.
    callNotification(method: string, params: Record<string, unknown> = {}) {
      const handler = eventHandlers.get(method)
      if (!handler) {
        throw new Error(`No handler for ${method}`)
      }
      handler(params)
    }
  }
}
describe('FsHandler', () => {
let dispatcher: ReturnType<typeof createMockDispatcher>
let handler: FsHandler
let tmpDir: string
beforeEach(() => {
tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-fs-'))
dispatcher = createMockDispatcher()
const ctx = new RelayContext()
ctx.registerRoot(tmpDir)
handler = new FsHandler(dispatcher as unknown as RelayDispatcher, ctx)
})
afterEach(async () => {
handler.dispose()
await fs.rm(tmpDir, { recursive: true, force: true })
})
it('registers all expected handlers', () => {
const methods = Array.from(dispatcher._requestHandlers.keys())
expect(methods).toContain('fs.readDir')
expect(methods).toContain('fs.readFile')
expect(methods).toContain('fs.writeFile')
expect(methods).toContain('fs.stat')
expect(methods).toContain('fs.deletePath')
expect(methods).toContain('fs.createFile')
expect(methods).toContain('fs.createDir')
expect(methods).toContain('fs.rename')
expect(methods).toContain('fs.copy')
expect(methods).toContain('fs.realpath')
expect(methods).toContain('fs.search')
expect(methods).toContain('fs.listFiles')
expect(methods).toContain('fs.watch')
const notifMethods = Array.from(dispatcher._notificationHandlers.keys())
expect(notifMethods).toContain('fs.unwatch')
})
// Directory listing contract: directories sort before files, names alphabetized.
it('readDir returns sorted entries with directories first', async () => {
  mkdirSync(path.join(tmpDir, 'subdir'))
  writeFileSync(path.join(tmpDir, 'file.txt'), 'hello')
  writeFileSync(path.join(tmpDir, 'aaa.txt'), 'world')
  const result = (await dispatcher.callRequest('fs.readDir', { dirPath: tmpDir })) as {
    name: string
    isDirectory: boolean
  }[]
  expect(result[0].name).toBe('subdir')
  expect(result[0].isDirectory).toBe(true)
  expect(result.find((e) => e.name === 'file.txt')).toBeDefined()
  expect(result.find((e) => e.name === 'aaa.txt')).toBeDefined()
})
// readFile has three result shapes: text, base64 image, and bare binary marker.
it('readFile returns text content for text files', async () => {
  const filePath = path.join(tmpDir, 'test.txt')
  writeFileSync(filePath, 'hello world')
  const result = (await dispatcher.callRequest('fs.readFile', { filePath })) as {
    content: string
    isBinary: boolean
  }
  expect(result.content).toBe('hello world')
  expect(result.isBinary).toBe(false)
})
it('readFile returns base64 for image files', async () => {
  const filePath = path.join(tmpDir, 'test.png')
  // PNG magic bytes are enough: the handler keys off the extension for mime.
  writeFileSync(filePath, Buffer.from([0x89, 0x50, 0x4e, 0x47]))
  const result = (await dispatcher.callRequest('fs.readFile', { filePath })) as {
    content: string
    isBinary: boolean
    isImage: boolean
    mimeType: string
  }
  expect(result.isBinary).toBe(true)
  expect(result.isImage).toBe(true)
  expect(result.mimeType).toBe('image/png')
  expect(result.content).toBeTruthy()
})
it('readFile throws for files exceeding size limit', async () => {
  const filePath = path.join(tmpDir, 'huge.txt')
  // Write 6MB file (handler limit is below this)
  writeFileSync(filePath, Buffer.alloc(6 * 1024 * 1024))
  await expect(dispatcher.callRequest('fs.readFile', { filePath })).rejects.toThrow(
    'File too large'
  )
})
// Mutation handlers: write, stat, delete, create, rename, copy.
it('writeFile creates/overwrites file content', async () => {
  const filePath = path.join(tmpDir, 'write-test.txt')
  await dispatcher.callRequest('fs.writeFile', { filePath, content: 'new content' })
  const content = await fs.readFile(filePath, 'utf-8')
  expect(content).toBe('new content')
})
it('stat returns file metadata', async () => {
  const filePath = path.join(tmpDir, 'stat-test.txt')
  writeFileSync(filePath, 'test')
  const result = (await dispatcher.callRequest('fs.stat', { filePath })) as {
    size: number
    type: string
    mtime: number
  }
  expect(result.type).toBe('file')
  expect(result.size).toBe(4)
  expect(typeof result.mtime).toBe('number')
})
it('stat returns directory type for directories', async () => {
  const result = (await dispatcher.callRequest('fs.stat', { filePath: tmpDir })) as {
    type: string
  }
  expect(result.type).toBe('directory')
})
it('deletePath removes files', async () => {
  const filePath = path.join(tmpDir, 'to-delete.txt')
  writeFileSync(filePath, 'bye')
  await dispatcher.callRequest('fs.deletePath', { targetPath: filePath })
  await expect(fs.access(filePath)).rejects.toThrow()
})
it('createFile creates an empty file with parent dirs', async () => {
  const filePath = path.join(tmpDir, 'deep', 'nested', 'file.txt')
  await dispatcher.callRequest('fs.createFile', { filePath })
  const content = await fs.readFile(filePath, 'utf-8')
  expect(content).toBe('')
})
it('createDir creates directories recursively', async () => {
  const dirPath = path.join(tmpDir, 'a', 'b', 'c')
  await dispatcher.callRequest('fs.createDir', { dirPath })
  const stats = await fs.stat(dirPath)
  expect(stats.isDirectory()).toBe(true)
})
it('rename moves files', async () => {
  const oldPath = path.join(tmpDir, 'old.txt')
  const newPath = path.join(tmpDir, 'new.txt')
  writeFileSync(oldPath, 'content')
  await dispatcher.callRequest('fs.rename', { oldPath, newPath })
  await expect(fs.access(oldPath)).rejects.toThrow()
  const content = await fs.readFile(newPath, 'utf-8')
  expect(content).toBe('content')
})
it('copy duplicates files', async () => {
  const src = path.join(tmpDir, 'src.txt')
  const dst = path.join(tmpDir, 'dst.txt')
  writeFileSync(src, 'original')
  await dispatcher.callRequest('fs.copy', { source: src, destination: dst })
  const content = await fs.readFile(dst, 'utf-8')
  expect(content).toBe('original')
})
it('realpath resolves symlinks', async () => {
  const realFile = path.join(tmpDir, 'real.txt')
  const linkPath = path.join(tmpDir, 'link.txt')
  writeFileSync(realFile, 'real')
  symlinkSync(realFile, linkPath)
  const result = (await dispatcher.callRequest('fs.realpath', { filePath: linkPath })) as string
  // On macOS, /var is a symlink to /private/var, so resolve both to compare
  const { realpathSync } = await import('fs')
  expect(result).toBe(realpathSync(realFile))
})
})

276
src/relay/fs-handler.ts Normal file
View file

@ -0,0 +1,276 @@
import {
readdir,
readFile,
writeFile,
stat,
lstat,
mkdir,
rename,
cp,
rm,
realpath
} from 'fs/promises'
import { extname } from 'path'
import type { RelayDispatcher } from './dispatcher'
import type { RelayContext } from './context'
import {
MAX_FILE_SIZE,
DEFAULT_MAX_RESULTS,
IMAGE_MIME_TYPES,
isBinaryBuffer,
searchWithRg,
listFilesWithRg
} from './fs-handler-utils'
type WatchState = {
  rootPath: string
  // Null while subscription setup is in flight or when the backend failed.
  unwatchFn: (() => void) | null
}

/**
 * Filesystem RPC handler for the relay.
 *
 * Registers all `fs.*` request handlers and the `fs.unwatch` notification on
 * the dispatcher. Every incoming path is validated against the relay context
 * before any filesystem access; `validatePathResolved` follows symlinks first
 * so links cannot be used to escape the workspace roots.
 */
export class FsHandler {
  private dispatcher: RelayDispatcher
  private context: RelayContext
  // Active watchers keyed by watched root path; bounded to 20 entries.
  private watches = new Map<string, WatchState>()

  constructor(dispatcher: RelayDispatcher, context: RelayContext) {
    this.dispatcher = dispatcher
    this.context = context
    this.registerHandlers()
  }

  /** Wire every fs.* method onto the dispatcher. */
  private registerHandlers(): void {
    this.dispatcher.onRequest('fs.readDir', (p) => this.readDir(p))
    this.dispatcher.onRequest('fs.readFile', (p) => this.readFile(p))
    this.dispatcher.onRequest('fs.writeFile', (p) => this.writeFile(p))
    this.dispatcher.onRequest('fs.stat', (p) => this.stat(p))
    this.dispatcher.onRequest('fs.deletePath', (p) => this.deletePath(p))
    this.dispatcher.onRequest('fs.createFile', (p) => this.createFile(p))
    this.dispatcher.onRequest('fs.createDir', (p) => this.createDir(p))
    this.dispatcher.onRequest('fs.rename', (p) => this.rename(p))
    this.dispatcher.onRequest('fs.copy', (p) => this.copy(p))
    this.dispatcher.onRequest('fs.realpath', (p) => this.realpath(p))
    this.dispatcher.onRequest('fs.search', (p) => this.search(p))
    this.dispatcher.onRequest('fs.listFiles', (p) => this.listFiles(p))
    this.dispatcher.onRequest('fs.watch', (p) => this.watch(p))
    this.dispatcher.onNotification('fs.unwatch', (p) => this.unwatch(p))
  }

  /** List a directory: directories first, then files, each alphabetized. */
  private async readDir(params: Record<string, unknown>) {
    const dirPath = params.dirPath as string
    await this.context.validatePathResolved(dirPath)
    const entries = await readdir(dirPath, { withFileTypes: true })
    return entries
      .map((entry) => ({
        name: entry.name,
        isDirectory: entry.isDirectory(),
        isSymlink: entry.isSymbolicLink()
      }))
      .sort((a, b) => {
        if (a.isDirectory !== b.isDirectory) {
          return a.isDirectory ? -1 : 1
        }
        return a.name.localeCompare(b.name)
      })
  }

  /**
   * Read a file as text, a base64 image payload, or a bare binary marker.
   * Throws for files larger than MAX_FILE_SIZE.
   */
  private async readFile(params: Record<string, unknown>) {
    const filePath = params.filePath as string
    await this.context.validatePathResolved(filePath)
    const stats = await stat(filePath)
    if (stats.size > MAX_FILE_SIZE) {
      throw new Error(
        `File too large: ${(stats.size / 1024 / 1024).toFixed(1)}MB exceeds ${MAX_FILE_SIZE / 1024 / 1024}MB limit`
      )
    }
    const buffer = await readFile(filePath)
    // Why: images are identified by extension so the client can render them;
    // other binary content is flagged but its bytes are not transferred.
    const mimeType = IMAGE_MIME_TYPES[extname(filePath).toLowerCase()]
    if (mimeType) {
      return { content: buffer.toString('base64'), isBinary: true, isImage: true, mimeType }
    }
    if (isBinaryBuffer(buffer)) {
      return { content: '', isBinary: true }
    }
    return { content: buffer.toString('utf-8'), isBinary: false }
  }

  /** Create or overwrite a file with UTF-8 text content. */
  private async writeFile(params: Record<string, unknown>) {
    const filePath = params.filePath as string
    await this.context.validatePathResolved(filePath)
    const content = params.content as string
    try {
      // Why: guard against clobbering a directory; ENOENT is fine because
      // writeFile below creates the file.
      const fileStats = await lstat(filePath)
      if (fileStats.isDirectory()) {
        throw new Error('Cannot write to a directory')
      }
    } catch (error) {
      if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
        throw error
      }
    }
    await writeFile(filePath, content, 'utf-8')
  }

  /** Return size/type/mtime for a path without following a final symlink. */
  private async stat(params: Record<string, unknown>) {
    const filePath = params.filePath as string
    await this.context.validatePathResolved(filePath)
    // Why: lstat is used instead of stat so that symlinks are reported as
    // symlinks rather than being silently followed. stat() follows symlinks,
    // meaning isSymbolicLink() would always return false.
    const stats = await lstat(filePath)
    let type: 'file' | 'directory' | 'symlink' = 'file'
    if (stats.isDirectory()) {
      type = 'directory'
    } else if (stats.isSymbolicLink()) {
      type = 'symlink'
    }
    return { size: stats.size, type, mtime: stats.mtimeMs }
  }

  /** Delete a file, or a directory when `recursive` is set. */
  private async deletePath(params: Record<string, unknown>) {
    const targetPath = params.targetPath as string
    await this.context.validatePathResolved(targetPath)
    const recursive = params.recursive as boolean | undefined
    const stats = await stat(targetPath)
    if (stats.isDirectory() && !recursive) {
      throw new Error('Cannot delete directory without recursive flag')
    }
    await rm(targetPath, { recursive: !!recursive, force: true })
  }

  /** Create an empty file, making parent directories as needed. */
  private async createFile(params: Record<string, unknown>) {
    const filePath = params.filePath as string
    // Why: symlinks in parent directories can redirect creation outside the
    // workspace. validatePathResolved follows symlinks before checking roots.
    await this.context.validatePathResolved(filePath)
    const { dirname } = await import('path')
    await mkdir(dirname(filePath), { recursive: true })
    // Why: flag 'wx' fails when the file exists, so existing content is
    // never truncated by a create request.
    await writeFile(filePath, '', { encoding: 'utf-8', flag: 'wx' })
  }

  /** Create a directory (and any missing parents). */
  private async createDir(params: Record<string, unknown>) {
    const dirPath = params.dirPath as string
    await this.context.validatePathResolved(dirPath)
    await mkdir(dirPath, { recursive: true })
  }

  /** Move/rename a file or directory; both endpoints are validated. */
  private async rename(params: Record<string, unknown>) {
    const oldPath = params.oldPath as string
    const newPath = params.newPath as string
    await this.context.validatePathResolved(oldPath)
    await this.context.validatePathResolved(newPath)
    await rename(oldPath, newPath)
  }

  /** Copy a file or directory tree; both endpoints are validated. */
  private async copy(params: Record<string, unknown>) {
    const source = params.source as string
    const destination = params.destination as string
    // Why: cp follows symlinks — a symlink inside the workspace pointing to
    // /etc would copy sensitive files into the workspace where readFile can
    // exfiltrate them.
    await this.context.validatePathResolved(source)
    await this.context.validatePathResolved(destination)
    await cp(source, destination, { recursive: true })
  }

  /** Resolve symlinks; both the input and the result must be in-workspace. */
  private async realpath(params: Record<string, unknown>) {
    const filePath = params.filePath as string
    this.context.validatePath(filePath)
    const resolved = await realpath(filePath)
    // Why: a symlink inside the workspace may resolve to a path outside it.
    // Returning the resolved path without validation leaks the external target.
    this.context.validatePath(resolved)
    return resolved
  }

  /** Full-text search under rootPath via ripgrep, capped at DEFAULT_MAX_RESULTS. */
  private async search(params: Record<string, unknown>) {
    const query = params.query as string
    const rootPath = params.rootPath as string
    // Why: a symlink inside the workspace pointing to a directory outside it
    // would let rg search (and return content from) files beyond the workspace.
    await this.context.validatePathResolved(rootPath)
    const caseSensitive = params.caseSensitive as boolean | undefined
    const wholeWord = params.wholeWord as boolean | undefined
    const useRegex = params.useRegex as boolean | undefined
    const includePattern = params.includePattern as string | undefined
    const excludePattern = params.excludePattern as string | undefined
    // Why: clamp the client-supplied cap so it can never exceed the server's.
    const maxResults = Math.min(
      (params.maxResults as number) || DEFAULT_MAX_RESULTS,
      DEFAULT_MAX_RESULTS
    )
    return searchWithRg(rootPath, query, {
      caseSensitive,
      wholeWord,
      useRegex,
      includePattern,
      excludePattern,
      maxResults
    })
  }

  /** List files under rootPath via ripgrep. */
  private async listFiles(params: Record<string, unknown>): Promise<string[]> {
    const rootPath = params.rootPath as string
    await this.context.validatePathResolved(rootPath)
    return listFilesWithRg(rootPath)
  }

  /** Start watching rootPath, emitting fs.changed notifications. */
  private async watch(params: Record<string, unknown>) {
    const rootPath = params.rootPath as string
    this.context.validatePath(rootPath)
    // Why: the idempotence check comes before the capacity check so that
    // re-watching an already-watched root never throws, even at capacity.
    if (this.watches.has(rootPath)) {
      return
    }
    if (this.watches.size >= 20) {
      throw new Error('Maximum number of file watchers reached')
    }
    const watchState: WatchState = { rootPath, unwatchFn: null }
    this.watches.set(rootPath, watchState)
    try {
      const watcher = await import('@parcel/watcher')
      const subscription = await watcher.subscribe(
        rootPath,
        (err, events) => {
          if (err) {
            // Why: surface watcher errors as a single 'overflow' event so the
            // client knows to rescan rather than trust incremental deltas.
            this.dispatcher.notify('fs.changed', {
              events: [{ kind: 'overflow', absolutePath: rootPath }]
            })
            return
          }
          const mapped = events.map((evt) => ({
            kind: evt.type,
            absolutePath: evt.path
          }))
          this.dispatcher.notify('fs.changed', { events: mapped })
        },
        { ignore: ['.git', 'node_modules', 'dist', 'build', '.next', '.cache', '__pycache__'] }
      )
      // Why: an fs.unwatch may have raced with the async subscribe above. If
      // this state was removed meanwhile, tear the subscription down now
      // instead of leaking it with no owner.
      if (this.watches.get(rootPath) !== watchState) {
        void subscription.unsubscribe()
        return
      }
      watchState.unwatchFn = () => {
        void subscription.unsubscribe()
      }
    } catch {
      // Why: drop the placeholder on failure so a dead watch neither consumes
      // a slot toward the 20-watcher limit nor blocks retrying this root.
      this.watches.delete(rootPath)
      // @parcel/watcher not available -- polling fallback would go here
      process.stderr.write('[relay] File watcher not available, fs.changed events disabled\n')
    }
  }

  /** Stop watching rootPath (no-op when not watched). */
  private unwatch(params: Record<string, unknown>): void {
    const rootPath = params.rootPath as string
    const state = this.watches.get(rootPath)
    if (state) {
      state.unwatchFn?.()
      this.watches.delete(rootPath)
    }
  }

  /** Tear down every active watcher. */
  dispose(): void {
    for (const [, state] of this.watches) {
      state.unwatchFn?.()
    }
    this.watches.clear()
  }
}

View file

@ -0,0 +1,177 @@
import { describe, expect, it } from 'vitest'
import { validateGitExecArgs } from './git-exec-validator'
/** Assert that the validator accepts the given argv without throwing. */
function expectAllowed(args: string[]): void {
  const run = (): void => validateGitExecArgs(args)
  expect(run).not.toThrow()
}
/** Assert that the validator rejects the given argv with the given message. */
function expectBlocked(args: string[], message: string): void {
  const run = (): void => validateGitExecArgs(args)
  expect(run).toThrow(message)
}
describe('validateGitExecArgs', () => {
  // Happy path: the full read-only allowlist must pass untouched.
  describe('allowed read-only subcommands', () => {
    it.each([
      [['rev-parse', '--show-toplevel']],
      [['branch', '--list']],
      [['log', '--oneline', '-10']],
      [['show-ref', '--heads']],
      [['ls-remote', 'origin']],
      [['remote', '-v']],
      [['remote', 'get-url', 'origin']],
      [['remote', 'show', 'origin']],
      [['symbolic-ref', 'HEAD']],
      [['symbolic-ref', '--short', 'HEAD']],
      [['merge-base', 'main', 'HEAD']],
      [['ls-files', '--error-unmatch', 'foo.txt']],
      [['config', '--get', 'user.name']],
      [['config', '--get-all', 'remote.origin.url']],
      [['config', '--list']],
      [['config', '-l']],
      [['config', '--get-regexp', 'user']]
    ])('allows %j', (args) => {
      expectAllowed(args)
    })
  })
  // Anything that mutates repository state must be rejected outright.
  describe('blocked subcommands', () => {
    it('rejects empty args', () => {
      expectBlocked([], 'git subcommand not allowed: (empty)')
    })
    it.each([
      'push',
      'pull',
      'commit',
      'checkout',
      'reset',
      'rebase',
      'merge',
      'stash',
      'clean',
      'gc',
      'reflog',
      'tag',
      'fetch',
      'worktree'
    ])('rejects %s', (cmd) => {
      expectBlocked([cmd], 'git subcommand not allowed')
    })
  })
  // Leading global flags (e.g. `-c core.sshCommand=...`) can execute code.
  describe('global flags before subcommand', () => {
    it.each([
      [['-c', 'core.sshCommand=evil', 'log']],
      [['--no-pager', 'log']],
      [['-C', '/tmp', 'status']]
    ])('rejects %j', (args) => {
      expectBlocked(args, 'Global git flags before the subcommand are not allowed')
    })
  })
  // Flags dangerous on every subcommand, in both `--flag value` and
  // `--flag=value` spellings.
  describe('global denied flags', () => {
    it.each([
      [['log', '--output', '/tmp/leak']],
      [['log', '--output=/tmp/leak']],
      [['log', '-o', '/tmp/leak']],
      [['rev-parse', '--exec-path=/evil']],
      [['log', '--work-tree=/other']],
      [['log', '--git-dir=/other/.git']]
    ])('rejects %j', (args) => {
      expectBlocked(args, 'Dangerous git flags are not allowed')
    })
    it('does not false-positive on unrelated =value flags', () => {
      expectAllowed(['log', '--format=%H'])
      expectAllowed(['log', '--pretty=oneline'])
    })
  })
  // config must carry a read-only flag AND no write/redirect flag.
  describe('git config', () => {
    it('rejects config without read-only flag', () => {
      expectBlocked(['config', 'user.name', 'Evil'], 'restricted to read-only operations')
    })
    it.each([
      ['--add'],
      ['--unset'],
      ['--unset-all'],
      ['--replace-all'],
      ['--rename-section'],
      ['--remove-section'],
      ['--edit'],
      ['-e'],
      ['--file=/etc/passwd'],
      ['-f'],
      ['--global'],
      ['--system']
    ])('rejects config with write flag %s', (flag) => {
      expectBlocked(
        ['config', '--list', flag, 'val'],
        'git config write operations are not allowed'
      )
    })
  })
  // branch is allowed for listing only; ref-mutating flags are rejected.
  describe('git branch', () => {
    it('allows safe branch flags', () => {
      expectAllowed(['branch', '--list'])
      expectAllowed(['branch', '-a'])
      expectAllowed(['branch', '-r'])
    })
    it.each(['-d', '-D', '--delete', '-m', '-M', '--move', '-c', '-C', '--copy'])(
      'rejects branch %s',
      (flag) => {
        expectBlocked(['branch', flag, 'name'], 'Destructive git branch flags')
      }
    )
    it('catches --delete=value compound syntax', () => {
      expectBlocked(['branch', '--delete=feature'], 'Destructive git branch flags')
    })
  })
  // remote write sub-subcommands are rejected wherever they appear.
  describe('git remote', () => {
    it.each([
      'add',
      'remove',
      'rm',
      'rename',
      'set-url',
      'set-head',
      'set-branches',
      'prune',
      'update'
    ])('rejects remote %s', (subcmd) => {
      expectBlocked(['remote', subcmd, 'arg'], 'Destructive git remote operations')
    })
    it('skips flags when finding remote subcommand', () => {
      expectBlocked(['remote', '-v', 'add', 'evil', 'url'], 'Destructive git remote operations')
    })
  })
  // symbolic-ref: reads pass; delete flags and the two-positional write form fail.
  describe('git symbolic-ref', () => {
    it('allows read operations', () => {
      expectAllowed(['symbolic-ref', 'HEAD'])
      expectAllowed(['symbolic-ref', '--short', 'HEAD'])
      expectAllowed(['symbolic-ref', '-q', 'HEAD'])
    })
    it.each(['-d', '--delete', '-m'])('rejects symbolic-ref %s', (flag) => {
      expectBlocked(['symbolic-ref', flag, 'HEAD'], 'git symbolic-ref write operations')
    })
    it('rejects two positional args (write form)', () => {
      expectBlocked(
        ['symbolic-ref', 'HEAD', 'refs/heads/main'],
        'git symbolic-ref write operations'
      )
    })
    it('catches --delete=value compound syntax', () => {
      expectBlocked(['symbolic-ref', '--delete=HEAD'], 'git symbolic-ref write operations')
    })
  })
})

View file

@ -0,0 +1,135 @@
/**
* Git exec argument validation for the relay's git.exec handler.
*
* Why: oxlint max-lines requires files to stay under 300 lines.
* Extracted from git-handler-ops.ts to keep both files under the limit.
*/
// Why: only read-only git subcommands are allowed via exec. config is restricted
// to read-only flags; branch rejects destructive flags; fetch/worktree removed.
const ALLOWED_GIT_SUBCOMMANDS = new Set([
  'rev-parse',
  'branch',
  'log',
  'show-ref',
  'ls-remote',
  'remote',
  'symbolic-ref',
  'merge-base',
  'ls-files',
  'config'
])
const CONFIG_READ_ONLY_FLAGS = new Set(['--get', '--get-all', '--list', '--get-regexp', '-l'])
// Why: checking presence of a read-only flag is insufficient — a request could
// include both --list (passes the check) and --add (performs a write). Reject
// known write operations explicitly.
const CONFIG_WRITE_FLAGS = new Set([
  '--add',
  '--unset',
  '--unset-all',
  '--replace-all',
  '--rename-section',
  '--remove-section',
  '--edit',
  '-e',
  // Why: --file redirects config reads to an arbitrary file, enabling path
  // traversal (e.g. `--file /etc/passwd --list` leaks file contents).
  '--file',
  '-f',
  '--global',
  '--system'
])
const BRANCH_DESTRUCTIVE_FLAGS = new Set([
  '-d',
  '-D',
  '--delete',
  '-m',
  '-M',
  '--move',
  '-c',
  '-C',
  '--copy',
  // Why: -f/--force re-points an existing branch ref, and the upstream /
  // description flags rewrite branch configuration — all of these are writes
  // even though the `branch` subcommand itself is allowed for listing.
  '-f',
  '--force',
  '-u',
  '--set-upstream-to',
  '--unset-upstream',
  '--edit-description'
])
// Why: these flags are dangerous across ALL subcommands — --output writes to
// arbitrary paths, --exec-path changes where git loads helpers from, --work-tree
// and --git-dir escape the validated worktree.
const GLOBAL_DENIED_FLAGS = new Set(['--output', '-o', '--exec-path', '--work-tree', '--git-dir'])
const REMOTE_WRITE_SUBCOMMANDS = new Set([
  'add',
  'remove',
  'rm',
  'rename',
  'set-head',
  'set-branches',
  'set-url',
  'prune',
  'update'
])
const SYMBOLIC_REF_WRITE_FLAGS = new Set(['-d', '--delete', '-m'])
// Why: git accepts --flag=value compound syntax (e.g. --file=/etc/passwd),
// which bypasses exact-match Set.has() checks. This helper catches both forms.
function matchesDeniedFlag(arg: string, denySet: Set<string>): boolean {
  if (denySet.has(arg)) {
    return true
  }
  const eqIdx = arg.indexOf('=')
  if (eqIdx > 0) {
    return denySet.has(arg.slice(0, eqIdx))
  }
  return false
}
/**
 * Validate an argv destined for `git <args>` executed on behalf of a remote
 * client. Throws when the invocation is not provably read-only.
 *
 * NOTE(review): bare `git branch <name>` (branch creation with no flags)
 * still passes — it is a non-destructive write; tighten here if creation
 * should also be blocked.
 */
export function validateGitExecArgs(args: string[]): void {
  // Why: git accepts `-c key=value` before the subcommand, which can override
  // config and execute arbitrary commands (e.g. core.sshCommand). Reject any
  // leading argument that looks like a global git flag.
  if (args.length > 0 && args[0].startsWith('-')) {
    throw new Error('Global git flags before the subcommand are not allowed')
  }
  const subcommand = args[0]
  if (!subcommand || !ALLOWED_GIT_SUBCOMMANDS.has(subcommand)) {
    throw new Error(`git subcommand not allowed: ${subcommand ?? '(empty)'}`)
  }
  const restArgs = args.slice(1)
  if (restArgs.some((a) => matchesDeniedFlag(a, GLOBAL_DENIED_FLAGS))) {
    throw new Error('Dangerous git flags are not allowed via exec')
  }
  if (subcommand === 'config') {
    if (!restArgs.some((a) => CONFIG_READ_ONLY_FLAGS.has(a))) {
      throw new Error('git config is restricted to read-only operations (--get, --list, etc.)')
    }
    if (restArgs.some((a) => matchesDeniedFlag(a, CONFIG_WRITE_FLAGS))) {
      throw new Error('git config write operations are not allowed via exec')
    }
  }
  if (subcommand === 'branch') {
    if (restArgs.some((a) => matchesDeniedFlag(a, BRANCH_DESTRUCTIVE_FLAGS))) {
      throw new Error('Destructive git branch flags are not allowed via exec')
    }
  }
  if (subcommand === 'remote') {
    // Why: skip leading flags (e.g. `-v`) so `remote -v add …` is still caught.
    const remoteSubcmd = restArgs.find((a) => !a.startsWith('-'))
    if (remoteSubcmd && REMOTE_WRITE_SUBCOMMANDS.has(remoteSubcmd)) {
      throw new Error('Destructive git remote operations are not allowed via exec')
    }
  }
  if (subcommand === 'symbolic-ref') {
    if (restArgs.some((a) => matchesDeniedFlag(a, SYMBOLIC_REF_WRITE_FLAGS))) {
      throw new Error('git symbolic-ref write operations are not allowed via exec')
    }
    // Why: `symbolic-ref <name> <ref>` (two positionals) is the write form.
    const positionalArgs = restArgs.filter((a) => !a.startsWith('-'))
    if (positionalArgs.length >= 2) {
      throw new Error('git symbolic-ref write operations are not allowed via exec')
    }
  }
}

View file

@ -0,0 +1,266 @@
/**
* Higher-level git operations extracted from git-handler.ts.
*
* Why: oxlint max-lines requires files to stay under 300 lines.
* These async operations accept a git executor callback so they
* remain decoupled from the GitHandler class.
*/
import * as path from 'path'
import { readFile } from 'fs/promises'
import { bufferToBlob, buildDiffResult, parseBranchDiff } from './git-handler-utils'
// ─── Executor types ──────────────────────────────────────────────────
export type GitExec = (args: string[], cwd: string) => Promise<{ stdout: string; stderr: string }>
export type GitBufferExec = (args: string[], cwd: string) => Promise<Buffer>
// ─── Blob reading ────────────────────────────────────────────────────
/** Read the blob for `filePath` at revision `oid`; empty text blob on failure. */
export async function readBlobAtOid(
  gitBuffer: GitBufferExec,
  cwd: string,
  oid: string,
  filePath: string
): Promise<{ content: string; isBinary: boolean }> {
  const revSpec = `${oid}:${filePath}`
  try {
    const raw = await gitBuffer(['show', revSpec], cwd)
    return bufferToBlob(raw, filePath)
  } catch {
    // Missing path at that revision (or any git failure) reads as empty text.
    return { content: '', isBinary: false }
  }
}
/** Read the staged (index) blob for `filePath`; empty text blob on failure. */
export async function readBlobAtIndex(
  gitBuffer: GitBufferExec,
  cwd: string,
  filePath: string
): Promise<{ content: string; isBinary: boolean }> {
  try {
    return bufferToBlob(await gitBuffer(['show', `:${filePath}`], cwd), filePath)
  } catch {
    return { content: '', isBinary: false }
  }
}
/**
 * Left-hand side for an unstaged diff: prefer the index blob, falling back
 * to the HEAD blob when the index holds no content for the file.
 */
export async function readUnstagedLeft(
  gitBuffer: GitBufferExec,
  cwd: string,
  filePath: string
): Promise<{ content: string; isBinary: boolean }> {
  const staged = await readBlobAtIndex(gitBuffer, cwd, filePath)
  const hasIndexBlob = staged.content !== '' || staged.isBinary
  return hasIndexBlob ? staged : readBlobAtOid(gitBuffer, cwd, 'HEAD', filePath)
}
/** Read a working-tree file as a blob; empty text blob when unreadable. */
export async function readWorkingFile(
  absPath: string
): Promise<{ content: string; isBinary: boolean }> {
  try {
    return bufferToBlob(await readFile(absPath))
  } catch {
    // A deleted or unreadable working file reads as an empty text blob.
    return { content: '', isBinary: false }
  }
}
// ─── Diff ────────────────────────────────────────────────────────────
/**
 * Compute the left/right blobs for a single file's diff.
 *
 * Staged diffs compare HEAD against the index; unstaged diffs compare the
 * index (or HEAD when unstaged-left is empty) against the working tree.
 * Any failure falls back to whatever sides were already read.
 */
export async function computeDiff(
  git: GitBufferExec,
  worktreePath: string,
  filePath: string,
  staged: boolean
) {
  let left = { content: '', isBinary: false }
  let right = { content: '', isBinary: false }
  try {
    if (staged) {
      left = await readBlobAtOid(git, worktreePath, 'HEAD', filePath)
      right = await readBlobAtIndex(git, worktreePath, filePath)
    } else {
      left = await readUnstagedLeft(git, worktreePath, filePath)
      right = await readWorkingFile(path.join(worktreePath, filePath))
    }
  } catch {
    // Fallback: keep whichever sides were assigned before the failure.
  }
  return buildDiffResult(left.content, right.content, left.isBinary, right.isBinary, filePath)
}
// ─── Branch compare ──────────────────────────────────────────────────
/**
 * Build the compare-to-base summary for the current branch.
 *
 * Resolves the current branch name, HEAD, the base ref, and their merge base
 * in sequence; each failure short-circuits with a distinct `status` value so
 * the UI can explain exactly why the comparison is unavailable.
 */
export async function branchCompare(
  git: GitExec,
  worktreePath: string,
  baseRef: string,
  loadBranchChanges: (mergeBase: string, headOid: string) => Promise<Record<string, unknown>[]>
) {
  const summary: Record<string, unknown> = {
    baseRef,
    baseOid: null,
    compareRef: 'HEAD',
    headOid: null,
    mergeBase: null,
    changedFiles: 0,
    status: 'loading'
  }
  // Cosmetic: show the branch name instead of 'HEAD' when one is checked out.
  try {
    const { stdout: branchOut } = await git(['branch', '--show-current'], worktreePath)
    const branch = branchOut.trim()
    if (branch) {
      summary.compareRef = branch
    }
  } catch {
    /* keep HEAD */
  }
  // Step 1: HEAD must resolve (fails on a repo with no commits yet).
  let headOid: string
  try {
    const { stdout } = await git(['rev-parse', '--verify', 'HEAD'], worktreePath)
    headOid = stdout.trim()
    summary.headOid = headOid
  } catch {
    summary.status = 'unborn-head'
    summary.errorMessage =
      'This branch does not have a committed HEAD yet, so compare-to-base is unavailable.'
    return { summary, entries: [] }
  }
  // Step 2: the requested base ref must resolve in this repository.
  let baseOid: string
  try {
    const { stdout } = await git(['rev-parse', '--verify', baseRef], worktreePath)
    baseOid = stdout.trim()
    summary.baseOid = baseOid
  } catch {
    summary.status = 'invalid-base'
    summary.errorMessage = `Base ref ${baseRef} could not be resolved in this repository.`
    return { summary, entries: [] }
  }
  // Step 3: the two commits must share history.
  let mergeBase: string
  try {
    const { stdout } = await git(['merge-base', baseOid, headOid], worktreePath)
    mergeBase = stdout.trim()
    summary.mergeBase = mergeBase
  } catch {
    summary.status = 'no-merge-base'
    summary.errorMessage = `This branch and ${baseRef} do not share a merge base, so compare-to-base is unavailable.`
    return { summary, entries: [] }
  }
  // Step 4: load the changed-file entries and the ahead-commit count.
  try {
    const entries = await loadBranchChanges(mergeBase, headOid)
    const { stdout: countOut } = await git(
      ['rev-list', '--count', `${baseOid}..${headOid}`],
      worktreePath
    )
    summary.changedFiles = entries.length
    summary.commitsAhead = parseInt(countOut.trim(), 10) || 0
    summary.status = 'ready'
    return { summary, entries }
  } catch (error) {
    summary.status = 'error'
    summary.errorMessage = error instanceof Error ? error.message : 'Failed to load branch compare'
    return { summary, entries: [] }
  }
}
// ─── Branch diff ─────────────────────────────────────────────────────
/**
 * Produce per-file diff payloads for the branch-vs-base comparison.
 *
 * Resolves merge-base(baseRef, HEAD), lists changed files with rename/copy
 * detection, optionally filters to a single file, and (when `includePatch`)
 * reads the left/right blobs for each change.
 */
export async function branchDiffEntries(
  git: GitExec,
  gitBuffer: GitBufferExec,
  worktreePath: string,
  baseRef: string,
  opts: { includePatch?: boolean; filePath?: string; oldPath?: string }
) {
  let headOid: string
  let mergeBase: string
  // Any resolution failure yields an empty result rather than an error.
  try {
    const { stdout: headOut } = await git(['rev-parse', '--verify', 'HEAD'], worktreePath)
    headOid = headOut.trim()
    const { stdout: baseOut } = await git(['rev-parse', '--verify', baseRef], worktreePath)
    const baseOid = baseOut.trim()
    const { stdout: mbOut } = await git(['merge-base', baseOid, headOid], worktreePath)
    mergeBase = mbOut.trim()
  } catch {
    return []
  }
  // -M/-C enable rename and copy detection so moved files diff as one entry.
  const { stdout } = await git(
    ['diff', '--name-status', '-M', '-C', mergeBase, headOid],
    worktreePath
  )
  const allChanges = parseBranchDiff(stdout)
  // Why: the IPC handler for single-file branch diff sends filePath/oldPath
  // to avoid reading blobs for every changed file — only the matched file.
  let changes = allChanges
  if (opts.filePath) {
    changes = allChanges.filter(
      (c) =>
        c.path === opts.filePath ||
        c.oldPath === opts.filePath ||
        (opts.oldPath && (c.path === opts.oldPath || c.oldPath === opts.oldPath))
    )
  }
  if (!opts.includePatch) {
    // NOTE(review): these placeholders carry no path or status — presumably
    // the caller zips them positionally with its own change list; confirm
    // against the IPC handler before relying on the shape.
    return changes.map(() => ({
      kind: 'text',
      originalContent: '',
      modifiedContent: '',
      originalIsBinary: false,
      modifiedIsBinary: false
    }))
  }
  const results: Record<string, unknown>[] = []
  for (const change of changes) {
    const fp = change.path as string
    // For renames/copies the left side is read at the pre-rename path.
    const oldP = (change.oldPath as string) ?? fp
    try {
      const left = await readBlobAtOid(gitBuffer, worktreePath, mergeBase, oldP)
      const right = await readBlobAtOid(gitBuffer, worktreePath, headOid, fp)
      results.push(buildDiffResult(left.content, right.content, left.isBinary, right.isBinary, fp))
    } catch {
      // Blob read failure degrades to an empty text diff for that file.
      results.push({
        kind: 'text',
        originalContent: '',
        modifiedContent: '',
        originalIsBinary: false,
        modifiedIsBinary: false
      })
    }
  }
  return results
}
export { validateGitExecArgs } from './git-exec-validator'

View file

@ -0,0 +1,324 @@
/**
* Pure parsing helpers extracted from git-handler.ts.
*
* Why: oxlint max-lines requires files to stay under 300 lines.
* These functions have no side-effects and depend only on their arguments,
* making them easy to test independently.
*/
import * as path from 'path'
import { existsSync } from 'fs'
// ─── Status parsing ──────────────────────────────────────────────────
/**
 * Map a porcelain status letter to the UI-facing status string. Unknown
 * letters (e.g. 'T' for typechange) deliberately fall back to 'modified'.
 */
export function parseStatusChar(char: string): string {
  switch (char) {
    case 'M':
      return 'modified'
    case 'A':
      return 'added'
    case 'D':
      return 'deleted'
    case 'R':
      return 'renamed'
    case 'C':
      return 'copied'
    default:
      return 'modified'
  }
}
/**
 * Map a `git diff --name-status` letter to a status string.
 * Why: branch-diff letters share porcelain letter semantics; delegating keeps
 * the two mappings from drifting apart.
 */
export function parseBranchStatusChar(char: string): string {
  return parseStatusChar(char)
}
/** Map a porcelain v2 XY conflict code to a conflict kind, or null if none. */
export function parseConflictKind(xy: string): string | null {
  switch (xy) {
    case 'UU':
      return 'both_modified'
    case 'AA':
      return 'both_added'
    case 'DD':
      return 'both_deleted'
    case 'AU':
      return 'added_by_us'
    case 'UA':
      return 'added_by_them'
    case 'DU':
      return 'deleted_by_us'
    case 'UD':
      return 'deleted_by_them'
    default:
      return null
  }
}
/**
 * Parse `git status --porcelain=v2` output into structured entries.
 * Does NOT handle unmerged entries (those require worktree access).
 */
export function parseStatusOutput(stdout: string): {
  entries: Record<string, unknown>[]
  unmergedLines: string[]
} {
  const entries: Record<string, unknown>[] = []
  const unmergedLines: string[] = []
  for (const line of stdout.split(/\r?\n/)) {
    if (!line) {
      continue
    }
    if (line.startsWith('1 ') || line.startsWith('2 ')) {
      const parts = line.split(' ')
      const xy = parts[1]
      const indexStatus = xy[0]
      const worktreeStatus = xy[1]
      if (line.startsWith('2 ')) {
        // Why: porcelain v2 type-2 format is
        // `2 XY sub mH mI mW hH hI Xscore path\torigPath`.
        // The new path is everything after the 9 fixed space-delimited fields;
        // re-joining (instead of taking only the last token) preserves paths
        // that contain spaces.
        const tabParts = line.split('\t')
        const spaceParts = tabParts[0].split(' ')
        const filePath = spaceParts.slice(9).join(' ')
        const oldPath = tabParts[1]
        if (indexStatus !== '.') {
          entries.push({
            path: filePath,
            status: parseStatusChar(indexStatus),
            area: 'staged',
            oldPath
          })
        }
        if (worktreeStatus !== '.') {
          entries.push({
            path: filePath,
            status: parseStatusChar(worktreeStatus),
            area: 'unstaged',
            oldPath
          })
        }
      } else {
        // Type-1 format has 8 fixed fields before the path.
        const filePath = parts.slice(8).join(' ')
        if (indexStatus !== '.') {
          entries.push({ path: filePath, status: parseStatusChar(indexStatus), area: 'staged' })
        }
        if (worktreeStatus !== '.') {
          entries.push({
            path: filePath,
            status: parseStatusChar(worktreeStatus),
            area: 'unstaged'
          })
        }
      }
    } else if (line.startsWith('? ')) {
      entries.push({ path: line.slice(2), status: 'untracked', area: 'untracked' })
    } else if (line.startsWith('u ')) {
      // Unmerged entries need worktree access; handled by parseUnmergedEntry.
      unmergedLines.push(line)
    }
  }
  return { entries, unmergedLines }
}
/**
 * Parse a single unmerged entry line from porcelain v2 output.
 * Returns null if the entry should be skipped (e.g. submodule conflicts).
 */
export function parseUnmergedEntry(
  worktreePath: string,
  line: string
): Record<string, unknown> | null {
  const fields = line.split(' ')
  const conflictKind = parseConflictKind(fields[1])
  // Unmerged lines carry 10 fixed fields before the path; re-join preserves
  // paths containing spaces.
  const filePath = fields.slice(10).join(' ')
  const stageModes = [fields[3], fields[4], fields[5]]
  // Skip empty paths, submodule conflicts (mode 160000), and unknown XY codes.
  if (!filePath || stageModes.includes('160000') || !conflictKind) {
    return null
  }
  let status: string = 'modified'
  if (conflictKind === 'both_deleted') {
    status = 'deleted'
  } else if (conflictKind !== 'both_modified' && conflictKind !== 'both_added') {
    // One-sided conflicts: presence on disk decides modified vs deleted.
    try {
      status = existsSync(path.join(worktreePath, filePath)) ? 'modified' : 'deleted'
    } catch {
      // Why: defaulting to 'modified' on fs error is the least misleading option
      status = 'modified'
    }
  }
  return {
    path: filePath,
    area: 'unstaged',
    status,
    conflictKind,
    conflictStatus: 'unresolved'
  }
}
// ─── Branch diff parsing ─────────────────────────────────────────────
/**
 * Parse `git diff --name-status` output into structured change entries.
 */
export function parseBranchDiff(stdout: string): Record<string, unknown>[] {
  const changes: Record<string, unknown>[] = []
  for (const rawLine of stdout.split(/\r?\n/)) {
    if (!rawLine) {
      continue
    }
    const cols = rawLine.split('\t')
    const statusCode = cols[0] ?? ''
    const status = parseBranchStatusChar(statusCode[0] ?? 'M')
    // Rename/copy rows carry a score suffix (e.g. R100) and two paths:
    // old path, then new path.
    const isRenameOrCopy = statusCode.startsWith('R') || statusCode.startsWith('C')
    if (isRenameOrCopy) {
      const filePath = cols[2]
      if (filePath) {
        changes.push({ path: filePath, oldPath: cols[1], status })
      }
    } else {
      const filePath = cols[1]
      if (filePath) {
        changes.push({ path: filePath, status })
      }
    }
  }
  return changes
}
// ─── Worktree parsing ────────────────────────────────────────────────
/** Parse `git worktree list --porcelain` output into worktree descriptors. */
export function parseWorktreeList(output: string): Record<string, unknown>[] {
  const result: Record<string, unknown>[] = []
  // Porcelain output is one blank-line-separated block per worktree.
  for (const rawBlock of output.trim().split(/\r?\n\r?\n/)) {
    const block = rawBlock.trim()
    if (!block) {
      continue
    }
    const info = { path: '', head: '', branch: '', bare: false }
    for (const line of block.split(/\r?\n/)) {
      if (line.startsWith('worktree ')) {
        info.path = line.slice('worktree '.length)
      } else if (line.startsWith('HEAD ')) {
        info.head = line.slice('HEAD '.length)
      } else if (line.startsWith('branch ')) {
        info.branch = line.slice('branch '.length)
      } else if (line === 'bare') {
        info.bare = true
      }
    }
    if (info.path) {
      result.push({
        path: info.path,
        head: info.head,
        branch: info.branch,
        isBare: info.bare,
        // Why: `git worktree list` always reports the main worktree first.
        isMainWorktree: result.length === 0
      })
    }
  }
  return result
}
// ─── Binary / blob helpers ───────────────────────────────────────────
/**
 * Heuristic binary detection: a NUL byte anywhere in the first 8 KiB
 * marks the buffer as binary; NULs past that window are ignored.
 */
export function isBinaryBuffer(buffer: Buffer): boolean {
  return buffer.subarray(0, 8192).includes(0)
}
// Lowercased file extensions mapped to MIME types the client can render
// inline. Binary files outside this map get no base64 payload (see
// bufferToBlob) and no isImage/mimeType flags (see buildDiffResult).
export const PREVIEWABLE_MIME: Record<string, string> = {
  '.png': 'image/png',
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.gif': 'image/gif',
  '.svg': 'image/svg+xml',
  '.webp': 'image/webp',
  '.bmp': 'image/bmp',
  '.ico': 'image/x-icon',
  '.pdf': 'application/pdf'
}
export function bufferToBlob(
buffer: Buffer,
filePath?: string
): { content: string; isBinary: boolean } {
const binary = isBinaryBuffer(buffer)
if (binary) {
const ext = filePath ? path.extname(filePath).toLowerCase() : ''
const previewable = !!PREVIEWABLE_MIME[ext]
return { content: previewable ? buffer.toString('base64') : '', isBinary: true }
}
return { content: buffer.toString('utf-8'), isBinary: false }
}
/**
 * Build a diff result object from original/modified content.
 * Used by both working-tree diffs and branch diffs.
 *
 * Text diffs carry both sides verbatim; binary diffs additionally flag
 * previewable files with `isImage` and their MIME type.
 */
export function buildDiffResult(
  originalContent: string,
  modifiedContent: string,
  originalIsBinary: boolean,
  modifiedIsBinary: boolean,
  filePath?: string
) {
  if (!originalIsBinary && !modifiedIsBinary) {
    return {
      kind: 'text' as const,
      originalContent,
      modifiedContent,
      originalIsBinary: false,
      modifiedIsBinary: false
    }
  }
  const ext = filePath ? path.extname(filePath).toLowerCase() : ''
  const mimeType = PREVIEWABLE_MIME[ext]
  return {
    kind: 'binary' as const,
    originalContent,
    modifiedContent,
    originalIsBinary,
    modifiedIsBinary,
    ...(mimeType ? { isImage: true, mimeType } : {})
  }
}

View file

@ -0,0 +1,333 @@
import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest'
import { GitHandler } from './git-handler'
import { RelayContext } from './context'
import type { RelayDispatcher } from './dispatcher'
import * as fs from 'fs/promises'
import * as path from 'path'
import { mkdtempSync, writeFileSync } from 'fs'
import { tmpdir } from 'os'
import { execFileSync } from 'child_process'
/**
 * Build a RelayDispatcher stand-in that records registered handlers and
 * lets tests invoke a request handler directly via `callRequest`.
 */
function createMockDispatcher() {
  type RequestHandler = (params: Record<string, unknown>) => Promise<unknown>
  type NotificationHandler = (params: Record<string, unknown>) => void
  const requestHandlers = new Map<string, RequestHandler>()
  const notificationHandlers = new Map<string, NotificationHandler>()
  return {
    onRequest: vi.fn((method: string, handler: RequestHandler) => {
      requestHandlers.set(method, handler)
    }),
    onNotification: vi.fn((method: string, handler: NotificationHandler) => {
      notificationHandlers.set(method, handler)
    }),
    notify: vi.fn(),
    // Exposed so tests can assert which methods were registered.
    _requestHandlers: requestHandlers,
    async callRequest(method: string, params: Record<string, unknown> = {}) {
      const handler = requestHandlers.get(method)
      if (!handler) {
        throw new Error(`No handler for ${method}`)
      }
      return handler(params)
    }
  }
}
/** Initialize a git repo in `dir` with a deterministic test identity. */
function gitInit(dir: string): void {
  const run = (args: string[]): void => {
    execFileSync('git', args, { cwd: dir, stdio: 'pipe' })
  }
  run(['init'])
  run(['config', 'user.email', 'test@test.com'])
  run(['config', 'user.name', 'Test'])
}
/** Stage everything in `dir` and commit it (empty commits allowed). */
function gitCommit(dir: string, message: string): void {
  for (const args of [
    ['add', '.'],
    ['commit', '-m', message, '--allow-empty']
  ]) {
    execFileSync('git', args, { cwd: dir, stdio: 'pipe' })
  }
}
// Unit tests for GitHandler: each case builds a throwaway repo under a
// temp directory, registers it as a relay root, and drives the handler
// through the mock dispatcher's request plumbing, checking results
// against real `git` output.
describe('GitHandler', () => {
  let dispatcher: ReturnType<typeof createMockDispatcher>
  let tmpDir: string
  beforeEach(() => {
    tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-git-'))
    dispatcher = createMockDispatcher()
    const ctx = new RelayContext()
    ctx.registerRoot(tmpDir)
    new GitHandler(dispatcher as unknown as RelayDispatcher, ctx)
  })
  afterEach(async () => {
    await fs.rm(tmpDir, { recursive: true, force: true })
  })
  it('registers all expected handlers', () => {
    const methods = Array.from(dispatcher._requestHandlers.keys())
    expect(methods).toContain('git.status')
    expect(methods).toContain('git.diff')
    expect(methods).toContain('git.stage')
    expect(methods).toContain('git.unstage')
    expect(methods).toContain('git.bulkStage')
    expect(methods).toContain('git.bulkUnstage')
    expect(methods).toContain('git.discard')
    expect(methods).toContain('git.conflictOperation')
    expect(methods).toContain('git.branchCompare')
    expect(methods).toContain('git.branchDiff')
    expect(methods).toContain('git.listWorktrees')
    expect(methods).toContain('git.addWorktree')
    expect(methods).toContain('git.removeWorktree')
  })
  // git.status: porcelain-v2 parsing over real repos in various states.
  describe('status', () => {
    it('returns empty entries for clean repo', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'hello')
      gitCommit(tmpDir, 'initial')
      const result = (await dispatcher.callRequest('git.status', { worktreePath: tmpDir })) as {
        entries: Record<string, unknown>[]
        conflictOperation: string
      }
      expect(result.entries).toEqual([])
      expect(result.conflictOperation).toBe('unknown')
    })
    it('detects untracked files', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'tracked.txt'), 'tracked')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'new.txt'), 'new')
      const result = (await dispatcher.callRequest('git.status', { worktreePath: tmpDir })) as {
        entries: Record<string, unknown>[]
      }
      const untracked = result.entries.find((e) => e.path === 'new.txt')
      expect(untracked).toBeDefined()
      expect(untracked!.status).toBe('untracked')
      expect(untracked!.area).toBe('untracked')
    })
    it('detects modified files', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'original')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'file.txt'), 'modified')
      const result = (await dispatcher.callRequest('git.status', { worktreePath: tmpDir })) as {
        entries: Record<string, unknown>[]
      }
      const modified = result.entries.find((e) => e.path === 'file.txt')
      expect(modified).toBeDefined()
      expect(modified!.status).toBe('modified')
      expect(modified!.area).toBe('unstaged')
    })
    it('detects staged files', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'original')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'file.txt'), 'changed')
      execFileSync('git', ['add', 'file.txt'], { cwd: tmpDir, stdio: 'pipe' })
      const result = (await dispatcher.callRequest('git.status', { worktreePath: tmpDir })) as {
        entries: Record<string, unknown>[]
      }
      const staged = result.entries.find((e) => e.area === 'staged')
      expect(staged).toBeDefined()
      expect(staged!.status).toBe('modified')
    })
  })
  // git.stage / git.unstage: verified against `git diff --cached`.
  describe('stage and unstage', () => {
    it('stages a file', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'content')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'file.txt'), 'changed')
      await dispatcher.callRequest('git.stage', { worktreePath: tmpDir, filePath: 'file.txt' })
      const output = execFileSync('git', ['diff', '--cached', '--name-only'], {
        cwd: tmpDir,
        encoding: 'utf-8'
      })
      expect(output.trim()).toBe('file.txt')
    })
    it('unstages a file', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'content')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'file.txt'), 'changed')
      execFileSync('git', ['add', 'file.txt'], { cwd: tmpDir, stdio: 'pipe' })
      await dispatcher.callRequest('git.unstage', { worktreePath: tmpDir, filePath: 'file.txt' })
      const output = execFileSync('git', ['diff', '--cached', '--name-only'], {
        cwd: tmpDir,
        encoding: 'utf-8'
      })
      expect(output.trim()).toBe('')
    })
  })
  // git.diff: HEAD-vs-worktree and HEAD-vs-index content comparison.
  describe('diff', () => {
    it('returns text diff for modified file', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'original')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'file.txt'), 'modified')
      const result = (await dispatcher.callRequest('git.diff', {
        worktreePath: tmpDir,
        filePath: 'file.txt',
        staged: false
      })) as { kind: string; originalContent: string; modifiedContent: string }
      expect(result.kind).toBe('text')
      expect(result.originalContent).toBe('original')
      expect(result.modifiedContent).toBe('modified')
    })
    it('returns staged diff', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'original')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'file.txt'), 'staged-content')
      execFileSync('git', ['add', 'file.txt'], { cwd: tmpDir, stdio: 'pipe' })
      const result = (await dispatcher.callRequest('git.diff', {
        worktreePath: tmpDir,
        filePath: 'file.txt',
        staged: true
      })) as { kind: string; originalContent: string; modifiedContent: string }
      expect(result.kind).toBe('text')
      expect(result.originalContent).toBe('original')
      expect(result.modifiedContent).toBe('staged-content')
    })
  })
  // git.discard: restore for tracked files, delete for untracked,
  // and hard rejection of traversal outside the worktree.
  describe('discard', () => {
    it('discards changes to tracked file', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'original')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'file.txt'), 'modified')
      await dispatcher.callRequest('git.discard', { worktreePath: tmpDir, filePath: 'file.txt' })
      const content = await fs.readFile(path.join(tmpDir, 'file.txt'), 'utf-8')
      expect(content).toBe('original')
    })
    it('deletes untracked file on discard', async () => {
      gitInit(tmpDir)
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'new.txt'), 'untracked')
      await dispatcher.callRequest('git.discard', { worktreePath: tmpDir, filePath: 'new.txt' })
      await expect(fs.access(path.join(tmpDir, 'new.txt'))).rejects.toThrow()
    })
    it('rejects path traversal', async () => {
      gitInit(tmpDir)
      await expect(
        dispatcher.callRequest('git.discard', {
          worktreePath: tmpDir,
          filePath: '../../../etc/passwd'
        })
      ).rejects.toThrow('outside the worktree')
    })
  })
  describe('conflictOperation', () => {
    it('returns unknown for normal repo', async () => {
      gitInit(tmpDir)
      gitCommit(tmpDir, 'initial')
      const result = await dispatcher.callRequest('git.conflictOperation', { worktreePath: tmpDir })
      expect(result).toBe('unknown')
    })
  })
  describe('branchCompare', () => {
    it('compares branch against base', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'base.txt'), 'base')
      gitCommit(tmpDir, 'initial')
      execFileSync('git', ['checkout', '-b', 'feature'], { cwd: tmpDir, stdio: 'pipe' })
      writeFileSync(path.join(tmpDir, 'feature.txt'), 'feature')
      gitCommit(tmpDir, 'feature commit')
      const result = (await dispatcher.callRequest('git.branchCompare', {
        worktreePath: tmpDir,
        baseRef: 'master'
      })) as { summary: Record<string, unknown>; entries: Record<string, unknown>[] }
      // May be 'master' or error if default branch is 'main'
      if (result.summary.status === 'ready') {
        expect(result.entries.length).toBeGreaterThan(0)
        expect(result.summary.commitsAhead).toBe(1)
      }
    })
  })
  describe('listWorktrees', () => {
    it('lists worktrees for a repo', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'file.txt'), 'hello')
      gitCommit(tmpDir, 'initial')
      const result = (await dispatcher.callRequest('git.listWorktrees', {
        repoPath: tmpDir
      })) as Record<string, unknown>[]
      expect(result.length).toBeGreaterThanOrEqual(1)
      expect(result[0].isMainWorktree).toBe(true)
    })
  })
  // Bulk operations: chunked `git add` / `git restore --staged`.
  describe('bulkStage and bulkUnstage', () => {
    it('stages multiple files', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'a.txt'), 'a')
      writeFileSync(path.join(tmpDir, 'b.txt'), 'b')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'a.txt'), 'a-modified')
      writeFileSync(path.join(tmpDir, 'b.txt'), 'b-modified')
      await dispatcher.callRequest('git.bulkStage', {
        worktreePath: tmpDir,
        filePaths: ['a.txt', 'b.txt']
      })
      const output = execFileSync('git', ['diff', '--cached', '--name-only'], {
        cwd: tmpDir,
        encoding: 'utf-8'
      })
      expect(output).toContain('a.txt')
      expect(output).toContain('b.txt')
    })
    it('unstages multiple files', async () => {
      gitInit(tmpDir)
      writeFileSync(path.join(tmpDir, 'a.txt'), 'a')
      writeFileSync(path.join(tmpDir, 'b.txt'), 'b')
      gitCommit(tmpDir, 'initial')
      writeFileSync(path.join(tmpDir, 'a.txt'), 'changed')
      writeFileSync(path.join(tmpDir, 'b.txt'), 'changed')
      execFileSync('git', ['add', '.'], { cwd: tmpDir, stdio: 'pipe' })
      await dispatcher.callRequest('git.bulkUnstage', {
        worktreePath: tmpDir,
        filePaths: ['a.txt', 'b.txt']
      })
      const output = execFileSync('git', ['diff', '--cached', '--name-only'], {
        cwd: tmpDir,
        encoding: 'utf-8'
      })
      expect(output.trim()).toBe('')
    })
  })
})

348
src/relay/git-handler.ts Normal file
View file

@ -0,0 +1,348 @@
import { execFile } from 'child_process'
import { promisify } from 'util'
import { existsSync } from 'fs'
import { readFile, rm } from 'fs/promises'
import * as path from 'path'
import type { RelayDispatcher } from './dispatcher'
import type { RelayContext } from './context'
import {
parseStatusOutput,
parseUnmergedEntry,
parseBranchDiff,
parseWorktreeList
} from './git-handler-utils'
import {
computeDiff,
branchCompare as branchCompareOp,
branchDiffEntries,
validateGitExecArgs
} from './git-handler-ops'
const execFileAsync = promisify(execFile)
// Why: git can emit multi-megabyte status/diff output; execFile's default
// 1 MiB maxBuffer would throw, so allow up to 10 MiB.
const MAX_GIT_BUFFER = 10 * 1024 * 1024
// Why: passing thousands of paths in one argv risks OS command-line length
// limits, so bulk stage/unstage operations are chunked.
const BULK_CHUNK_SIZE = 100
/**
 * Relay-side handler for all `git.*` JSON-RPC methods.
 *
 * Every request that targets a workspace path validates it against the
 * registered relay roots (RelayContext) before a git process is spawned,
 * and ref/branch arguments are rejected when they could be parsed as
 * git flags (leading '-').
 */
export class GitHandler {
  private dispatcher: RelayDispatcher
  private context: RelayContext
  constructor(dispatcher: RelayDispatcher, context: RelayContext) {
    this.dispatcher = dispatcher
    this.context = context
    this.registerHandlers()
  }
  /** Wire every git.* request method onto the dispatcher. */
  private registerHandlers(): void {
    this.dispatcher.onRequest('git.status', (p) => this.getStatus(p))
    this.dispatcher.onRequest('git.diff', (p) => this.getDiff(p))
    this.dispatcher.onRequest('git.stage', (p) => this.stage(p))
    this.dispatcher.onRequest('git.unstage', (p) => this.unstage(p))
    this.dispatcher.onRequest('git.bulkStage', (p) => this.bulkStage(p))
    this.dispatcher.onRequest('git.bulkUnstage', (p) => this.bulkUnstage(p))
    this.dispatcher.onRequest('git.discard', (p) => this.discard(p))
    this.dispatcher.onRequest('git.conflictOperation', (p) => this.conflictOperation(p))
    this.dispatcher.onRequest('git.branchCompare', (p) => this.branchCompare(p))
    this.dispatcher.onRequest('git.branchDiff', (p) => this.branchDiff(p))
    this.dispatcher.onRequest('git.listWorktrees', (p) => this.listWorktrees(p))
    this.dispatcher.onRequest('git.addWorktree', (p) => this.addWorktree(p))
    this.dispatcher.onRequest('git.removeWorktree', (p) => this.removeWorktree(p))
    this.dispatcher.onRequest('git.exec', (p) => this.exec(p))
    this.dispatcher.onRequest('git.isGitRepo', (p) => this.isGitRepo(p))
  }
  /** Run git with UTF-8 output, capped at MAX_GIT_BUFFER by default. */
  private async git(
    args: string[],
    cwd: string,
    opts?: { maxBuffer?: number }
  ): Promise<{ stdout: string; stderr: string }> {
    return execFileAsync('git', args, {
      cwd,
      encoding: 'utf-8',
      maxBuffer: opts?.maxBuffer ?? MAX_GIT_BUFFER
    })
  }
  /** Run git with raw Buffer stdout (for blob content that may be binary). */
  private async gitBuffer(args: string[], cwd: string): Promise<Buffer> {
    const { stdout } = (await execFileAsync('git', args, {
      cwd,
      encoding: 'buffer',
      maxBuffer: MAX_GIT_BUFFER
    })) as { stdout: Buffer }
    return stdout
  }
  /**
   * Resolve a worktree-relative filePath and reject anything escaping the
   * worktree. Returns the absolute path and the normalized relative path.
   *
   * Why: a bare `rel.startsWith('..')` check would also reject legitimate
   * files whose names begin with '..' (e.g. '..config'), and a literal
   * '../' prefix check misses Windows '..\' separators. Comparing against
   * exact '..' plus `path.sep` handles both correctly on all platforms.
   */
  private resolveInsideWorktree(
    worktreePath: string,
    filePath: string
  ): { resolved: string; rel: string } {
    const resolved = path.resolve(worktreePath, filePath)
    const rel = path.relative(path.resolve(worktreePath), resolved)
    if (rel === '..' || rel.startsWith(`..${path.sep}`) || path.isAbsolute(rel)) {
      throw new Error(`Path "${filePath}" resolves outside the worktree`)
    }
    return { resolved, rel }
  }
  /** Porcelain-v2 status plus a best-effort conflict-operation probe. */
  private async getStatus(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const conflictOperation = await this.detectConflictOperation(worktreePath)
    const entries: Record<string, unknown>[] = []
    try {
      const { stdout } = await this.git(
        ['status', '--porcelain=v2', '--untracked-files=all'],
        worktreePath
      )
      const parsed = parseStatusOutput(stdout)
      entries.push(...parsed.entries)
      for (const uLine of parsed.unmergedLines) {
        const entry = parseUnmergedEntry(worktreePath, uLine)
        if (entry) {
          entries.push(entry)
        }
      }
    } catch {
      // Not a git repo or git not available
    }
    return { entries, conflictOperation }
  }
  /**
   * Detect an in-progress merge/rebase/cherry-pick by probing the marker
   * files git leaves in the git dir; 'unknown' when none are present.
   */
  private async detectConflictOperation(worktreePath: string): Promise<string> {
    const gitDir = await this.resolveGitDir(worktreePath)
    try {
      if (existsSync(path.join(gitDir, 'MERGE_HEAD'))) {
        return 'merge'
      }
      if (
        existsSync(path.join(gitDir, 'rebase-merge')) ||
        existsSync(path.join(gitDir, 'rebase-apply'))
      ) {
        return 'rebase'
      }
      if (existsSync(path.join(gitDir, 'CHERRY_PICK_HEAD'))) {
        return 'cherry-pick'
      }
    } catch {
      // fs error
    }
    return 'unknown'
  }
  /**
   * Resolve the actual git dir for a worktree: linked worktrees have a
   * `.git` FILE containing "gitdir: <path>" instead of a directory.
   */
  private async resolveGitDir(worktreePath: string): Promise<string> {
    const dotGitPath = path.join(worktreePath, '.git')
    try {
      const contents = await readFile(dotGitPath, 'utf-8')
      const match = contents.match(/^gitdir:\s*(.+)\s*$/m)
      if (match) {
        return path.resolve(worktreePath, match[1])
      }
    } catch {
      // .git is a directory
    }
    return dotGitPath
  }
  /** Compute a working-tree (staged=false) or index (staged=true) diff. */
  private async getDiff(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const filePath = params.filePath as string
    // Why: filePath is relative to worktreePath and used in readWorkingFile via
    // path.join. Without validation, ../../etc/passwd traverses outside the worktree.
    this.resolveInsideWorktree(worktreePath, filePath)
    const staged = params.staged as boolean
    return computeDiff(this.gitBuffer.bind(this), worktreePath, filePath, staged)
  }
  private async stage(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const filePath = params.filePath as string
    await this.git(['add', '--', filePath], worktreePath)
  }
  private async unstage(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const filePath = params.filePath as string
    await this.git(['restore', '--staged', '--', filePath], worktreePath)
  }
  private async bulkStage(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const filePaths = params.filePaths as string[]
    for (let i = 0; i < filePaths.length; i += BULK_CHUNK_SIZE) {
      const chunk = filePaths.slice(i, i + BULK_CHUNK_SIZE)
      await this.git(['add', '--', ...chunk], worktreePath)
    }
  }
  private async bulkUnstage(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const filePaths = params.filePaths as string[]
    for (let i = 0; i < filePaths.length; i += BULK_CHUNK_SIZE) {
      const chunk = filePaths.slice(i, i + BULK_CHUNK_SIZE)
      await this.git(['restore', '--staged', '--', ...chunk], worktreePath)
    }
  }
  /** Restore a tracked file to HEAD, or delete an untracked file. */
  private async discard(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const filePath = params.filePath as string
    const { resolved, rel } = this.resolveInsideWorktree(worktreePath, filePath)
    // Why: empty rel or '.' means the path IS the worktree root — rm -rf would
    // delete the entire worktree. Reject it outright.
    if (!rel || rel === '.') {
      throw new Error(`Path "${filePath}" resolves outside the worktree`)
    }
    let tracked = false
    try {
      await this.git(['ls-files', '--error-unmatch', '--', filePath], worktreePath)
      tracked = true
    } catch {
      // untracked
    }
    if (tracked) {
      await this.git(['restore', '--worktree', '--source=HEAD', '--', filePath], worktreePath)
    } else {
      // Why: textual path checks pass for symlinks inside the worktree, but
      // rm follows symlinks — so a symlink pointing outside the workspace
      // would delete the target. validatePathResolved catches this.
      await this.context.validatePathResolved(resolved)
      await rm(resolved, { force: true, recursive: true })
    }
  }
  private async conflictOperation(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    return this.detectConflictOperation(worktreePath)
  }
  /** Summarize how the current branch diverges from baseRef. */
  private async branchCompare(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const baseRef = params.baseRef as string
    // Why: a baseRef starting with '-' would be interpreted as a flag to
    // git rev-parse, potentially leaking environment variables or config.
    if (baseRef.startsWith('-')) {
      throw new Error('Base ref must not start with "-"')
    }
    const gitBound = this.git.bind(this)
    return branchCompareOp(gitBound, worktreePath, baseRef, async (mergeBase, headOid) => {
      const { stdout } = await gitBound(
        ['diff', '--name-status', '-M', '-C', mergeBase, headOid],
        worktreePath
      )
      return parseBranchDiff(stdout)
    })
  }
  /** Per-file diff entries (optionally with patch content) against baseRef. */
  private async branchDiff(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const baseRef = params.baseRef as string
    if (baseRef.startsWith('-')) {
      throw new Error('Base ref must not start with "-"')
    }
    return branchDiffEntries(
      this.git.bind(this),
      this.gitBuffer.bind(this),
      worktreePath,
      baseRef,
      {
        includePatch: params.includePatch as boolean | undefined,
        filePath: params.filePath as string | undefined,
        oldPath: params.oldPath as string | undefined
      }
    )
  }
  /** Run an arbitrary git command, restricted by validateGitExecArgs. */
  private async exec(params: Record<string, unknown>) {
    const args = params.args as string[]
    const cwd = params.cwd as string
    this.context.validatePath(cwd)
    validateGitExecArgs(args)
    const { stdout, stderr } = await this.git(args, cwd)
    return { stdout, stderr }
  }
  // Why: isGitRepo is called during the add-repo flow before any workspace
  // roots are registered with the relay. Skipping validatePath is safe because
  // this is a read-only git rev-parse check — no files are mutated.
  private async isGitRepo(params: Record<string, unknown>) {
    const dirPath = params.dirPath as string
    try {
      const { stdout } = await this.git(['rev-parse', '--show-toplevel'], dirPath)
      return { isRepo: true, rootPath: stdout.trim() }
    } catch {
      return { isRepo: false, rootPath: null }
    }
  }
  private async listWorktrees(params: Record<string, unknown>) {
    const repoPath = params.repoPath as string
    this.context.validatePath(repoPath)
    try {
      const { stdout } = await this.git(['worktree', 'list', '--porcelain'], repoPath)
      return parseWorktreeList(stdout)
    } catch {
      return []
    }
  }
  /** Create a new worktree on a new branch, optionally tracking a base ref. */
  private async addWorktree(params: Record<string, unknown>) {
    const repoPath = params.repoPath as string
    this.context.validatePath(repoPath)
    const branchName = params.branchName as string
    const targetDir = params.targetDir as string
    this.context.validatePath(targetDir)
    const base = params.base as string | undefined
    const track = params.track as boolean | undefined
    // Why: a branchName starting with '-' would be interpreted as a git flag,
    // potentially changing the command's semantics (e.g. "--detach").
    if (branchName.startsWith('-') || (base && base.startsWith('-'))) {
      throw new Error('Branch name and base ref must not start with "-"')
    }
    const args = ['worktree', 'add']
    if (track) {
      args.push('--track')
    }
    args.push('-b', branchName, targetDir)
    if (base) {
      args.push(base)
    }
    await this.git(args, repoPath)
  }
  /** Remove a worktree, running the command from the main repository. */
  private async removeWorktree(params: Record<string, unknown>) {
    const worktreePath = params.worktreePath as string
    this.context.validatePath(worktreePath)
    const force = params.force as boolean | undefined
    let repoPath = worktreePath
    try {
      // Prefer the main repo (parent of the common git dir) as cwd, since
      // the worktree being removed may become invalid mid-operation.
      const { stdout } = await this.git(['rev-parse', '--git-common-dir'], worktreePath)
      const commonDir = stdout.trim()
      if (commonDir && commonDir !== '.git') {
        repoPath = path.resolve(worktreePath, commonDir, '..')
      }
    } catch {
      // Fall through with worktreePath as repo
    }
    const args = ['worktree', 'remove']
    if (force) {
      args.push('--force')
    }
    args.push(worktreePath)
    await this.git(args, repoPath)
    await this.git(['worktree', 'prune'], repoPath)
  }
}

View file

@ -0,0 +1,403 @@
/**
* End-to-end in-process integration test.
*
 * Wires the client-side SshChannelMultiplexer directly to the relay-side
 * RelayDispatcher through an in-memory pipe — no SSH, no subprocess.
 * Validates the full JSON-RPC roundtrip: client request framing →
 * relay decode → handler → response framing → client decode → result.
*/
import { describe, expect, it, beforeEach, afterEach } from 'vitest'
import { mkdtempSync, writeFileSync } from 'fs'
import { rm, readFile, stat } from 'fs/promises'
import * as path from 'path'
import { tmpdir } from 'os'
import { execFileSync } from 'child_process'
import {
SshChannelMultiplexer,
type MultiplexerTransport
} from '../main/ssh/ssh-channel-multiplexer'
import { RelayDispatcher } from './dispatcher'
import { RelayContext } from './context'
import { FsHandler } from './fs-handler'
import { GitHandler } from './git-handler'
/** Initialize a git repo in `dir` with a deterministic test identity. */
function gitInit(dir: string): void {
  for (const args of [
    ['init'],
    ['config', 'user.email', 'test@test.com'],
    ['config', 'user.name', 'Test']
  ]) {
    execFileSync('git', args, { cwd: dir, stdio: 'pipe' })
  }
}
/** Stage all changes in `dir` and commit them with `message`. */
function gitCommit(dir: string, message: string): void {
  const run = (args: string[]): void => {
    execFileSync('git', args, { cwd: dir, stdio: 'pipe' })
  }
  run(['add', '.'])
  run(['commit', '-m', message])
}
describe('Integration: Client Mux ↔ Relay Dispatcher', () => {
let tmpDir: string
let mux: SshChannelMultiplexer
let dispatcher: RelayDispatcher
let fsHandler: FsHandler
beforeEach(() => {
tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-e2e-'))
// Build the in-memory pipe
let relayFeedFn: (data: Buffer) => void
const clientDataCallbacks: ((data: Buffer) => void)[] = []
const clientCloseCallbacks: (() => void)[] = []
const clientTransport: MultiplexerTransport = {
write: (data: Buffer) => {
// Client → Relay
setImmediate(() => relayFeedFn?.(data))
},
onData: (cb) => {
clientDataCallbacks.push(cb)
},
onClose: (cb) => {
clientCloseCallbacks.push(cb)
}
}
// Relay side
dispatcher = new RelayDispatcher((data: Buffer) => {
// Relay → Client
setImmediate(() => {
for (const cb of clientDataCallbacks) {
cb(data)
}
})
})
relayFeedFn = (data: Buffer) => dispatcher.feed(data)
// Register handlers on the relay
const context = new RelayContext()
context.registerRoot(tmpDir)
fsHandler = new FsHandler(dispatcher, context)
new GitHandler(dispatcher, context)
// Create client mux
mux = new SshChannelMultiplexer(clientTransport)
})
afterEach(async () => {
mux.dispose()
dispatcher.dispose()
fsHandler.dispose()
await rm(tmpDir, { recursive: true, force: true })
})
// ─── Filesystem ─────────────────────────────────────────────────
describe('Filesystem operations', () => {
it('readDir returns directory entries', async () => {
writeFileSync(path.join(tmpDir, 'hello.txt'), 'world')
writeFileSync(path.join(tmpDir, 'readme.md'), '# Hi')
const result = (await mux.request('fs.readDir', { dirPath: tmpDir })) as {
name: string
isDirectory: boolean
isSymlink: boolean
}[]
expect(result.length).toBe(2)
const names = result.map((e) => e.name).sort()
expect(names).toEqual(['hello.txt', 'readme.md'])
})
it('readFile returns text content', async () => {
writeFileSync(path.join(tmpDir, 'data.txt'), 'some content')
const result = (await mux.request('fs.readFile', {
filePath: path.join(tmpDir, 'data.txt')
})) as { content: string; isBinary: boolean }
expect(result.content).toBe('some content')
expect(result.isBinary).toBe(false)
})
it('writeFile creates/overwrites file content', async () => {
const filePath = path.join(tmpDir, 'output.txt')
await mux.request('fs.writeFile', { filePath, content: 'written via relay' })
const content = await readFile(filePath, 'utf-8')
expect(content).toBe('written via relay')
})
it('stat returns file metadata', async () => {
writeFileSync(path.join(tmpDir, 'sized.txt'), 'abcdef')
const result = (await mux.request('fs.stat', {
filePath: path.join(tmpDir, 'sized.txt')
})) as { size: number; type: string; mtime: number }
expect(result.type).toBe('file')
expect(result.size).toBe(6)
expect(typeof result.mtime).toBe('number')
})
it('createFile + deletePath roundtrip', async () => {
const filePath = path.join(tmpDir, 'nested', 'deep', 'new.txt')
await mux.request('fs.createFile', { filePath })
const s = await stat(filePath)
expect(s.isFile()).toBe(true)
await mux.request('fs.deletePath', { targetPath: filePath })
await expect(stat(filePath)).rejects.toThrow()
})
it('createDir creates directories recursively', async () => {
const dirPath = path.join(tmpDir, 'a', 'b', 'c')
await mux.request('fs.createDir', { dirPath })
const s = await stat(dirPath)
expect(s.isDirectory()).toBe(true)
})
it('rename moves files', async () => {
const oldPath = path.join(tmpDir, 'before.txt')
const newPath = path.join(tmpDir, 'after.txt')
writeFileSync(oldPath, 'moving')
await mux.request('fs.rename', { oldPath, newPath })
await expect(stat(oldPath)).rejects.toThrow()
const content = await readFile(newPath, 'utf-8')
expect(content).toBe('moving')
})
it('copy duplicates files', async () => {
const src = path.join(tmpDir, 'src.txt')
const dst = path.join(tmpDir, 'dst.txt')
writeFileSync(src, 'original')
await mux.request('fs.copy', { source: src, destination: dst })
const content = await readFile(dst, 'utf-8')
expect(content).toBe('original')
})
it('readFile returns error for non-existent file', async () => {
await expect(
mux.request('fs.readFile', { filePath: path.join(tmpDir, 'nope.txt') })
).rejects.toThrow()
})
it('errors propagate correctly through the protocol', async () => {
await expect(
mux.request('fs.stat', { filePath: '/nonexistent/path/that/does/not/exist' })
).rejects.toThrow()
})
})
// ─── Git ────────────────────────────────────────────────────────
describe('Git operations', () => {
beforeEach(() => {
gitInit(tmpDir)
writeFileSync(path.join(tmpDir, 'file.txt'), 'initial')
gitCommit(tmpDir, 'initial commit')
})
it('git.status returns clean status for committed repo', async () => {
const result = (await mux.request('git.status', {
worktreePath: tmpDir
})) as { entries: unknown[]; conflictOperation: string }
expect(result.entries).toEqual([])
expect(result.conflictOperation).toBe('unknown')
})
// NOTE(review): these tests mutate files in tmpDir and assume the repo state
// (e.g. file.txt containing 'initial') is restored by setup defined above
// this excerpt — confirm in the suite's beforeEach.
it('git.status detects modifications', async () => {
  writeFileSync(path.join(tmpDir, 'file.txt'), 'modified')
  const result = (await mux.request('git.status', {
    worktreePath: tmpDir
  })) as { entries: { path: string; status: string; area: string }[] }
  const entry = result.entries.find((e) => e.path === 'file.txt')
  expect(entry).toBeDefined()
  expect(entry!.status).toBe('modified')
  expect(entry!.area).toBe('unstaged')
})
it('git.status detects untracked files', async () => {
  writeFileSync(path.join(tmpDir, 'new.txt'), 'new')
  const result = (await mux.request('git.status', {
    worktreePath: tmpDir
  })) as { entries: { path: string; status: string; area: string }[] }
  const entry = result.entries.find((e) => e.path === 'new.txt')
  expect(entry).toBeDefined()
  expect(entry!.status).toBe('untracked')
})
it('git.stage + git.status shows staged entry', async () => {
  writeFileSync(path.join(tmpDir, 'file.txt'), 'changed')
  await mux.request('git.stage', { worktreePath: tmpDir, filePath: 'file.txt' })
  const result = (await mux.request('git.status', {
    worktreePath: tmpDir
  })) as { entries: { path: string; area: string }[] }
  const staged = result.entries.find((e) => e.area === 'staged')
  expect(staged).toBeDefined()
})
it('git.unstage reverses staging', async () => {
  writeFileSync(path.join(tmpDir, 'file.txt'), 'changed')
  await mux.request('git.stage', { worktreePath: tmpDir, filePath: 'file.txt' })
  await mux.request('git.unstage', { worktreePath: tmpDir, filePath: 'file.txt' })
  const result = (await mux.request('git.status', {
    worktreePath: tmpDir
  })) as { entries: { area: string }[] }
  const staged = result.entries.filter((e) => e.area === 'staged')
  expect(staged.length).toBe(0)
})
it('git.diff returns original and modified content', async () => {
  writeFileSync(path.join(tmpDir, 'file.txt'), 'updated content')
  const result = (await mux.request('git.diff', {
    worktreePath: tmpDir,
    filePath: 'file.txt',
    staged: false
  })) as { kind: string; originalContent: string; modifiedContent: string }
  expect(result.kind).toBe('text')
  expect(result.originalContent).toBe('initial')
  expect(result.modifiedContent).toBe('updated content')
})
it('git.diff returns staged diff', async () => {
  writeFileSync(path.join(tmpDir, 'file.txt'), 'staged version')
  // Stage directly with git so the test isolates git.diff (not git.stage).
  execFileSync('git', ['add', 'file.txt'], { cwd: tmpDir, stdio: 'pipe' })
  const result = (await mux.request('git.diff', {
    worktreePath: tmpDir,
    filePath: 'file.txt',
    staged: true
  })) as { originalContent: string; modifiedContent: string }
  expect(result.originalContent).toBe('initial')
  expect(result.modifiedContent).toBe('staged version')
})
it('git.discard restores tracked file to HEAD', async () => {
  writeFileSync(path.join(tmpDir, 'file.txt'), 'dirty')
  await mux.request('git.discard', { worktreePath: tmpDir, filePath: 'file.txt' })
  const content = await readFile(path.join(tmpDir, 'file.txt'), 'utf-8')
  expect(content).toBe('initial')
})
it('git.discard removes untracked file', async () => {
  writeFileSync(path.join(tmpDir, 'temp.txt'), 'throwaway')
  await mux.request('git.discard', { worktreePath: tmpDir, filePath: 'temp.txt' })
  // Discarding an untracked file deletes it entirely; stat must reject.
  await expect(stat(path.join(tmpDir, 'temp.txt'))).rejects.toThrow()
})
it('git.conflictOperation returns unknown for normal repo', async () => {
  const result = await mux.request('git.conflictOperation', {
    worktreePath: tmpDir
  })
  expect(result).toBe('unknown')
})
it('git.listWorktrees returns the main worktree', async () => {
  const result = (await mux.request('git.listWorktrees', {
    repoPath: tmpDir
  })) as { path: string; isMainWorktree: boolean }[]
  expect(result.length).toBeGreaterThanOrEqual(1)
  expect(result[0].isMainWorktree).toBe(true)
})
it('git.branchCompare works across branches', async () => {
  // Get current branch name (might be "main" or "master" depending on config)
  const defaultBranch = execFileSync('git', ['branch', '--show-current'], {
    cwd: tmpDir,
    encoding: 'utf-8'
  }).trim()
  execFileSync('git', ['checkout', '-b', 'feature'], { cwd: tmpDir, stdio: 'pipe' })
  writeFileSync(path.join(tmpDir, 'feature.txt'), 'feature work')
  gitCommit(tmpDir, 'feature commit')
  const result = (await mux.request('git.branchCompare', {
    worktreePath: tmpDir,
    baseRef: defaultBranch
  })) as { summary: { status: string; commitsAhead: number }; entries: unknown[] }
  expect(result.summary.status).toBe('ready')
  expect(result.summary.commitsAhead).toBe(1)
  expect(result.entries.length).toBe(1)
})
it('git.bulkStage stages multiple files at once', async () => {
  writeFileSync(path.join(tmpDir, 'a.txt'), 'a')
  writeFileSync(path.join(tmpDir, 'b.txt'), 'b')
  await mux.request('git.bulkStage', {
    worktreePath: tmpDir,
    filePaths: ['a.txt', 'b.txt']
  })
  const result = (await mux.request('git.status', {
    worktreePath: tmpDir
  })) as { entries: { path: string; area: string }[] }
  const staged = result.entries.filter((e) => e.area === 'staged')
  expect(staged.length).toBe(2)
})
})
// ─── Error propagation ──────────────────────────────────────────
// Verifies error surfacing over the relay: both protocol-level failures
// (unknown method) and handler-level failures must reject the client promise.
describe('Error propagation', () => {
  it('method-not-found error for unknown methods', async () => {
    await expect(mux.request('nonexistent.method', {})).rejects.toThrow('Method not found')
  })
  it('handler errors propagate as JSON-RPC errors', async () => {
    await expect(
      mux.request('fs.readFile', { filePath: '/does/not/exist/at/all' })
    ).rejects.toThrow()
  })
})
// ─── Notifications ──────────────────────────────────────────────
describe('Notifications', () => {
  it('relay notifications reach the client mux', async () => {
    const received: { method: string; params: Record<string, unknown> }[] = []
    mux.onNotification((method, params) => {
      received.push({ method, params })
    })
    // Trigger a fs operation that causes the relay to send notifications
    // (e.g., write a file — no notification expected for this, so we
    // test notification plumbing directly via the relay dispatcher)
    dispatcher.notify('custom.event', { key: 'value' })
    // Wait for the async delivery through setImmediate
    await new Promise((r) => setTimeout(r, 50))
    expect(received.length).toBe(1)
    expect(received[0].method).toBe('custom.event')
    expect(received[0].params).toEqual({ key: 'value' })
  })
})
})

137
src/relay/protocol.ts Normal file
View file

@ -0,0 +1,137 @@
// Self-contained relay protocol — mirrors src/main/ssh/relay-protocol.ts
// but has no Electron dependencies. Deployed standalone to remote hosts.
export const RELAY_VERSION = '0.1.0'
export const RELAY_SENTINEL = `ORCA-RELAY v${RELAY_VERSION} READY\n`
/** Frame header layout: 1 byte type + 4 bytes id + 4 bytes ack + 4 bytes payload length. */
export const HEADER_LENGTH = 13
export const MAX_MESSAGE_SIZE = 16 * 1024 * 1024
export const MessageType = {
  Regular: 1,
  KeepAlive: 9
} as const
export const KEEPALIVE_SEND_MS = 5_000
export const TIMEOUT_MS = 20_000
export type JsonRpcRequest = {
  jsonrpc: '2.0'
  id: number
  method: string
  params?: Record<string, unknown>
}
export type JsonRpcResponse = {
  jsonrpc: '2.0'
  id: number
  result?: unknown
  error?: { code: number; message: string; data?: unknown }
}
export type JsonRpcNotification = {
  jsonrpc: '2.0'
  method: string
  params?: Record<string, unknown>
}
export type JsonRpcMessage = JsonRpcRequest | JsonRpcResponse | JsonRpcNotification
export type DecodedFrame = {
  type: number
  id: number
  ack: number
  payload: Buffer
}
/** Encode one frame: 13-byte big-endian header followed by the payload bytes. */
export function encodeFrame(
  type: number,
  id: number,
  ack: number,
  payload: Buffer | Uint8Array
): Buffer {
  const header = Buffer.alloc(HEADER_LENGTH)
  header[0] = type
  header.writeUInt32BE(id, 1)
  header.writeUInt32BE(ack, 5)
  header.writeUInt32BE(payload.length, 9)
  return Buffer.concat([header, payload])
}
/**
 * Encode a JSON-RPC message as a Regular frame.
 * @throws if the serialized message exceeds MAX_MESSAGE_SIZE.
 */
export function encodeJsonRpcFrame(msg: JsonRpcMessage, id: number, ack: number): Buffer {
  const payload = Buffer.from(JSON.stringify(msg), 'utf-8')
  if (payload.length > MAX_MESSAGE_SIZE) {
    throw new Error(`Message too large: ${payload.length} bytes`)
  }
  return encodeFrame(MessageType.Regular, id, ack, payload)
}
/** Encode an empty KeepAlive frame (header only). */
export function encodeKeepAliveFrame(id: number, ack: number): Buffer {
  return encodeFrame(MessageType.KeepAlive, id, ack, Buffer.alloc(0))
}
/**
 * Incremental frame decoder. Feed it raw stream chunks; it invokes onFrame
 * for every complete frame and onError (or stderr) for oversized frames.
 */
export class FrameDecoder {
  private buffer = Buffer.alloc(0)
  // Why: remaining payload bytes of an oversized frame that are discarded as
  // they arrive. The length field is an unsigned 32-bit value, so a hostile or
  // corrupt peer could claim up to ~4 GiB; accumulating that payload before
  // discarding it (the previous behavior) would exhaust memory. Instead we
  // consume the header immediately and drop payload bytes incrementally.
  private skipRemaining = 0
  private onFrame: (frame: DecodedFrame) => void
  private onError: ((err: Error) => void) | null
  constructor(onFrame: (frame: DecodedFrame) => void, onError?: (err: Error) => void) {
    this.onFrame = onFrame
    this.onError = onError ?? null
  }
  feed(chunk: Buffer | Uint8Array): void {
    this.buffer = Buffer.concat([this.buffer, chunk])
    for (;;) {
      // Drain bytes that belong to a previously detected oversized frame.
      if (this.skipRemaining > 0) {
        const drop = Math.min(this.skipRemaining, this.buffer.length)
        this.buffer = this.buffer.subarray(drop)
        this.skipRemaining -= drop
        if (this.skipRemaining > 0) {
          return
        }
      }
      if (this.buffer.length < HEADER_LENGTH) {
        return
      }
      const length = this.buffer.readUInt32BE(9)
      if (length > MAX_MESSAGE_SIZE) {
        // Why: Throwing here would leave the buffer in a partially consumed
        // state — subsequent feed() calls would try to parse leftover payload
        // bytes as a new header, corrupting every future frame. We consume the
        // header now and skip the payload via skipRemaining (never buffering
        // it) so the decoder stays synchronized with the stream.
        this.buffer = this.buffer.subarray(HEADER_LENGTH)
        this.skipRemaining = length
        const err = new Error(`Frame payload too large: ${length} bytes — discarded`)
        if (this.onError) {
          this.onError(err)
        } else {
          process.stderr.write(`[relay] ${err.message}\n`)
        }
        continue
      }
      const totalLength = HEADER_LENGTH + length
      if (this.buffer.length < totalLength) {
        return
      }
      const frame: DecodedFrame = {
        type: this.buffer[0],
        id: this.buffer.readUInt32BE(1),
        ack: this.buffer.readUInt32BE(5),
        payload: this.buffer.subarray(HEADER_LENGTH, totalLength)
      }
      this.buffer = this.buffer.subarray(totalLength)
      this.onFrame(frame)
    }
  }
  /** Discard all partial state (buffered bytes and any oversized-skip). */
  reset(): void {
    this.buffer = Buffer.alloc(0)
    this.skipRemaining = 0
  }
}
/**
 * Parse a frame payload into a JSON-RPC message, rejecting anything that
 * does not carry the mandatory "2.0" version tag.
 */
export function parseJsonRpcMessage(payload: Buffer): JsonRpcMessage {
  const decoded = JSON.parse(payload.toString('utf-8')) as JsonRpcMessage
  if (decoded.jsonrpc === '2.0') {
    return decoded
  }
  throw new Error(`Invalid JSON-RPC version: ${(decoded as Record<string, unknown>).jsonrpc}`)
}

View file

@ -0,0 +1,276 @@
import { describe, expect, it, vi, beforeEach, afterEach } from 'vitest'
// Why: vi.mock factories are hoisted above imports by vitest, so the mocks
// they capture must be created with vi.hoisted to exist at that point.
const { mockPtySpawn, mockPtyInstance } = vi.hoisted(() => ({
  mockPtySpawn: vi.fn(),
  // Minimal IPty-shaped stub; individual tests override members as needed.
  mockPtyInstance: {
    pid: 12345,
    onData: vi.fn(),
    onExit: vi.fn(),
    write: vi.fn(),
    resize: vi.fn(),
    kill: vi.fn(),
    clear: vi.fn()
  }
}))
vi.mock('node-pty', () => ({
  spawn: mockPtySpawn
}))
import { PtyHandler } from './pty-handler'
import type { RelayDispatcher } from './dispatcher'
/**
 * Build a minimal stand-in for RelayDispatcher: records registered request
 * and notification handlers, captures emitted notifications, and exposes
 * helpers so tests can invoke the registered handlers directly.
 */
function createMockDispatcher() {
  const reqHandlers = new Map<string, (params: Record<string, unknown>) => Promise<unknown>>()
  const notifHandlers = new Map<string, (params: Record<string, unknown>) => void>()
  const emitted: { method: string; params?: Record<string, unknown> }[] = []
  return {
    onRequest: vi.fn(
      (method: string, handler: (params: Record<string, unknown>) => Promise<unknown>) => {
        reqHandlers.set(method, handler)
      }
    ),
    onNotification: vi.fn((method: string, handler: (params: Record<string, unknown>) => void) => {
      notifHandlers.set(method, handler)
    }),
    notify: vi.fn((method: string, params?: Record<string, unknown>) => {
      emitted.push({ method, params })
    }),
    // Helpers for tests
    _requestHandlers: reqHandlers,
    _notificationHandlers: notifHandlers,
    _notifications: emitted,
    async callRequest(method: string, params: Record<string, unknown> = {}) {
      const handler = reqHandlers.get(method)
      if (!handler) {
        throw new Error(`No handler for ${method}`)
      }
      return handler(params)
    },
    callNotification(method: string, params: Record<string, unknown> = {}) {
      const handler = notifHandlers.get(method)
      if (!handler) {
        throw new Error(`No handler for ${method}`)
      }
      handler(params)
    }
  }
}
describe('PtyHandler', () => {
  let dispatcher: ReturnType<typeof createMockDispatcher>
  let handler: PtyHandler
  beforeEach(() => {
    // Fake timers so grace-timer tests can advance time deterministically.
    vi.useFakeTimers()
    mockPtySpawn.mockReset()
    mockPtyInstance.onData.mockReset()
    mockPtyInstance.onExit.mockReset()
    mockPtyInstance.write.mockReset()
    mockPtyInstance.resize.mockReset()
    mockPtyInstance.kill.mockReset()
    mockPtyInstance.clear.mockReset()
    // Spread so each spawn gets a distinct object sharing the same vi.fn mocks.
    mockPtySpawn.mockReturnValue({ ...mockPtyInstance })
    dispatcher = createMockDispatcher()
    handler = new PtyHandler(dispatcher as unknown as RelayDispatcher)
  })
  afterEach(() => {
    handler.dispose()
    vi.useRealTimers()
  })
  it('registers all expected handlers', () => {
    const methods = Array.from(dispatcher._requestHandlers.keys())
    expect(methods).toContain('pty.spawn')
    expect(methods).toContain('pty.attach')
    expect(methods).toContain('pty.shutdown')
    expect(methods).toContain('pty.sendSignal')
    expect(methods).toContain('pty.getCwd')
    expect(methods).toContain('pty.getInitialCwd')
    expect(methods).toContain('pty.clearBuffer')
    expect(methods).toContain('pty.hasChildProcesses')
    expect(methods).toContain('pty.getForegroundProcess')
    expect(methods).toContain('pty.listProcesses')
    expect(methods).toContain('pty.getDefaultShell')
    const notifMethods = Array.from(dispatcher._notificationHandlers.keys())
    expect(notifMethods).toContain('pty.data')
    expect(notifMethods).toContain('pty.resize')
    expect(notifMethods).toContain('pty.ackData')
  })
  it('spawns a PTY and returns an id', async () => {
    const result = await dispatcher.callRequest('pty.spawn', { cols: 80, rows: 24 })
    expect(result).toEqual({ id: 'pty-1' })
    expect(mockPtySpawn).toHaveBeenCalled()
    expect(handler.activePtyCount).toBe(1)
  })
  it('increments PTY ids on each spawn', async () => {
    const r1 = await dispatcher.callRequest('pty.spawn', {})
    const r2 = await dispatcher.callRequest('pty.spawn', {})
    expect((r1 as { id: string }).id).toBe('pty-1')
    expect((r2 as { id: string }).id).toBe('pty-2')
  })
  it('forwards data from PTY to dispatcher notifications', async () => {
    // Capture the onData callback PtyHandler installs so we can drive it.
    let dataCallback: ((data: string) => void) | undefined
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      onData: vi.fn((cb: (data: string) => void) => {
        dataCallback = cb
      }),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    expect(dataCallback).toBeDefined()
    dataCallback!('hello world')
    expect(dispatcher.notify).toHaveBeenCalledWith('pty.data', { id: 'pty-1', data: 'hello world' })
  })
  it('notifies on PTY exit and removes from map', async () => {
    let exitCallback: ((info: { exitCode: number }) => void) | undefined
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      onData: vi.fn(),
      onExit: vi.fn((cb: (info: { exitCode: number }) => void) => {
        exitCallback = cb
      })
    })
    await dispatcher.callRequest('pty.spawn', {})
    expect(handler.activePtyCount).toBe(1)
    exitCallback!({ exitCode: 0 })
    expect(dispatcher.notify).toHaveBeenCalledWith('pty.exit', { id: 'pty-1', code: 0 })
    expect(handler.activePtyCount).toBe(0)
  })
  it('writes data to PTY via pty.data notification', async () => {
    const mockWrite = vi.fn()
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      write: mockWrite,
      onData: vi.fn(),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    dispatcher.callNotification('pty.data', { id: 'pty-1', data: 'ls\n' })
    expect(mockWrite).toHaveBeenCalledWith('ls\n')
  })
  it('resizes PTY via pty.resize notification', async () => {
    const mockResize = vi.fn()
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      resize: mockResize,
      onData: vi.fn(),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    dispatcher.callNotification('pty.resize', { id: 'pty-1', cols: 120, rows: 40 })
    expect(mockResize).toHaveBeenCalledWith(120, 40)
  })
  it('kills PTY on shutdown with SIGTERM by default', async () => {
    const mockKill = vi.fn()
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      kill: mockKill,
      onData: vi.fn(),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    await dispatcher.callRequest('pty.shutdown', { id: 'pty-1', immediate: false })
    expect(mockKill).toHaveBeenCalledWith('SIGTERM')
  })
  it('kills PTY on shutdown with SIGKILL when immediate', async () => {
    const mockKill = vi.fn()
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      kill: mockKill,
      onData: vi.fn(),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    await dispatcher.callRequest('pty.shutdown', { id: 'pty-1', immediate: true })
    expect(mockKill).toHaveBeenCalledWith('SIGKILL')
  })
  it('throws for attach on nonexistent PTY', async () => {
    await expect(dispatcher.callRequest('pty.attach', { id: 'pty-999' })).rejects.toThrow(
      'PTY "pty-999" not found'
    )
  })
  it('grace timer fires immediately when no PTYs exist', () => {
    const onExpire = vi.fn()
    handler.startGraceTimer(onExpire)
    expect(onExpire).toHaveBeenCalledTimes(1)
  })
  it('grace timer fires after configured delay when PTYs exist', async () => {
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      onData: vi.fn(),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    const onExpire = vi.fn()
    handler.startGraceTimer(onExpire)
    expect(onExpire).not.toHaveBeenCalled()
    // Default grace window is 5 minutes.
    vi.advanceTimersByTime(5 * 60 * 1000)
    expect(onExpire).toHaveBeenCalledTimes(1)
  })
  it('cancelGraceTimer prevents expiration', async () => {
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      onData: vi.fn(),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    const onExpire = vi.fn()
    handler.startGraceTimer(onExpire)
    vi.advanceTimersByTime(60_000)
    handler.cancelGraceTimer()
    vi.advanceTimersByTime(5 * 60 * 1000)
    expect(onExpire).not.toHaveBeenCalled()
  })
  it('dispose kills all PTYs', async () => {
    const mockKill = vi.fn()
    mockPtySpawn.mockReturnValue({
      ...mockPtyInstance,
      kill: mockKill,
      onData: vi.fn(),
      onExit: vi.fn()
    })
    await dispatcher.callRequest('pty.spawn', {})
    await dispatcher.callRequest('pty.spawn', {})
    expect(handler.activePtyCount).toBe(2)
    handler.dispose()
    expect(mockKill).toHaveBeenCalledWith('SIGTERM')
    expect(handler.activePtyCount).toBe(0)
  })
})

355
src/relay/pty-handler.ts Normal file
View file

@ -0,0 +1,355 @@
import type { IPty } from 'node-pty'
import type * as NodePty from 'node-pty'
import type { RelayDispatcher } from './dispatcher'
import {
resolveDefaultShell,
resolveProcessCwd,
processHasChildren,
getForegroundProcessName,
listShellProfiles
} from './pty-shell-utils'
// Why: node-pty is a native addon that may not be installed on the remote.
// The import happens lazily at first use, so loadPty() resolves to null
// gracefully when the native module is unavailable. The static type import
// lets vitest intercept it in tests.
let cachedPty: typeof NodePty | null = null
async function loadPty(): Promise<typeof NodePty | null> {
  if (cachedPty !== null) {
    return cachedPty
  }
  try {
    cachedPty = await import('node-pty')
  } catch {
    // Native module missing or failed to load; report unavailability.
    return null
  }
  return cachedPty
}
// Bookkeeping for one live PTY session owned by this relay process.
type ManagedPty = {
  id: string
  pty: IPty
  // Directory the PTY was spawned in; also serves as the cwd fallback.
  initialCwd: string
  // Rolling tail of PTY output, replayed to the client on pty.attach.
  buffered: string
  /** Timer for SIGKILL fallback after a graceful SIGTERM shutdown. */
  killTimer?: ReturnType<typeof setTimeout>
}
// How long spawned PTYs survive after a client disconnect before shutdown.
const DEFAULT_GRACE_TIME_MS = 5 * 60 * 1000
// Maximum characters retained per PTY in the replay buffer.
export const REPLAY_BUFFER_MAX = 100 * 1024
// Whitelist for pty.sendSignal; any other signal name is rejected.
const ALLOWED_SIGNALS = new Set([
  'SIGINT',
  'SIGTERM',
  'SIGHUP',
  'SIGKILL',
  'SIGTSTP',
  'SIGCONT',
  'SIGUSR1',
  'SIGUSR2'
])
// Shape produced by pty.serialize and consumed by pty.revive.
type SerializedPtyEntry = { id: string; pid: number; cols: number; rows: number; cwd: string }
/**
 * Hosts PTY sessions on the remote side of the relay.
 *
 * Registers all pty.* request/notification handlers on the dispatcher, keeps
 * a bounded replay buffer per PTY for reattach, and runs a "grace timer" that
 * lets PTYs outlive a client disconnect for a limited window.
 */
export class PtyHandler {
  private ptys = new Map<string, ManagedPty>()
  private nextId = 1
  private dispatcher: RelayDispatcher
  private graceTimeMs: number
  /** Pending disconnect-grace timeout; null while a client is connected. */
  private graceTimer: ReturnType<typeof setTimeout> | null = null
  constructor(dispatcher: RelayDispatcher, graceTimeMs = DEFAULT_GRACE_TIME_MS) {
    this.dispatcher = dispatcher
    this.graceTimeMs = graceTimeMs
    this.registerHandlers()
  }
  /** Wire onData/onExit listeners for a managed PTY and store it. */
  private wireAndStore(managed: ManagedPty): void {
    this.ptys.set(managed.id, managed)
    managed.pty.onData((data: string) => {
      // Keep only a bounded tail so attach-replay memory stays capped.
      managed.buffered += data
      if (managed.buffered.length > REPLAY_BUFFER_MAX) {
        managed.buffered = managed.buffered.slice(-REPLAY_BUFFER_MAX)
      }
      this.dispatcher.notify('pty.data', { id: managed.id, data })
    })
    managed.pty.onExit(({ exitCode }: { exitCode: number }) => {
      // Why: If the PTY exits normally (or via SIGTERM), we must clear the
      // SIGKILL fallback timer to avoid sending SIGKILL to a recycled PID.
      if (managed.killTimer) {
        clearTimeout(managed.killTimer)
        managed.killTimer = undefined
      }
      this.dispatcher.notify('pty.exit', { id: managed.id, code: exitCode })
      this.ptys.delete(managed.id)
    })
  }
  /** Register every pty.* request and notification handler. */
  private registerHandlers(): void {
    this.dispatcher.onRequest('pty.spawn', (p) => this.spawn(p))
    this.dispatcher.onRequest('pty.attach', (p) => this.attach(p))
    this.dispatcher.onRequest('pty.shutdown', (p) => this.shutdown(p))
    this.dispatcher.onRequest('pty.sendSignal', (p) => this.sendSignal(p))
    this.dispatcher.onRequest('pty.getCwd', (p) => this.getCwd(p))
    this.dispatcher.onRequest('pty.getInitialCwd', (p) => this.getInitialCwd(p))
    this.dispatcher.onRequest('pty.clearBuffer', (p) => this.clearBuffer(p))
    this.dispatcher.onRequest('pty.hasChildProcesses', (p) => this.hasChildProcesses(p))
    this.dispatcher.onRequest('pty.getForegroundProcess', (p) => this.getForegroundProcess(p))
    this.dispatcher.onRequest('pty.listProcesses', () => this.listProcesses())
    this.dispatcher.onRequest('pty.getDefaultShell', async () => resolveDefaultShell())
    this.dispatcher.onRequest('pty.serialize', (p) => this.serialize(p))
    this.dispatcher.onRequest('pty.revive', (p) => this.revive(p))
    this.dispatcher.onRequest('pty.getProfiles', async () => listShellProfiles())
    this.dispatcher.onNotification('pty.data', (p) => this.writeData(p))
    this.dispatcher.onNotification('pty.resize', (p) => this.resize(p))
    this.dispatcher.onNotification('pty.ackData', (_p) => {
      /* flow control ack -- not yet enforced */
    })
  }
  /**
   * Spawn a new login-shell PTY.
   * @returns the new PTY's id (`pty-N`).
   * @throws when the session cap is reached or node-pty is unavailable.
   */
  private async spawn(params: Record<string, unknown>): Promise<{ id: string }> {
    if (this.ptys.size >= 50) {
      throw new Error('Maximum number of PTY sessions reached (50)')
    }
    const pty = await loadPty()
    if (!pty) {
      throw new Error('node-pty is not available on this remote host')
    }
    const cols = (params.cols as number) || 80
    const rows = (params.rows as number) || 24
    const cwd = (params.cwd as string) || process.env.HOME || '/'
    const env = params.env as Record<string, string> | undefined
    const shell = resolveDefaultShell()
    const id = `pty-${this.nextId++}`
    // Why: SSH exec channels give the relay a minimal environment without
    // .zprofile/.bash_profile sourced. Spawning a login shell ensures PATH
    // includes Homebrew, nvm, and user-installed CLIs (claude, codex, gh).
    const term = pty.spawn(shell, ['-l'], {
      name: 'xterm-256color',
      cols,
      rows,
      cwd,
      env: { ...process.env, ...env } as Record<string, string>
    })
    this.wireAndStore({ id, pty: term, initialCwd: cwd, buffered: '' })
    return { id }
  }
  /** Reattach a client to an existing PTY, replaying recent output. */
  private async attach(params: Record<string, unknown>): Promise<void> {
    const id = params.id as string
    const managed = this.ptys.get(id)
    if (!managed) {
      throw new Error(`PTY "${id}" not found`)
    }
    // Replay buffered output
    if (managed.buffered) {
      this.dispatcher.notify('pty.replay', { id, data: managed.buffered })
    }
  }
  /** Forward client keystrokes/input to the PTY (silently drops unknown ids). */
  private writeData(params: Record<string, unknown>): void {
    const id = params.id as string
    const data = params.data as string
    if (typeof data !== 'string') {
      return
    }
    const managed = this.ptys.get(id)
    if (managed) {
      managed.pty.write(data)
    }
  }
  /** Resize a PTY, clamping dimensions to a sane 1..500 range. */
  private resize(params: Record<string, unknown>): void {
    const id = params.id as string
    const cols = Math.max(1, Math.min(500, Math.floor(Number(params.cols) || 80)))
    const rows = Math.max(1, Math.min(500, Math.floor(Number(params.rows) || 24)))
    const managed = this.ptys.get(id)
    if (managed) {
      managed.pty.resize(cols, rows)
    }
  }
  /**
   * Terminate a PTY: SIGKILL when `immediate`, otherwise SIGTERM with a
   * 5-second SIGKILL fallback. A no-op for unknown ids.
   */
  private async shutdown(params: Record<string, unknown>): Promise<void> {
    const id = params.id as string
    const immediate = params.immediate as boolean
    const managed = this.ptys.get(id)
    if (!managed) {
      return
    }
    // Why: a repeated shutdown call must not stack SIGKILL fallback timers —
    // clear any pending one before deciding how to kill.
    if (managed.killTimer) {
      clearTimeout(managed.killTimer)
      managed.killTimer = undefined
    }
    if (immediate) {
      managed.pty.kill('SIGKILL')
    } else {
      managed.pty.kill('SIGTERM')
      // Why: Some processes ignore SIGTERM (e.g. a hung child, a custom signal
      // handler). Without a SIGKILL fallback the PTY process would leak and the
      // managed entry would never be cleaned up. The 5-second window gives
      // well-behaved processes time to flush and exit gracefully. The timer is
      // cleared in the onExit handler if the process terminates on its own.
      managed.killTimer = setTimeout(() => {
        if (this.ptys.has(id)) {
          managed.pty.kill('SIGKILL')
        }
      }, 5000)
    }
  }
  /**
   * Deliver an arbitrary (whitelisted) signal to a PTY.
   * @throws for non-whitelisted signals or unknown ids.
   */
  private async sendSignal(params: Record<string, unknown>): Promise<void> {
    const id = params.id as string
    const signal = params.signal as string
    if (!ALLOWED_SIGNALS.has(signal)) {
      throw new Error(`Signal not allowed: ${signal}`)
    }
    const managed = this.ptys.get(id)
    if (!managed) {
      throw new Error(`PTY "${id}" not found`)
    }
    managed.pty.kill(signal)
  }
  /** Resolve the PTY's live working directory (falls back to initialCwd). */
  private async getCwd(params: Record<string, unknown>): Promise<string> {
    const id = params.id as string
    const managed = this.ptys.get(id)
    if (!managed) {
      throw new Error(`PTY "${id}" not found`)
    }
    return resolveProcessCwd(managed.pty.pid, managed.initialCwd)
  }
  /** Return the directory the PTY was originally spawned in. */
  private async getInitialCwd(params: Record<string, unknown>): Promise<string> {
    const id = params.id as string
    const managed = this.ptys.get(id)
    if (!managed) {
      throw new Error(`PTY "${id}" not found`)
    }
    return managed.initialCwd
  }
  /** Clear the PTY's scrollback AND the relay-side replay buffer. */
  private async clearBuffer(params: Record<string, unknown>): Promise<void> {
    const id = params.id as string
    const managed = this.ptys.get(id)
    if (managed) {
      // Why: the replay buffer must be reset together with the PTY's own
      // buffer — otherwise a later pty.attach would replay output the client
      // explicitly cleared.
      managed.buffered = ''
      managed.pty.clear()
    }
  }
  /** Whether the PTY's shell currently has child processes (false if unknown). */
  private async hasChildProcesses(params: Record<string, unknown>): Promise<boolean> {
    const id = params.id as string
    const managed = this.ptys.get(id)
    if (!managed) {
      return false
    }
    return await processHasChildren(managed.pty.pid)
  }
  /** Name of the PTY's process as reported by ps, or null if unavailable. */
  private async getForegroundProcess(params: Record<string, unknown>): Promise<string | null> {
    const id = params.id as string
    const managed = this.ptys.get(id)
    if (!managed) {
      return null
    }
    return await getForegroundProcessName(managed.pty.pid)
  }
  /** Summarize all live PTYs (id, spawn cwd, process title). */
  private async listProcesses(): Promise<{ id: string; cwd: string; title: string }[]> {
    const results: { id: string; cwd: string; title: string }[] = []
    for (const [id, managed] of this.ptys) {
      const title = (await getForegroundProcessName(managed.pty.pid)) || 'shell'
      results.push({ id, cwd: managed.initialCwd, title })
    }
    return results
  }
  /** Serialize the requested PTYs (unknown ids are silently skipped). */
  private async serialize(params: Record<string, unknown>): Promise<string> {
    const ids = params.ids as string[]
    const entries: SerializedPtyEntry[] = []
    for (const id of ids) {
      const managed = this.ptys.get(id)
      if (!managed) {
        continue
      }
      const { pid, cols, rows } = managed.pty
      entries.push({ id, pid, cols, rows, cwd: managed.initialCwd })
    }
    return JSON.stringify(entries)
  }
  /**
   * Recreate PTYs from serialized state for entries whose original process
   * is still alive. Entries with dead pids or already-live ids are skipped.
   */
  private async revive(params: Record<string, unknown>): Promise<void> {
    const state = params.state as string
    const entries = JSON.parse(state) as SerializedPtyEntry[]
    for (const entry of entries) {
      if (this.ptys.has(entry.id)) {
        continue
      }
      // Only re-attach if the original process is still alive
      // (signal 0 probes for existence without delivering anything).
      try {
        process.kill(entry.pid, 0)
      } catch {
        continue
      }
      const ptyMod = await loadPty()
      if (!ptyMod) {
        continue
      }
      const term = ptyMod.spawn(resolveDefaultShell(), ['-l'], {
        name: 'xterm-256color',
        cols: entry.cols,
        rows: entry.rows,
        cwd: entry.cwd,
        env: process.env as Record<string, string>
      })
      this.wireAndStore({ id: entry.id, pty: term, initialCwd: entry.cwd, buffered: '' })
      // Why: nextId starts at 1 and is only incremented by spawn(). Revived
      // PTYs carry their original IDs (e.g. "pty-3"), so without this bump the
      // next spawn() would generate an ID that collides with an already-active
      // revived PTY.
      const match = entry.id.match(/^pty-(\d+)$/)
      if (match) {
        const revivedNum = parseInt(match[1], 10)
        if (revivedNum >= this.nextId) {
          this.nextId = revivedNum + 1
        }
      }
    }
  }
  /**
   * Start the disconnect grace window. Fires `onExpire` immediately when no
   * PTYs are alive, otherwise after `graceTimeMs`.
   */
  startGraceTimer(onExpire: () => void): void {
    this.cancelGraceTimer()
    if (this.ptys.size === 0) {
      onExpire()
      return
    }
    this.graceTimer = setTimeout(() => {
      onExpire()
    }, this.graceTimeMs)
  }
  /** Cancel a pending grace timer (no-op if none is running). */
  cancelGraceTimer(): void {
    if (this.graceTimer) {
      clearTimeout(this.graceTimer)
      this.graceTimer = null
    }
  }
  /** Tear everything down: cancel timers, SIGTERM all PTYs, forget them. */
  dispose(): void {
    this.cancelGraceTimer()
    for (const [, managed] of this.ptys) {
      if (managed.killTimer) {
        clearTimeout(managed.killTimer)
      }
      managed.pty.kill('SIGTERM')
    }
    this.ptys.clear()
  }
  /** Number of currently managed PTY sessions. */
  get activePtyCount(): number {
    return this.ptys.size
  }
}

View file

@ -0,0 +1,133 @@
import { execFile as execFileCb } from 'child_process'
import { existsSync, readFileSync, readlinkSync } from 'fs'
import { promisify } from 'util'
const execFile = promisify(execFileCb)
/**
 * Resolve the default shell for PTY spawning.
 * Prefers $SHELL, then common fallbacks.
 */
export function resolveDefaultShell(): string {
  // $SHELL wins when it points at a real file; otherwise walk the fallbacks.
  const candidates = [process.env.SHELL, '/bin/bash', '/bin/zsh', '/bin/sh']
  for (const candidate of candidates) {
    if (candidate && existsSync(candidate)) {
      return candidate
    }
  }
  return '/bin/sh'
}
/**
 * Resolve the current working directory of a process by pid.
 * Tries /proc on Linux and lsof on macOS before falling back to `fallbackCwd`.
 */
export async function resolveProcessCwd(pid: number, fallbackCwd: string): Promise<string> {
  // Try to read /proc/{pid}/cwd on Linux
  const procCwd = `/proc/${pid}/cwd`
  if (existsSync(procCwd)) {
    try {
      // Why: /proc/{pid}/cwd is a symlink to the live working directory; a
      // plain readlink resolves it without spawning a subprocess. (Uses the
      // statically imported readlinkSync — no need for a per-call dynamic
      // import of 'fs' when the module already imports from it.)
      return readlinkSync(procCwd)
    } catch {
      // Fall through
    }
  }
  // Fallback: use lsof on macOS
  // Why: `-d cwd` restricts output to the cwd file descriptor only. Without it,
  // lsof returns ALL open files (sockets, log files, TTYs) and the first `n`-line
  // could be any of them — not the actual working directory.
  try {
    const { stdout: output } = await execFile('lsof', ['-p', String(pid), '-d', 'cwd', '-Fn'], {
      encoding: 'utf-8',
      timeout: 3000
    })
    const lines = output.split('\n')
    for (const line of lines) {
      if (line.startsWith('n') && line.includes('/')) {
        const candidate = line.slice(1)
        if (existsSync(candidate)) {
          return candidate
        }
      }
    }
  } catch {
    // Fall through
  }
  return fallbackCwd
}
/**
 * Check whether a process has child processes (via pgrep).
 */
export async function processHasChildren(pid: number): Promise<boolean> {
  try {
    const result = await execFile('pgrep', ['-P', String(pid)], {
      encoding: 'utf-8',
      timeout: 3000
    })
    return result.stdout.trim() !== ''
  } catch {
    // pgrep exits nonzero when there are no matches; treat that as "no children".
    return false
  }
}
/**
 * Get the foreground process name of a given pid (via ps).
 */
export async function getForegroundProcessName(pid: number): Promise<string | null> {
  try {
    const result = await execFile('ps', ['-o', 'comm=', '-p', String(pid)], {
      encoding: 'utf-8',
      timeout: 3000
    })
    const name = result.stdout.trim()
    return name === '' ? null : name
  } catch {
    return null
  }
}
/**
 * List available shell profiles from /etc/shells (or known fallbacks).
 */
export function listShellProfiles(): { name: string; path: string }[] {
  const seen = new Set<string>()
  const profiles: { name: string; path: string }[] = []
  // Add one shell path, skipping duplicates and paths that do not exist.
  const add = (shellPath: string): void => {
    if (seen.has(shellPath) || !existsSync(shellPath)) {
      return
    }
    seen.add(shellPath)
    profiles.push({ name: shellPath.split('/').pop() || shellPath, path: shellPath })
  }
  try {
    for (const rawLine of readFileSync('/etc/shells', 'utf-8').split('\n')) {
      const entry = rawLine.trim()
      if (entry && !entry.startsWith('#')) {
        add(entry)
      }
    }
  } catch {
    // /etc/shells may not exist on all systems; fall back to known shells
    for (const fallback of ['/bin/bash', '/bin/zsh', '/bin/sh']) {
      add(fallback)
    }
  }
  return profiles
}

99
src/relay/relay.ts Normal file
View file

@ -0,0 +1,99 @@
#!/usr/bin/env node
// Orca Relay — lightweight daemon deployed to remote hosts.
// Communicates over stdin/stdout using the framed JSON-RPC protocol.
// The Electron app (client) deploys this script via SCP and launches
// it via an SSH exec channel.
import { RELAY_SENTINEL } from './protocol'
import { RelayDispatcher } from './dispatcher'
import { RelayContext } from './context'
import { PtyHandler } from './pty-handler'
import { FsHandler } from './fs-handler'
import { GitHandler } from './git-handler'
const DEFAULT_GRACE_MS = 5 * 60 * 1000
/**
 * Parse relay CLI arguments.
 * `--grace-time <seconds>` sets how long PTYs survive a client disconnect;
 * missing or invalid values leave the default in place.
 */
function parseArgs(argv: string[]): { graceTimeMs: number } {
  let graceTimeMs = DEFAULT_GRACE_MS
  let index = 2
  while (index < argv.length) {
    const flag = argv[index]
    const value = argv[index + 1]
    if (flag === '--grace-time' && value) {
      const seconds = parseInt(value, 10)
      // Why: the CLI flag is in seconds for ergonomics, but internally we track ms.
      if (!isNaN(seconds) && seconds > 0) {
        graceTimeMs = seconds * 1000
      }
      index += 2
    } else {
      index += 1
    }
  }
  return { graceTimeMs }
}
/**
 * Relay entry point: wire the dispatcher to stdin/stdout, register all
 * handlers, then announce readiness with the sentinel string.
 */
function main(): void {
  const { graceTimeMs } = parseArgs(process.argv)
  // Why: After an uncaught exception Node's internal state may be corrupted
  // (e.g. half-written buffers, broken invariants). Logging and continuing
  // would risk silent data corruption or zombie PTYs. We log for diagnostics
  // and then exit so the client can detect the disconnect and reconnect cleanly.
  process.on('uncaughtException', (err) => {
    process.stderr.write(`[relay] Uncaught exception: ${err.message}\n`)
    process.exit(1)
  })
  // All outbound frames go to stdout; the SSH exec channel carries them.
  const dispatcher = new RelayDispatcher((data) => {
    process.stdout.write(data)
  })
  const context = new RelayContext()
  dispatcher.onNotification('session.registerRoot', (params) => {
    const rootPath = params.rootPath as string
    if (rootPath) {
      context.registerRoot(rootPath)
    }
  })
  const ptyHandler = new PtyHandler(dispatcher, graceTimeMs)
  const fsHandler = new FsHandler(dispatcher, context)
  // Why: GitHandler registers its own request handlers on construction,
  // so we hold the reference only for potential future disposal.
  const _gitHandler = new GitHandler(dispatcher, context)
  void _gitHandler
  // Read framed binary data from stdin
  process.stdin.on('data', (chunk: Buffer) => {
    // Any inbound traffic means a client is attached — stop the grace timer.
    ptyHandler.cancelGraceTimer()
    dispatcher.feed(chunk)
  })
  process.stdin.on('end', () => {
    // Client disconnected — start grace timer to keep PTYs alive
    // for possible reconnection
    ptyHandler.startGraceTimer(() => {
      shutdown()
    })
  })
  process.stdin.on('error', () => {
    ptyHandler.startGraceTimer(() => {
      shutdown()
    })
  })
  function shutdown(): void {
    dispatcher.dispose()
    ptyHandler.dispose()
    fsHandler.dispose()
    process.exit(0)
  }
  process.on('SIGTERM', shutdown)
  process.on('SIGINT', shutdown)
  // Signal readiness to the client — the client watches for this exact
  // string before sending framed data.
  process.stdout.write(RELAY_SENTINEL)
}
main()

View file

@ -0,0 +1,170 @@
import { spawn, type ChildProcess } from 'child_process'
import {
RELAY_SENTINEL,
FrameDecoder,
encodeJsonRpcFrame,
parseJsonRpcMessage,
MessageType,
type JsonRpcRequest,
type JsonRpcResponse,
type JsonRpcNotification
} from './protocol'
/**
 * Test-harness handle around a spawned relay subprocess, driving it over the
 * framed stdin/stdout JSON-RPC protocol.
 */
export type RelayProcess = {
  /** The underlying child process; stdin/stdout carry framed protocol data. */
  proc: ChildProcess
  /** Every decoded JSON-RPC response and notification, in arrival order. */
  responses: (JsonRpcResponse | JsonRpcNotification)[]
  /** Resolves once the relay has printed its readiness sentinel on stdout. */
  sentinelReceived: Promise<void>
  /** Sends a request frame; returns the id to pass to waitForResponse. */
  send: (method: string, params?: Record<string, unknown>) => number
  /** Sends a fire-and-forget notification frame (no id, no response). */
  sendNotification: (method: string, params?: Record<string, unknown>) => void
  /** Polls until the response with the given id arrives (default timeout 5s). */
  waitForResponse: (id: number, timeoutMs?: number) => Promise<JsonRpcResponse>
  /** Polls for the next notification with the given method (default timeout 5s). */
  waitForNotification: (method: string, timeoutMs?: number) => Promise<JsonRpcNotification>
  /** Delivers a signal to the subprocess (default SIGTERM). */
  kill: (signal?: NodeJS.Signals) => void
  /** Resolves with the exit code (null when signal-killed) or rejects on timeout. */
  waitForExit: (timeoutMs?: number) => Promise<number | null>
}
/**
 * Spawns the bundled relay as a `node` subprocess and returns a harness for
 * driving it over the framed stdin/stdout protocol.
 *
 * The harness buffers raw stdout until the relay prints RELAY_SENTINEL, then
 * resolves `sentinelReceived` and starts frame-decoding (any bytes following
 * the sentinel in the same chunk are fed to the decoder). Every decoded
 * JSON-RPC response/notification is collected into `responses`, and polling
 * helpers wait for a specific response id or notification method.
 *
 * @param entryPath Path to the bundled relay entry script.
 * @param args Extra CLI arguments forwarded to the relay.
 */
export function spawnRelay(entryPath: string, args: string[] = []): RelayProcess {
  const proc = spawn('node', [entryPath, ...args], {
    stdio: ['pipe', 'pipe', 'pipe']
  })
  const responses: (JsonRpcResponse | JsonRpcNotification)[] = []
  let nextSeq = 1
  let sentinelResolved = false
  let stdoutBuffer = Buffer.alloc(0)
  // Why: definite-assignment assertion — the Promise executor below runs
  // synchronously, so this is always assigned before first use.
  let sentinelResolve!: () => void
  let decoderActive = false
  const sentinelReceived = new Promise<void>((resolve) => {
    sentinelResolve = resolve
  })
  const decoder = new FrameDecoder((frame) => {
    // Only regular frames carry JSON-RPC payloads; ignore everything else.
    if (frame.type !== MessageType.Regular) {
      return
    }
    try {
      const msg = parseJsonRpcMessage(frame.payload)
      responses.push(msg as JsonRpcResponse | JsonRpcNotification)
    } catch {
      /* skip malformed */
    }
  })
  proc.stdout!.on('data', (chunk: Buffer) => {
    if (!sentinelResolved) {
      // Buffer raw output until the readiness sentinel appears; the relay
      // only emits framed data after printing it.
      stdoutBuffer = Buffer.concat([stdoutBuffer, chunk])
      const sentinelBuf = Buffer.from(RELAY_SENTINEL, 'utf-8')
      const idx = stdoutBuffer.indexOf(sentinelBuf)
      if (idx !== -1) {
        sentinelResolved = true
        decoderActive = true
        sentinelResolve()
        const remainder = stdoutBuffer.subarray(idx + sentinelBuf.length)
        if (remainder.length > 0) {
          decoder.feed(remainder)
        }
      }
    } else if (decoderActive) {
      decoder.feed(chunk)
    }
  })
  proc.stderr!.on('data', () => {
    /* drain */
  })
  /** Sends a JSON-RPC request; returns the id to await with waitForResponse. */
  const send = (method: string, params?: Record<string, unknown>): number => {
    const id = nextSeq++
    const req: JsonRpcRequest = {
      jsonrpc: '2.0',
      id,
      method,
      ...(params !== undefined ? { params } : {})
    }
    proc.stdin!.write(encodeJsonRpcFrame(req, id, 0))
    return id
  }
  /** Sends a JSON-RPC notification (no id; no response is expected). */
  const sendNotification = (method: string, params?: Record<string, unknown>): void => {
    const seq = nextSeq++
    const notif: JsonRpcNotification = {
      jsonrpc: '2.0',
      method,
      ...(params !== undefined ? { params } : {})
    }
    proc.stdin!.write(encodeJsonRpcFrame(notif, seq, 0))
  }
  /** Polls `responses` every 10ms until a response with the given id arrives. */
  const waitForResponse = (id: number, timeoutMs = 5000): Promise<JsonRpcResponse> => {
    return new Promise((resolve, reject) => {
      const deadline = Date.now() + timeoutMs
      const check = () => {
        const found = responses.find((r) => 'id' in r && r.id === id) as JsonRpcResponse | undefined
        if (found) {
          resolve(found)
          return
        }
        if (Date.now() > deadline) {
          reject(new Error(`Timed out waiting for response id=${id}`))
          return
        }
        setTimeout(check, 10)
      }
      check()
    })
  }
  /**
   * Polls for a notification with the given method. Only notifications that
   * arrive after this call are considered (earlier ones are skipped).
   */
  const waitForNotification = (method: string, timeoutMs = 5000): Promise<JsonRpcNotification> => {
    return new Promise((resolve, reject) => {
      const deadline = Date.now() + timeoutMs
      const seen = responses.length
      const check = () => {
        for (let i = seen; i < responses.length; i++) {
          const r = responses[i]
          if ('method' in r && r.method === method) {
            resolve(r as JsonRpcNotification)
            return
          }
        }
        if (Date.now() > deadline) {
          reject(new Error(`Timed out waiting for notification "${method}"`))
          return
        }
        setTimeout(check, 10)
      }
      check()
    })
  }
  const kill = (signal: NodeJS.Signals = 'SIGTERM') => {
    proc.kill(signal)
  }
  /** Resolves with the exit code (null when signal-killed) or rejects on timeout. */
  const waitForExit = (timeoutMs = 5000): Promise<number | null> => {
    return new Promise((resolve, reject) => {
      // Why: also check signalCode — a signal-killed process has a null
      // exitCode but its 'exit' event has already fired, so waiting for the
      // event again would never resolve and we would reject spuriously.
      if (proc.exitCode !== null || proc.signalCode !== null) {
        resolve(proc.exitCode)
        return
      }
      const timer = setTimeout(() => {
        reject(new Error('Timed out waiting for process exit'))
      }, timeoutMs)
      proc.once('exit', (code) => {
        clearTimeout(timer)
        resolve(code)
      })
    })
  }
  return {
    proc,
    responses,
    sentinelReceived,
    send,
    sendNotification,
    waitForResponse,
    waitForNotification,
    kill,
    waitForExit
  }
}

View file

@ -0,0 +1,202 @@
import { afterAll, beforeAll, describe, expect, it, afterEach } from 'vitest'
import { mkdtempSync, writeFileSync } from 'fs'
import { rm } from 'fs/promises'
import * as path from 'path'
import { tmpdir } from 'os'
import { execFileSync } from 'child_process'
import { build } from 'esbuild'
import { spawnRelay, type RelayProcess } from './subprocess-test-utils'
// Source entry for the relay; each run bundles it once into a temp directory,
// mirroring how the relay ships (a single CJS file executed by node).
const RELAY_TS_ENTRY = path.resolve(__dirname, 'relay.ts')

let bundleDir: string
let relayEntry: string

beforeAll(async () => {
  bundleDir = mkdtempSync(path.join(tmpdir(), 'relay-bundle-'))
  relayEntry = path.join(bundleDir, 'relay.js')
  await build({
    bundle: true,
    format: 'cjs',
    platform: 'node',
    target: 'node18',
    sourcemap: false,
    entryPoints: [RELAY_TS_ENTRY],
    outfile: relayEntry,
    // Why: native addons cannot be bundled by esbuild; leave them external.
    external: ['node-pty', '@parcel/watcher']
  })
}, 30_000)

afterAll(async () => {
  if (!bundleDir) {
    return
  }
  await rm(bundleDir, { recursive: true, force: true }).catch(() => {})
})

// Convenience wrapper: every test spawns the freshly bundled relay.
function spawn(args: string[] = []): RelayProcess {
  return spawnRelay(relayEntry, args)
}
// End-to-end tests that exercise the bundled relay as a real subprocess,
// speaking the framed JSON-RPC protocol over its stdin/stdout.
describe('Subprocess: Relay entry point', () => {
  let relay: RelayProcess | null = null
  let tmpDir: string

  afterEach(async () => {
    // Make sure no relay subprocess or temp worktree outlives its test.
    if (relay && relay.proc.exitCode === null) {
      relay.proc.kill('SIGKILL')
      await relay.waitForExit().catch(() => {})
    }
    relay = null
    if (tmpDir) {
      await rm(tmpDir, { recursive: true, force: true }).catch(() => {})
    }
  })

  // Startup handshake: the relay must print its readiness sentinel.
  it('prints sentinel on startup', async () => {
    relay = spawn()
    await relay.sentinelReceived
  }, 10_000)

  it('responds to fs.stat over stdin/stdout', async () => {
    tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-sub-'))
    writeFileSync(path.join(tmpDir, 'test.txt'), 'hello')
    relay = spawn()
    await relay.sentinelReceived
    // Root registration is required before fs requests are honored.
    relay.sendNotification('session.registerRoot', { rootPath: tmpDir })
    const id = relay.send('fs.stat', { filePath: path.join(tmpDir, 'test.txt') })
    const resp = await relay.waitForResponse(id)
    expect(resp.result).toBeDefined()
    const result = resp.result as { size: number; type: string }
    expect(result.type).toBe('file')
    expect(result.size).toBe(5)
  }, 10_000)

  it('responds to fs.readDir', async () => {
    tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-sub-'))
    writeFileSync(path.join(tmpDir, 'a.txt'), 'a')
    writeFileSync(path.join(tmpDir, 'b.txt'), 'b')
    relay = spawn()
    await relay.sentinelReceived
    relay.sendNotification('session.registerRoot', { rootPath: tmpDir })
    const id = relay.send('fs.readDir', { dirPath: tmpDir })
    const resp = await relay.waitForResponse(id)
    const entries = resp.result as { name: string }[]
    const names = entries.map((e) => e.name).sort()
    expect(names).toEqual(['a.txt', 'b.txt'])
  }, 10_000)

  // Round-trip: write a file through the relay, then read it back.
  it('responds to fs.readFile and fs.writeFile', async () => {
    tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-sub-'))
    relay = spawn()
    await relay.sentinelReceived
    relay.sendNotification('session.registerRoot', { rootPath: tmpDir })
    const filePath = path.join(tmpDir, 'output.txt')
    const wId = relay.send('fs.writeFile', { filePath, content: 'via subprocess' })
    const wResp = await relay.waitForResponse(wId)
    expect(wResp.error).toBeUndefined()
    const rId = relay.send('fs.readFile', { filePath })
    const rResp = await relay.waitForResponse(rId)
    const result = rResp.result as { content: string; isBinary: boolean }
    expect(result.content).toBe('via subprocess')
    expect(result.isBinary).toBe(false)
  }, 10_000)

  // Builds a real git repo with one dirty file and asks the relay for status.
  it('responds to git.status on a real repo', async () => {
    tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-sub-'))
    execFileSync('git', ['init'], { cwd: tmpDir, stdio: 'pipe' })
    execFileSync('git', ['config', 'user.email', 'test@test.com'], { cwd: tmpDir, stdio: 'pipe' })
    execFileSync('git', ['config', 'user.name', 'Test'], { cwd: tmpDir, stdio: 'pipe' })
    writeFileSync(path.join(tmpDir, 'file.txt'), 'content')
    execFileSync('git', ['add', '.'], { cwd: tmpDir, stdio: 'pipe' })
    execFileSync('git', ['commit', '-m', 'init'], { cwd: tmpDir, stdio: 'pipe' })
    writeFileSync(path.join(tmpDir, 'file.txt'), 'dirty')
    relay = spawn()
    await relay.sentinelReceived
    relay.sendNotification('session.registerRoot', { rootPath: tmpDir })
    const id = relay.send('git.status', { worktreePath: tmpDir })
    const resp = await relay.waitForResponse(id)
    const result = resp.result as { entries: { path: string; status: string }[] }
    expect(result.entries.length).toBeGreaterThan(0)
    expect(result.entries[0].path).toBe('file.txt')
    expect(result.entries[0].status).toBe('modified')
  }, 10_000)

  // -32601 is the JSON-RPC "Method not found" error code.
  it('returns JSON-RPC error for unknown method', async () => {
    relay = spawn()
    await relay.sentinelReceived
    const id = relay.send('does.not.exist', {})
    const resp = await relay.waitForResponse(id)
    expect(resp.error).toBeDefined()
    expect(resp.error!.code).toBe(-32601)
    expect(resp.error!.message).toContain('Method not found')
  }, 10_000)

  // Handler failures must surface as JSON-RPC errors, not crash the relay.
  it('returns error for failing handler', async () => {
    tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-sub-'))
    relay = spawn()
    await relay.sentinelReceived
    relay.sendNotification('session.registerRoot', { rootPath: tmpDir })
    const id = relay.send('fs.readFile', { filePath: path.join(tmpDir, 'nonexistent.txt') })
    const resp = await relay.waitForResponse(id)
    expect(resp.error).toBeDefined()
  }, 10_000)

  // In-flight requests are matched back to their ids regardless of order.
  it('handles multiple concurrent requests', async () => {
    tmpDir = mkdtempSync(path.join(tmpdir(), 'relay-sub-'))
    writeFileSync(path.join(tmpDir, 'one.txt'), '1')
    writeFileSync(path.join(tmpDir, 'two.txt'), '22')
    writeFileSync(path.join(tmpDir, 'three.txt'), '333')
    relay = spawn()
    await relay.sentinelReceived
    relay.sendNotification('session.registerRoot', { rootPath: tmpDir })
    const id1 = relay.send('fs.stat', { filePath: path.join(tmpDir, 'one.txt') })
    const id2 = relay.send('fs.stat', { filePath: path.join(tmpDir, 'two.txt') })
    const id3 = relay.send('fs.stat', { filePath: path.join(tmpDir, 'three.txt') })
    const [r1, r2, r3] = await Promise.all([
      relay.waitForResponse(id1),
      relay.waitForResponse(id2),
      relay.waitForResponse(id3)
    ])
    expect((r1.result as { size: number }).size).toBe(1)
    expect((r2.result as { size: number }).size).toBe(2)
    expect((r3.result as { size: number }).size).toBe(3)
  }, 10_000)

  // Exit may report either a code or a signal depending on timing/platform.
  it('shuts down cleanly on SIGTERM', async () => {
    relay = spawn()
    await relay.sentinelReceived
    relay.kill('SIGTERM')
    await relay.waitForExit()
    expect(relay.proc.exitCode !== null || relay.proc.signalCode !== null).toBe(true)
  }, 10_000)

  // With no PTYs the grace timer fires quickly and the relay exits with 0.
  it('exits immediately on stdin close when no PTYs exist', async () => {
    relay = spawn(['--grace-time', '100'])
    await relay.sentinelReceived
    relay.proc.stdin!.end()
    await relay.waitForExit(3000)
    expect(relay.proc.exitCode).toBe(0)
  }, 10_000)
})

View file

@ -8,6 +8,7 @@ import type { editor as monacoEditor } from 'monaco-editor'
import { useAppStore } from '@/store'
import { joinPath } from '@/lib/path'
import { setWithLRU } from '@/lib/scroll-cache'
import { getConnectionId } from '@/lib/connection-context'
import '@/lib/monaco-setup'
import { Button } from '@/components/ui/button'
import type { OpenFile } from '@/store/slices/editor'
@ -191,6 +192,7 @@ export default function CombinedDiffViewer({ file }: { file: OpenFile }): React.
let result: GitDiffResult
try {
const connectionId = getConnectionId(file.worktreeId) ?? undefined
result =
isBranchMode && branchCompare
? ((await window.api.git.branchDiff({
@ -202,12 +204,14 @@ export default function CombinedDiffViewer({ file }: { file: OpenFile }): React.
mergeBase: branchCompare.mergeBase!
},
filePath: entry.path,
oldPath: entry.oldPath
oldPath: entry.oldPath,
connectionId
})) as GitDiffResult)
: ((await window.api.git.diff({
worktreePath: file.filePath,
filePath: entry.path,
staged: 'area' in entry && entry.area === 'staged'
staged: 'area' in entry && entry.area === 'staged',
connectionId
})) as GitDiffResult)
} catch {
result = {
@ -268,7 +272,8 @@ export default function CombinedDiffViewer({ file }: { file: OpenFile }): React.
const content = modifiedEditor.getValue()
const absolutePath = joinPath(file.filePath, section.path)
try {
await window.api.fs.writeFile({ filePath: absolutePath, content })
const connectionId = getConnectionId(file.worktreeId) ?? undefined
await window.api.fs.writeFile({ filePath: absolutePath, content, connectionId })
setSections((prev) =>
prev.map((s, i) => (i === index ? { ...s, modifiedContent: content, dirty: false } : s))
)
@ -276,7 +281,7 @@ export default function CombinedDiffViewer({ file }: { file: OpenFile }): React.
console.error('Save failed:', err)
}
},
[file.filePath, sections]
[file.filePath, file.worktreeId, sections]
)
const handleSectionSaveRef = useRef(handleSectionSave)

View file

@ -7,6 +7,7 @@ import React, { useCallback, useEffect, useRef, useState, Suspense } from 'react
import * as monaco from 'monaco-editor'
import { Columns2, Copy, ExternalLink, FileText, Rows2 } from 'lucide-react'
import { useAppStore } from '@/store'
import { getConnectionId } from '@/lib/connection-context'
import { detectLanguage } from '@/lib/language-detect'
import { getEditorHeaderCopyState, getEditorHeaderOpenFileState } from './editor-header'
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/tooltip'
@ -160,7 +161,7 @@ export default function EditorPanel({
if (fileContents[activeFile.id]) {
return
}
void loadFileContent(activeFile.filePath, activeFile.id)
void loadFileContent(activeFile.filePath, activeFile.id, activeFile.worktreeId)
} else if (
activeFile.mode === 'diff' &&
activeFile.diffSource !== undefined &&
@ -182,17 +183,21 @@ export default function EditorPanel({
return () => window.clearTimeout(timeout)
}, [copiedPathToast])
const loadFileContent = useCallback(async (filePath: string, id: string): Promise<void> => {
try {
const result = (await window.api.fs.readFile({ filePath })) as FileContent
setFileContents((prev) => ({ ...prev, [id]: result }))
} catch (err) {
setFileContents((prev) => ({
...prev,
[id]: { content: `Error loading file: ${err}`, isBinary: false }
}))
}
}, [])
const loadFileContent = useCallback(
async (filePath: string, id: string, worktreeId?: string): Promise<void> => {
try {
const connectionId = getConnectionId(worktreeId ?? null) ?? undefined
const result = (await window.api.fs.readFile({ filePath, connectionId })) as FileContent
setFileContents((prev) => ({ ...prev, [id]: result }))
} catch (err) {
setFileContents((prev) => ({
...prev,
[id]: { content: `Error loading file: ${err}`, isBinary: false }
}))
}
},
[]
)
const loadDiffContent = useCallback(async (file: OpenFile | null): Promise<void> => {
if (!file) {
@ -208,6 +213,7 @@ export default function EditorPanel({
file.branchCompare?.baseOid && file.branchCompare.headOid && file.branchCompare.mergeBase
? file.branchCompare
: null
const connectionId = getConnectionId(file.worktreeId) ?? undefined
const result =
file.diffSource === 'branch' && branchCompare
? ((await window.api.git.branchDiff({
@ -219,12 +225,14 @@ export default function EditorPanel({
mergeBase: branchCompare.mergeBase!
},
filePath: file.relativePath,
oldPath: file.branchOldPath
oldPath: file.branchOldPath,
connectionId
})) as DiffContent)
: ((await window.api.git.diff({
worktreePath,
filePath: file.relativePath,
staged: file.diffSource === 'staged'
staged: file.diffSource === 'staged',
connectionId
})) as DiffContent)
setDiffContents((prev) => ({ ...prev, [file.id]: result }))
} catch (err) {
@ -320,7 +328,7 @@ export default function EditorPanel({
for (const file of matchingFiles) {
if (file.mode === 'edit') {
void loadFileContent(file.filePath, file.id)
void loadFileContent(file.filePath, file.id, file.worktreeId)
} else if (
file.mode === 'diff' &&
file.diffSource !== 'combined-uncommitted' &&

View file

@ -9,6 +9,7 @@ import {
DropdownMenuTrigger
} from '@/components/ui/dropdown-menu'
import { useAppStore } from '@/store'
import { getConnectionId } from '@/lib/connection-context'
import { scrollTopCache, cursorPositionCache, setWithLRU } from '@/lib/scroll-cache'
import '@/lib/monaco-setup'
import { computeEditorFontSize } from '@/lib/editor-font-zoom'
@ -373,10 +374,15 @@ export default function MonacoEditor({
onSelect={async () => {
// Derive worktree root from the absolute and relative paths
const worktreePath = filePath.slice(0, -(relativePath.length + 1))
const activeFile = useAppStore
.getState()
.openFiles.find((f) => f.filePath === filePath)
const connectionId = getConnectionId(activeFile?.worktreeId ?? null) ?? undefined
const url = await window.api.git.remoteFileUrl({
worktreePath,
relativePath,
line: gutterMenuLine
line: gutterMenuLine,
connectionId
})
if (url) {
window.api.ui.writeClipboardText(url)

View file

@ -2,6 +2,7 @@ import type { StoreApi } from 'zustand'
import { useAppStore } from '@/store'
import type { AppState } from '@/store'
import type { OpenFile } from '@/store/slices/editor'
import { getConnectionId } from '@/lib/connection-context'
import {
canAutoSaveOpenFile,
getOpenFilesForExternalFileChange,
@ -72,7 +73,12 @@ export function attachEditorAutosaveController(store: AppStoreApi): () => void {
}
const contentToSave = state.editorDrafts[file.id] ?? fallbackContent
await window.api.fs.writeFile({ filePath: liveFile.filePath, content: contentToSave })
const connectionId = getConnectionId(liveFile.worktreeId) ?? undefined
await window.api.fs.writeFile({
filePath: liveFile.filePath,
content: contentToSave,
connectionId
})
if ((saveGeneration.get(file.id) ?? 0) !== queuedGeneration) {
return

View file

@ -81,7 +81,11 @@ function isExternalUrl(src: string): boolean {
* returns the URL directly. Re-validates on window re-focus so deleted or
* replaced images are picked up.
*/
export function useLocalImageSrc(rawSrc: string | undefined, filePath: string): string | undefined {
export function useLocalImageSrc(
rawSrc: string | undefined,
filePath: string,
connectionId?: string | null
): string | undefined {
const [generation, setGeneration] = useState(cacheGeneration)
useEffect(() => {
@ -126,7 +130,7 @@ export function useLocalImageSrc(rawSrc: string | undefined, filePath: string):
let cancelled = false
window.api.fs
.readFile({ filePath: absolutePath })
.readFile({ filePath: absolutePath, connectionId: connectionId ?? undefined })
.then((result) => {
if (cancelled) {
return
@ -151,7 +155,7 @@ export function useLocalImageSrc(rawSrc: string | undefined, filePath: string):
return () => {
cancelled = true
}
}, [rawSrc, filePath, generation])
}, [rawSrc, filePath, generation, connectionId])
return displaySrc
}
@ -161,7 +165,11 @@ export function useLocalImageSrc(rawSrc: string | undefined, filePath: string):
* outside React (e.g. ProseMirror nodeViews). Resolves from cache when
* available.
*/
export async function loadLocalImageSrc(rawSrc: string, filePath: string): Promise<string | null> {
export async function loadLocalImageSrc(
rawSrc: string,
filePath: string,
connectionId?: string | null
): Promise<string | null> {
if (
rawSrc.startsWith('http://') ||
rawSrc.startsWith('https://') ||
@ -182,7 +190,10 @@ export async function loadLocalImageSrc(rawSrc: string, filePath: string): Promi
}
try {
const result = await window.api.fs.readFile({ filePath: absolutePath })
const result = await window.api.fs.readFile({
filePath: absolutePath,
connectionId: connectionId ?? undefined
})
if (result.isBinary && result.content) {
const url = base64ToBlobUrl(result.content, result.mimeType ?? 'image/png')
cacheBlobUrl(absolutePath, url)

View file

@ -1,5 +1,5 @@
import React, { useCallback, useMemo, useState } from 'react'
import { Check, ChevronsUpDown } from 'lucide-react'
import { Check, ChevronsUpDown, Globe } from 'lucide-react'
import { Button } from '@/components/ui/button'
import {
Command,
@ -67,11 +67,19 @@ export default function RepoCombobox({
data-repo-combobox-root="true"
>
{selectedRepo ? (
<RepoDotLabel
name={selectedRepo.displayName}
color={selectedRepo.badgeColor}
dotClassName="size-1.5"
/>
<span className="inline-flex min-w-0 items-center gap-1.5">
<RepoDotLabel
name={selectedRepo.displayName}
color={selectedRepo.badgeColor}
dotClassName="size-1.5"
/>
{selectedRepo.connectionId && (
<span className="shrink-0 inline-flex items-center gap-0.5 rounded bg-muted px-1 py-0.5 text-[9px] font-medium leading-none text-muted-foreground">
<Globe className="size-2.5" />
SSH
</span>
)}
</span>
) : (
<span className="text-muted-foreground">{placeholder}</span>
)}
@ -106,11 +114,19 @@ export default function RepoCombobox({
)}
/>
<div className="min-w-0 flex-1">
<RepoDotLabel
name={repo.displayName}
color={repo.badgeColor}
className="max-w-full"
/>
<span className="inline-flex items-center gap-1.5">
<RepoDotLabel
name={repo.displayName}
color={repo.badgeColor}
className="max-w-full"
/>
{repo.connectionId && (
<span className="shrink-0 inline-flex items-center gap-0.5 rounded bg-muted px-1 py-0.5 text-[9px] font-medium leading-none text-muted-foreground">
<Globe className="size-2.5" />
SSH
</span>
)}
</span>
<p className="mt-0.5 truncate text-[11px] text-muted-foreground">{repo.path}</p>
</div>
</CommandItem>

View file

@ -55,7 +55,7 @@ export default function FileExplorer(): React.JSX.Element {
refreshTree,
refreshDir,
resetAndLoad
} = useFileExplorerTree(worktreePath, expanded)
} = useFileExplorerTree(worktreePath, expanded, activeWorktreeId)
const [selectedPath, setSelectedPath] = useState<string | null>(null)
const [flashingPath, setFlashingPath] = useState<string | null>(null)

View file

@ -1,6 +1,7 @@
import React, { useCallback, useDeferredValue, useEffect, useMemo, useRef } from 'react'
import { useVirtualizer } from '@tanstack/react-virtual'
import { useAppStore } from '@/store'
import { getConnectionId } from '@/lib/connection-context'
import type { SearchFileResult, SearchMatch } from '../../../../shared/types'
import { buildSearchRows } from './search-rows'
import { cancelRevealFrame, openMatchResult } from './search-match-open'
@ -183,9 +184,11 @@ export default function Search(): React.JSX.Element {
searchTimerRef.current = null
try {
const state = useAppStore.getState()
const connectionId = getConnectionId(activeWorktreeId!) ?? undefined
const results = await window.api.fs.search({
query: query.trim(),
rootPath: worktreePath,
connectionId,
caseSensitive:
state.fileSearchStateByWorktree[activeWorktreeId!]?.caseSensitive ?? false,
wholeWord: state.fileSearchStateByWorktree[activeWorktreeId!]?.wholeWord ?? false,

View file

@ -47,6 +47,7 @@ import {
notifyEditorExternalFileChange,
requestEditorSaveQuiesce
} from '@/components/editor/editor-autosave'
import { getConnectionId } from '@/lib/connection-context'
import { PullRequestIcon } from './checks-helpers'
import type {
GitBranchChangeEntry,
@ -332,12 +333,13 @@ export default function SourceControl(): React.JSX.Element {
}
setIsExecutingBulk(true)
try {
await window.api.git.bulkStage({ worktreePath, filePaths: bulkStagePaths })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
await window.api.git.bulkStage({ worktreePath, filePaths: bulkStagePaths, connectionId })
clearSelection()
} finally {
setIsExecutingBulk(false)
}
}, [worktreePath, bulkStagePaths, clearSelection])
}, [worktreePath, bulkStagePaths, clearSelection, activeWorktreeId])
const handleBulkUnstage = useCallback(async () => {
if (!worktreePath || bulkUnstagePaths.length === 0) {
@ -345,12 +347,13 @@ export default function SourceControl(): React.JSX.Element {
}
setIsExecutingBulk(true)
try {
await window.api.git.bulkUnstage({ worktreePath, filePaths: bulkUnstagePaths })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
await window.api.git.bulkUnstage({ worktreePath, filePaths: bulkUnstagePaths, connectionId })
clearSelection()
} finally {
setIsExecutingBulk(false)
}
}, [worktreePath, bulkUnstagePaths, clearSelection])
}, [worktreePath, bulkUnstagePaths, clearSelection, activeWorktreeId])
const unresolvedConflicts = useMemo(
() => entries.filter((entry) => entry.conflictStatus === 'unresolved' && entry.conflictKind),
@ -387,9 +390,11 @@ export default function SourceControl(): React.JSX.Element {
}
try {
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
const result = await window.api.git.branchCompare({
worktreePath,
baseRef: effectiveBaseRef
baseRef: effectiveBaseRef,
connectionId
})
setGitBranchCompareResult(activeWorktreeId, requestKey, result)
} catch (error) {
@ -472,12 +477,13 @@ export default function SourceControl(): React.JSX.Element {
return
}
try {
await window.api.git.stage({ worktreePath, filePath })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
await window.api.git.stage({ worktreePath, filePath, connectionId })
} catch {
// git operation failed silently
}
},
[worktreePath]
[worktreePath, activeWorktreeId]
)
const handleUnstage = useCallback(
@ -486,12 +492,13 @@ export default function SourceControl(): React.JSX.Element {
return
}
try {
await window.api.git.unstage({ worktreePath, filePath })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
await window.api.git.unstage({ worktreePath, filePath, connectionId })
} catch {
// git operation failed silently
}
},
[worktreePath]
[worktreePath, activeWorktreeId]
)
const handleDiscard = useCallback(
@ -508,7 +515,8 @@ export default function SourceControl(): React.JSX.Element {
worktreePath,
relativePath: filePath
})
await window.api.git.discard({ worktreePath, filePath })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
await window.api.git.discard({ worktreePath, filePath, connectionId })
notifyEditorExternalFileChange({
worktreeId: activeWorktreeId,
worktreePath,

View file

@ -3,6 +3,7 @@ import type { Dispatch, SetStateAction } from 'react'
import { toast } from 'sonner'
import { useAppStore } from '@/store'
import { dirname } from '@/lib/path'
import { getConnectionId } from '@/lib/connection-context'
import { isPathEqualOrDescendant } from './file-explorer-paths'
import type { PendingDelete, TreeNode } from './file-explorer-types'
import { requestEditorSaveQuiesce } from '@/components/editor/editor-autosave'
@ -89,7 +90,8 @@ export function useFileDeletion({
// action cannot be undone by a trailing write that recreates the file.
await Promise.all(filesToClose.map((file) => requestEditorSaveQuiesce({ fileId: file.id })))
await window.api.fs.deletePath({ targetPath: node.path })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
await window.api.fs.deletePath({ targetPath: node.path, connectionId })
for (const file of filesToClose) {
closeFile(file.id)

View file

@ -4,6 +4,7 @@ import { toast } from 'sonner'
import { useAppStore } from '@/store'
import { basename, dirname, joinPath } from '@/lib/path'
import { detectLanguage } from '@/lib/language-detect'
import { getConnectionId } from '@/lib/connection-context'
import { requestEditorSaveQuiesce } from '@/components/editor/editor-autosave'
function extractIpcErrorMessage(err: unknown, fallback: string): string {
@ -150,7 +151,8 @@ export function useFileExplorerDragDrop({
await Promise.all(filesToMove.map((file) => requestEditorSaveQuiesce({ fileId: file.id })))
try {
await window.api.fs.rename({ oldPath: sourcePath, newPath })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
await window.api.fs.rename({ oldPath: sourcePath, newPath, connectionId })
} catch (err) {
toast.error(extractIpcErrorMessage(err, `Failed to move '${fileName}'.`))
return

View file

@ -4,6 +4,7 @@ import { toast } from 'sonner'
import { useAppStore } from '@/store'
import { detectLanguage } from '@/lib/language-detect'
import { dirname, joinPath } from '@/lib/path'
import { getConnectionId } from '@/lib/connection-context'
import type { InlineInput } from './FileExplorerRow'
import type { TreeNode } from './file-explorer-types'
@ -119,12 +120,14 @@ export function useFileExplorerInlineInput({
return
}
const run = async (): Promise<void> => {
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
if (inlineInput.type === 'rename' && inlineInput.existingPath) {
const parentDir = dirname(inlineInput.existingPath)
try {
await window.api.fs.rename({
oldPath: inlineInput.existingPath,
newPath: joinPath(parentDir, name)
newPath: joinPath(parentDir, name),
connectionId
})
} catch (err) {
toast.error(
@ -136,8 +139,8 @@ export function useFileExplorerInlineInput({
const fullPath = joinPath(inlineInput.parentPath, name)
try {
await (inlineInput.type === 'folder'
? window.api.fs.createDir({ dirPath: fullPath })
: window.api.fs.createFile({ filePath: fullPath }))
? window.api.fs.createDir({ dirPath: fullPath, connectionId })
: window.api.fs.createFile({ filePath: fullPath, connectionId }))
await refreshDir(inlineInput.parentPath)
if (inlineInput.type === 'file') {
openFile({

View file

@ -1,6 +1,7 @@
import type React from 'react'
import { useCallback, useMemo, useRef, useState } from 'react'
import { joinPath, normalizeRelativePath } from '@/lib/path'
import { getConnectionId } from '@/lib/connection-context'
import type { DirCache, TreeNode } from './file-explorer-types'
import { splitPathSegments } from './path-tree'
import { shouldIncludeFileExplorerEntry } from './file-explorer-entries'
@ -20,7 +21,8 @@ type UseFileExplorerTreeResult = {
export function useFileExplorerTree(
worktreePath: string | null,
expanded: Set<string>
expanded: Set<string>,
activeWorktreeId?: string | null
): UseFileExplorerTreeResult {
const [dirCache, setDirCache] = useState<Record<string, DirCache>>({})
const [rootError, setRootError] = useState<string | null>(null)
@ -45,7 +47,8 @@ export function useFileExplorerTree(
}
}))
try {
const entries = await window.api.fs.readDir({ dirPath })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
const entries = await window.api.fs.readDir({ dirPath, connectionId })
if (depth === -1) {
setRootError(null)
}
@ -72,7 +75,7 @@ export function useFileExplorerTree(
setDirCache((prev) => ({ ...prev, [dirPath]: { children: [], loading: false } }))
}
},
[worktreePath]
[activeWorktreeId, worktreePath]
)
const refreshTree = useCallback(async () => {

View file

@ -5,6 +5,7 @@ import type { DirCache } from './file-explorer-types'
import type { InlineInput } from './FileExplorerRow'
import { normalizeAbsolutePath } from './file-explorer-paths'
import { dirname } from '@/lib/path'
import { getConnectionId } from '@/lib/connection-context'
import {
purgeDirCacheSubtree,
purgeExpandedDirsSubtree,
@ -88,7 +89,8 @@ export function useFileExplorerWatch({
const currentWorktreePath = worktreePath
void window.api.fs.watchWorktree({ worktreePath })
const connectionId = getConnectionId(activeWorktreeId ?? null) ?? undefined
void window.api.fs.watchWorktree({ worktreePath, connectionId })
function processPayload(payload: FsChangedPayload): void {
// Why: during rapid worktree switches, in-flight batched events from
@ -209,10 +211,10 @@ export function useFileExplorerWatch({
return () => {
unsubscribeListener()
void window.api.fs.unwatchWorktree({ worktreePath })
void window.api.fs.unwatchWorktree({ worktreePath, connectionId })
deferredRef.current = []
}
}, [worktreePath, setDirCache, setSelectedPath])
}, [worktreePath, activeWorktreeId, setDirCache, setSelectedPath])
// ── Flush deferred events when interaction ends ────────────────────
useEffect(() => {

View file

@ -2,6 +2,7 @@ import { useCallback, useEffect, useMemo } from 'react'
import { useAppStore } from '@/store'
import type { GitConflictOperation, GitStatusResult } from '../../../../shared/types'
import { isGitRepoKind } from '../../../../shared/repo-kind'
import { getConnectionId } from '@/lib/connection-context'
const POLL_INTERVAL_MS = 3000
@ -70,7 +71,11 @@ export function useGitStatusPolling(): void {
return
}
try {
const status = (await window.api.git.status({ worktreePath })) as GitStatusResult
const connectionId = getConnectionId(activeWorktreeId) ?? undefined
const status = (await window.api.git.status({
worktreePath,
connectionId
})) as GitStatusResult
setGitStatus(activeWorktreeId, status)
} catch {
// ignore
@ -109,7 +114,8 @@ export function useGitStatusPolling(): void {
for (const { id, path } of staleConflictWorktrees) {
try {
const op = (await window.api.git.conflictOperation({
worktreePath: path
worktreePath: path,
connectionId: getConnectionId(id) ?? undefined
})) as GitConflictOperation
setConflictOperation(id, op)
} catch {

View file

@ -6,6 +6,7 @@ import {
GitBranch,
Keyboard,
Palette,
Server,
SlidersHorizontal,
SquareTerminal
} from 'lucide-react'
@ -21,6 +22,7 @@ import { TerminalPane, TERMINAL_PANE_SEARCH_ENTRIES } from './TerminalPane'
import { RepositoryPane, getRepositoryPaneSearchEntries } from './RepositoryPane'
import { GitPane, GIT_PANE_SEARCH_ENTRIES } from './GitPane'
import { NotificationsPane, NOTIFICATIONS_PANE_SEARCH_ENTRIES } from './NotificationsPane'
import { SshPane, SSH_PANE_SEARCH_ENTRIES } from './SshPane'
import { StatsPane, STATS_PANE_SEARCH_ENTRIES } from '../stats/StatsPane'
import { SettingsSidebar } from './SettingsSidebar'
import { SettingsSection } from './SettingsSection'
@ -34,6 +36,7 @@ type SettingsNavTarget =
| 'notifications'
| 'shortcuts'
| 'stats'
| 'ssh'
| 'repo'
type SettingsNavSection = {
@ -42,6 +45,7 @@ type SettingsNavSection = {
description: string
icon: typeof SlidersHorizontal
searchEntries: SettingsSearchEntry[]
badge?: string
}
function getSettingsSectionId(pane: SettingsNavTarget, repoId: string | null): string {
@ -259,6 +263,14 @@ function Settings(): React.JSX.Element {
icon: BarChart3,
searchEntries: STATS_PANE_SEARCH_ENTRIES
},
{
id: 'ssh',
title: 'SSH',
description: 'Remote SSH connections.',
icon: Server,
searchEntries: SSH_PANE_SEARCH_ENTRIES,
badge: 'Beta'
},
...repos.map((repo) => ({
id: `repo-${repo.id}`,
title: repo.displayName,
@ -369,7 +381,7 @@ function Settings(): React.JSX.Element {
.filter((section) => section.id.startsWith('repo-'))
.map((section) => {
const repo = repos.find((entry) => entry.id === section.id.replace('repo-', ''))
return { ...section, badgeColor: repo?.badgeColor }
return { ...section, badgeColor: repo?.badgeColor, isRemote: !!repo?.connectionId }
})
return (
@ -472,6 +484,16 @@ function Settings(): React.JSX.Element {
<StatsPane />
</SettingsSection>
<SettingsSection
id="ssh"
title="SSH"
badge="Beta"
description="Manage remote SSH connections. Connect to remote servers to browse files, run terminals, and use git."
searchEntries={SSH_PANE_SEARCH_ENTRIES}
>
<SshPane />
</SettingsSection>
{repos.map((repo) => {
const repoSectionId = `repo-${repo.id}`
const repoHooksState = repoHooksMap[repo.id]

View file

@ -9,6 +9,7 @@ type SettingsSectionProps = {
searchEntries: SettingsSearchEntry[]
children: React.ReactNode
className?: string
badge?: string
}
export function SettingsSection({
@ -17,7 +18,8 @@ export function SettingsSection({
description,
searchEntries,
children,
className
className,
badge
}: SettingsSectionProps): React.JSX.Element | null {
const query = useAppStore((state) => state.settingsSearchQuery)
if (!matchesSettingsSearch(query, searchEntries)) {
@ -37,7 +39,14 @@ export function SettingsSection({
}
>
<div className="space-y-1">
<h2 className="text-xl font-semibold">{title}</h2>
<h2 className="flex items-center gap-2 text-xl font-semibold">
{title}
{badge ? (
<span className="rounded-full bg-muted px-2 py-0.5 text-[10px] font-medium uppercase tracking-wider text-muted-foreground">
{badge}
</span>
) : null}
</h2>
<p className="text-sm text-muted-foreground">{description}</p>
</div>
{children}

View file

@ -1,4 +1,4 @@
import { ArrowLeft, Search, type LucideIcon, type LucideProps } from 'lucide-react'
import { ArrowLeft, Globe, Search, type LucideIcon, type LucideProps } from 'lucide-react'
import { Button } from '../ui/button'
import { Input } from '../ui/input'
@ -6,10 +6,12 @@ type NavSection = {
id: string
title: string
icon: LucideIcon | ((props: LucideProps) => React.JSX.Element)
badge?: string
}
type RepoNavSection = NavSection & {
badgeColor?: string
isRemote?: boolean
}
type SettingsSidebarProps = {
@ -78,6 +80,11 @@ export function SettingsSidebar({
>
<Icon className="mr-2 size-4" />
{section.title}
{section.badge ? (
<span className="ml-auto rounded-full bg-muted px-1.5 py-0.5 text-[9px] font-medium uppercase tracking-wider text-muted-foreground">
{section.badge}
</span>
) : null}
</button>
)
})}
@ -108,6 +115,12 @@ export function SettingsSidebar({
style={{ backgroundColor: section.badgeColor ?? '#6b7280' }}
/>
<span className="truncate">{section.title}</span>
{section.isRemote && (
<span className="ml-auto inline-flex shrink-0 items-center gap-1 text-[10px] text-muted-foreground">
<Globe className="size-3" />
SSH
</span>
)}
</button>
)
})}

View file

@ -0,0 +1,306 @@
import { useCallback, useEffect, useState } from 'react'
import { toast } from 'sonner'
import { Plus, Upload } from 'lucide-react'
import type { SshTarget } from '../../../../shared/ssh-types'
import { useAppStore } from '@/store'
import { Button } from '../ui/button'
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle
} from '../ui/dialog'
import type { SettingsSearchEntry } from './settings-search'
import { SshTargetCard } from './SshTargetCard'
import { SshTargetForm, EMPTY_FORM, type EditingTarget } from './SshTargetForm'
// Static entries feeding the settings search index so the SSH pane's
// features are discoverable from the settings search box.
export const SSH_PANE_SEARCH_ENTRIES: SettingsSearchEntry[] = [
  {
    title: 'SSH Connections',
    description: 'Manage remote SSH targets.',
    keywords: ['ssh', 'remote', 'server', 'connection', 'host']
  },
  {
    title: 'Add SSH Target',
    description: 'Add a new remote SSH target.',
    keywords: ['ssh', 'add', 'new', 'target', 'host', 'server']
  },
  {
    title: 'Import from SSH Config',
    description: 'Import hosts from ~/.ssh/config.',
    keywords: ['ssh', 'import', 'config', 'hosts']
  },
  {
    title: 'Test Connection',
    description: 'Test connectivity to an SSH target.',
    keywords: ['ssh', 'test', 'connection', 'ping']
  }
]
// SshPane takes no props today; the alias keeps the empty contract explicit.
type SshPaneProps = Record<string, never>

/**
 * Settings pane for managing SSH targets: list, add/edit, remove,
 * connect/disconnect, test connectivity, and import from ~/.ssh/config.
 * Persistence and connection work happen in the main process behind the
 * window.api.ssh IPC surface; this component only orchestrates UI state
 * and re-fetches the target list after every mutation.
 */
export function SshPane(_props: SshPaneProps): React.JSX.Element {
  // Persisted targets, loaded on mount and after every add/edit/remove/import.
  const [targets, setTargets] = useState<SshTarget[]>([])
  // Why: connection states are already hydrated and kept up-to-date by the
  // global store (via useIpcEvents.ts). Reading from the store avoids
  // duplicating the onStateChanged listener and per-target getState IPC calls.
  const sshConnectionStates = useAppStore((s) => s.sshConnectionStates)
  // Add/edit form visibility and contents; editingId is null when adding new.
  const [showForm, setShowForm] = useState(false)
  const [editingId, setEditingId] = useState<string | null>(null)
  const [form, setForm] = useState<EditingTarget>(EMPTY_FORM)
  // Target id whose connection test is in flight (disables its Test button).
  const [testing, setTesting] = useState<string | null>(null)
  // Target queued for removal; non-null opens the confirmation dialog.
  const [pendingRemove, setPendingRemove] = useState<{ id: string; label: string } | null>(null)

  // Fetches the persisted target list from the main process.
  const loadTargets = useCallback(async () => {
    try {
      const result = (await window.api.ssh.listTargets()) as SshTarget[]
      setTargets(result)
    } catch {
      toast.error('Failed to load SSH targets')
    }
  }, [])

  useEffect(() => {
    void loadTargets()
  }, [loadTargets])

  // Validates the form, then either updates the target being edited or adds
  // a new one; on success the form is reset and the list reloaded.
  const handleSave = async (): Promise<void> => {
    if (!form.host.trim() || !form.username.trim()) {
      toast.error('Host and username are required')
      return
    }
    // NOTE(review): parseInt accepts trailing garbage ("22abc" -> 22) and
    // truncates decimals — confirm this leniency is intended.
    const port = parseInt(form.port, 10)
    if (isNaN(port) || port < 1 || port > 65535) {
      toast.error('Port must be between 1 and 65535')
      return
    }
    // Optional fields are spread in only when non-blank so the stored target
    // omits them entirely instead of persisting empty strings.
    // NOTE(review): the fallback label interpolates the UNtrimmed
    // username/host — confirm that is intended vs. using the trimmed values.
    const target = {
      label: form.label.trim() || `${form.username}@${form.host}`,
      host: form.host.trim(),
      port,
      username: form.username.trim(),
      ...(form.identityFile.trim() ? { identityFile: form.identityFile.trim() } : {}),
      ...(form.proxyCommand.trim() ? { proxyCommand: form.proxyCommand.trim() } : {}),
      ...(form.jumpHost.trim() ? { jumpHost: form.jumpHost.trim() } : {})
    }
    try {
      if (editingId) {
        await window.api.ssh.updateTarget({ id: editingId, updates: target })
        toast.success('Target updated')
      } else {
        await window.api.ssh.addTarget({ target })
        toast.success('Target added')
      }
      setShowForm(false)
      setEditingId(null)
      setForm(EMPTY_FORM)
      await loadTargets()
    } catch (err) {
      toast.error(err instanceof Error ? err.message : 'Failed to save target')
    }
  }

  // Disconnects (if needed) and removes a target, then reloads the list.
  const handleRemove = async (id: string): Promise<void> => {
    try {
      // Why: disconnect any non-disconnected connection, including transitional
      // states (connecting, reconnecting, deploying-relay). Leaving these alive
      // would orphan SSH connections with providers registered for a removed target.
      const state = sshConnectionStates.get(id)
      if (state && state.status !== 'disconnected') {
        await window.api.ssh.disconnect({ targetId: id })
      }
      await window.api.ssh.removeTarget({ id })
      toast.success('Target removed')
      await loadTargets()
    } catch (err) {
      toast.error(err instanceof Error ? err.message : 'Failed to remove target')
    }
  }

  // Pre-fills the form from an existing target and opens it in edit mode.
  const handleEdit = (target: SshTarget): void => {
    setEditingId(target.id)
    setForm({
      label: target.label,
      host: target.host,
      port: String(target.port),
      username: target.username,
      identityFile: target.identityFile ?? '',
      proxyCommand: target.proxyCommand ?? '',
      jumpHost: target.jumpHost ?? ''
    })
    setShowForm(true)
  }

  // Initiates a connection; resulting state changes arrive via the store.
  const handleConnect = async (targetId: string): Promise<void> => {
    try {
      await window.api.ssh.connect({ targetId })
    } catch (err) {
      toast.error(err instanceof Error ? err.message : 'Connection failed')
    }
  }

  const handleDisconnect = async (targetId: string): Promise<void> => {
    try {
      await window.api.ssh.disconnect({ targetId })
    } catch (err) {
      toast.error(err instanceof Error ? err.message : 'Disconnect failed')
    }
  }

  // Runs a one-off connectivity test; `testing` gates the per-row spinner.
  const handleTest = async (targetId: string): Promise<void> => {
    setTesting(targetId)
    try {
      const result = await window.api.ssh.testConnection({ targetId })
      if (result.success) {
        toast.success('Connection successful')
      } else {
        toast.error(result.error ?? 'Connection test failed')
      }
    } catch (err) {
      toast.error(err instanceof Error ? err.message : 'Test failed')
    } finally {
      setTesting(null)
    }
  }

  // Imports hosts from ~/.ssh/config; the main process decides which hosts
  // count as new, so an empty result means nothing was added.
  const handleImport = async (): Promise<void> => {
    try {
      const imported = (await window.api.ssh.importConfig()) as SshTarget[]
      if (imported.length === 0) {
        toast('No new hosts found in ~/.ssh/config')
      } else {
        toast.success(`Imported ${imported.length} host${imported.length > 1 ? 's' : ''}`)
      }
      await loadTargets()
    } catch (err) {
      toast.error(err instanceof Error ? err.message : 'Import failed')
    }
  }

  // Closes the form and discards any in-progress edits.
  const cancelForm = (): void => {
    setShowForm(false)
    setEditingId(null)
    setForm(EMPTY_FORM)
  }

  return (
    <div className="space-y-4">
      {/* Header row */}
      <div className="flex items-center justify-between gap-3">
        <div className="space-y-0.5">
          <p className="text-sm font-medium">Targets</p>
          <p className="text-xs text-muted-foreground">
            Add a remote host to connect to it in Orca.
          </p>
        </div>
        <div className="flex shrink-0 items-center gap-1.5">
          <Button
            variant="outline"
            size="xs"
            onClick={() => void handleImport()}
            className="gap-1.5"
          >
            <Upload className="size-3" />
            Import
          </Button>
          {/* Hide "Add Target" while the form is open to avoid double-opening. */}
          {!showForm ? (
            <Button
              variant="outline"
              size="xs"
              onClick={() => {
                setEditingId(null)
                setForm(EMPTY_FORM)
                setShowForm(true)
              }}
              className="gap-1.5"
            >
              <Plus className="size-3" />
              Add Target
            </Button>
          ) : null}
        </div>
      </div>
      {/* Target list */}
      {targets.length === 0 && !showForm ? (
        <div className="flex items-center justify-center rounded-lg border border-dashed border-border/60 bg-card/30 px-4 py-5 text-sm text-muted-foreground">
          No SSH targets configured.
        </div>
      ) : (
        <div className="space-y-2">
          {targets.map((target) => (
            <SshTargetCard
              key={target.id}
              target={target}
              state={sshConnectionStates.get(target.id)}
              testing={testing === target.id}
              onConnect={(id) => void handleConnect(id)}
              onDisconnect={(id) => void handleDisconnect(id)}
              onTest={(id) => void handleTest(id)}
              onEdit={handleEdit}
              onRemove={(id) => setPendingRemove({ id, label: target.label })}
            />
          ))}
        </div>
      )}
      {/* Add/Edit form */}
      {showForm ? (
        <SshTargetForm
          editingId={editingId}
          form={form}
          onFormChange={setForm}
          onSave={() => void handleSave()}
          onCancel={cancelForm}
        />
      ) : null}
      {/* Remove confirmation dialog */}
      <Dialog
        open={!!pendingRemove}
        onOpenChange={(open) => {
          if (!open) {
            setPendingRemove(null)
          }
        }}
      >
        <DialogContent className="max-w-sm sm:max-w-sm" showCloseButton={false}>
          <DialogHeader>
            <DialogTitle className="text-sm">Remove SSH Target</DialogTitle>
            <DialogDescription className="text-xs">
              This will remove the target and disconnect any active sessions.
            </DialogDescription>
          </DialogHeader>
          {pendingRemove ? (
            <div className="rounded-md border border-border/70 bg-muted/35 px-3 py-2 text-xs">
              <div className="break-all text-muted-foreground">{pendingRemove.label}</div>
            </div>
          ) : null}
          <DialogFooter>
            <Button variant="outline" onClick={() => setPendingRemove(null)}>
              Cancel
            </Button>
            <Button
              variant="destructive"
              onClick={() => {
                if (pendingRemove) {
                  void handleRemove(pendingRemove.id)
                  setPendingRemove(null)
                }
              }}
            >
              Remove
            </Button>
          </DialogFooter>
        </DialogContent>
      </Dialog>
    </div>
  )
}

View file

@ -0,0 +1,158 @@
import { Loader2, MonitorSmartphone, Pencil, Server, Trash2, Wifi, WifiOff } from 'lucide-react'
import type {
SshTarget,
SshConnectionState,
SshConnectionStatus
} from '../../../../shared/ssh-types'
import { Button } from '../ui/button'
// ── Shared status helpers ────────────────────────────────────────────
// Human-readable label for each connection status, rendered next to the
// status dot in SshTargetCard. '\u2026' is a horizontal ellipsis, used to
// mark in-progress states.
export const STATUS_LABELS: Record<SshConnectionStatus, string> = {
  disconnected: 'Disconnected',
  connecting: 'Connecting\u2026',
  'host-key-verification': 'Verifying host key\u2026',
  'auth-challenge': 'Authenticating\u2026',
  'auth-failed': 'Auth failed',
  'deploying-relay': 'Deploying relay\u2026',
  connected: 'Connected',
  reconnecting: 'Reconnecting\u2026',
  'reconnection-failed': 'Reconnection failed',
  error: 'Error'
}
/**
 * Maps a connection status to the Tailwind background class used for the
 * colored status dot: green when connected, yellow for any in-progress
 * state, red for failures, and a muted neutral otherwise.
 */
export function statusColor(status: SshConnectionStatus): string {
  if (status === 'connected') {
    return 'bg-emerald-500'
  }
  const inProgress: readonly SshConnectionStatus[] = [
    'connecting',
    'host-key-verification',
    'auth-challenge',
    'deploying-relay',
    'reconnecting'
  ]
  if (inProgress.includes(status)) {
    return 'bg-yellow-500'
  }
  const failed: readonly SshConnectionStatus[] = ['auth-failed', 'reconnection-failed', 'error']
  if (failed.includes(status)) {
    return 'bg-red-500'
  }
  return 'bg-muted-foreground/40'
}
/**
 * True while a connection attempt is in a transitional, pre-connected
 * phase. Note: 'reconnecting' is deliberately NOT included — the caller
 * treats reconnection distinctly from an initial connect.
 */
export function isConnecting(status: SshConnectionStatus): boolean {
  switch (status) {
    case 'connecting':
    case 'host-key-verification':
    case 'auth-challenge':
    case 'deploying-relay':
      return true
    default:
      return false
  }
}
// ── SshTargetCard ────────────────────────────────────────────────────
// Props for a single SSH target row. All actions are delegated to the
// parent via callbacks; the card itself holds no state.
type SshTargetCardProps = {
  target: SshTarget
  // Live connection state from the store; undefined means no attempt yet.
  state: SshConnectionState | undefined
  // True while a connectivity test for this target is in flight.
  testing: boolean
  onConnect: (targetId: string) => void
  onDisconnect: (targetId: string) => void
  onTest: (targetId: string) => void
  onEdit: (target: SshTarget) => void
  onRemove: (targetId: string) => void
}

/**
 * One row in the SSH targets list: status dot + label, user@host:port line
 * (plus identity file when set), an optional error message, and action
 * buttons that vary with connection status — Disconnect when connected,
 * a disabled spinner while connecting, otherwise Connect/Test. Edit and
 * remove icon buttons are always shown.
 */
export function SshTargetCard({
  target,
  state,
  testing,
  onConnect,
  onDisconnect,
  onTest,
  onEdit,
  onRemove
}: SshTargetCardProps): React.JSX.Element {
  // A target with no recorded state is treated as disconnected.
  const status: SshConnectionStatus = state?.status ?? 'disconnected'
  return (
    <div className="flex items-center gap-3 rounded-lg border border-border/50 bg-card/40 px-4 py-3">
      <Server className="size-4 shrink-0 text-muted-foreground" />
      <div className="min-w-0 flex-1">
        <div className="flex items-center gap-2">
          <span className="truncate text-sm font-medium">{target.label}</span>
          <span className={`size-2 shrink-0 rounded-full ${statusColor(status)}`} />
          <span className="text-[11px] text-muted-foreground">{STATUS_LABELS[status]}</span>
        </div>
        {/* '\u2022' is a bullet separator before the identity file path. */}
        <p className="truncate text-xs text-muted-foreground">
          {target.username}@{target.host}:{target.port}
          {target.identityFile ? ` \u2022 ${target.identityFile}` : ''}
        </p>
        {state?.error ? (
          <p className="mt-0.5 truncate text-xs text-red-400">{state.error}</p>
        ) : null}
      </div>
      <div className="flex shrink-0 items-center gap-1">
        {status === 'connected' ? (
          <Button
            variant="ghost"
            size="xs"
            onClick={() => onDisconnect(target.id)}
            className="gap-1.5"
          >
            <WifiOff className="size-3" />
            Disconnect
          </Button>
        ) : isConnecting(status) ? (
          <Button variant="ghost" size="xs" disabled className="gap-1.5">
            <Loader2 className="size-3 animate-spin" />
            Connecting
          </Button>
        ) : (
          <>
            <Button
              variant="ghost"
              size="xs"
              onClick={() => onConnect(target.id)}
              className="gap-1.5"
            >
              <Wifi className="size-3" />
              Connect
            </Button>
            <Button
              variant="ghost"
              size="xs"
              onClick={() => onTest(target.id)}
              disabled={testing}
              className="gap-1.5"
            >
              {testing ? (
                <Loader2 className="size-3 animate-spin" />
              ) : (
                <MonitorSmartphone className="size-3" />
              )}
              Test
            </Button>
          </>
        )}
        <Button
          variant="ghost"
          size="icon"
          onClick={() => onEdit(target)}
          className="size-7"
          aria-label="Edit target"
        >
          <Pencil className="size-3" />
        </Button>
        <Button
          variant="ghost"
          size="icon"
          onClick={() => onRemove(target.id)}
          className="size-7 text-muted-foreground hover:text-red-400"
          aria-label="Remove target"
        >
          <Trash2 className="size-3" />
        </Button>
      </div>
    </div>
  )
}

View file

@ -0,0 +1,129 @@
import { FileKey } from 'lucide-react'
import { Button } from '../ui/button'
import { Input } from '../ui/input'
import { Label } from '../ui/label'
// Editable form representation of an SSH target. All fields are kept as
// strings (port included) so inputs can hold intermediate values; the
// parent converts and validates on save.
export type EditingTarget = {
  label: string
  host: string
  port: string
  username: string
  identityFile: string
  proxyCommand: string
  jumpHost: string
}

// Pristine form state; port defaults to the standard SSH port 22.
export const EMPTY_FORM: EditingTarget = {
  label: '',
  host: '',
  port: '22',
  username: '',
  identityFile: '',
  proxyCommand: '',
  jumpHost: ''
}
// Props for the add/edit form. The parent owns the form state; this
// component only renders inputs and forwards functional (updater-style)
// changes so concurrent field edits never clobber each other.
type SshTargetFormProps = {
  // Non-null when editing an existing target; switches heading/submit text.
  editingId: string | null
  form: EditingTarget
  onFormChange: (updater: (prev: EditingTarget) => EditingTarget) => void
  onSave: () => void
  onCancel: () => void
}

/**
 * Controlled form for creating or editing an SSH target. Required fields
 * (marked *) are Host and Username; Label, Port, Identity File, Proxy
 * Command, and Jump Host are optional. Validation happens in the parent's
 * save handler, not here.
 */
export function SshTargetForm({
  editingId,
  form,
  onFormChange,
  onSave,
  onCancel
}: SshTargetFormProps): React.JSX.Element {
  return (
    <div className="space-y-4 rounded-lg border border-border/50 bg-card/40 p-4">
      <p className="text-sm font-medium">{editingId ? 'Edit SSH Target' : 'New SSH Target'}</p>
      <div className="grid grid-cols-2 gap-4">
        <div className="space-y-1.5">
          <Label>Label</Label>
          <Input
            value={form.label}
            onChange={(e) => onFormChange((f) => ({ ...f, label: e.target.value }))}
            placeholder="My Server"
          />
        </div>
        <div className="space-y-1.5">
          <Label>Host *</Label>
          <Input
            value={form.host}
            onChange={(e) => onFormChange((f) => ({ ...f, host: e.target.value }))}
            placeholder="192.168.1.100 or server.example.com"
          />
        </div>
        <div className="space-y-1.5">
          <Label>Username *</Label>
          <Input
            value={form.username}
            onChange={(e) => onFormChange((f) => ({ ...f, username: e.target.value }))}
            placeholder="deploy"
          />
        </div>
        <div className="space-y-1.5">
          <Label>Port</Label>
          {/* min/max are UI hints only; the parent re-validates the range. */}
          <Input
            type="number"
            value={form.port}
            onChange={(e) => onFormChange((f) => ({ ...f, port: e.target.value }))}
            placeholder="22"
            min={1}
            max={65535}
          />
        </div>
        <div className="col-span-2 space-y-1.5">
          <Label className="flex items-center gap-1.5">
            <FileKey className="size-3.5" />
            Identity File
          </Label>
          <Input
            value={form.identityFile}
            onChange={(e) => onFormChange((f) => ({ ...f, identityFile: e.target.value }))}
            placeholder="~/.ssh/id_ed25519 (leave empty for SSH agent)"
          />
          <p className="text-[11px] text-muted-foreground">
            Optional. SSH agent is used by default.
          </p>
        </div>
        <div className="col-span-2 space-y-1.5">
          <Label>Proxy Command</Label>
          <Input
            value={form.proxyCommand}
            onChange={(e) => onFormChange((f) => ({ ...f, proxyCommand: e.target.value }))}
            placeholder="e.g. cloudflared access ssh --hostname %h"
          />
          <p className="text-[11px] text-muted-foreground">
            Optional. Used for tunneling (e.g. Cloudflare Access, ProxyCommand).
          </p>
        </div>
        <div className="col-span-2 space-y-1.5">
          <Label>Jump Host</Label>
          <Input
            value={form.jumpHost}
            onChange={(e) => onFormChange((f) => ({ ...f, jumpHost: e.target.value }))}
            placeholder="bastion.example.com"
          />
          <p className="text-[11px] text-muted-foreground">
            Optional. Equivalent to ProxyJump / ssh -J.
          </p>
        </div>
      </div>
      <div className="flex items-center gap-2">
        <Button size="sm" onClick={onSave}>
          {editingId ? 'Save Changes' : 'Add Target'}
        </Button>
        <Button variant="ghost" size="sm" onClick={onCancel}>
          Cancel
        </Button>
      </div>
    </div>
  )
}

View file

@ -1,10 +1,6 @@
/* eslint-disable max-lines -- Why: AddRepoDialog owns a multi-step flow (add/clone/setup) with
clone progress, abort handling, and worktree setup splitting further would scatter
tightly coupled step transitions across files. */
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { toast } from 'sonner'
import { FolderOpen, GitBranchPlus, Settings, ArrowLeft, Globe, Folder } from 'lucide-react'
import { FolderOpen, GitBranchPlus, Settings, ArrowLeft, Globe, Monitor } from 'lucide-react'
import { useAppStore } from '@/store'
import {
Dialog,
@ -14,9 +10,9 @@ import {
DialogDescription
} from '@/components/ui/dialog'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { activateAndRevealWorktree } from '@/lib/worktree-activation'
import { LinkedWorktreeItem } from './LinkedWorktreeItem'
import { RemoteStep, CloneStep, useRemoteRepo } from './AddRepoSteps'
import { isGitRepoKind } from '../../../../shared/repo-kind'
import type { Repo, Worktree } from '../../../../shared/types'
@ -31,7 +27,7 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
const setActiveView = useAppStore((s) => s.setActiveView)
const openSettingsTarget = useAppStore((s) => s.openSettingsTarget)
const [step, setStep] = useState<'add' | 'clone' | 'setup'>('add')
const [step, setStep] = useState<'add' | 'clone' | 'remote' | 'setup'>('add')
const [addedRepo, setAddedRepo] = useState<Repo | null>(null)
const [isAdding, setIsAdding] = useState(false)
const [cloneUrl, setCloneUrl] = useState('')
@ -41,12 +37,23 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
const [cloneProgress, setCloneProgress] = useState<{ phase: string; percent: number } | null>(
null
)
// Why: track a monotonically increasing ID so that when the user closes the
// dialog or navigates away during a clone, the stale completion callback can
// detect it was superseded and bail out instead of corrupting dialog state.
// Why: monotonic ID so stale clone callbacks can detect they were superseded.
const cloneGenRef = useRef(0)
// Subscribe to clone progress events while cloning is active
const {
sshTargets,
selectedTargetId,
remotePath,
remoteError,
isAddingRemote,
setSelectedTargetId,
setRemotePath,
setRemoteError,
resetRemoteState,
handleOpenRemoteStep,
handleAddRemoteRepo
} = useRemoteRepo(fetchWorktrees, setStep, setAddedRepo, closeModal)
useEffect(() => {
if (!isCloning) {
return
@ -61,8 +68,7 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
return worktreesByRepo[repoId] ?? []
}, [worktreesByRepo, repoId])
// Why: sort by recent activity (lastActivityAt) with alphabetical fallback for
// worktrees not yet opened in Orca. Matches buildWorktreeComparator behavior.
// Why: sort by recent activity with alphabetical fallback.
const sortedWorktrees = useMemo(() => {
return [...worktrees].sort((a, b) => {
if (a.lastActivityAt !== b.lastActivityAt) {
@ -87,19 +93,17 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
setIsCloning(false)
setCloneError(null)
setCloneProgress(null)
}, [])
resetRemoteState()
}, [resetRemoteState])
// Why: reset all local state when the dialog closes for any reason —
// whether via onOpenChange, closeModal() from code, or activeModal
// being replaced by another modal. Without this, reopening the dialog
// can show a stale step/repo from the previous session.
// Why: reset state on close so reopening doesn't show stale step/repo.
useEffect(() => {
if (!isOpen) {
resetState()
}
}, [isOpen, resetState])
const isInputStep = step === 'add' || step === 'clone'
const isInputStep = step === 'add' || step === 'clone' || step === 'remote'
const handleBrowse = useCallback(async () => {
setIsAdding(true)
@ -110,12 +114,9 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
await fetchWorktrees(repo.id)
setStep('setup')
} else if (repo) {
// Why: non-git folders have no worktrees, so step 2 is irrelevant. Close
// the modal after the folder is added.
// Why: non-git folders have no worktrees — close immediately.
closeModal()
}
// null = user cancelled the picker, or the non-git-folder confirmation
// dialog took over (which replaces activeModal, closing this dialog).
} finally {
setIsAdding(false)
}
@ -149,11 +150,7 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
return
}
toast.success('Repository cloned', { description: repo.displayName })
// Why: eagerly upsert the cloned repo in the store so that step 2's
// "Create worktree" button finds it in eligibleRepos immediately,
// without waiting for the async repos:changed IPC event. This also
// handles the case where a folder repo was upgraded to git by the
// clone handler — the existing entry needs its kind updated.
// Why: eagerly upsert so step 2 finds the repo before the IPC event.
const state = useAppStore.getState()
const existingIdx = state.repos.findIndex((r) => r.id === repo.id)
if (existingIdx === -1) {
@ -201,34 +198,23 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
setActiveView('settings')
}, [closeModal, openSettingsTarget, setActiveView, repoId])
const handleBack = useCallback(() => {
cloneGenRef.current++
void window.api.repos.cloneAbort()
setStep('add')
setAddedRepo(null)
setCloneUrl('')
setCloneDestination('')
setIsCloning(false)
setCloneError(null)
setCloneProgress(null)
}, [])
const handleOpenChange = useCallback(
(open: boolean) => {
if (!open) {
closeModal()
resetState()
}
},
[closeModal, resetState]
)
// Why: handleBack reuses resetState which already aborts clones and resets all fields.
const handleBack = resetState
return (
<Dialog open={isOpen} onOpenChange={handleOpenChange}>
<Dialog
open={isOpen}
onOpenChange={(open) => {
if (!open) {
closeModal()
resetState()
}
}}
>
<DialogContent className="sm:max-w-lg">
{/* Step indicator row — back button (step 2 only), dots, X is rendered by DialogContent */}
<div className="flex items-center justify-center -mt-1">
{step === 'clone' && (
{(step === 'clone' || step === 'remote') && (
<button
className="absolute left-6 inline-flex items-center gap-1 text-xs text-muted-foreground hover:text-foreground transition-colors cursor-pointer"
onClick={handleBack}
@ -267,18 +253,18 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
</DialogDescription>
</DialogHeader>
<div className="grid grid-cols-2 gap-3 pt-2">
<div className="grid grid-cols-3 gap-3 pt-2">
<Button
onClick={handleBrowse}
disabled={isAdding}
variant="outline"
className="h-auto py-4 px-4 flex flex-col items-center gap-2 text-center"
className="h-auto py-5 px-2 flex flex-col items-center gap-2 text-center"
>
<FolderOpen className="size-6 text-muted-foreground" />
<div>
<p className="text-sm font-medium">Browse folder</p>
<p className="text-xs text-muted-foreground font-normal mt-0.5">
Local repository or folder
<p className="text-[11px] text-muted-foreground font-normal mt-0.5">
Local repo or folder
</p>
</div>
</Button>
@ -286,96 +272,67 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
<Button
onClick={() => setStep('clone')}
variant="outline"
className="h-auto py-4 px-4 flex flex-col items-center gap-2 text-center"
className="h-auto py-5 px-2 flex flex-col items-center gap-2 text-center"
>
<Globe className="size-6 text-muted-foreground" />
<div>
<p className="text-sm font-medium">Clone from URL</p>
<p className="text-xs text-muted-foreground font-normal mt-0.5">
<p className="text-[11px] text-muted-foreground font-normal mt-0.5">
Remote Git repository
</p>
</div>
</Button>
<Button
onClick={handleOpenRemoteStep}
variant="outline"
className="h-auto py-5 px-2 flex flex-col items-center gap-2 text-center"
>
<Monitor className="size-6 text-muted-foreground" />
<div>
<p className="text-sm font-medium">Remote repo</p>
<p className="text-[11px] text-muted-foreground font-normal mt-0.5">
SSH connected target
</p>
</div>
</Button>
</div>
</>
) : step === 'remote' ? (
<RemoteStep
sshTargets={sshTargets}
selectedTargetId={selectedTargetId}
remotePath={remotePath}
remoteError={remoteError}
isAddingRemote={isAddingRemote}
onSelectTarget={(id) => {
setSelectedTargetId(id)
setRemoteError(null)
}}
onRemotePathChange={(value) => {
setRemotePath(value)
setRemoteError(null)
}}
onAdd={handleAddRemoteRepo}
/>
) : step === 'clone' ? (
<>
<DialogHeader>
<DialogTitle>Clone from URL</DialogTitle>
<DialogDescription>Enter the Git URL and choose where to clone it.</DialogDescription>
</DialogHeader>
<div className="space-y-3 pt-1">
<div className="space-y-1">
<label className="text-[11px] font-medium text-muted-foreground">Git URL</label>
<Input
value={cloneUrl}
onChange={(e) => {
setCloneUrl(e.target.value)
setCloneError(null)
}}
placeholder="https://github.com/user/repo.git"
className="h-8 text-xs"
disabled={isCloning}
autoFocus
/>
</div>
<div className="space-y-1">
<label className="text-[11px] font-medium text-muted-foreground">
Clone location
</label>
<div className="flex gap-2">
<Input
value={cloneDestination}
onChange={(e) => {
setCloneDestination(e.target.value)
setCloneError(null)
}}
placeholder="/path/to/destination"
className="h-8 text-xs flex-1"
disabled={isCloning}
/>
<Button
variant="outline"
size="sm"
className="h-8 px-2 shrink-0"
onClick={handlePickDestination}
disabled={isCloning}
>
<Folder className="size-3.5" />
</Button>
</div>
</div>
{cloneError && <p className="text-[11px] text-destructive">{cloneError}</p>}
<Button
onClick={handleClone}
disabled={!cloneUrl.trim() || !cloneDestination.trim() || isCloning}
className="w-full"
>
{isCloning ? 'Cloning...' : 'Clone'}
</Button>
{/* Why: progress bar lives below the button so it doesn't push the
button down when it appears mid-clone. */}
{isCloning && cloneProgress && (
<div className="space-y-1.5">
<div className="flex items-center justify-between text-[11px] text-muted-foreground">
<span>{cloneProgress.phase}</span>
<span>{cloneProgress.percent}%</span>
</div>
<div className="h-1.5 w-full rounded-full bg-secondary overflow-hidden">
<div
className="h-full rounded-full bg-foreground transition-[width] duration-300 ease-out"
style={{ width: `${cloneProgress.percent}%` }}
/>
</div>
</div>
)}
</div>
</>
<CloneStep
cloneUrl={cloneUrl}
cloneDestination={cloneDestination}
cloneError={cloneError}
cloneProgress={cloneProgress}
isCloning={isCloning}
onUrlChange={(value) => {
setCloneUrl(value)
setCloneError(null)
}}
onDestChange={(value) => {
setCloneDestination(value)
setCloneError(null)
}}
onPickDestination={handlePickDestination}
onClone={handleClone}
/>
) : (
<>
<DialogHeader>
@ -424,7 +381,10 @@ const AddRepoDialog = React.memo(function AddRepoDialog() {
variant="ghost"
size="sm"
className="text-xs"
onClick={() => handleOpenChange(false)}
onClick={() => {
closeModal()
resetState()
}}
>
Skip
</Button>

Some files were not shown because too many files have changed in this diff Show more