Merge pull request #1114 from coleam00/dev

Release 0.3.6
This commit is contained in:
Rasmus Widing 2026-04-12 12:17:34 +03:00 committed by GitHub
commit 59cda08efa
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
49 changed files with 1779 additions and 656 deletions

View file

@ -29,9 +29,10 @@ bun run cli version
## Startup Behavior
1. Loads `~/.archon/.env` with `override: true` (Archon's config wins over any Bun-auto-loaded CWD vars)
2. Smart Claude auth default: if no `CLAUDE_API_KEY` or `CLAUDE_CODE_OAUTH_TOKEN`, sets `CLAUDE_USE_GLOBAL_AUTH=true`
3. Imports all commands AFTER dotenv setup
1. `@archon/paths/strip-cwd-env-boot` (first import) removes all Bun-auto-loaded CWD `.env` keys from `process.env`
2. Loads `~/.archon/.env` with `override: true` (Archon config wins over shell-inherited vars)
3. Smart Claude auth default: if no `CLAUDE_API_KEY` or `CLAUDE_CODE_OAUTH_TOKEN`, sets `CLAUDE_USE_GLOBAL_AUTH=true`
4. Imports all commands AFTER dotenv setup
## WorkflowRunOptions Interface

View file

@ -7,6 +7,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [0.3.6] - 2026-04-12
Web UI workflow experience improvements, CWD environment leak protection, and bug fixes.
### Added
- Workflow result card now shows status, duration, node count, and artifact links in chat (#1015)
- Loop iteration progress display in the workflow execution view (#1014)
- Artifact file paths in chat messages are now clickable (#1023)
### Changed
- CWD `.env` variables are now stripped from AI subprocess environments at the `@archon/paths` layer, replacing the old `SUBPROCESS_ENV_ALLOWLIST` approach. Prevents accidental credential leaks from target repo `.env` files (#1067, #1030, #1098, #1070)
- Update check cache TTL reduced from 24 hours to 1 hour
### Fixed
- Duplicate text and tool calls appearing in workflow execution view
- `workflow_step` SSE events not handled correctly, causing missing progress updates
- Nested interactive elements in workflow UI causing React warnings
- Workflow status messages not splitting correctly in WorkflowLogs
- Incorrect `remainingMessage` suppression in stream mode causing lost output
- Binary builds now use `BUNDLED_VERSION` for the app version instead of reading `package.json`
## [0.3.5] - 2026-04-10
Fixes for `archon serve` process lifecycle and static file serving.

View file

@ -399,7 +399,7 @@ import type { DagNode, WorkflowDefinition } from '@/lib/api';
### Architecture Layers
**Package Split:**
- **@archon/paths**: Path resolution utilities, Pino logger factory, web dist cache path (`getWebDistDir`) (no @archon/* deps)
- **@archon/paths**: Path resolution utilities, Pino logger factory, web dist cache path (`getWebDistDir`), CWD env stripper (`stripCwdEnv`, `strip-cwd-env-boot`) (no @archon/* deps; `pino` and `dotenv` are allowed external deps)
- **@archon/git**: Git operations - worktrees, branches, repos, exec wrappers (depends only on @archon/paths)
- **@archon/isolation**: Worktree isolation types, providers, resolver, error classifiers (depends only on @archon/git + @archon/paths)
- **@archon/workflows**: Workflow engine - loader, router, executor, DAG, logger, bundled defaults (depends only on @archon/git + @archon/paths + @hono/zod-openapi + zod; DB/AI/config injected via `WorkflowDeps`)

View file

@ -23,7 +23,7 @@
},
"packages/adapters": {
"name": "@archon/adapters",
"version": "0.1.0",
"version": "0.3.5",
"dependencies": {
"@archon/core": "workspace:*",
"@archon/git": "workspace:*",
@ -41,7 +41,7 @@
},
"packages/cli": {
"name": "@archon/cli",
"version": "0.2.13",
"version": "0.3.5",
"bin": {
"archon": "./src/cli.ts",
},
@ -62,7 +62,7 @@
},
"packages/core": {
"name": "@archon/core",
"version": "0.2.0",
"version": "0.3.5",
"dependencies": {
"@anthropic-ai/claude-agent-sdk": "^0.2.89",
"@archon/git": "workspace:*",
@ -83,7 +83,7 @@
},
"packages/docs-web": {
"name": "@archon/docs-web",
"version": "0.2.12",
"version": "0.3.5",
"dependencies": {
"@astrojs/starlight": "^0.38.0",
"astro": "^6.1.0",
@ -92,7 +92,7 @@
},
"packages/git": {
"name": "@archon/git",
"version": "0.1.0",
"version": "0.3.5",
"dependencies": {
"@archon/paths": "workspace:*",
},
@ -102,7 +102,7 @@
},
"packages/isolation": {
"name": "@archon/isolation",
"version": "0.1.0",
"version": "0.3.5",
"dependencies": {
"@archon/git": "workspace:*",
"@archon/paths": "workspace:*",
@ -113,8 +113,9 @@
},
"packages/paths": {
"name": "@archon/paths",
"version": "0.2.0",
"version": "0.3.5",
"dependencies": {
"dotenv": "^17",
"pino": "^9",
"pino-pretty": "^13",
},
@ -124,7 +125,7 @@
},
"packages/server": {
"name": "@archon/server",
"version": "0.2.0",
"version": "0.3.5",
"dependencies": {
"@archon/adapters": "workspace:*",
"@archon/core": "workspace:*",
@ -142,7 +143,7 @@
},
"packages/web": {
"name": "@archon/web",
"version": "0.2.0",
"version": "0.3.5",
"dependencies": {
"@dagrejs/dagre": "^2.0.4",
"@radix-ui/react-alert-dialog": "^1.1.15",
@ -194,7 +195,7 @@
},
"packages/workflows": {
"name": "@archon/workflows",
"version": "0.1.0",
"version": "0.3.5",
"dependencies": {
"@archon/git": "workspace:*",
"@archon/paths": "workspace:*",

View file

@ -1,6 +1,6 @@
{
"name": "archon",
"version": "0.3.5",
"version": "0.3.6",
"private": true,
"workspaces": [
"packages/*"

View file

@ -1,6 +1,6 @@
{
"name": "@archon/adapters",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"main": "./src/index.ts",
"types": "./src/index.ts",

View file

@ -1,6 +1,6 @@
{
"name": "@archon/cli",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"main": "./src/cli.ts",
"bin": {

View file

@ -7,18 +7,18 @@
* archon workflow run <name> [msg] Run a workflow
* archon version Show version info
*/
// Must be the very first import — strips Bun-auto-loaded CWD .env keys before
// any module reads process.env at init time (e.g. @archon/paths/logger reads LOG_LEVEL).
import '@archon/paths/strip-cwd-env-boot';
import { parseArgs } from 'util';
import { config } from 'dotenv';
import { resolve } from 'path';
import { existsSync } from 'fs';
// Load .env from global Archon config (override: true so ~/.archon/.env
// always wins over any Bun-auto-loaded CWD vars).
//
// Credential safety: target repo .env keys that Bun auto-loads from CWD
// cannot leak into AI subprocesses — SUBPROCESS_ENV_ALLOWLIST blocks them.
// The env-leak gate provides a second layer by scanning target repos before
// spawning. No CWD stripping needed.
// Load ~/.archon/.env with override: true — Archon-specific config must win
// over shell-inherited env vars (e.g. PORT, LOG_LEVEL from shell profile).
// CWD .env keys are already gone (stripCwdEnv above), so override only
// affects shell-inherited values, which is the intended behavior.
const globalEnvPath = resolve(process.env.HOME ?? '~', '.archon', '.env');
if (existsSync(globalEnvPath)) {
const result = config({ path: globalEnvPath, override: true });
@ -30,6 +30,9 @@ if (existsSync(globalEnvPath)) {
}
}
// CLAUDECODE=1 warning is emitted inside stripCwdEnv() (boot import above)
// BEFORE the marker is deleted from process.env. No duplicate warning here.
// Smart defaults for Claude auth
// If no explicit tokens, default to global auth from `claude /login`
if (!process.env.CLAUDE_API_KEY && !process.env.CLAUDE_CODE_OAUTH_TOKEN) {

View file

@ -60,7 +60,6 @@ export async function serveCommand(opts: ServeOptions): Promise<number> {
await startServer({
webDistPath: webDistDir,
port: opts.port,
skipPlatformAdapters: true,
});
} catch (err) {
const error = toError(err);

View file

@ -1,6 +1,6 @@
{
"name": "@archon/core",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"main": "./src/index.ts",
"types": "./src/index.ts",
@ -23,7 +23,7 @@
"./state/*": "./src/state/*.ts"
},
"scripts": {
"test": "bun test src/clients/codex-binary-guard.test.ts && bun test src/utils/codex-binary-resolver.test.ts && bun test src/utils/codex-binary-resolver-dev.test.ts && bun test src/clients/claude.test.ts src/clients/codex.test.ts src/clients/factory.test.ts && bun test src/handlers/command-handler.test.ts && bun test src/handlers/clone.test.ts && bun test src/db/adapters/postgres.test.ts && bun test src/db/adapters/sqlite.test.ts src/db/codebases.test.ts src/db/connection.test.ts src/db/conversations.test.ts src/db/env-vars.test.ts src/db/isolation-environments.test.ts src/db/messages.test.ts src/db/sessions.test.ts src/db/workflow-events.test.ts src/db/workflows.test.ts src/utils/defaults-copy.test.ts src/utils/worktree-sync.test.ts src/utils/conversation-lock.test.ts src/utils/credential-sanitizer.test.ts src/utils/port-allocation.test.ts src/utils/error.test.ts src/utils/error-formatter.test.ts src/utils/github-graphql.test.ts src/utils/env-allowlist.test.ts src/utils/env-leak-scanner.test.ts src/config/ src/state/ && bun test src/utils/path-validation.test.ts && bun test src/services/cleanup-service.test.ts && bun test src/services/title-generator.test.ts && bun test src/workflows/ && bun test src/operations/workflow-operations.test.ts && bun test src/operations/isolation-operations.test.ts && bun test src/orchestrator/orchestrator.test.ts && bun test src/orchestrator/orchestrator-agent.test.ts && bun test src/orchestrator/orchestrator-isolation.test.ts",
"test": "bun test src/clients/codex-binary-guard.test.ts && bun test src/utils/codex-binary-resolver.test.ts && bun test src/utils/codex-binary-resolver-dev.test.ts && bun test src/clients/claude.test.ts src/clients/codex.test.ts src/clients/factory.test.ts && bun test src/handlers/command-handler.test.ts && bun test src/handlers/clone.test.ts && bun test src/db/adapters/postgres.test.ts && bun test src/db/adapters/sqlite.test.ts src/db/codebases.test.ts src/db/connection.test.ts src/db/conversations.test.ts src/db/env-vars.test.ts src/db/isolation-environments.test.ts src/db/messages.test.ts src/db/sessions.test.ts src/db/workflow-events.test.ts src/db/workflows.test.ts src/utils/defaults-copy.test.ts src/utils/worktree-sync.test.ts src/utils/conversation-lock.test.ts src/utils/credential-sanitizer.test.ts src/utils/port-allocation.test.ts src/utils/error.test.ts src/utils/error-formatter.test.ts src/utils/github-graphql.test.ts src/utils/env-leak-scanner.test.ts src/config/ src/state/ && bun test src/utils/path-validation.test.ts && bun test src/services/cleanup-service.test.ts && bun test src/services/title-generator.test.ts && bun test src/workflows/ && bun test src/operations/workflow-operations.test.ts && bun test src/operations/isolation-operations.test.ts && bun test src/orchestrator/orchestrator.test.ts && bun test src/orchestrator/orchestrator-agent.test.ts && bun test src/orchestrator/orchestrator-isolation.test.ts",
"type-check": "bun x tsc --noEmit",
"build": "echo 'No build needed - Bun runs TypeScript directly'"
},

View file

@ -446,9 +446,12 @@ describe('ClaudeClient', () => {
);
});
test('strips NODE_OPTIONS from subprocess env', async () => {
const original = process.env.NODE_OPTIONS;
process.env.NODE_OPTIONS = '--inspect';
test('subprocess env passes through all process.env keys (no allowlist filtering)', async () => {
// With the allowlist removed, buildSubprocessEnv returns { ...process.env }.
// CWD .env leakage and CLAUDECODE markers are handled at entry point by
// stripCwdEnv(), not by buildSubprocessEnv(). See #1067, #1097.
const originalKey = process.env.CUSTOM_USER_KEY;
process.env.CUSTOM_USER_KEY = 'user-trusted-value';
mockQuery.mockImplementation(async function* () {
// Empty generator
@ -460,113 +463,13 @@ describe('ClaudeClient', () => {
}
const callArgs = mockQuery.mock.calls[0][0] as { options: { env: NodeJS.ProcessEnv } };
expect(callArgs.options.env.NODE_OPTIONS).toBeUndefined();
expect(callArgs.options.env.CUSTOM_USER_KEY).toBe('user-trusted-value');
expect(callArgs.options.env.PATH).toBe(process.env.PATH);
expect(callArgs.options.env.HOME).toBe(process.env.HOME);
// Cleanup
if (original !== undefined) {
process.env.NODE_OPTIONS = original;
} else {
delete process.env.NODE_OPTIONS;
}
});
test('ANTHROPIC_API_KEY alone does not set hasExplicitTokens (falls through to global auth)', async () => {
const originalOauth = process.env.CLAUDE_CODE_OAUTH_TOKEN;
const originalApiKey = process.env.CLAUDE_API_KEY;
const originalAnthropicKey = process.env.ANTHROPIC_API_KEY;
delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
delete process.env.CLAUDE_API_KEY;
process.env.ANTHROPIC_API_KEY = 'sk-ant-test-key';
mockQuery.mockImplementation(async function* () {
// Empty generator
});
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of client.sendQuery('test', '/workspace')) {
// consume
}
// ANTHROPIC_API_KEY must NOT reach the subprocess: it is not in the
// SUBPROCESS_ENV_ALLOWLIST, so a leaked target-repo key cannot bill
// the wrong account. See issue #1029.
const callArgs = mockQuery.mock.calls[0][0] as { options: { env: NodeJS.ProcessEnv } };
expect(callArgs.options.env.ANTHROPIC_API_KEY).toBeUndefined();
// Explicit SDK vars are absent (useGlobalAuth=true path)
expect(callArgs.options.env.CLAUDE_API_KEY).toBeUndefined();
expect(callArgs.options.env.CLAUDE_CODE_OAUTH_TOKEN).toBeUndefined();
// Cleanup
if (originalOauth !== undefined) process.env.CLAUDE_CODE_OAUTH_TOKEN = originalOauth;
else delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
if (originalApiKey !== undefined) process.env.CLAUDE_API_KEY = originalApiKey;
else delete process.env.CLAUDE_API_KEY;
if (originalAnthropicKey !== undefined) process.env.ANTHROPIC_API_KEY = originalAnthropicKey;
else delete process.env.ANTHROPIC_API_KEY;
});
test('ANTHROPIC_API_KEY excluded from subprocess env when using explicit auth (useGlobalAuth=false)', async () => {
const originalOauth = process.env.CLAUDE_CODE_OAUTH_TOKEN;
const originalApiKey = process.env.CLAUDE_API_KEY;
const originalAnthropicKey = process.env.ANTHROPIC_API_KEY;
const originalGlobalAuth = process.env.CLAUDE_USE_GLOBAL_AUTH;
// Force explicit auth path regardless of env
process.env.CLAUDE_USE_GLOBAL_AUTH = 'false';
process.env.CLAUDE_API_KEY = 'sk-ant-explicit-key';
process.env.ANTHROPIC_API_KEY = 'sk-ant-target-repo-key';
delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
mockQuery.mockImplementation(async function* () {
// Empty generator
});
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of client.sendQuery('test', '/workspace')) {
// consume
}
// ANTHROPIC_API_KEY must NOT reach the subprocess regardless of which auth
// path is taken — the allowlist excludes it in both cases. See issue #1029.
const callArgs = mockQuery.mock.calls[0][0] as { options: { env: NodeJS.ProcessEnv } };
expect(callArgs.options.env.ANTHROPIC_API_KEY).toBeUndefined();
// Explicit auth vars are present on the useGlobalAuth=false path
expect(callArgs.options.env.CLAUDE_API_KEY).toBeDefined();
// Cleanup
if (originalOauth !== undefined) process.env.CLAUDE_CODE_OAUTH_TOKEN = originalOauth;
else delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
if (originalApiKey !== undefined) process.env.CLAUDE_API_KEY = originalApiKey;
else delete process.env.CLAUDE_API_KEY;
if (originalAnthropicKey !== undefined) process.env.ANTHROPIC_API_KEY = originalAnthropicKey;
else delete process.env.ANTHROPIC_API_KEY;
if (originalGlobalAuth !== undefined) process.env.CLAUDE_USE_GLOBAL_AUTH = originalGlobalAuth;
else delete process.env.CLAUDE_USE_GLOBAL_AUTH;
});
test('strips VSCODE_INSPECTOR_OPTIONS from subprocess env', async () => {
const original = process.env.VSCODE_INSPECTOR_OPTIONS;
process.env.VSCODE_INSPECTOR_OPTIONS = 'some-value';
mockQuery.mockImplementation(async function* () {
// Empty generator
});
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of client.sendQuery('test', '/workspace')) {
// consume
}
const callArgs = mockQuery.mock.calls[0][0] as { options: { env: NodeJS.ProcessEnv } };
expect(callArgs.options.env.VSCODE_INSPECTOR_OPTIONS).toBeUndefined();
// Cleanup
if (original !== undefined) {
process.env.VSCODE_INSPECTOR_OPTIONS = original;
} else {
delete process.env.VSCODE_INSPECTOR_OPTIONS;
}
if (originalKey !== undefined) process.env.CUSTOM_USER_KEY = originalKey;
else delete process.env.CUSTOM_USER_KEY;
});
test('classifies exit code errors as crash and retries up to 3 times', async () => {
@ -1106,3 +1009,89 @@ describe('ClaudeClient', () => {
});
});
});
// Unit tests for the first-event timeout wrapper (issue #1067): a Claude SDK
// generator that never yields its first message must fail fast with a
// descriptive error instead of hanging the caller forever.
describe('withFirstMessageTimeout', () => {
  const { withFirstMessageTimeout } = claudeModule;

  test('completes normally when first event arrives before timeout', async () => {
    // Generator yields immediately — the wrapper must be fully transparent.
    async function* fastGen(): AsyncGenerator<string> {
      yield 'hello';
      yield 'world';
    }
    const controller = new AbortController();
    const gen = withFirstMessageTimeout(fastGen(), controller, 50, {});
    const first = await gen.next();
    expect(first.value).toBe('hello');
    const second = await gen.next();
    expect(second.value).toBe('world');
  });

  test('throws after timeout when generator never yields', async () => {
    // A never-settling promise simulates a stuck SDK subprocess.
    async function* stuckGen(): AsyncGenerator<string> {
      await new Promise(() => {});
      yield 'never';
    }
    const controller = new AbortController();
    const gen = withFirstMessageTimeout(stuckGen(), controller, 50, {});
    await expect(gen.next()).rejects.toThrow('produced no output within 50ms');
  });

  test('timeout error mentions issue #1067 for discoverability', async () => {
    async function* stuckGen(): AsyncGenerator<string> {
      await new Promise(() => {});
      yield 'never';
    }
    const controller = new AbortController();
    const gen = withFirstMessageTimeout(stuckGen(), controller, 50, {});
    // The error message links users to the upstream issue for context.
    await expect(gen.next()).rejects.toThrow('1067');
  });

  test('aborts the controller when timeout fires', async () => {
    async function* stuckGen(): AsyncGenerator<string> {
      await new Promise(() => {});
      yield 'never';
    }
    const controller = new AbortController();
    const gen = withFirstMessageTimeout(stuckGen(), controller, 50, {});
    await expect(gen.next()).rejects.toThrow();
    // Besides throwing, the wrapper must signal the SDK to stop.
    expect(controller.signal.aborted).toBe(true);
  });

  test('handles generator that completes immediately without yielding', async () => {
    // Edge case: done-before-first-value must not be treated as a hang.
    async function* emptyGen(): AsyncGenerator<string> {
      return;
    }
    const controller = new AbortController();
    const gen = withFirstMessageTimeout(emptyGen(), controller, 50, {});
    const result = await gen.next();
    expect(result.done).toBe(true);
  });

  test('logs diagnostic payload with env keys and process state on timeout', async () => {
    async function* stuckGen(): AsyncGenerator<string> {
      await new Promise(() => {});
      yield 'never';
    }
    const controller = new AbortController();
    const diagnostics = {
      subprocessEnvKeys: ['PATH', 'HOME', 'CLAUDE_API_KEY'],
      parentClaudeKeys: ['CLAUDECODE', 'CLAUDE_CODE_ENTRYPOINT'],
      model: 'sonnet',
      platform: 'darwin',
    };
    const gen = withFirstMessageTimeout(stuckGen(), controller, 50, diagnostics);
    await expect(gen.next()).rejects.toThrow();
    // Verify the diagnostic dump was logged at error level, with the
    // caller-supplied diagnostics spread into the payload plus timeoutMs.
    expect(mockLogger.error).toHaveBeenCalledWith(
      expect.objectContaining({
        subprocessEnvKeys: ['PATH', 'HOME', 'CLAUDE_API_KEY'],
        parentClaudeKeys: ['CLAUDECODE', 'CLAUDE_CODE_ENTRYPOINT'],
        model: 'sonnet',
        platform: 'darwin',
        timeoutMs: 50,
      }),
      'claude.first_event_timeout'
    );
  });
});

View file

@ -35,7 +35,10 @@ import {
type TokenUsage,
} from '../types';
import { createLogger } from '@archon/paths';
import { buildCleanSubprocessEnv } from '../utils/env-allowlist';
// No env filtering here — process.env is already clean:
// stripCwdEnv() at entry point stripped CWD .env keys + CLAUDECODE markers,
// then ~/.archon/.env was loaded as the trusted source. All keys the user sets
// in ~/.archon/.env are intentional and pass through to the subprocess.
import { scanPathForSensitiveKeys, EnvLeakError } from '../utils/env-leak-scanner';
import * as codebaseDb from '../db/codebases';
import { loadConfig } from '../config/config-loader';
@ -79,111 +82,31 @@ function normalizeClaudeUsage(usage?: {
}
/**
* Build environment for Claude subprocess
* Build environment for Claude subprocess.
*
* Auth behavior:
* - CLAUDE_USE_GLOBAL_AUTH=true: Filter tokens, use global auth from `claude /login`
* - CLAUDE_USE_GLOBAL_AUTH=false: Pass tokens through explicitly
* - Not set: Auto-detect — use explicit tokens if present, otherwise fall back to global auth
* process.env is already clean at this point:
* - stripCwdEnv() at entry point removed CWD .env keys + CLAUDECODE markers
* - ~/.archon/.env loaded with override:true as the trusted source
*
* Auth mode is determined by the SDK based on what tokens are present:
* - Tokens in env SDK uses them (explicit auth)
* - No tokens SDK uses `claude /login` credentials (global auth)
* - User controls this by what they put in ~/.archon/.env
*
* We log the detected mode for diagnostics but don't filter — the user's
* config is trusted. See coleam00/Archon#1067 for design rationale.
*/
function buildSubprocessEnv(): NodeJS.ProcessEnv {
const globalAuthSetting = process.env.CLAUDE_USE_GLOBAL_AUTH?.toLowerCase();
// Check for empty token values (common misconfiguration)
const tokenVars = ['CLAUDE_CODE_OAUTH_TOKEN', 'CLAUDE_API_KEY'] as const;
const emptyTokens = tokenVars.filter(v => process.env[v] === '');
if (emptyTokens.length > 0) {
getLog().warn({ emptyTokens }, 'empty_token_values');
}
// Warn if user has the legacy variable but not the new ones
if (
process.env.ANTHROPIC_API_KEY &&
!process.env.CLAUDE_CODE_OAUTH_TOKEN &&
!process.env.CLAUDE_API_KEY
) {
getLog().warn(
{ hint: 'Use CLAUDE_API_KEY or CLAUDE_CODE_OAUTH_TOKEN instead' },
'deprecated_anthropic_api_key_ignored'
);
}
const hasExplicitTokens = Boolean(
process.env.CLAUDE_CODE_OAUTH_TOKEN ?? process.env.CLAUDE_API_KEY
);
const authMode = hasExplicitTokens ? 'explicit' : 'global';
getLog().info(
{ authMode },
authMode === 'global' ? 'using_global_auth' : 'using_explicit_tokens'
);
// Determine whether to use global auth
let useGlobalAuth: boolean;
if (globalAuthSetting === 'true') {
useGlobalAuth = true;
getLog().info({ authMode: 'global' }, 'using_global_auth');
} else if (globalAuthSetting === 'false') {
useGlobalAuth = false;
getLog().info({ authMode: 'explicit' }, 'using_explicit_tokens');
} else if (globalAuthSetting !== undefined) {
// Unrecognized value - warn and fall back to auto-detect
getLog().warn({ value: globalAuthSetting }, 'unrecognized_global_auth_setting');
useGlobalAuth = !hasExplicitTokens;
} else {
// Not set - auto-detect: use tokens if present, otherwise global auth
useGlobalAuth = !hasExplicitTokens;
if (hasExplicitTokens) {
getLog().info({ authMode: 'explicit', autoDetected: true }, 'using_explicit_tokens');
} else {
getLog().info({ authMode: 'global', autoDetected: true }, 'using_global_auth');
}
}
let baseEnv: NodeJS.ProcessEnv;
if (useGlobalAuth) {
// Start from allowlist-filtered env, then strip auth tokens
const clean = buildCleanSubprocessEnv();
const { CLAUDE_CODE_OAUTH_TOKEN, CLAUDE_API_KEY, ...envWithoutAuth } = clean;
// Log if we're filtering out tokens (helps debug auth issues)
const filtered = [
CLAUDE_CODE_OAUTH_TOKEN && 'CLAUDE_CODE_OAUTH_TOKEN',
CLAUDE_API_KEY && 'CLAUDE_API_KEY',
].filter(Boolean);
if (filtered.length > 0) {
getLog().info({ filteredVars: filtered }, 'global_auth_filtered_tokens');
}
baseEnv = envWithoutAuth;
} else {
// Start from allowlist-filtered env (includes auth tokens)
baseEnv = buildCleanSubprocessEnv();
}
// Clean env vars that interfere with Claude Code subprocess
const cleanedVars: string[] = [];
// Strip nested-session guard marker (claude-code v2.1.41+).
// When the server is started from inside a Claude Code terminal, CLAUDECODE=1
// is inherited and causes the subprocess to refuse to launch.
// See: https://github.com/anthropics/claude-code/issues/25434
if (baseEnv.CLAUDECODE) {
delete baseEnv.CLAUDECODE;
cleanedVars.push('CLAUDECODE');
}
// Strip debugger env vars
// See: https://github.com/anthropics/claude-code/issues/4619
if (baseEnv.NODE_OPTIONS) {
delete baseEnv.NODE_OPTIONS;
cleanedVars.push('NODE_OPTIONS');
}
if (baseEnv.VSCODE_INSPECTOR_OPTIONS) {
delete baseEnv.VSCODE_INSPECTOR_OPTIONS;
cleanedVars.push('VSCODE_INSPECTOR_OPTIONS');
}
if (cleanedVars.length > 0) {
getLog().info({ cleanedVars }, 'subprocess_env_cleaned');
}
return baseEnv;
return { ...process.env };
}
/** Max retries for transient subprocess failures (3 = 4 total attempts).
@ -230,6 +153,88 @@ function classifySubprocessError(
return 'unknown';
}
/**
 * First-SDK-message timeout in milliseconds.
 *
 * Honors ARCHON_CLAUDE_FIRST_EVENT_TIMEOUT_MS when it parses to a positive,
 * finite number; any other value (unset, empty, NaN, zero, negative) falls
 * back to the 60-second default.
 */
function getFirstEventTimeoutMs(): number {
  const DEFAULT_TIMEOUT_MS = 60_000;
  const override = process.env.ARCHON_CLAUDE_FIRST_EVENT_TIMEOUT_MS;
  if (!override) return DEFAULT_TIMEOUT_MS;
  const parsed = Number(override);
  return Number.isFinite(parsed) && parsed > 0 ? parsed : DEFAULT_TIMEOUT_MS;
}
/**
 * Assemble the structured payload logged as `claude.first_event_timeout`.
 *
 * Captures subprocess env key NAMES only (never values), any Claude/Anthropic
 * marker keys inherited by the parent process, and basic process state —
 * everything needed to debug a hung first SDK message from the logs alone.
 */
function buildFirstEventHangDiagnostics(
  subprocessEnv: Record<string, string>,
  model: string | undefined
): Record<string, unknown> {
  // Parent-process keys that influence Claude Code subprocess behavior.
  const isClaudeMarker = (key: string): boolean =>
    key === 'CLAUDECODE' || key.startsWith('CLAUDE_CODE_') || key.startsWith('ANTHROPIC_');
  return {
    subprocessEnvKeys: Object.keys(subprocessEnv),
    parentClaudeKeys: Object.keys(process.env).filter(isClaudeMarker),
    model,
    platform: process.platform,
    uid: getProcessUid(),
    isTTY: process.stdout.isTTY ?? false,
    claudeCode: process.env.CLAUDECODE,
    claudeCodeEntrypoint: process.env.CLAUDE_CODE_ENTRYPOINT,
  };
}
/**
 * Sentinel error class to identify timeout rejections in withFirstMessageTimeout.
 * Sets `name` explicitly — without it, instances report the generic "Error"
 * in stack traces and structured logs, obscuring which failure fired.
 */
class FirstEventTimeoutError extends Error {
  constructor() {
    super('first SDK event timed out');
    this.name = 'FirstEventTimeoutError';
  }
}
/**
 * Wraps an async generator so that the first call to .next() must resolve
 * within `timeoutMs`. If it doesn't, aborts the controller and throws a
 * descriptive error. Subsequent .next() calls are forwarded directly.
 *
 * Uses Promise.race() not just AbortController because the pathological
 * case is "SDK ignores abort", so we need an independent unblocking mechanism.
 *
 * @param gen         the SDK event stream being guarded
 * @param controller  aborted when the timeout fires, to tell the SDK to stop
 * @param timeoutMs   budget for the FIRST event only
 * @param diagnostics extra fields spread into the timeout error log
 * @throws Error with a claude.first_event_timeout pointer when the first
 *         event does not arrive in time; otherwise re-throws generator errors
 */
export async function* withFirstMessageTimeout<T>(
  gen: AsyncGenerator<T>,
  controller: AbortController,
  timeoutMs: number,
  diagnostics: Record<string, unknown>
): AsyncGenerator<T> {
  let timerId: ReturnType<typeof setTimeout> | undefined;
  let firstValue: IteratorResult<T>;
  // Start the first pull once so we can both race it and, on timeout, silence
  // its eventual settlement — the abandoned promise may reject much later
  // (e.g. when the aborted subprocess finally dies) and would otherwise
  // surface as an unhandledRejection.
  const firstNext = gen.next();
  try {
    firstValue = await Promise.race([
      firstNext,
      new Promise<never>((_, reject) => {
        timerId = setTimeout(() => {
          reject(new FirstEventTimeoutError());
        }, timeoutMs);
      }),
    ]);
  } catch (err) {
    if (err instanceof FirstEventTimeoutError) {
      controller.abort();
      // Swallow whatever the abandoned first pull eventually does; we are
      // about to throw our own descriptive error instead.
      firstNext.catch(() => {});
      getLog().error({ ...diagnostics, timeoutMs }, 'claude.first_event_timeout');
      throw new Error(
        'Claude Code subprocess produced no output within ' +
          timeoutMs +
          'ms. ' +
          'See logs for claude.first_event_timeout diagnostic dump. ' +
          'Details: https://github.com/coleam00/Archon/issues/1067'
      );
    }
    throw err;
  } finally {
    // Runs on both paths: cancel the pending timer once the race settles.
    clearTimeout(timerId);
  }
  if (firstValue.done) return;
  yield firstValue.value;
  // Forward remaining events directly
  yield* gen;
}
/**
* Returns the current process UID, or undefined on platforms that don't support it (e.g. Windows).
* Exported for testing spyOn(claudeModule, 'getProcessUid') works cross-platform.
@ -479,7 +484,14 @@ export class ClaudeClient implements IAssistantClient {
}
try {
for await (const msg of query({ prompt, options })) {
const rawEvents = query({ prompt, options });
const timeoutMs = getFirstEventTimeoutMs();
const diagnostics = buildFirstEventHangDiagnostics(
options.env as Record<string, string>,
options.model
);
const events = withFirstMessageTimeout(rawEvents, controller, timeoutMs, diagnostics);
for await (const msg of events) {
// Drain tool results captured by PostToolUse hook before processing the next message
while (toolResultQueue.length > 0) {
const tr = toolResultQueue.shift();

View file

@ -377,7 +377,9 @@ export async function dispatchBackgroundWorkflow(
preCreatedRun
);
// Surface workflow output to parent conversation as a result card
if (result.success && !('paused' in result) && result.summary) {
if ('paused' in result) {
// Paused workflows (approval gates) — no result card yet
} else if (result.success && result.summary) {
try {
await ctx.platform.sendMessage(ctx.conversationId, result.summary, {
category: 'workflow_result',
@ -393,6 +395,27 @@ export async function dispatchBackgroundWorkflow(
'workflow_output_surface_failed'
);
}
} else if (!result.success && result.workflowRunId) {
// Surface failure as a result card so the chat shows status + "View full logs"
try {
await ctx.platform.sendMessage(
ctx.conversationId,
`Workflow **${workflow.name}** failed: ${result.error}`,
{
category: 'workflow_result',
segment: 'new',
workflowResult: {
workflowName: workflow.name,
runId: result.workflowRunId,
},
}
);
} catch (surfaceError) {
getLog().warn(
{ err: toError(surfaceError), conversationId: ctx.conversationId },
'workflow_output_surface_failed'
);
}
}
} catch (error) {
const err = toError(error);
@ -404,9 +427,22 @@ export async function dispatchBackgroundWorkflow(
},
'background_workflow_failed'
);
// Surface error to parent conversation so the user knows
// Surface error to parent conversation — include workflowResult metadata when
// we have a pre-created run ID so the chat renders a result card with "View full logs"
const failureRunId = preCreatedRun?.id;
const failureMessage = `Workflow **${workflow.name}** failed: ${err.message}`;
await ctx.platform
.sendMessage(ctx.conversationId, `Workflow **${workflow.name}** failed: ${err.message}`)
.sendMessage(
ctx.conversationId,
failureMessage,
failureRunId
? {
category: 'workflow_result',
segment: 'new',
workflowResult: { workflowName: workflow.name, runId: failureRunId },
}
: undefined
)
.catch((sendErr: unknown) => {
getLog().error({ err: toError(sendErr) }, 'background_workflow_notify_failed');
});

View file

@ -1,70 +0,0 @@
import { describe, it, expect, beforeEach, afterEach } from 'bun:test';
import { buildCleanSubprocessEnv, SUBPROCESS_ENV_ALLOWLIST } from './env-allowlist';
// Tests for the subprocess env allowlist filter. Each test mutates
// process.env, so beforeEach/afterEach snapshot and restore it.
describe('buildCleanSubprocessEnv', () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    // Snapshot so per-test mutations don't leak across tests.
    originalEnv = { ...process.env };
  });

  afterEach(() => {
    // Remove keys added during the test, then restore original values.
    for (const key of Object.keys(process.env)) {
      if (!(key in originalEnv)) delete process.env[key];
    }
    Object.assign(process.env, originalEnv);
  });

  it('includes allowlisted vars present in process.env', () => {
    process.env.CLAUDE_USE_GLOBAL_AUTH = 'true';
    const env = buildCleanSubprocessEnv();
    expect(env.CLAUDE_USE_GLOBAL_AUTH).toBe('true');
  });

  it('excludes ANTHROPIC_API_KEY (not in allowlist)', () => {
    // A target-repo .env could set this; it must not reach the subprocess.
    process.env.ANTHROPIC_API_KEY = 'leaked-key-from-target-repo';
    const env = buildCleanSubprocessEnv();
    expect(env.ANTHROPIC_API_KEY).toBeUndefined();
  });

  it('excludes arbitrary target-repo vars', () => {
    process.env.MY_APP_SECRET = 'should-not-leak';
    process.env.POSTGRES_PASSWORD = 'db-secret';
    const env = buildCleanSubprocessEnv();
    expect(env.MY_APP_SECRET).toBeUndefined();
    expect(env.POSTGRES_PASSWORD).toBeUndefined();
  });

  it('includes PATH and HOME (system essentials)', () => {
    const env = buildCleanSubprocessEnv();
    expect(env.PATH).toBe(process.env.PATH);
    expect(env.HOME).toBe(process.env.HOME);
  });

  it('includes GITHUB_TOKEN when present', () => {
    process.env.GITHUB_TOKEN = 'ghp_test123';
    const env = buildCleanSubprocessEnv();
    expect(env.GITHUB_TOKEN).toBe('ghp_test123');
  });

  it('does not include keys with undefined values', () => {
    // Unset allowlist entries are omitted entirely, not carried as undefined.
    const env = buildCleanSubprocessEnv();
    for (const value of Object.values(env)) {
      expect(value).not.toBeUndefined();
    }
  });
});
// Guards on the allowlist contents themselves: billing-sensitive and
// database keys must stay out; the canonical Claude key must stay in.
describe('SUBPROCESS_ENV_ALLOWLIST', () => {
  it('does not contain ANTHROPIC_API_KEY', () => {
    expect(SUBPROCESS_ENV_ALLOWLIST.has('ANTHROPIC_API_KEY')).toBe(false);
  });

  it('does not contain DATABASE_URL', () => {
    expect(SUBPROCESS_ENV_ALLOWLIST.has('DATABASE_URL')).toBe(false);
  });

  it('contains CLAUDE_API_KEY', () => {
    expect(SUBPROCESS_ENV_ALLOWLIST.has('CLAUDE_API_KEY')).toBe(true);
  });
});

View file

@ -1,70 +0,0 @@
/**
* Subprocess environment allowlist
*
* Controls which process.env keys are passed to Claude Code subprocesses.
* Using an allowlist prevents target-repo .env leakage (Bun auto-loads CWD .env).
* Per-codebase env vars (codebase_env_vars table / .archon/config.yaml `env:`) are
* merged on top by the workflow executor via requestOptions.env those are unaffected.
*/
/** System essentials needed by tools, git, and shell operations. */
const SYSTEM_VARS: readonly string[] = [
  'PATH',
  'HOME',
  'USER',
  'LOGNAME',
  'SHELL',
  'TERM',
  'TMPDIR',
  'TEMP',
  'TMP',
  'LANG',
  'LC_ALL',
  'LC_CTYPE',
  'TZ',
  'SSH_AUTH_SOCK',
];
/** Claude auth and provider configuration. */
const CLAUDE_VARS: readonly string[] = [
  'CLAUDE_USE_GLOBAL_AUTH',
  'CLAUDE_API_KEY',
  'CLAUDE_CODE_OAUTH_TOKEN',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
  'ANTHROPIC_BASE_URL',
  'ANTHROPIC_BEDROCK_BASE_URL',
  'ANTHROPIC_VERTEX_PROJECT_ID',
  'ANTHROPIC_VERTEX_REGION',
];
/** Archon runtime configuration. */
const ARCHON_VARS: readonly string[] = [
  'ARCHON_HOME',
  'ARCHON_DOCKER',
  'IS_SANDBOX',
  'WORKSPACE_PATH',
  'LOG_LEVEL',
];
/** Git identity (used by git commits inside workflows). */
const GIT_VARS: readonly string[] = [
  'GIT_AUTHOR_NAME',
  'GIT_AUTHOR_EMAIL',
  'GIT_COMMITTER_NAME',
  'GIT_COMMITTER_EMAIL',
  'GIT_SSH_COMMAND',
];
/** GitHub CLI tokens (used by Claude Code tools). */
const GITHUB_VARS: readonly string[] = ['GITHUB_TOKEN', 'GH_TOKEN'];
/** Canonical set of env vars a Claude Code subprocess legitimately needs. */
export const SUBPROCESS_ENV_ALLOWLIST = new Set<string>([
  ...SYSTEM_VARS,
  ...CLAUDE_VARS,
  ...ARCHON_VARS,
  ...GIT_VARS,
  ...GITHUB_VARS,
]);
/**
 * Build a clean subprocess env from process.env using the allowlist.
 * Call this instead of spreading process.env directly.
 *
 * The caller (buildSubprocessEnv in claude.ts) then applies auth filtering
 * on top (strip CLAUDE_CODE_OAUTH_TOKEN/CLAUDE_API_KEY when using global auth).
 * Per-query env overrides (requestOptions.env) are merged last by the caller.
 */
export function buildCleanSubprocessEnv(): NodeJS.ProcessEnv {
  // Iterate the allowlist (not process.env) so the result keeps the
  // allowlist's ordering and stays dense — no undefined-valued keys.
  const presentEntries = [...SUBPROCESS_ENV_ALLOWLIST]
    .filter((key) => process.env[key] !== undefined)
    .map((key) => [key, process.env[key]] as const);
  return Object.fromEntries(presentEntries);
}

View file

@ -1,6 +1,6 @@
{
"name": "@archon/docs-web",
"version": "0.3.5",
"version": "0.3.6",
"private": true,
"scripts": {
"dev": "astro dev",

View file

@ -140,6 +140,18 @@ While a workflow runs, a progress card appears in the conversation showing:
For paused workflows (approval gates), the progress card shows **Approve** and **Reject** buttons so you can control the workflow directly from the chat.
### Workflow Result Card
When a workflow reaches a terminal state (completed, failed, or cancelled), the progress card is replaced by a result card in the conversation. The result card shows:
- **Status icon** -- Visual indicator for completed, failed, or cancelled
- **Header** -- "Workflow complete", "Workflow failed", or "Workflow cancelled" depending on outcome
- **Node count** -- How many nodes completed out of the total nodes that reached a terminal state (e.g., `3/4 nodes`)
- **Duration** -- Total elapsed time for the run
- **Artifacts** -- Any files or outputs produced by the workflow, with direct links
Click the arrow button in the result card header to open the full execution detail page.
### Execution Detail Page
Click on a workflow run (from the dashboard or progress card) to open the execution detail page at `/workflows/runs/:runId`. This shows:

View file

@ -362,11 +362,12 @@ When using `--branch`, workflows run inside the worktree directory.
## Environment
The CLI loads `~/.archon/.env` with `override: true`, so Archon's own config always wins over any env vars Bun auto-loads from the current working directory. Target repo env vars remain in `process.env` but cannot reach AI subprocesses — `SUBPROCESS_ENV_ALLOWLIST` blocks all non-whitelisted keys.
At startup, the CLI strips all Bun-auto-loaded CWD `.env` keys and nested Claude Code session markers from `process.env`, then loads `~/.archon/.env` as the sole trusted source. All keys you set in `~/.archon/.env` pass through to AI subprocesses — no allowlist filtering.
On startup, the CLI:
1. Loads `~/.archon/.env` with `override: true` (Archon's config wins over CWD vars)
2. Auto-enables global Claude auth if no explicit tokens are set
1. Strips CWD `.env` keys + `CLAUDECODE` markers from `process.env` (via `stripCwdEnv`)
2. Loads `~/.archon/.env` (all keys trusted)
3. Auto-enables global Claude auth if no explicit tokens are set
## Database

View file

@ -190,6 +190,7 @@ Environment variables override all other configuration. They are organized by ca
| `DEFAULT_AI_ASSISTANT` | Default AI assistant (`claude` or `codex`) | `claude` |
| `MAX_CONCURRENT_CONVERSATIONS` | Maximum concurrent AI conversations | `10` |
| `SESSION_RETENTION_DAYS` | Delete inactive sessions older than N days | `30` |
| `ARCHON_SUPPRESS_NESTED_CLAUDE_WARNING` | When set to `1`, suppresses the stderr warning emitted when `archon` is run inside a Claude Code session | -- |
### AI Providers -- Claude
@ -199,6 +200,7 @@ Environment variables override all other configuration. They are organized by ca
| `CLAUDE_CODE_OAUTH_TOKEN` | Explicit OAuth token (alternative to global auth) | -- |
| `CLAUDE_API_KEY` | Explicit API key (alternative to global auth) | -- |
| `TITLE_GENERATION_MODEL` | Lightweight model for generating conversation titles | SDK default |
| `ARCHON_CLAUDE_FIRST_EVENT_TIMEOUT_MS` | Timeout (ms) before Claude subprocess is considered hung (throws with diagnostic log) | `60000` |
When `CLAUDE_USE_GLOBAL_AUTH` is unset, Archon auto-detects: it uses explicit tokens if present, otherwise falls back to global auth.
@ -296,11 +298,11 @@ Infrastructure configuration (database URL, platform tokens) is stored in `.env`
| Component | Location | Purpose |
|-----------|----------|---------|
| **CLI** | `~/.archon/.env` | Global infrastructure config (only source, loaded with `override: true`) |
| **Server (dev)** | `<archon-repo>/.env` + `~/.archon/.env` | Repo `.env` for platform tokens; `~/.archon/.env` overrides with `override: true` |
| **CLI** | `~/.archon/.env` | Global infrastructure config; CWD .env keys stripped first, then loaded with `override: true` (Archon config wins over shell-inherited vars) |
| **Server (dev)** | `<archon-repo>/.env` + `~/.archon/.env` | Repo `.env` for platform tokens; `~/.archon/.env` loaded with `override: true` |
| **Server (binary)** | `~/.archon/.env` | Single source of truth (repo `.env` path is not available in compiled binaries) |
**How it works**: Both the CLI and server load `~/.archon/.env` with `override: true`, so Archon's own config always wins over any env vars Bun auto-loads from the current working directory. Target repo env vars remain in `process.env` but cannot reach AI subprocesses — `SUBPROCESS_ENV_ALLOWLIST` blocks all non-whitelisted keys.
**How it works**: At startup, the CLI and server strip all keys that Bun auto-loaded from the current working directory (`.env`, `.env.local`, `.env.development`, `.env.production`) and any nested Claude Code session markers (`CLAUDECODE`, `CLAUDE_CODE_*` except auth vars) before loading `~/.archon/.env`. This ensures target repo keys and nested-session guards are fully removed from `process.env` before any application code runs.
**Best practice**: Use `~/.archon/.env` as the single source of truth:

View file

@ -119,13 +119,14 @@ The GitHub and Gitea adapters verify webhook signatures to ensure payloads origi
- Never commit `.env` files to git. The repository's `.gitignore` excludes them.
**Subprocess env isolation:**
- Bun auto-loads `.env` from CWD before any Archon code runs. These vars remain in the server/CLI's `process.env` but **cannot reach AI subprocesses** — Claude Code subprocesses receive only an explicit allowlist of env vars (`SUBPROCESS_ENV_ALLOWLIST`: system essentials, Claude auth, Archon runtime config, git identity, GitHub tokens). Keys like `ANTHROPIC_API_KEY`, `OPENAI_API_KEY`, and `DATABASE_URL` are not on the allowlist and are blocked.
- `~/.archon/.env` is loaded with `override: true`, so Archon's own config always wins over any Bun-auto-loaded CWD vars for overlapping keys.
- Per-codebase env vars configured via `codebase_env_vars` or `.archon/config.yaml` `env:` are merged on top of this filtered base at workflow execution time.
- At startup, `stripCwdEnv()` removes **all** keys that Bun auto-loaded from the CWD `.env` files, plus nested Claude Code session markers (`CLAUDECODE`, `CLAUDE_CODE_*` except auth vars) and debugger vars (`NODE_OPTIONS`, `VSCODE_INSPECTOR_OPTIONS`). This runs before any module reads `process.env`.
- `~/.archon/.env` is then loaded as the trusted source of Archon configuration. All keys the user sets there pass through to subprocesses — there is no allowlist filtering. The user controls this file and all keys are intentional.
- Per-codebase env vars configured via `codebase_env_vars` or `.archon/config.yaml` `env:` are merged on top at workflow execution time.
- CWD `.env` keys are the **only** untrusted source. They belong to the target project, not to Archon.
### Env-leak gate (target repo `.env` keys)
Beyond the subprocess allowlist, Archon also scans target repos for sensitive keys **before spawning**. A Claude or Codex subprocess started with `cwd=/path/to/target/repo` inherits its own Bun auto-loaded `.env` — the env-leak gate catches this by scanning the target repo's `.env` files at registration and pre-spawn time.
As a second layer of defense, Archon scans target repos for sensitive keys **before spawning** AI subprocesses. A Claude or Codex subprocess started with `cwd=/path/to/target/repo` inherits Bun's auto-loaded `.env` from that CWD — the env-leak gate catches this by scanning the target repo's `.env` files at registration and pre-spawn time.
**What Archon scans:** auto-loaded filenames `.env`, `.env.local`, `.env.development`, `.env.production`, `.env.development.local`, `.env.production.local`.

View file

@ -279,3 +279,23 @@ docker compose exec app ls -la /.archon/workspaces
```bash
docker compose exec app git clone https://github.com/user/repo /.archon/workspaces/test-repo
```
## Workflows Hang Silently When Run Inside Claude Code
**Symptom:** Workflows started from within a Claude Code session (e.g., via the Terminal tool) produce no output, or the CLI emits a warning about `CLAUDECODE=1` before the workflow hangs.
**Cause:** Nested Claude Code sessions can deadlock — the outer session waits for tool results that the inner session never delivers.
**Fix:** Run `archon serve` from a regular shell outside Claude Code and use the Web UI or HTTP API instead.
**Suppress the warning:** If you have a non-deadlocking setup and want to silence the warning:
```bash
ARCHON_SUPPRESS_NESTED_CLAUDE_WARNING=1 archon workflow run ...
```
**Adjust the timeout:** If your environment is slow and hitting the 60-second first-event timeout:
```bash
ARCHON_CLAUDE_FIRST_EVENT_TIMEOUT_MS=120000 archon workflow run ...
```

View file

@ -1,6 +1,6 @@
{
"name": "@archon/git",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"main": "./src/index.ts",
"types": "./src/index.ts",

View file

@ -1,6 +1,6 @@
{
"name": "@archon/isolation",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"main": "./src/index.ts",
"types": "./src/index.ts",

View file

@ -1,17 +1,20 @@
{
"name": "@archon/paths",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"main": "./src/index.ts",
"types": "./src/index.ts",
"exports": {
".": "./src/index.ts"
".": "./src/index.ts",
"./strip-cwd-env": "./src/strip-cwd-env.ts",
"./strip-cwd-env-boot": "./src/strip-cwd-env-boot.ts"
},
"scripts": {
"test": "bun test src/",
"type-check": "bun x tsc --noEmit"
},
"dependencies": {
"dotenv": "^17",
"pino": "^9",
"pino-pretty": "^13"
},

View file

@ -0,0 +1,208 @@
/**
* Integration tests for the env isolation flow:
 * Bun auto-load (simulated) → stripCwdEnv() → ~/.archon/.env load → subprocess env
*
* Tests the full user scenario: what keys reach the Claude subprocess when the
* user has various combinations of CWD .env, ~/.archon/.env, and shell env?
*
* Note: We can't actually test Bun's auto-load (it runs before any code), so we
* simulate it by setting process.env keys before calling stripCwdEnv(). This is
 * equivalent — Bun's auto-load just does process.env[key] = value, same as us.
*/
import { describe, it, expect, beforeEach, afterEach } from 'bun:test';
import { writeFileSync, mkdirSync, rmSync } from 'fs';
import { join } from 'path';
import { config } from 'dotenv';
import { stripCwdEnv } from './strip-cwd-env';
// Every env key a test in this file may touch. beforeEach snapshots these and
// afterEach restores them, so tests cannot leak env state into each other.
const TEST_KEYS: readonly string[] = [
  // Claude auth & config
  'ANTHROPIC_API_KEY',
  'CLAUDE_API_KEY',
  'CLAUDE_CODE_OAUTH_TOKEN',
  'CLAUDE_USE_GLOBAL_AUTH',
  // Generic app/infra keys used as leak probes
  'DATABASE_URL',
  'LOG_LEVEL',
  'CWD_ONLY_KEY',
  'ARCHON_ONLY_KEY',
  'SHARED_KEY',
  'MY_SECRET_TOKEN',
  // Nested-session markers, debugger vars, and one more infra probe
  'CLAUDECODE',
  'CLAUDE_CODE_ENTRYPOINT',
  'NODE_OPTIONS',
  'REDIS_URL',
];
describe('env isolation integration', () => {
  // Scratch directories standing in for the target-repo CWD and ~/.archon.
  const cwdDir = join(import.meta.dir, '__env-integration-cwd__');
  const archonDir = join(import.meta.dir, '__env-integration-archon__');
  // Snapshot of process.env values for TEST_KEYS, restored in afterEach.
  let savedEnv: Record<string, string | undefined>;
  beforeEach(() => {
    // Save original env state
    savedEnv = {};
    for (const key of TEST_KEYS) {
      savedEnv[key] = process.env[key];
    }
    mkdirSync(cwdDir, { recursive: true });
    mkdirSync(archonDir, { recursive: true });
  });
  afterEach(() => {
    // Restore original env
    for (const key of TEST_KEYS) {
      if (savedEnv[key] !== undefined) {
        process.env[key] = savedEnv[key];
      } else {
        delete process.env[key];
      }
    }
    rmSync(cwdDir, { recursive: true, force: true });
    rmSync(archonDir, { recursive: true, force: true });
  });
  /**
   * Simulate the full entry-point flow:
   * 1. "Bun auto-load" (set CWD .env keys in process.env)
   * 2. stripCwdEnv() (remove CWD keys + markers)
   * 3. Load ~/.archon/.env (dotenv.config)
   * 4. Return process.env snapshot (what buildSubprocessEnv would return)
   */
  function simulateEntryPointFlow(cwdEnv: string, archonEnv: string): NodeJS.ProcessEnv {
    // Write the CWD .env file
    writeFileSync(join(cwdDir, '.env'), cwdEnv);
    // Simulate Bun auto-load: parse CWD .env and set in process.env
    // (processEnv: {} makes dotenv parse without writing; we write manually)
    const cwdParsed = config({ path: join(cwdDir, '.env'), processEnv: {} });
    if (cwdParsed.parsed) {
      for (const [key, value] of Object.entries(cwdParsed.parsed)) {
        process.env[key] = value;
      }
    }
    // Step 2: stripCwdEnv (same as entry point)
    stripCwdEnv(cwdDir);
    // Step 3: Load ~/.archon/.env with override — user's Archon config wins
    // over any shell-inherited vars (same as real entry point).
    writeFileSync(join(archonDir, '.env'), archonEnv);
    config({ path: join(archonDir, '.env'), override: true });
    // Step 4: Return subprocess env snapshot
    return { ...process.env };
  }
  it('scenario 1: global auth user with ANTHROPIC_API_KEY in CWD .env — CWD key stripped', () => {
    // User ran `claude /login` (global auth). Target repo has ANTHROPIC_API_KEY
    // in its .env. That key must NOT reach the subprocess.
    const subprocessEnv = simulateEntryPointFlow(
      'ANTHROPIC_API_KEY=sk-target-repo-leaked\nDATABASE_URL=postgres://target/db\n',
      'CLAUDE_USE_GLOBAL_AUTH=true\n'
    );
    expect(subprocessEnv.ANTHROPIC_API_KEY).toBeUndefined();
    expect(subprocessEnv.DATABASE_URL).toBeUndefined();
    expect(subprocessEnv.CLAUDE_USE_GLOBAL_AUTH).toBe('true');
  });
  it('scenario 2: user has OAuth token in archon env + random key in CWD .env — CWD stripped, archon kept', () => {
    const subprocessEnv = simulateEntryPointFlow(
      'CWD_ONLY_KEY=from-target-repo\nLOG_LEVEL=debug\n',
      'CLAUDE_CODE_OAUTH_TOKEN=sk-ant-oat01-my-token\nCLAUDE_USE_GLOBAL_AUTH=false\n'
    );
    // CWD keys must be gone
    expect(subprocessEnv.CWD_ONLY_KEY).toBeUndefined();
    expect(subprocessEnv.LOG_LEVEL).toBeUndefined();
    // Archon keys must be present
    expect(subprocessEnv.CLAUDE_CODE_OAUTH_TOKEN).toBe('sk-ant-oat01-my-token');
    expect(subprocessEnv.CLAUDE_USE_GLOBAL_AUTH).toBe('false');
  });
  it('scenario 3: nothing from CWD .env leaks to subprocess', () => {
    const subprocessEnv = simulateEntryPointFlow(
      'MY_SECRET_TOKEN=leaked\nDATABASE_URL=postgres://wrong/db\nLOG_LEVEL=trace\nANTHROPIC_API_KEY=sk-wrong-key\n',
      'ARCHON_ONLY_KEY=trusted\n'
    );
    // ALL CWD keys must be gone
    expect(subprocessEnv.MY_SECRET_TOKEN).toBeUndefined();
    expect(subprocessEnv.DATABASE_URL).toBeUndefined();
    expect(subprocessEnv.LOG_LEVEL).toBeUndefined();
    expect(subprocessEnv.ANTHROPIC_API_KEY).toBeUndefined();
    // Archon key present
    expect(subprocessEnv.ARCHON_ONLY_KEY).toBe('trusted');
    // Shell-inherited keys present
    expect(subprocessEnv.PATH).toBeDefined();
    expect(subprocessEnv.HOME).toBeDefined();
  });
  it('scenario 4: same key in both CWD and archon env — archon value wins', () => {
    // User has ANTHROPIC_API_KEY in both places. CWD one is the target repo's,
    // archon one is the user's intentional config. Archon must win.
    const subprocessEnv = simulateEntryPointFlow(
      'ANTHROPIC_API_KEY=sk-target-repo-WRONG\nSHARED_KEY=cwd-value\n',
      'ANTHROPIC_API_KEY=sk-my-real-key\nSHARED_KEY=archon-value\n'
    );
    // Archon value wins (CWD was stripped, then archon loaded)
    expect(subprocessEnv.ANTHROPIC_API_KEY).toBe('sk-my-real-key');
    expect(subprocessEnv.SHARED_KEY).toBe('archon-value');
  });
  it('CLAUDECODE markers stripped even if not from CWD .env', () => {
    // Simulating: parent Claude Code shell sets CLAUDECODE=1
    // (not from .env file, from inherited shell env)
    process.env.CLAUDECODE = '1';
    process.env.CLAUDE_CODE_ENTRYPOINT = 'cli';
    process.env.NODE_OPTIONS = '--inspect';
    const subprocessEnv = simulateEntryPointFlow('', '');
    expect(subprocessEnv.CLAUDECODE).toBeUndefined();
    expect(subprocessEnv.CLAUDE_CODE_ENTRYPOINT).toBeUndefined();
    expect(subprocessEnv.NODE_OPTIONS).toBeUndefined();
  });
  it('scenario 5: DATABASE_URL in CWD .env does not reach Archon — archon uses its own DB', () => {
    // Target repo has DATABASE_URL for its own PostgreSQL. Archon must NOT
    // connect to the target app's database — it should use its own DB
    // (from ~/.archon/.env or default SQLite).
    const subprocessEnv = simulateEntryPointFlow(
      'DATABASE_URL=postgresql://target-app:5432/wrong_db\nREDIS_URL=redis://target:6379\n',
      'DATABASE_URL=sqlite:///Users/me/.archon/archon.db\n'
    );
    // CWD DATABASE_URL is stripped, archon's wins
    expect(subprocessEnv.DATABASE_URL).toBe('sqlite:///Users/me/.archon/archon.db');
    // Other CWD keys also stripped
    expect(subprocessEnv.REDIS_URL).toBeUndefined();
  });
  it('scenario 6: DATABASE_URL in CWD .env only (no archon env) — stripped entirely', () => {
    // User relies on default SQLite (no DATABASE_URL in ~/.archon/.env).
    // Target repo's DATABASE_URL must not leak.
    const subprocessEnv = simulateEntryPointFlow(
      'DATABASE_URL=postgresql://target-app:5432/production\n',
      ''
    );
    expect(subprocessEnv.DATABASE_URL).toBeUndefined();
  });
  it('CLAUDE_CODE_OAUTH_TOKEN from archon env survives marker strip', () => {
    // CLAUDE_CODE_* markers are stripped, but CLAUDE_CODE_OAUTH_TOKEN is
    // an auth var and must be preserved.
    process.env.CLAUDECODE = '1';
    process.env.CLAUDE_CODE_ENTRYPOINT = 'cli';
    const subprocessEnv = simulateEntryPointFlow(
      '',
      'CLAUDE_CODE_OAUTH_TOKEN=sk-ant-oat01-keep-this\n'
    );
    expect(subprocessEnv.CLAUDECODE).toBeUndefined();
    expect(subprocessEnv.CLAUDE_CODE_ENTRYPOINT).toBeUndefined();
    expect(subprocessEnv.CLAUDE_CODE_OAUTH_TOKEN).toBe('sk-ant-oat01-keep-this');
  });
});

View file

@ -0,0 +1,13 @@
/**
 * Side-effect entry point: strips Bun-auto-loaded CWD .env keys at import time.
 *
 * Import this as the FIRST import in CLI entry points so it runs
 * before any module that reads process.env at initialization time.
 *
 * @example
 * // packages/cli/src/cli.ts — must be the very first import
 * import '@archon/paths/strip-cwd-env-boot';
 */
import { stripCwdEnv } from './strip-cwd-env';
// No argument: runs against process.cwd(), the directory Bun auto-loaded from.
stripCwdEnv();

View file

@ -0,0 +1,159 @@
import { describe, it, expect, beforeEach, afterEach } from 'bun:test';
import { writeFileSync, mkdirSync, rmSync } from 'fs';
import { join } from 'path';
import { stripCwdEnv } from './strip-cwd-env';
describe('stripCwdEnv', () => {
  // Scratch directory standing in for the CWD whose .env files Bun auto-loads.
  const tmpDir = join(import.meta.dir, '__strip-cwd-env-test-tmp__');
  beforeEach(() => {
    mkdirSync(tmpDir, { recursive: true });
  });
  afterEach(() => {
    rmSync(tmpDir, { recursive: true, force: true });
    // Remove every key a test in this describe may have set.
    delete process.env.TEST_STRIP_KEY;
    delete process.env.TEST_STRIP_KEY2;
    delete process.env.TEST_STRIP_KEY_A;
    delete process.env.TEST_STRIP_KEY_B;
    // Clean up nested-session marker test keys
    delete process.env.CLAUDECODE;
    delete process.env.CLAUDE_CODE_ENTRYPOINT;
    delete process.env.CLAUDE_CODE_EXECPATH;
    delete process.env.CLAUDE_CODE_NO_FLICKER;
    delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
    delete process.env.CLAUDE_CODE_USE_BEDROCK;
    delete process.env.CLAUDE_CODE_USE_VERTEX;
    delete process.env.NODE_OPTIONS;
    delete process.env.VSCODE_INSPECTOR_OPTIONS;
  });
  it('strips keys from single .env file', () => {
    writeFileSync(join(tmpDir, '.env'), 'TEST_STRIP_KEY=leaked\n');
    process.env.TEST_STRIP_KEY = 'leaked';
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY).toBeUndefined();
  });
  it('strips keys from all four Bun-auto-loaded files', () => {
    for (const f of ['.env', '.env.local', '.env.development', '.env.production']) {
      writeFileSync(join(tmpDir, f), 'TEST_STRIP_KEY=leaked\n');
    }
    process.env.TEST_STRIP_KEY = 'leaked';
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY).toBeUndefined();
  });
  it('does nothing when no CWD .env files exist', () => {
    process.env.TEST_STRIP_KEY = 'safe';
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY).toBe('safe');
  });
  it('preserves keys not in CWD .env files', () => {
    writeFileSync(join(tmpDir, '.env'), 'TEST_STRIP_KEY=leaked\n');
    process.env.TEST_STRIP_KEY = 'leaked';
    process.env.TEST_STRIP_KEY2 = 'preserved';
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY).toBeUndefined();
    expect(process.env.TEST_STRIP_KEY2).toBe('preserved');
  });
  it('tolerates malformed .env lines', () => {
    writeFileSync(join(tmpDir, '.env'), 'NOTAKEYVALUE\nTEST_STRIP_KEY=leaked\n');
    process.env.TEST_STRIP_KEY = 'leaked';
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY).toBeUndefined();
  });
  it('does not delete key if it was not in process.env (no-op)', () => {
    writeFileSync(join(tmpDir, '.env'), 'TEST_STRIP_KEY=parsed\n');
    // Do NOT set process.env.TEST_STRIP_KEY — simulate key parsed but not auto-loaded
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY).toBeUndefined(); // still undefined, no error
  });
  it('strips distinct keys from different .env files', () => {
    writeFileSync(join(tmpDir, '.env'), 'TEST_STRIP_KEY_A=leaked\n');
    writeFileSync(join(tmpDir, '.env.local'), 'TEST_STRIP_KEY_B=leaked\n');
    process.env.TEST_STRIP_KEY_A = 'leaked';
    process.env.TEST_STRIP_KEY_B = 'leaked';
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY_A).toBeUndefined();
    expect(process.env.TEST_STRIP_KEY_B).toBeUndefined();
  });
});
describe('stripCwdEnv — nested Claude Code marker stripping', () => {
  // Separate tmp dir so these tests never collide with the describe above.
  const tmpDir = join(import.meta.dir, '__strip-markers-test-tmp__');
  beforeEach(() => {
    mkdirSync(tmpDir, { recursive: true });
  });
  afterEach(() => {
    rmSync(tmpDir, { recursive: true, force: true });
    // Remove every marker/auth/debugger key a test here may have set.
    delete process.env.CLAUDECODE;
    delete process.env.CLAUDE_CODE_ENTRYPOINT;
    delete process.env.CLAUDE_CODE_EXECPATH;
    delete process.env.CLAUDE_CODE_NO_FLICKER;
    delete process.env.CLAUDE_CODE_HIDE_ACCOUNT_INFO;
    delete process.env.CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS;
    delete process.env.CLAUDE_CODE_OAUTH_TOKEN;
    delete process.env.CLAUDE_CODE_USE_BEDROCK;
    delete process.env.CLAUDE_CODE_USE_VERTEX;
    delete process.env.NODE_OPTIONS;
    delete process.env.VSCODE_INSPECTOR_OPTIONS;
  });
  it('strips CLAUDECODE from process.env', () => {
    process.env.CLAUDECODE = '1';
    stripCwdEnv(tmpDir);
    expect(process.env.CLAUDECODE).toBeUndefined();
  });
  it('strips CLAUDE_CODE_* session markers', () => {
    process.env.CLAUDE_CODE_ENTRYPOINT = 'cli';
    process.env.CLAUDE_CODE_EXECPATH = '/usr/local/bin/claude';
    process.env.CLAUDE_CODE_NO_FLICKER = '1';
    process.env.CLAUDE_CODE_HIDE_ACCOUNT_INFO = '1';
    process.env.CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS = '1';
    stripCwdEnv(tmpDir);
    expect(process.env.CLAUDE_CODE_ENTRYPOINT).toBeUndefined();
    expect(process.env.CLAUDE_CODE_EXECPATH).toBeUndefined();
    expect(process.env.CLAUDE_CODE_NO_FLICKER).toBeUndefined();
    expect(process.env.CLAUDE_CODE_HIDE_ACCOUNT_INFO).toBeUndefined();
    expect(process.env.CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS).toBeUndefined();
  });
  it('preserves CLAUDE_CODE_* auth vars', () => {
    process.env.CLAUDE_CODE_OAUTH_TOKEN = 'sk-ant-oat01-secret';
    process.env.CLAUDE_CODE_USE_BEDROCK = '1';
    process.env.CLAUDE_CODE_USE_VERTEX = '1';
    stripCwdEnv(tmpDir);
    expect(process.env.CLAUDE_CODE_OAUTH_TOKEN).toBe('sk-ant-oat01-secret');
    expect(process.env.CLAUDE_CODE_USE_BEDROCK).toBe('1');
    expect(process.env.CLAUDE_CODE_USE_VERTEX).toBe('1');
  });
  it('strips NODE_OPTIONS and VSCODE_INSPECTOR_OPTIONS', () => {
    process.env.NODE_OPTIONS = '--inspect';
    process.env.VSCODE_INSPECTOR_OPTIONS = '{"port":9229}';
    stripCwdEnv(tmpDir);
    expect(process.env.NODE_OPTIONS).toBeUndefined();
    expect(process.env.VSCODE_INSPECTOR_OPTIONS).toBeUndefined();
  });
  it('handles combined CWD .env + nested session markers in one call', () => {
    writeFileSync(join(tmpDir, '.env'), 'TEST_STRIP_KEY=leaked\n');
    process.env.TEST_STRIP_KEY = 'leaked';
    process.env.CLAUDECODE = '1';
    process.env.CLAUDE_CODE_ENTRYPOINT = 'cli';
    process.env.CLAUDE_CODE_OAUTH_TOKEN = 'keep-me';
    stripCwdEnv(tmpDir);
    expect(process.env.TEST_STRIP_KEY).toBeUndefined();
    expect(process.env.CLAUDECODE).toBeUndefined();
    expect(process.env.CLAUDE_CODE_ENTRYPOINT).toBeUndefined();
    expect(process.env.CLAUDE_CODE_OAUTH_TOKEN).toBe('keep-me');
  });
});

View file

@ -0,0 +1,94 @@
/**
* Cleans process.env at startup BEFORE any module reads env at init time
* (notably `@archon/paths/logger` which reads `LOG_LEVEL` during module load).
*
* Two concerns handled in one pass:
*
* 1. CWD .env leak: Bun unconditionally loads .env / .env.local /
* .env.development / .env.production from CWD before any user code runs.
* When `archon` is invoked from inside a target repo, that repo's env vars
* leak into the Archon process. `override: true` in dotenv only fixes keys
 * that exist in both files — keys that only appear in the target repo's .env
* survive unaffected. We strip them.
*
* 2. Nested Claude Code session markers: When archon is launched from inside a
* Claude Code terminal, the parent shell exports CLAUDECODE=1 and several
* CLAUDE_CODE_* markers. The Claude Agent SDK leaks process.env into the
* spawned child regardless of the explicit `env` option
* (see coleam00/Archon#1097), so the only way to prevent the nested-session
* deadlock is to delete the markers from process.env at the entry point.
* Auth vars (CLAUDE_CODE_OAUTH_TOKEN, CLAUDE_CODE_USE_BEDROCK,
* CLAUDE_CODE_USE_VERTEX) are kept.
*/
import { config } from 'dotenv';
import { resolve } from 'path';
/** The four filenames Bun auto-loads from CWD (in loading order). */
// NOTE(review): stripping all four is the safe superset regardless of which
// subset Bun actually loaded for the current NODE_ENV — confirm against Bun docs.
const BUN_AUTO_LOADED_ENV_FILES = ['.env', '.env.local', '.env.development', '.env.production'];
/** CLAUDE_CODE_* vars that are auth-related and must be kept in process.env. */
const CLAUDE_CODE_AUTH_VARS = new Set([
  'CLAUDE_CODE_OAUTH_TOKEN',
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_VERTEX',
]);
/**
 * Strip CWD .env keys and nested Claude Code session markers from process.env.
 * Keys in ~/.archon/.env (loaded afterward by each entry point) are unaffected.
 * Safe to call even when no CWD .env files exist.
 *
 * @param cwd Directory whose Bun-auto-loaded .env files should be neutralized;
 *            defaults to the current working directory.
 */
export function stripCwdEnv(cwd: string = process.cwd()): void {
  // --- Pass 1: delete every key declared by a Bun-auto-loaded CWD .env file ---
  const leakedKeys = new Set<string>();
  for (const filename of BUN_AUTO_LOADED_ENV_FILES) {
    const filepath = resolve(cwd, filename);
    // processEnv: {} makes dotenv parse the file without writing to process.env.
    const parsed = config({ path: filepath, processEnv: {} });
    if (parsed.error) {
      // A missing file (ENOENT) is the normal case; anything else is surprising.
      const errno = (parsed.error as NodeJS.ErrnoException).code;
      if (errno !== 'ENOENT') {
        process.stderr.write(
          `[archon] Warning: could not parse ${filepath} for CWD env stripping: ${parsed.error.message}\n`
        );
      }
      continue;
    }
    for (const key of Object.keys(parsed.parsed ?? {})) {
      leakedKeys.add(key);
    }
  }
  leakedKeys.forEach((key) => Reflect.deleteProperty(process.env, key));
  // --- Pass 2: nested Claude Code session markers ---
  // Warn while CLAUDECODE is still visible — after this function returns,
  // downstream code can no longer detect the nested session.
  const nestedSession = process.env.CLAUDECODE === '1';
  if (nestedSession && !process.env.ARCHON_SUPPRESS_NESTED_CLAUDE_WARNING) {
    process.stderr.write(
      '\u26a0 Detected CLAUDECODE=1 \u2014 running inside a Claude Code session.\n' +
        ' If workflows hang silently, this is a known class of issue.\n' +
        ' Workaround: run `archon serve` from a regular shell.\n' +
        ' Suppress: set ARCHON_SUPPRESS_NESTED_CLAUDE_WARNING=1\n' +
        ' Details: https://github.com/coleam00/Archon/issues/1067\n'
    );
  }
  if (process.env.CLAUDECODE) {
    Reflect.deleteProperty(process.env, 'CLAUDECODE');
  }
  // Pattern-matched (not hardcoded) so markers added by future Claude Code
  // versions are handled automatically; auth vars are explicitly preserved.
  const markerKeys = Object.keys(process.env).filter(
    (key) => key.startsWith('CLAUDE_CODE_') && !CLAUDE_CODE_AUTH_VARS.has(key)
  );
  for (const key of markerKeys) {
    Reflect.deleteProperty(process.env, key);
  }
  // Strip debugger vars that crash Claude Code subprocesses.
  // See: https://github.com/anthropics/claude-code/issues/4619
  for (const key of ['NODE_OPTIONS', 'VSCODE_INSPECTOR_OPTIONS']) {
    Reflect.deleteProperty(process.env, key);
  }
}

View file

@ -19,7 +19,7 @@ export interface UpdateCheckResult {
}
const CACHE_FILE = 'update-check.json';
const STALENESS_MS = 24 * 60 * 60 * 1000; // 24 hours
const STALENESS_MS = 60 * 60 * 1000; // 1 hour
const FETCH_TIMEOUT_MS = 3000; // 3 seconds
const GITHUB_API_URL = 'https://api.github.com/repos/coleam00/Archon/releases/latest';

View file

@ -1,6 +1,6 @@
{
"name": "@archon/server",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"main": "./src/index.ts",
"scripts": {

View file

@ -35,6 +35,7 @@ export function mapWorkflowEvent(event: WorkflowEmitterEvent): string | null {
return JSON.stringify({
type: 'workflow_step',
runId: event.runId,
nodeId: event.nodeId,
step: event.iteration - 1,
total: event.maxIterations,
name: `iteration-${String(event.iteration)}`,
@ -47,9 +48,10 @@ export function mapWorkflowEvent(event: WorkflowEmitterEvent): string | null {
return JSON.stringify({
type: 'workflow_step',
runId: event.runId,
nodeId: event.nodeId,
step: event.iteration - 1,
// total: 0 intentionally — maxIterations is not carried by loop_iteration_completed/failed events.
// useWorkflowStatus.ts guards against 0 by preserving the prior wf.maxIterations value.
// workflow-store.ts handleLoopIteration guards against 0 by preserving the prior wf.maxIterations value.
total: 0,
name: `iteration-${String(event.iteration)}`,
status: 'completed',
@ -62,9 +64,10 @@ export function mapWorkflowEvent(event: WorkflowEmitterEvent): string | null {
return JSON.stringify({
type: 'workflow_step',
runId: event.runId,
nodeId: event.nodeId,
step: event.iteration - 1,
// total: 0 intentionally — maxIterations is not carried by loop_iteration_completed/failed events.
// useWorkflowStatus.ts guards against 0 by preserving the prior wf.maxIterations value.
// workflow-store.ts handleLoopIteration guards against 0 by preserving the prior wf.maxIterations value.
total: 0,
name: `iteration-${String(event.iteration)}`,
status: 'failed',

View file

@ -3,12 +3,13 @@
* Multi-platform AI coding assistant (Telegram, Discord, Slack, GitHub, Gitea)
*/
// Load environment variables FIRST — before any application imports.
//
// Credential safety: target repo `.env` keys (like CLAUDE_API_KEY) that Bun
// auto-loads from CWD cannot leak into AI subprocesses because
// SUBPROCESS_ENV_ALLOWLIST blocks them. The env-leak gate provides a second
// layer by scanning target repos before spawning. No CWD stripping needed.
// Strip CWD .env keys FIRST — before any application imports read process.env.
// Bun auto-loads .env/.env.local/.env.development/.env.production from CWD;
// when `bun run dev:server` is run from inside a target repo those keys leak
// into the server process. stripCwdEnv() removes them before ~/.archon/.env loads.
import '@archon/paths/strip-cwd-env-boot';
// Load environment variables — after CWD stripping, before application imports.
import { config } from 'dotenv';
import { resolve } from 'path';
import { existsSync } from 'fs';
@ -39,6 +40,9 @@ if (existsSync(globalEnvPath)) {
}
}
// CLAUDECODE=1 warning is emitted inside stripCwdEnv() (boot import above)
// BEFORE the marker is deleted from process.env. No duplicate warning here.
// Smart default: use Claude Code's built-in OAuth if no explicit credentials
if (
!process.env.CLAUDE_API_KEY &&

View file

@ -43,6 +43,7 @@ import {
isDocker,
checkForUpdate,
BUNDLED_IS_BINARY,
BUNDLED_VERSION,
} from '@archon/paths';
import { discoverWorkflowsWithConfig } from '@archon/workflows/workflow-discovery';
import { parseWorkflow } from '@archon/workflows/loader';
@ -122,18 +123,21 @@ import {
codebaseEnvironmentsResponseSchema,
} from './schemas/config.schemas';
// Read app version once at module load (root package.json is 4 levels up from src/routes/)
// Read app version: use build-time constant in binary, package.json in dev
let appVersion = 'unknown';
try {
const pkgContent = readFileSync(join(import.meta.dir, '../../../../package.json'), 'utf-8');
const pkg = JSON.parse(pkgContent) as { version?: string };
appVersion = pkg.version ?? 'unknown';
} catch (err) {
// package.json not found (binary build or unusual install)
getLog().debug(
{ err, path: join(import.meta.dir, '../../../../package.json') },
'api.version_read_failed'
);
if (BUNDLED_IS_BINARY) {
appVersion = BUNDLED_VERSION;
} else {
try {
const pkgContent = readFileSync(join(import.meta.dir, '../../../../package.json'), 'utf-8');
const pkg = JSON.parse(pkgContent) as { version?: string };
appVersion = pkg.version ?? 'unknown';
} catch (err) {
getLog().debug(
{ err, path: join(import.meta.dir, '../../../../package.json') },
'api.version_read_failed'
);
}
}
type WorkflowSource = 'project' | 'bundled';
@ -2469,27 +2473,22 @@ export function registerApiRoutes(
return apiError(c, 500, 'Failed to look up workflow run');
}
if (!run?.working_path) {
if (!run) {
return apiError(c, 404, 'Workflow run not found');
}
// Derive owner/repo from working_path (must be under ~/.archon/workspaces/owner/repo/...)
const normalizedWorkspacesPath = normalize(getArchonWorkspacesPath());
const normalizedWorkingPath = normalize(run.working_path);
if (!normalizedWorkingPath.startsWith(normalizedWorkspacesPath + sep)) {
getLog().error(
{ runId, workingPath: run.working_path },
'artifacts.working_path_outside_workspaces'
);
return apiError(c, 404, 'Artifact not available: working path not in workspaces');
// Derive owner/repo from codebase name (format: "owner/repo")
const codebase = run.codebase_id ? await codebaseDb.getCodebase(run.codebase_id) : null;
if (!codebase?.name) {
getLog().error({ runId, codebaseId: run.codebase_id }, 'artifacts.codebase_lookup_failed');
return apiError(c, 404, 'Artifact not available: codebase not found');
}
const relative = normalizedWorkingPath.substring(normalizedWorkspacesPath.length + 1);
const parts = relative.split(sep).filter(p => p.length > 0);
if (parts.length < 2) {
getLog().error({ runId, workingPath: run.working_path }, 'artifacts.owner_repo_parse_failed');
const nameParts = codebase.name.split('/');
if (nameParts.length < 2) {
getLog().error({ runId, codebaseName: codebase.name }, 'artifacts.owner_repo_parse_failed');
return apiError(c, 404, 'Artifact not available: could not determine owner/repo');
}
const [owner, repo] = parts;
const [owner, repo] = nameParts;
const artifactDir = getRunArtifactsPath(owner, repo, runId);
const filePath = join(artifactDir, filename);

View file

@ -1,6 +1,6 @@
{
"name": "@archon/web",
"version": "0.3.5",
"version": "0.3.6",
"private": true,
"type": "module",
"scripts": {

View file

@ -1,72 +1,124 @@
import { memo, useState } from 'react';
import { Copy, Check } from 'lucide-react';
import ReactMarkdown from 'react-markdown';
import { memo, useMemo, useState } from 'react';
import { Copy, Check, Paperclip } from 'lucide-react';
import ReactMarkdown, { type Components } from 'react-markdown';
import rehypeHighlight from 'rehype-highlight';
import remarkBreaks from 'remark-breaks';
import remarkGfm from 'remark-gfm';
import { Paperclip } from 'lucide-react';
import type { ChatMessage, FileAttachment } from '@/lib/types';
import { cn } from '@/lib/utils';
import { ArtifactViewerModal } from '@/components/workflows/ArtifactViewerModal';
// Hoisted to module scope to prevent new references on every render
const REMARK_PLUGINS = [remarkGfm, remarkBreaks];
const REHYPE_PLUGINS = [rehypeHighlight];
const MARKDOWN_COMPONENTS = {
pre: ({ children, ...props }: React.ComponentPropsWithoutRef<'pre'>): React.ReactElement => (
<pre
className="overflow-x-auto rounded-lg border border-border bg-surface p-4 font-mono text-sm"
{...props}
>
{children}
</pre>
),
code: ({
children,
className,
...props
}: React.ComponentPropsWithoutRef<'code'> & { className?: string }): React.ReactElement => {
const isBlock = className?.startsWith('language-') || className?.startsWith('hljs');
if (isBlock) {
return (
<code className={cn(className, 'font-mono')} {...props}>
{children}
</code>
);
}
return (
<code
className="rounded bg-background px-1.5 py-0.5 font-mono text-sm text-accent-bright"
// Matches artifact paths (forward- and back-slash safe); groups: [1] runId, [2] filename
const ARTIFACT_PATH_RE = /artifacts[/\\]runs[/\\]([a-fA-F0-9-]+)[/\\](.+)/;

/**
 * Parse an artifact reference out of arbitrary text.
 *
 * @param text - Any string that may contain an `artifacts/runs/<runId>/<file>` path.
 * @returns The run id plus a forward-slash-normalized filename, or `null` when the
 *          text has no artifact path or the filename attempts `..` traversal.
 */
function extractArtifactInfo(text: string): { runId: string; filename: string } | null {
  const match = ARTIFACT_PATH_RE.exec(text);
  if (match === null) return null;
  // Normalize Windows-style separators so downstream splitting/encoding is uniform.
  const normalized = match[2].replace(/\\/g, '/');
  // Reject any path-traversal segment outright rather than trying to sanitize it.
  if (normalized.split('/').includes('..')) return null;
  return { runId: match[1], filename: normalized };
}
function makeMarkdownComponents(
onArtifactClick: (runId: string, filename: string) => void
): Components {
return {
pre: ({ children, ...props }: React.ComponentPropsWithoutRef<'pre'>): React.ReactElement => (
<pre
className="overflow-x-auto rounded-lg border border-border bg-surface p-4 font-mono text-sm"
{...props}
>
{children}
</code>
);
},
table: ({ children, ...props }: React.ComponentPropsWithoutRef<'table'>): React.ReactElement => (
<div className="overflow-x-auto">
<table {...props}>{children}</table>
</div>
),
blockquote: ({
children,
...props
}: React.ComponentPropsWithoutRef<'blockquote'>): React.ReactElement => (
<blockquote className="border-l-2 border-primary pl-4 text-text-secondary" {...props}>
{children}
</blockquote>
),
a: ({ children, ...props }: React.ComponentPropsWithoutRef<'a'>): React.ReactElement => (
<a
className="text-primary underline decoration-primary/40 hover:decoration-primary"
target="_blank"
rel="noopener noreferrer"
{...props}
>
{children}
</a>
),
};
</pre>
),
code: ({
children,
className,
...props
}: React.ComponentPropsWithoutRef<'code'> & { className?: string }): React.ReactElement => {
const isBlock = className?.startsWith('language-') || className?.startsWith('hljs');
if (isBlock) {
return (
<code className={cn(className, 'font-mono')} {...props}>
{children}
</code>
);
}
if (typeof children === 'string') {
const artifact = extractArtifactInfo(children);
if (artifact) {
const { runId, filename } = artifact;
const displayName = filename.split('/').pop() ?? filename;
if (filename.endsWith('.md')) {
return (
<button
type="button"
className="cursor-pointer rounded bg-background px-1.5 py-0.5 font-mono text-sm text-accent-bright hover:text-primary transition-colors"
onClick={() => {
onArtifactClick(runId, filename);
}}
>
{displayName}
</button>
);
}
const encodedFilename = filename.split('/').map(encodeURIComponent).join('/');
return (
<a
href={`/api/artifacts/${encodeURIComponent(runId)}/${encodedFilename}`}
target="_blank"
rel="noopener noreferrer"
className="rounded bg-background px-1.5 py-0.5 font-mono text-sm text-accent-bright underline decoration-accent-bright/40 hover:decoration-accent-bright"
>
{displayName}
</a>
);
}
}
return (
<code
className="rounded bg-background px-1.5 py-0.5 font-mono text-sm text-accent-bright"
{...props}
>
{children}
</code>
);
},
table: ({
children,
...props
}: React.ComponentPropsWithoutRef<'table'>): React.ReactElement => (
<div className="overflow-x-auto">
<table {...props}>{children}</table>
</div>
),
blockquote: ({
children,
...props
}: React.ComponentPropsWithoutRef<'blockquote'>): React.ReactElement => (
<blockquote className="border-l-2 border-primary pl-4 text-text-secondary" {...props}>
{children}
</blockquote>
),
a: ({ children, ...props }: React.ComponentPropsWithoutRef<'a'>): React.ReactElement => (
<a
className="text-primary underline decoration-primary/40 hover:decoration-primary"
target="_blank"
rel="noopener noreferrer"
{...props}
>
{children}
</a>
),
};
}
/** Detect if a string is a complete JSON object/array */
function isJsonString(str: string): boolean {
@ -88,6 +140,17 @@ function MessageBubbleRaw({ message }: MessageBubbleProps): React.ReactElement {
const isUser = message.role === 'user';
const isThinking = message.isStreaming && !message.content;
const [copied, setCopied] = useState(false);
const [artifactViewer, setArtifactViewer] = useState<{ runId: string; filename: string } | null>(
null
);
// setArtifactViewer is a stable React state setter — empty dep array is intentional
const markdownComponents = useMemo(
() =>
makeMarkdownComponents((runId, filename) => {
setArtifactViewer({ runId, filename });
}),
[]
);
const copyMessage = (): void => {
void navigator.clipboard.writeText(message.content).then(() => {
@ -99,97 +162,109 @@ function MessageBubbleRaw({ message }: MessageBubbleProps): React.ReactElement {
};
return (
<div className={cn('group flex w-full', isUser ? 'justify-end' : 'justify-start')}>
<div
className={cn(
'relative',
isUser
? 'max-w-[70%] rounded-2xl rounded-br-sm bg-accent-muted px-4 py-2.5'
: 'max-w-full rounded-lg border-l-2 border-primary/30 pl-4'
)}
>
{isUser ? (
<div className="flex flex-col gap-1.5">
<div className="flex items-start gap-2">
<p className="text-sm text-text-primary whitespace-pre-wrap flex-1">
{message.content}
</p>
<button
onClick={copyMessage}
className="shrink-0 mt-0.5 opacity-0 group-hover:opacity-100 transition-opacity text-text-tertiary hover:text-text-primary"
title="Copy message"
aria-label={copied ? 'Copied' : 'Copy message'}
>
{copied ? (
<Check className="h-3.5 w-3.5 text-success" />
) : (
<Copy className="h-3.5 w-3.5" />
)}
</button>
<>
<div className={cn('group flex w-full', isUser ? 'justify-end' : 'justify-start')}>
<div
className={cn(
'relative',
isUser
? 'max-w-[70%] rounded-2xl rounded-br-sm bg-accent-muted px-4 py-2.5'
: 'max-w-full rounded-lg border-l-2 border-primary/30 pl-4'
)}
>
{isUser ? (
<div className="flex flex-col gap-1.5">
<div className="flex items-start gap-2">
<p className="text-sm text-text-primary whitespace-pre-wrap flex-1">
{message.content}
</p>
<button
onClick={copyMessage}
className="shrink-0 mt-0.5 opacity-0 group-hover:opacity-100 transition-opacity text-text-tertiary hover:text-text-primary"
title="Copy message"
aria-label={copied ? 'Copied' : 'Copy message'}
>
{copied ? (
<Check className="h-3.5 w-3.5 text-success" />
) : (
<Copy className="h-3.5 w-3.5" />
)}
</button>
</div>
{message.files && message.files.length > 0 && (
<div className="flex flex-wrap gap-1">
{message.files.map((file: FileAttachment) => (
<div
key={file.id}
className="flex items-center gap-1 rounded-md bg-black/10 px-1.5 py-0.5 text-xs text-text-secondary"
title={file.name}
>
<Paperclip className="h-3 w-3 shrink-0" />
<span className="max-w-[120px] truncate">{file.name}</span>
</div>
))}
</div>
)}
</div>
{message.files && message.files.length > 0 && (
<div className="flex flex-wrap gap-1">
{message.files.map((file: FileAttachment) => (
<div
key={file.id}
className="flex items-center gap-1 rounded-md bg-black/10 px-1.5 py-0.5 text-xs text-text-secondary"
title={file.name}
>
<Paperclip className="h-3 w-3 shrink-0" />
<span className="max-w-[120px] truncate">{file.name}</span>
</div>
))}
</div>
)}
</div>
) : (
<div className="chat-markdown max-w-none text-sm text-text-primary">
{isThinking && (
<div className="flex items-center gap-1.5 py-1">
<span className="h-1.5 w-1.5 animate-pulse rounded-full bg-text-tertiary" />
<span
className="h-1.5 w-1.5 animate-pulse rounded-full bg-text-tertiary"
style={{ animationDelay: '0.2s' }}
/>
<span
className="h-1.5 w-1.5 animate-pulse rounded-full bg-text-tertiary"
style={{ animationDelay: '0.4s' }}
/>
</div>
)}
{isJsonString(message.content) ? (
<details className="group">
<summary className="cursor-pointer text-sm text-text-secondary hover:text-text-primary">
<span className="text-xs bg-surface-secondary rounded px-1.5 py-0.5 font-mono">
JSON output
</span>
</summary>
<pre className="mt-2 text-xs bg-surface-inset rounded p-3 overflow-x-auto">
{JSON.stringify(JSON.parse(message.content.trim()) as unknown, null, 2)}
</pre>
</details>
) : (
<ReactMarkdown
remarkPlugins={REMARK_PLUGINS}
rehypePlugins={REHYPE_PLUGINS}
components={MARKDOWN_COMPONENTS}
>
{message.content}
</ReactMarkdown>
)}
{message.isStreaming && message.content && (
<span className="inline-block h-4 w-0.5 animate-pulse bg-primary align-text-bottom" />
)}
</div>
)}
) : (
<div className="chat-markdown max-w-none text-sm text-text-primary">
{isThinking && (
<div className="flex items-center gap-1.5 py-1">
<span className="h-1.5 w-1.5 animate-pulse rounded-full bg-text-tertiary" />
<span
className="h-1.5 w-1.5 animate-pulse rounded-full bg-text-tertiary"
style={{ animationDelay: '0.2s' }}
/>
<span
className="h-1.5 w-1.5 animate-pulse rounded-full bg-text-tertiary"
style={{ animationDelay: '0.4s' }}
/>
</div>
)}
{isJsonString(message.content) ? (
<details className="group">
<summary className="cursor-pointer text-sm text-text-secondary hover:text-text-primary">
<span className="text-xs bg-surface-secondary rounded px-1.5 py-0.5 font-mono">
JSON output
</span>
</summary>
<pre className="mt-2 text-xs bg-surface-inset rounded p-3 overflow-x-auto">
{JSON.stringify(JSON.parse(message.content.trim()) as unknown, null, 2)}
</pre>
</details>
) : (
<ReactMarkdown
remarkPlugins={REMARK_PLUGINS}
rehypePlugins={REHYPE_PLUGINS}
components={markdownComponents}
>
{message.content}
</ReactMarkdown>
)}
{message.isStreaming && message.content && (
<span className="inline-block h-4 w-0.5 animate-pulse bg-primary align-text-bottom" />
)}
</div>
)}
{!isThinking && (
<div className="mt-0.5 text-[11px] text-text-tertiary">
{new Date(message.timestamp).toLocaleTimeString()}
</div>
)}
{!isThinking && (
<div className="mt-0.5 text-[11px] text-text-tertiary">
{new Date(message.timestamp).toLocaleTimeString()}
</div>
)}
</div>
</div>
</div>
{artifactViewer && (
<ArtifactViewerModal
open={true}
onOpenChange={() => {
setArtifactViewer(null);
}}
runId={artifactViewer.runId}
filename={artifactViewer.filename}
/>
)}
</>
);
}

View file

@ -1,29 +1,96 @@
import { memo, useEffect, useRef, useState } from 'react';
import { memo, useEffect, useMemo, useRef, useState } from 'react';
import { useNavigate } from 'react-router';
import ReactMarkdown from 'react-markdown';
import ReactMarkdown, { type Components } from 'react-markdown';
import remarkGfm from 'remark-gfm';
import { useQuery } from '@tanstack/react-query';
import { ArrowDown, Sparkles, ArrowRight, MessageSquare } from 'lucide-react';
import { Button } from '@/components/ui/button';
import { MessageBubble } from './MessageBubble';
import { ToolCallCard } from './ToolCallCard';
import { ErrorCard } from './ErrorCard';
import { WorkflowProgressCard } from './WorkflowProgressCard';
import { ArtifactViewerModal } from '@/components/workflows/ArtifactViewerModal';
import { useAutoScroll } from '@/hooks/useAutoScroll';
import type { ChatMessage } from '@/lib/types';
import { useWorkflowStore } from '@/stores/workflow-store';
import { getWorkflowRun } from '@/lib/api';
import { StatusIcon } from '@/components/workflows/StatusIcon';
import { ArtifactSummary } from '@/components/workflows/ArtifactSummary';
import { formatDurationMs, ensureUtc } from '@/lib/format';
import type { ChatMessage, WorkflowArtifact, ArtifactType } from '@/lib/types';
// Hoisted to module scope to prevent new references on every render
const WORKFLOW_RESULT_MARKDOWN_COMPONENTS = {
a: ({ children, ...props }: React.ComponentPropsWithoutRef<'a'>): React.ReactElement => (
<a
className="text-primary underline decoration-primary/40 hover:decoration-primary"
target="_blank"
rel="noopener noreferrer"
{...props}
>
{children}
</a>
),
};
// Matches artifact paths (forward- and back-slash safe); groups: [1] runId, [2] filename
const ARTIFACT_PATH_RE = /artifacts[/\\]runs[/\\]([a-fA-F0-9-]+)[/\\](.+)/;

/** Extract `{ runId, filename }` from text containing an artifact path; `null` if absent or unsafe. */
function extractArtifactInfo(text: string): { runId: string; filename: string } | null {
  const hit = ARTIFACT_PATH_RE.exec(text);
  if (!hit) return null;
  // Unify separators to '/' before the traversal check and any later URL encoding.
  const filename = hit[2].replace(/\\/g, '/');
  for (const segment of filename.split('/')) {
    // A lone '..' segment would escape the artifact directory — refuse the whole match.
    if (segment === '..') return null;
  }
  return { runId: hit[1], filename };
}
/**
 * Build the react-markdown `components` override map for workflow result cards.
 *
 * Two customizations:
 *  - `a`: external links open in a new tab with safe rel attributes.
 *  - `code` (inline only): when the code text contains an artifact path
 *    (`artifacts/runs/<runId>/<file>`), render it as a link — `.md` files open
 *    in the in-app viewer via `onArtifactClick`, everything else downloads/opens
 *    via the `/api/artifacts/...` endpoint in a new tab.
 *
 * @param onArtifactClick - Invoked with (runId, filename) when a `.md` artifact link is clicked.
 * @returns A `Components` map to pass to `<ReactMarkdown components={...}>`.
 */
function makeResultMarkdownComponents(
  onArtifactClick: (runId: string, filename: string) => void
): Components {
  return {
    a: ({ children, ...props }: React.ComponentPropsWithoutRef<'a'>): React.ReactElement => (
      <a
        className="text-primary underline decoration-primary/40 hover:decoration-primary"
        target="_blank"
        rel="noopener noreferrer"
        {...props}
      >
        {children}
      </a>
    ),
    code: ({
      children,
      className,
      ...props
    }: React.ComponentPropsWithoutRef<'code'> & { className?: string }): React.ReactElement => {
      // Fenced code blocks get a `language-`/`hljs` class from rehype-highlight;
      // leave those untouched — artifact linkification applies to inline code only.
      const isBlock = className?.startsWith('language-') || className?.startsWith('hljs');
      if (isBlock) {
        return (
          <code className={className} {...props}>
            {children}
          </code>
        );
      }
      if (typeof children === 'string') {
        const artifact = extractArtifactInfo(children);
        if (artifact) {
          const { runId, filename } = artifact;
          // Show only the basename; the full path stays in the href.
          const displayName = filename.split('/').pop() ?? filename;
          // Encode each path segment separately so '/' separators survive encoding.
          const encodedFilename = filename.split('/').map(encodeURIComponent).join('/');
          const artifactHref = `/api/artifacts/${encodeURIComponent(runId)}/${encodedFilename}`;
          return (
            <a
              href={artifactHref}
              onClick={
                filename.endsWith('.md')
                  ? (e: React.MouseEvent): void => {
                      // .md artifacts open in the in-app modal instead of navigating.
                      e.preventDefault();
                      onArtifactClick(runId, filename);
                    }
                  : undefined
              }
              target={filename.endsWith('.md') ? undefined : '_blank'}
              rel={filename.endsWith('.md') ? undefined : 'noopener noreferrer'}
              className="!text-accent-bright hover:!text-primary font-mono font-medium underline decoration-accent-bright/40 hover:decoration-accent-bright"
            >
              {displayName}
            </a>
          );
        }
      }
      return (
        <code className="rounded bg-surface-elevated px-1 py-0.5 font-mono text-[0.9em]" {...props}>
          {children}
        </code>
      );
    },
  };
}
function WorkflowResultCard({
workflowName,
@ -36,53 +103,164 @@ function WorkflowResultCard({
}): React.ReactElement {
const navigate = useNavigate();
const [expanded, setExpanded] = useState(false);
const [artifactViewer, setArtifactViewer] = useState<{
runId: string;
filename: string;
} | null>(null);
// setArtifactViewer is a stable React state setter — empty dep array is intentional
const mdComponents = useMemo(
() =>
makeResultMarkdownComponents((aRunId, filename) => {
setArtifactViewer({ runId: aRunId, filename });
}),
[]
);
// Zustand live state (populated if user had the page open during execution)
const liveState = useWorkflowStore(state => state.workflows.get(runId));
// One-time API fetch: staleTime: Infinity because a terminal run record is immutable —
// status, timestamps, and events do not change once completed/failed/cancelled.
const { data: runData, isError } = useQuery({
queryKey: ['workflowRun', runId],
queryFn: () => getWorkflowRun(runId),
staleTime: Infinity,
});
// Merge: prefer live state when available
const status = liveState?.status ?? runData?.run.status ?? 'completed';
const dagNodes = liveState?.dagNodes ?? [];
const storeArtifacts = liveState?.artifacts ?? [];
const startedAt =
liveState?.startedAt ??
(runData?.run.started_at ? new Date(ensureUtc(runData.run.started_at)).getTime() : null);
const completedAt =
liveState?.completedAt ??
(runData?.run.completed_at ? new Date(ensureUtc(runData.run.completed_at)).getTime() : null);
const duration = startedAt != null && completedAt != null ? completedAt - startedAt : null;
// Node counts: prefer live dagNodes (exact), fall back to events (approximation —
// totalCount is nodes that reached a terminal state, not the workflow's full node count).
let completedCount: number;
let totalCount: number;
if (dagNodes.length > 0) {
completedCount = dagNodes.filter(n => n.status === 'completed').length;
// Only count terminal nodes (same semantics as events fallback path)
totalCount = dagNodes.filter(
n => n.status === 'completed' || n.status === 'failed' || n.status === 'skipped'
).length;
} else {
const events = runData?.events ?? [];
const terminalEvents = events.filter(
e =>
e.event_type === 'node_completed' ||
e.event_type === 'node_failed' ||
e.event_type === 'node_skipped'
);
completedCount = events.filter(e => e.event_type === 'node_completed').length;
totalCount = terminalEvents.length;
}
// Artifacts: prefer live store, fall back to events
const eventArtifacts: WorkflowArtifact[] = (runData?.events ?? [])
.filter(e => e.event_type === 'workflow_artifact')
.map(e => {
const d = e.data;
return {
type: (typeof d.artifactType === 'string'
? d.artifactType
: 'file_created') as ArtifactType,
label: typeof d.label === 'string' ? d.label : '',
url: typeof d.url === 'string' ? d.url : undefined,
path: typeof d.path === 'string' ? d.path : undefined,
};
});
const artifacts = storeArtifacts.length > 0 ? storeArtifacts : eventArtifacts;
// If API fetch failed and no live state, show degraded card with just content + link
const fetchFailed = isError && !liveState;
// Status-aware header title
const headerTitle =
status === 'failed'
? 'Workflow failed'
: status === 'cancelled'
? 'Workflow cancelled'
: 'Workflow complete';
// Expand/collapse for text content
const lines = content.split('\n');
const isTruncatable = content.length > 500 || lines.length > 8;
const previewText = lines.slice(0, 8).join('\n').slice(0, 500);
const preview = isTruncatable
? previewText + (previewText.length < content.length ? '...' : '')
: content;
const displayContent = expanded || !isTruncatable ? content : preview;
return (
<div className="rounded-lg border border-border bg-surface overflow-hidden max-w-3xl">
<div className="flex items-center gap-2 px-3 py-2 border-b border-border bg-surface-elevated">
<span className="text-success text-xs shrink-0">&#x2713;</span>
<span className="text-xs font-medium text-text-primary truncate flex-1">
Workflow complete: {workflowName}
</span>
<button
onClick={(): void => {
navigate(`/workflows/runs/${runId}`);
}}
className="text-[10px] text-primary hover:text-accent-bright transition-colors shrink-0"
>
View full logs &rarr;
</button>
</div>
<div className="px-3 py-2">
<div className="chat-markdown text-xs text-text-secondary">
<ReactMarkdown
remarkPlugins={[remarkGfm]}
components={WORKFLOW_RESULT_MARKDOWN_COMPONENTS}
>
{displayContent}
</ReactMarkdown>
</div>
{isTruncatable && (
<>
<div className="rounded-lg border border-border bg-surface overflow-hidden max-w-3xl">
<div className="flex items-center gap-2 px-3 py-2 border-b border-border bg-surface-elevated">
<span className="shrink-0">
<StatusIcon status={fetchFailed ? 'completed' : status} />
</span>
<span className="text-xs font-medium text-text-primary truncate flex-1">
{headerTitle}: {workflowName}
</span>
{!fetchFailed && totalCount > 0 && (
<span className="shrink-0 text-[10px] text-text-secondary">
{completedCount}/{totalCount} nodes
</span>
)}
{!fetchFailed && duration != null && (
<span className="rounded-full bg-surface px-2 py-0.5 text-[10px] text-text-secondary shrink-0">
{formatDurationMs(duration)}
</span>
)}
<button
onClick={(): void => {
setExpanded(!expanded);
navigate(`/workflows/runs/${runId}`);
}}
className="mt-1 text-[10px] text-primary hover:text-accent-bright transition-colors"
className="text-[10px] text-primary hover:text-accent-bright transition-colors shrink-0"
>
{expanded ? 'Show less' : 'Show more'}
View full logs &rarr;
</button>
)}
</div>
<div className="px-3 py-2">
{!fetchFailed && artifacts.length > 0 && (
<div className="mb-2">
<ArtifactSummary artifacts={artifacts} runId={runId} />
</div>
)}
<div className="chat-markdown text-xs text-text-secondary">
<ReactMarkdown remarkPlugins={[remarkGfm]} components={mdComponents}>
{displayContent}
</ReactMarkdown>
</div>
{isTruncatable && (
<button
onClick={(): void => {
setExpanded(!expanded);
}}
className="mt-1 text-[10px] text-primary hover:text-accent-bright transition-colors"
>
{expanded ? 'Show less' : 'Show more'}
</button>
)}
</div>
</div>
</div>
{artifactViewer && (
<ArtifactViewerModal
open={true}
onOpenChange={(): void => {
setArtifactViewer(null);
}}
runId={artifactViewer.runId}
filename={artifactViewer.filename}
/>
)}
</>
);
}

View file

@ -1,3 +1,4 @@
import { useState } from 'react';
import { StatusIcon } from './StatusIcon';
import { formatDurationMs } from '@/lib/format';
import type { DagNodeState } from '@/lib/types';
@ -8,6 +9,84 @@ interface DagNodeProgressProps {
onNodeClick: (nodeId: string) => void;
}
/**
 * One row in the DAG node progress list.
 *
 * Renders the node's status icon, name, optional iteration counter
 * (currentIteration/maxIterations), optional duration, and — for failed or
 * skipped nodes — a truncated error or skip reason. Loop nodes (those with
 * `iterations` entries) get an expand/collapse toggle revealing a per-iteration
 * sub-list with status and duration.
 *
 * @param node - Live state for this DAG node.
 * @param isActive - Whether this node is the currently selected one (highlighted).
 * @param onNodeClick - Called with the node id when the row is clicked.
 */
function DagNodeItem({
  node,
  isActive,
  onNodeClick,
}: {
  node: DagNodeState;
  isActive: boolean;
  onNodeClick: (nodeId: string) => void;
}): React.ReactElement {
  // Expansion is local UI state; it does not affect the store or selection.
  const [expanded, setExpanded] = useState(false);
  // Only loop nodes carry iteration records; plain nodes render no expander.
  const hasIterations = (node.iterations?.length ?? 0) > 0;

  return (
    <div>
      <div
        className={`w-full text-left px-2 py-1.5 rounded transition-colors cursor-pointer ${
          isActive ? 'bg-accent/10 border-l-2 border-accent' : 'hover:bg-surface-hover'
        }`}
        onClick={(): void => {
          onNodeClick(node.nodeId);
        }}
        role="row"
      >
        <div className="flex items-center gap-2 text-sm">
          {hasIterations && (
            <button
              type="button"
              onClick={(e): void => {
                // Keep the expander toggle from also selecting the node row.
                e.stopPropagation();
                setExpanded(prev => !prev);
              }}
              className="text-text-tertiary hover:text-text-secondary shrink-0 text-xs cursor-pointer"
              aria-label={expanded ? 'Collapse iterations' : 'Expand iterations'}
            >
              {expanded ? '\u25BC' : '\u25B6'}
            </button>
          )}
          <StatusIcon status={node.status} />
          <span className="truncate flex-1">{node.name}</span>
          {node.currentIteration !== undefined && node.maxIterations !== undefined && (
            <span className="text-xs text-text-secondary shrink-0">
              {node.currentIteration}/{node.maxIterations}
            </span>
          )}
          {node.duration !== undefined && (
            <span className="text-xs text-text-secondary shrink-0">
              {formatDurationMs(node.duration)}
            </span>
          )}
        </div>
        {node.error && (
          <div className="text-xs text-red-400 mt-0.5 ml-6 truncate" title={node.error}>
            {node.error.slice(0, 80)}
          </div>
        )}
        {node.reason && (
          <div className="text-xs text-text-tertiary mt-0.5 ml-6">
            Skipped: {node.reason.replace(/_/g, ' ')}
          </div>
        )}
      </div>
      {expanded && hasIterations && (
        <div className="ml-6 mt-0.5 space-y-0.5">
          {(node.iterations ?? []).map(iter => (
            <div key={iter.iteration} className="flex items-center gap-2 px-2 py-1 text-xs">
              <StatusIcon status={iter.status} />
              <span className="text-text-secondary flex-1">Iteration {iter.iteration}</span>
              {iter.duration !== undefined && (
                <span className="text-text-tertiary">{formatDurationMs(iter.duration)}</span>
              )}
            </div>
          ))}
        </div>
      )}
    </div>
  );
}
export function DagNodeProgress({
nodes,
activeNodeId,
@ -22,37 +101,12 @@ export function DagNodeProgress({
return (
<div className="space-y-1 p-2">
{nodes.map(node => (
<button
<DagNodeItem
key={node.nodeId}
onClick={(): void => {
onNodeClick(node.nodeId);
}}
className={`w-full text-left px-2 py-1.5 rounded transition-colors ${
node.nodeId === activeNodeId
? 'bg-accent/10 border-l-2 border-accent'
: 'hover:bg-surface-hover'
}`}
>
<div className="flex items-center gap-2 text-sm">
<StatusIcon status={node.status} />
<span className="truncate flex-1">{node.name}</span>
{node.duration !== undefined && (
<span className="text-xs text-text-secondary shrink-0">
{formatDurationMs(node.duration)}
</span>
)}
</div>
{node.error && (
<div className="text-xs text-red-400 mt-0.5 ml-6 truncate" title={node.error}>
{node.error.slice(0, 80)}
</div>
)}
{node.reason && (
<div className="text-xs text-text-tertiary mt-0.5 ml-6">
Skipped: {node.reason.replace(/_/g, ' ')}
</div>
)}
</button>
node={node}
isActive={node.nodeId === activeNodeId}
onNodeClick={onNodeClick}
/>
))}
</div>
);

View file

@ -11,6 +11,8 @@ export interface ExecutionNodeData extends DagNodeData {
duration?: number;
error?: string;
selected?: boolean;
currentIteration?: number;
maxIterations?: number;
}
export type ExecutionFlowNode = Node<ExecutionNodeData>;
@ -27,12 +29,14 @@ const TYPE_COLORS: Record<string, string> = {
command: 'text-purple-400',
prompt: 'text-accent-bright',
bash: 'text-amber-400',
loop: 'text-orange-400',
};
const TYPE_LABELS: Record<string, string> = {
command: 'CMD',
bash: 'BASH',
prompt: 'PROMPT',
loop: 'LOOP',
};
function ExecutionDagNodeRender({ data }: NodeProps<ExecutionFlowNode>): React.ReactElement {
@ -60,6 +64,11 @@ function ExecutionDagNodeRender({ data }: NodeProps<ExecutionFlowNode>): React.R
</span>
)}
</div>
{data.currentIteration !== undefined && data.maxIterations !== undefined && (
<div className="text-[10px] text-text-tertiary mt-0.5">
{data.currentIteration}/{data.maxIterations} iterations
</div>
)}
{data.error && (
<div className="text-[10px] text-error mt-1 truncate" title={data.error}>
{data.error.slice(0, 60)}

View file

@ -89,6 +89,8 @@ export function WorkflowDagViewer({
duration: live?.duration,
error: live?.error,
selected: node.id === selectedNodeId,
currentIteration: live?.currentIteration,
maxIterations: live?.maxIterations,
},
} as ExecutionFlowNode;
});

View file

@ -21,6 +21,7 @@ import type {
WorkflowRunStatus,
DagNodeState,
WorkflowStepStatus,
LoopIterationInfo,
} from '@/lib/types';
import type { WorkflowEventResponse } from '@/lib/api';
@ -133,6 +134,49 @@ export function WorkflowExecution({ runId }: WorkflowExecutionProps): React.Reac
});
}
}
// Second pass: enrich loop nodes with iteration data
for (const e of data.events.filter(ev => ev.event_type.startsWith('loop_iteration_'))) {
const nodeId = e.step_name ?? '';
if (!nodeId) continue;
const existing = nodeMap.get(nodeId);
if (!existing) continue; // No node_started event yet — skip (events ordered in DB)
const iteration = e.data.iteration as number | undefined;
const maxIter = e.data.maxIterations as number | undefined;
if (iteration === undefined) continue;
let iterStatus: LoopIterationInfo['status'];
if (e.event_type === 'loop_iteration_started') {
iterStatus = 'running';
} else if (e.event_type === 'loop_iteration_completed') {
iterStatus = 'completed';
} else {
iterStatus = 'failed';
}
const existingIters: LoopIterationInfo[] = existing.iterations ?? [];
const iterIdx = existingIters.findIndex(it => it.iteration === iteration);
const iterState: LoopIterationInfo = {
iteration,
status: iterStatus,
duration: e.data.duration_ms as number | undefined,
};
const newIters = [...existingIters];
if (iterIdx >= 0) {
newIters[iterIdx] = iterState;
} else {
newIters.push(iterState);
}
nodeMap.set(nodeId, {
...existing,
currentIteration: iteration,
maxIterations: maxIter ?? existing.maxIterations,
iterations: newIters,
});
}
return Array.from(nodeMap.values());
})(),
artifacts: data.events

View file

@ -388,10 +388,31 @@ export function WorkflowLogs({
filteredDbMessages = dbMessages;
}
// Collect DB text content for dedup against SSE text messages.
// During live execution, the same text (e.g., "🚀 Starting workflow...") can appear
// in both DB (from REST fetch on mount) and SSE (from event buffer replay).
// Without dedup, the text shows up twice in the message list.
const dbTextContents = new Set<string>();
for (const dm of filteredDbMessages) {
if (dm.role === 'assistant' && dm.content) {
dbTextContents.add(dm.content);
}
}
// Strip SSE tool calls that already appear in DB messages (completed).
// Also strip SSE text messages that are already in DB (prevents duplicate text).
const dedupedSse: ChatMessage[] = [];
for (const m of sseMessages) {
if (!m.toolCalls?.length) {
// Skip SSE text-only messages whose content already exists in DB.
if (m.content && dbTextContents.has(m.content)) {
continue;
}
// Also skip if DB has a message that starts with the SSE content
// (SSE text was flushed to DB before SSE finished accumulating).
if (m.content && [...dbTextContents].some(dc => dc.startsWith(m.content))) {
continue;
}
if (m.isStreaming || m.content) dedupedSse.push(m);
continue;
}
@ -415,7 +436,32 @@ export function WorkflowLogs({
const onText = useCallback((content: string): void => {
setSseMessages(prev => {
const last = prev[prev.length - 1];
// Workflow status messages (🚀 start, ✅ complete) should be their own message,
// matching ChatInterface's behavior and persistence segmentation. Without this,
// all text concatenates into one giant streaming message, breaking text dedup
// against DB messages (which are stored as separate segments).
const isWorkflowStatus = /^[\u{1F680}\u{2705}]/u.test(content);
if (last?.role === 'assistant' && last.isStreaming) {
const lastIsWorkflowStatus = /^[\u{1F680}\u{2705}]/u.test(last.content);
if ((isWorkflowStatus && last.content) || (lastIsWorkflowStatus && !isWorkflowStatus)) {
// Close the current streaming message and start a new one when:
// 1. Incoming is a workflow status and current has content
// 2. Current is a workflow status and incoming is regular text
return [
...prev.slice(0, -1),
{ ...last, isStreaming: false },
{
id: `msg-${String(Date.now())}`,
role: 'assistant' as const,
content,
timestamp: Date.now(),
isStreaming: true,
toolCalls: [],
},
];
}
return [...prev.slice(0, -1), { ...last, content: last.content + content }];
}
return [

View file

@ -1,6 +1,11 @@
import { useEffect } from 'react';
import { workflowSSEHandlers } from '@/stores/workflow-store';
import type { WorkflowStatusEvent, DagNodeEvent, WorkflowToolActivityEvent } from '@/lib/types';
import type {
WorkflowStatusEvent,
DagNodeEvent,
WorkflowToolActivityEvent,
LoopIterationEvent,
} from '@/lib/types';
/** Connects to the multiplexed dashboard SSE stream and routes events to the Zustand store. */
export function useDashboardSSE(): void {
@ -25,6 +30,9 @@ export function useDashboardSSE(): void {
case 'workflow_tool_activity':
workflowSSEHandlers.onToolActivity(event as WorkflowToolActivityEvent);
break;
case 'workflow_step':
workflowSSEHandlers.onLoopIteration(event as LoopIterationEvent);
break;
// heartbeat — ignore
}
};

View file

@ -2,6 +2,7 @@ import { useEffect, useRef, useState, useCallback } from 'react';
import type {
SSEEvent,
ErrorDisplay,
LoopIterationEvent,
WorkflowStatusEvent,
WorkflowArtifactEvent,
WorkflowDispatchEvent,
@ -37,6 +38,7 @@ interface SSEHandlers {
onWorkflowStatus?: (event: WorkflowStatusEvent) => void;
onWorkflowArtifact?: (event: WorkflowArtifactEvent) => void;
onDagNode?: (event: DagNodeEvent) => void;
onLoopIteration?: (event: LoopIterationEvent) => void;
onWorkflowDispatch?: (event: WorkflowDispatchEvent) => void;
onWorkflowOutputPreview?: (event: WorkflowOutputPreviewEvent) => void;
onWarning?: (message: string) => void;
@ -187,6 +189,9 @@ export function useSSE(
case 'dag_node':
h.onDagNode?.(data);
break;
case 'workflow_step':
h.onLoopIteration?.(data);
break;
case 'workflow_dispatch':
// Flush buffered text before dispatch events to ensure the dispatch
// message (🚀) is committed as an assistant message before

View file

@ -89,6 +89,26 @@ export interface WorkflowStatusEvent extends BaseSSEEvent {
approval?: { nodeId: string; message: string };
}
// Loop iteration info (per-iteration state stored in DagNodeState)
// One entry per loop-node iteration so the UI can render per-iteration progress rows.
export interface LoopIterationInfo {
// Iteration counter reported by the workflow_step event; used as the upsert key in DagNodeState.iterations
iteration: number;
// Lifecycle of this iteration — stays 'running' until a later completed/failed event upserts the entry
status: 'running' | 'completed' | 'failed';
// Elapsed time for the iteration — NOTE(review): presumably milliseconds (bridge emits duration_ms); confirm
duration?: number;
}
// Loop iteration SSE event (emitted as 'workflow_step' by the bridge)
export interface LoopIterationEvent extends BaseSSEEvent {
type: 'workflow_step';
runId: string;
// Absent for non-DAG loops; store handlers skip events without a nodeId
nodeId?: string;
// Zero-based step index within the loop run
step: number;
// Total planned iterations; 0 means "unknown" on completed/failed events (prior value is preserved)
total: number;
name: string;
status: 'running' | 'completed' | 'failed';
// Iteration counter matching LoopIterationInfo.iteration
iteration: number;
// NOTE(review): presumably milliseconds — confirm against the bridge's duration_ms field
duration?: number;
}
// DAG node status (emitted during DAG workflow execution)
export interface DagNodeEvent extends BaseSSEEvent {
type: 'dag_node';
@ -161,6 +181,7 @@ export type SSEEvent =
| HeartbeatEvent
| WorkflowStatusEvent
| DagNodeEvent
| LoopIterationEvent
| WorkflowToolActivityEvent
| WorkflowArtifactEvent
| WorkflowDispatchEvent
@ -226,6 +247,9 @@ export interface DagNodeState {
duration?: number;
error?: string;
reason?: 'when_condition' | 'trigger_rule';
currentIteration?: number;
maxIterations?: number;
iterations?: LoopIterationInfo[];
}
export interface WorkflowArtifact {

View file

@ -4,6 +4,7 @@ import type {
WorkflowStatusEvent,
WorkflowArtifactEvent,
DagNodeEvent,
LoopIterationEvent,
WorkflowState,
} from '@/lib/types';
@ -324,3 +325,186 @@ describe('selectActiveWorkflow / activeWorkflowId', () => {
expect(useWorkflowStore.getState().activeWorkflowId).toBe('a');
});
});
function loopIterationEvent(
  overrides: Partial<LoopIterationEvent> & { runId: string; iteration: number }
): LoopIterationEvent {
  // Baseline 'workflow_step' payload; callers override only what a given test needs.
  const { iteration } = overrides;
  const base: LoopIterationEvent = {
    type: 'workflow_step',
    runId: overrides.runId,
    nodeId: 'loop-node',
    step: iteration - 1,
    total: 5,
    name: `iteration-${String(iteration)}`,
    status: 'running',
    iteration,
    timestamp: 1000,
  };
  // Overrides win, exactly as in the original trailing-spread construction.
  return { ...base, ...overrides };
}
describe('handleLoopIteration', () => {
// Covers the store's workflow_step handling: no-op guards, per-iteration upsert,
// maxIterations preservation, accumulation, and survival across dag_node overwrites.
test('no-ops when event has no nodeId (non-DAG loop)', () => {
useWorkflowStore.getState().handleWorkflowStatus(statusEvent({ runId: 'run-li0' }));
const before = useWorkflowStore.getState().workflows;
useWorkflowStore
.getState()
.handleLoopIteration(
loopIterationEvent({ runId: 'run-li0', iteration: 1, nodeId: undefined })
);
// Map reference must not change — no mutation
expect(useWorkflowStore.getState().workflows).toBe(before);
});
test('no-ops when nodeId not yet in dagNodes', () => {
useWorkflowStore.getState().handleWorkflowStatus(statusEvent({ runId: 'run-li1' }));
useWorkflowStore
.getState()
.handleLoopIteration(
loopIterationEvent({ runId: 'run-li1', iteration: 1, nodeId: 'ghost-node' })
);
// Node was not registered — dagNodes must remain empty
const wf = useWorkflowStore.getState().workflows.get('run-li1')!;
expect(wf.dagNodes).toHaveLength(0);
});
test('appends first iteration to existing node', () => {
useWorkflowStore.getState().handleWorkflowStatus(statusEvent({ runId: 'run-li2' }));
useWorkflowStore
.getState()
.handleDagNode(dagNodeEvent({ runId: 'run-li2', nodeId: 'loop-node', name: 'My Loop' }));
useWorkflowStore.getState().handleLoopIteration(
loopIterationEvent({
runId: 'run-li2',
nodeId: 'loop-node',
iteration: 1,
total: 3,
status: 'running',
})
);
const wf = useWorkflowStore.getState().workflows.get('run-li2')!;
const node = wf.dagNodes.find(n => n.nodeId === 'loop-node')!;
// First event creates the iterations array and sets progress fields
expect(node.iterations).toHaveLength(1);
expect(node.iterations![0]).toEqual({ iteration: 1, status: 'running', duration: undefined });
expect(node.currentIteration).toBe(1);
expect(node.maxIterations).toBe(3);
});
test('updates existing iteration entry (upsert by iteration number)', () => {
useWorkflowStore.getState().handleWorkflowStatus(statusEvent({ runId: 'run-li3' }));
useWorkflowStore
.getState()
.handleDagNode(dagNodeEvent({ runId: 'run-li3', nodeId: 'loop-node', name: 'My Loop' }));
// First: started
useWorkflowStore.getState().handleLoopIteration(
loopIterationEvent({
runId: 'run-li3',
nodeId: 'loop-node',
iteration: 1,
status: 'running',
})
);
// Then: completed with duration
useWorkflowStore.getState().handleLoopIteration(
loopIterationEvent({
runId: 'run-li3',
nodeId: 'loop-node',
iteration: 1,
status: 'completed',
total: 0,
duration: 1500,
})
);
const wf = useWorkflowStore.getState().workflows.get('run-li3')!;
const node = wf.dagNodes.find(n => n.nodeId === 'loop-node')!;
expect(node.iterations).toHaveLength(1); // no duplicate
expect(node.iterations![0].status).toBe('completed');
expect(node.iterations![0].duration).toBe(1500);
});
test('preserves prior maxIterations when total: 0 (completed/failed events)', () => {
useWorkflowStore.getState().handleWorkflowStatus(statusEvent({ runId: 'run-li4' }));
useWorkflowStore
.getState()
.handleDagNode(dagNodeEvent({ runId: 'run-li4', nodeId: 'loop-node', name: 'My Loop' }));
// started with known total
useWorkflowStore.getState().handleLoopIteration(
loopIterationEvent({
runId: 'run-li4',
nodeId: 'loop-node',
iteration: 1,
total: 4,
status: 'running',
})
);
// completed with total: 0 (intentional bridge omission)
useWorkflowStore.getState().handleLoopIteration(
loopIterationEvent({
runId: 'run-li4',
nodeId: 'loop-node',
iteration: 1,
total: 0,
status: 'completed',
})
);
const node = useWorkflowStore
.getState()
.workflows.get('run-li4')!
.dagNodes.find(n => n.nodeId === 'loop-node')!;
expect(node.maxIterations).toBe(4); // preserved, not overwritten to 0
});
test('accumulates multiple distinct iterations', () => {
useWorkflowStore.getState().handleWorkflowStatus(statusEvent({ runId: 'run-li5' }));
useWorkflowStore
.getState()
.handleDagNode(dagNodeEvent({ runId: 'run-li5', nodeId: 'loop-node', name: 'My Loop' }));
for (let i = 1; i <= 3; i++) {
useWorkflowStore.getState().handleLoopIteration(
loopIterationEvent({
runId: 'run-li5',
nodeId: 'loop-node',
iteration: i,
status: 'completed',
})
);
}
const node = useWorkflowStore
.getState()
.workflows.get('run-li5')!
.dagNodes.find(n => n.nodeId === 'loop-node')!;
// Distinct iteration numbers append; currentIteration tracks the latest event
expect(node.iterations).toHaveLength(3);
expect(node.currentIteration).toBe(3);
});
test('preserves iteration data after node_completed dag event overwrites node', () => {
useWorkflowStore.getState().handleWorkflowStatus(statusEvent({ runId: 'run-li6' }));
useWorkflowStore
.getState()
.handleDagNode(dagNodeEvent({ runId: 'run-li6', nodeId: 'loop-node', name: 'My Loop' }));
useWorkflowStore.getState().handleLoopIteration(
loopIterationEvent({
runId: 'run-li6',
nodeId: 'loop-node',
iteration: 1,
total: 2,
status: 'completed',
})
);
// Simulate the loop node completing — handleDagNode must preserve the iteration data
useWorkflowStore.getState().handleDagNode(
dagNodeEvent({
runId: 'run-li6',
nodeId: 'loop-node',
name: 'My Loop',
status: 'completed',
duration: 5000,
})
);
const node = useWorkflowStore
.getState()
.workflows.get('run-li6')!
.dagNodes.find(n => n.nodeId === 'loop-node')!;
expect(node.status).toBe('completed');
expect(node.iterations).toHaveLength(1); // iteration data preserved after node completion
expect(node.maxIterations).toBe(2);
});
});

View file

@ -10,6 +10,8 @@ import type {
WorkflowArtifactEvent,
DagNodeEvent,
WorkflowToolActivityEvent,
LoopIterationEvent,
LoopIterationInfo,
} from '@/lib/types';
interface WorkflowStoreState {
@ -19,6 +21,7 @@ interface WorkflowStoreState {
handleWorkflowStatus: (event: WorkflowStatusEvent) => void;
handleWorkflowArtifact: (event: WorkflowArtifactEvent) => void;
handleDagNode: (event: DagNodeEvent) => void;
handleLoopIteration: (event: LoopIterationEvent) => void;
handleWorkflowToolActivity: (event: WorkflowToolActivityEvent) => void;
hydrateWorkflow: (state: WorkflowState) => void;
}
@ -65,6 +68,7 @@ function invalidateWorkflowQueries(): void {
const keys = [
'workflow-runs',
'workflowRuns',
'workflowRun',
'workflow-runs-status',
'conversations',
'workflowMessages',
@ -244,6 +248,7 @@ export const useWorkflowStore = create<WorkflowStoreState>()(
const existingIdx = dagNodes.findIndex(n => n.nodeId === event.nodeId);
const nodeState: DagNodeState = {
...(existingIdx >= 0 ? dagNodes[existingIdx] : {}), // preserve accumulated iteration state
nodeId: event.nodeId,
name: event.name,
status: event.status,
@ -265,6 +270,42 @@ export const useWorkflowStore = create<WorkflowStoreState>()(
);
},
handleLoopIteration: (event: LoopIterationEvent): void => {
  // Non-DAG loops emit workflow_step without a nodeId — nothing to attach to.
  if (!event.nodeId) return;
  set(
    state =>
      updateWorkflow(state, event.runId, wf => {
        const nodes = [...wf.dagNodes];
        const nodeIdx = nodes.findIndex(n => n.nodeId === event.nodeId);
        // SSE ordering may deliver an iteration before its dag_node event;
        // drop it silently rather than fabricate a node.
        if (nodeIdx < 0) return wf;
        const node = nodes[nodeIdx];
        const entry: LoopIterationInfo = {
          iteration: event.iteration,
          status: event.status,
          duration: event.duration,
        };
        // Upsert keyed by iteration number so a completed event replaces the
        // matching running entry instead of duplicating it.
        const prior: LoopIterationInfo[] = node.iterations ?? [];
        const iterations = prior.some(it => it.iteration === event.iteration)
          ? prior.map(it => (it.iteration === event.iteration ? entry : it))
          : [...prior, entry];
        nodes[nodeIdx] = {
          ...node,
          currentIteration: event.iteration,
          // total: 0 marks "unknown" on completed/failed events — keep the prior value.
          maxIterations: event.total > 0 ? event.total : node.maxIterations,
          iterations,
        };
        return { ...wf, dagNodes: nodes };
      }),
    undefined,
    'workflow/loopIteration'
  );
},
handleWorkflowToolActivity: (event: WorkflowToolActivityEvent): void => {
set(
state =>
@ -316,13 +357,19 @@ export function selectActiveWorkflow(state: WorkflowStoreState): WorkflowState |
// Stable SSE handler object — actions are defined once in create(), so references never change.
// Shared by ChatInterface and WorkflowLogs instead of per-component useShallow selectors.
const { handleWorkflowStatus, handleWorkflowArtifact, handleDagNode, handleWorkflowToolActivity } =
useWorkflowStore.getState();
// Stable SSE handler bundle — store actions are created once inside create(),
// so reading them from getState() here yields references that never change.
// Shared by ChatInterface and WorkflowLogs instead of per-component useShallow selectors.
const storeActions = useWorkflowStore.getState();

export const workflowSSEHandlers = {
  onWorkflowStatus: storeActions.handleWorkflowStatus,
  onWorkflowArtifact: storeActions.handleWorkflowArtifact,
  onDagNode: storeActions.handleDagNode,
  onLoopIteration: storeActions.handleLoopIteration,
  onToolActivity: storeActions.handleWorkflowToolActivity,
} as const;

View file

@ -1,6 +1,6 @@
{
"name": "@archon/workflows",
"version": "0.3.5",
"version": "0.3.6",
"type": "module",
"exports": {
"./schemas/*": "./src/schemas/*.ts",

View file

@ -1915,7 +1915,9 @@ async function executeLoopNode(
if (platform.getStreamingMode() === 'stream') {
const toolMsg = formatToolCall(msg.toolName, msg.toolInput);
if (toolMsg) {
await safeSendMessage(platform, conversationId, toolMsg, msgContext);
await safeSendMessage(platform, conversationId, toolMsg, msgContext, {
category: 'tool_call_formatted',
} as WorkflowMessageMetadata);
}
if (platform.sendStructuredEvent) {
await platform.sendStructuredEvent(conversationId, msg);

View file

@ -26,6 +26,7 @@ export const WORKFLOW_EVENT_TYPES = [
'approval_requested',
'approval_received',
'workflow_cancelled',
'workflow_artifact',
] as const;
export type WorkflowEventType = (typeof WORKFLOW_EVENT_TYPES)[number];