Compare commits

...

336 commits

Author SHA1 Message Date
Iván Ovejero
10dbf32596
feat(core): Scale expression isolate pool to 0 after inactivity (#28472)
Some checks are pending
Build: Benchmark Image / build (push) Waiting to run
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
Util: Sync API Docs / sync-public-api (push) Waiting to run
Co-authored-by: Danny Martini <danny@n8n.io>
2026-04-21 15:20:01 +00:00
RomanDavydchuk
4869e0a463
fix(editor): HTTP request node showing warning about credentials not set when they are set (#28270) 2026-04-21 15:16:08 +00:00
Irénée
3bd7a2847c
feat(core): Make SSO connection settings configurable via env vars (#28714)
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-21 15:14:00 +00:00
Dimitri Lavrenük
9494f41c34
feat: Track computer use connect events (no-changelog) (#28815) 2026-04-21 14:49:48 +00:00
RomanDavydchuk
713c4981b7
fix(editor): Move tooltip for required RMC fields to the right (#28803) 2026-04-21 14:44:45 +00:00
Albert Alises
6db02fe928
fix(MCP Server Trigger Node): Only return error name and message in tool error responses (#28791)
Co-authored-by: Anand Reddy Jonnalagadda <15153801+joan1011@users.noreply.github.com>
2026-04-21 13:43:20 +00:00
Alex Grozav
a88f847708
refactor(editor): Migrate nodeMetadata to workflowDocumentStore (no-changelog) (#28788) 2026-04-21 13:22:52 +00:00
Svetoslav Dekov
7d74c1f04b
fix(editor): Resolve node parameter defaults in Instance AI setup wizard (no-changelog) (#28800)
Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-21 13:04:15 +00:00
Daria
b1ca129496
feat(core): Broadcast workflow updates from MCP tools to open editors (#28709) 2026-04-21 12:26:56 +00:00
Michael Kret
8e49800421
fix: Additional keys in routing nodes (#28758) 2026-04-21 12:24:43 +00:00
Albert Alises
782b2d18b2
fix(ai-builder): Prevent duplicate workflow creation on parallel submits in instance AI (#28793) 2026-04-21 12:21:48 +00:00
Milorad FIlipović
76358a60be
fix(editor): Allow name parameters to be defined by AI (#28763) 2026-04-21 11:52:25 +00:00
Jaakko Husso
86ceb68a05
feat(core): Include workflow names on instance AI confirmations (no-changelog) (#28719)
Co-authored-by: Albert Alises <albert.alises@gmail.com>
2026-04-21 11:24:16 +00:00
Jaakko Husso
2d624a521e
fix(core): Generate title once there's enough user context (#28721) 2026-04-21 10:28:19 +00:00
Matsuuu
ba2c5488c7
Merge tag 'n8n@2.18.0' 2026-04-21 13:32:15 +03:00
Daria
d1c7b31237
fix: Stop persisting client id in session storage to fix duplicate tab problem (no-changelog) (#28769) 2026-04-21 10:02:43 +00:00
Ricardo Espinoza
26ecadcf94
fix(core): Use upsert for MCP OAuth consent to allow re-authorization (#28703) 2026-04-21 09:58:01 +00:00
Svetoslav Dekov
45b5b9e383
fix(editor): Fix instance-ai setup parameter issues not resetting on input (no-changelog) (#28689) 2026-04-21 09:55:29 +00:00
Matsu
cb9882ce9c
ci: Run ci-pr-quality only on n8n team PRs (#28773) 2026-04-21 09:50:16 +00:00
Jaakko Husso
6592ed8047
refactor(core): Move instance AI user settings under actual user settings (no-changelog) (#28706) 2026-04-21 09:47:36 +00:00
Michael Kret
92f1dac835
chore(Microsoft Agent 365 Trigger Node): Change label on toggle to enable Microsoft MCP Servers (#28766) 2026-04-21 09:38:33 +00:00
Vitalii Borovyk
a88ee76553
fix(MongoDB Chat Memory Node): Add connection pool limit (#28042)
Co-authored-by: Eugene <eugene@n8n.io>
2026-04-21 09:21:40 +00:00
Suguru Inoue
b444a95e11
refactor(editor): Migrate workflow object usages (#28534) 2026-04-21 09:17:45 +00:00
Declan Carroll
5e8002ab28
test: Refactor test workflow initialization (#28772) 2026-04-21 09:15:26 +00:00
Guillaume Jacquart
c012b52ac2
feat(core): Bootstrap encryption key set from environment (#28716)
Co-authored-by: Claude Opus 4.7 <noreply@anthropic.com>
2026-04-21 09:13:11 +00:00
Garrit Franke
fc5424477d
feat(core): Add require-node-api-error ESLint rule for community nodes (no-changelog) (#28454) 2026-04-21 09:12:51 +00:00
Jaakko Husso
cb1244c041
refactor: Use napi-rs/image instead of sharp for screenshots (#28586) 2026-04-21 09:12:14 +00:00
n8n-assistant[bot]
6336f0a447
🚀 Release 2.18.0 (#28768)
Co-authored-by: Matsuuu <16068444+Matsuuu@users.noreply.github.com>
2026-04-21 08:58:38 +00:00
Albert Alises
9ea2ef1840
fix(core): Hide pre-resolved setup requests from Instance AI wizard (#28731) 2026-04-21 08:34:59 +00:00
Milorad FIlipović
5e111975d4
fix(editor): Reset remote values on credentials change (#26282)
Co-authored-by: Elias Meire <elias@meire.dev>
Co-authored-by: Nikhil Kuriakose <nikhilkuria@gmail.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-21 08:21:06 +00:00
José Braulio González Valido
87163163e6
fix(core): Add required field validation to MCP OAuth client registration (#28490)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-21 08:04:54 +00:00
Matsu
95c155859e
ci: Ensure stable npm packages are tagged as latest after release (#28755) 2026-04-21 08:04:21 +00:00
Ricardo Espinoza
575c34eae1
feat(core): Track workflow action source for external API and MCP requests (#28483) 2026-04-21 08:00:04 +00:00
Matsu
0d98d29ae4
ci: Only post QA metrics on n8n-io/n8n monorepo (#28692)
Some checks are pending
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
2026-04-21 04:59:06 +00:00
Ali Elkhateeb
9a65549575
feat(API): Add missing credential endpoints (GET by ID and test) (#28519)
Some checks are pending
Build: Benchmark Image / build (push) Waiting to run
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
Util: Sync API Docs / sync-public-api (push) Waiting to run
2026-04-20 20:56:51 +00:00
Dawid Myslak
dd6c28c6d1
fix(Alibaba Cloud Chat Model Node): Add credential-level url field for AI gateway compatibility (#28697) 2026-04-20 19:40:12 +00:00
Joco-95
d14f2546a1
feat: Removes computer use setup logic on Assistant AI opt-in flow and minor UX changes (no-changelog) (#28679) 2026-04-20 18:25:09 +00:00
RomanDavydchuk
d179f667c0
fix(HubSpot Trigger Node): Add missing property selectors (#28595) 2026-04-20 18:05:37 +00:00
Mutasem Aldmour
5b376cb12d
feat(editor): Enable workflow execution from instance AI preview canvas (#28412)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-20 17:57:03 +00:00
Jaakko Husso
6cfa0ed559
feat(core): Rename instance AI to AI Assistant in the UI texts (no-changelog) (#28732) 2026-04-20 17:49:04 +00:00
Luca Mattiazzi
107c48f65c
fix(core): Ensure single zod instance across workspace packages (#28604) 2026-04-20 17:02:24 +00:00
Svetoslav Dekov
1b13d325f1
fix(editor): Show auth type selector in Instance AI workflow setup (#28707)
Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-20 16:50:21 +00:00
Albert Alises
db83a95522
fix(editor): Gate Instance AI input while setup wizard is open (#28685) 2026-04-20 16:32:14 +00:00
Joco-95
b42c890c5e
chore(core): Switch PostHog environment variables to EU region (#27115) 2026-04-20 16:21:37 +00:00
Albert Alises
3b15e470b5
fix(editor): Advance wizard step on Continue instead of applying setup (#28698) 2026-04-20 16:11:50 +00:00
Marc Littlemore
bef528cb21
fix: Restore OpenAPI schema version (no-changelog) (#28713) 2026-04-20 15:51:44 +00:00
Matsu
0b8fae6c5a
ci: Only run visual storybook on public monorepo (#28699)
Some checks are pending
Build: Benchmark Image / build (push) Waiting to run
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
Util: Sync API Docs / sync-public-api (push) Waiting to run
2026-04-20 14:25:42 +00:00
José Braulio González Valido
560f300716
test: Add Instance AI workflow evals CI pipeline (no-changelog) (#28366)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-20 14:15:41 +00:00
Jaakko Husso
73d93d4edf
fix(core): Better titles on instance AI, use common title logic on n8n agents sdk (no-changelog) (#28686) 2026-04-20 13:27:33 +00:00
Matsu
9f71e12e5f
chore: Migrate @n8n/json-schema-to-zod from Jest to Vitest (#28411)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-20 13:07:50 +00:00
Stephen Wright
9dd3e59acb
feat(core): Add KeyManagerService for encryption key lifecycle management (#28533) 2026-04-20 12:39:46 +00:00
Albert Alises
657bdf136f
fix(core): Filter stale credentials from setup wizard requests (#28478) 2026-04-20 12:37:51 +00:00
Bernhard Wittmann
2d0b231e31
fix(IMAP Node): Fix out-of-memory crash after ECONNRESET on reconnect (#28290) 2026-04-20 12:27:24 +00:00
Raúl Gómez Morales
c17f5b61fe
fix(editor): Prevent chat window jump when hovering prompt suggestions (no-changelog) (#28683) 2026-04-20 12:19:13 +00:00
Alex Grozav
db1eb91940
refactor(editor): Migrate workflow name consumers to workflowDocumentStore (#28682) 2026-04-20 12:17:08 +00:00
Matsu
a3292b738a
chore: Migrate @n8n/permissions to Vitest (#28408) 2026-04-20 12:14:53 +00:00
Declan Carroll
82ee4a9fce
ci: Strengthen Playwright test resilience (#28687) 2026-04-20 12:06:41 +00:00
Matsu
d608889e88
ci: Allow only bundles to 1.x (#28401) 2026-04-20 11:55:35 +00:00
Matsu
a39618a889
chore: Migrate @n8n/client-oauth2 to vitest (#28404) 2026-04-20 11:54:51 +00:00
oleg
bfee79dc21
fix(core): Fix instance-ai planner and prompts after tool consolidation (no-changelog) (#28684) 2026-04-20 11:29:49 +00:00
Jaakko Husso
3e724303c5
fix(core): Prevent nodes tool crash on flattened required fields (#28670) 2026-04-20 10:48:39 +00:00
RomanDavydchuk
19aadf19f7
fix(ClickUp Node): Unclear error message when using OAuth credentials (#28584)
Co-authored-by: Dawid Myslak <dawid.myslak@gmail.com>
2026-04-20 10:33:23 +00:00
Albert Alises
7b3696f3f7
fix(ai-builder): Scope artifacts panel to resources produced in-thread (#28678) 2026-04-20 10:11:46 +00:00
Albert Alises
35f9bed4de
fix(core): Cascade-cancel dependent planned tasks when a parent task fails (#28656) 2026-04-20 09:50:33 +00:00
Garrit Franke
b1c52dad58
test(core): Add credential isolation tests for same-type credentials (no-changelog) (#28308)
Co-authored-by: Jon <jonathan.bennetts@gmail.com>
2026-04-20 09:04:06 +00:00
Alex Grozav
d037fd4647
refactor(editor): Normalize sharedWithProjects field in workflow document store (no-changelog) (#28078) 2026-04-20 08:50:56 +00:00
Milorad FIlipović
0fc2d90b52
fix(core): Report success from mcp tool if workflow is created in DB (no-changelog) (#28529) 2026-04-20 08:48:32 +00:00
Matsu
b2fdcf16c0
ci: Update minor and patch release schedules (#28511) 2026-04-20 08:47:34 +00:00
RomanDavydchuk
73659cb3e7
fix(Google Gemini Node): Determine the file extension from MIME type for image and video operations (#28616) 2026-04-20 08:16:51 +00:00
Michael Kret
4070930e4c
fix(OpenAI Node): Replace hardcoded models with RLC (#28226) 2026-04-20 08:13:47 +00:00
Rob Hough
e848230947
fix(editor): Improve disabled Google sign-in button styling and tooltip alignment (#28536) 2026-04-20 07:31:15 +00:00
James Campbell
7094395cef
fix(Google Cloud Firestore Node): Fix empty array serialization in jsonToDocument (#28213)
Some checks failed
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Has been cancelled
CI: Master (Build, Test, Lint) / Unit tests (push) Has been cancelled
CI: Master (Build, Test, Lint) / Lint (push) Has been cancelled
CI: Master (Build, Test, Lint) / Performance (push) Has been cancelled
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Has been cancelled
Co-authored-by: RomanDavydchuk <roman.davydchuk@n8n.io>
2026-04-18 13:48:45 +00:00
Jon
f1dab3e295
feat(Slack Node): Add app_home_opened as a dedicated trigger event (#28626)
Some checks are pending
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
Co-authored-by: Roman Davydchuk <roman.davydchuk@n8n.io>
2026-04-17 19:13:53 +00:00
RomanDavydchuk
ff950e5840
fix: Link to n8n website broken in n8n forms (#28627) 2026-04-17 17:09:14 +00:00
Jon
77d27bc826
fix(core): Guard against undefined config properties in credential overwrites (#28573)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-17 16:42:34 +00:00
Rahul
25e07cab5a
fix(LinkedIn Node): Update LinkedIn API version in request headers (#28564)
Co-authored-by: Jon <jonathan.bennetts@gmail.com>
2026-04-17 14:33:10 +00:00
robrown-hubspot
8c3e692174
fix(HubSpot Node): Rename HubSpot "App Token" auth to "Service Key" (#28479)
Co-authored-by: Jon <jonathan.bennetts@gmail.com>
2026-04-17 14:20:54 +00:00
Declan Carroll
ef4bfbfe94
ci: Skip non isolated tests (#28615)
Some checks are pending
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
2026-04-17 13:03:10 +00:00
Jon
51bc71e897
fix(editor): Restore WASM file paths for cURL import in HTTP Request node (#28610)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
Co-authored-by: Matsuuu <huhta.matias@gmail.com>
2026-04-17 12:41:56 +00:00
Eugene
3b248eedc2
feat(Linear Trigger Node): Add signing secret validation (#28522)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-17 12:33:01 +00:00
Csaba Tuncsik
21317b8945
fix(editor): Re-initialize SSO store after login to populate OIDC redirect URL (#28386) 2026-04-17 12:05:48 +00:00
Jaakko Husso
46aa46d996
fix(editor): Handle plan confirmation correctly at the UI (no-changelog) (#28613) 2026-04-17 12:05:33 +00:00
Jaakko Husso
5c9a732af4
fix(core): Rework Instance ai settings (no-changelog) (#28495) 2026-04-17 11:36:49 +00:00
Mutasem Aldmour
cff2852332
fix(core): Preserve submitted workflow outcome when builder errors after submit (no-changelog) (#28606)
Co-authored-by: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-17 10:19:42 +00:00
Raúl Gómez Morales
465478a829
feat(editor): Add collapsible sidebar and deferred thread creation to Instance AI (no-changelog) (#28459) 2026-04-17 10:00:37 +00:00
Albert Alises
d17211342e
fix(editor): Improve setup wizard placeholder detection and card completion scoping (#28474) 2026-04-17 08:47:54 +00:00
Stephen Wright
bb96d2e50a
feat(core): Persist deployment_key entries for stability across restarts and key rotation (#28518)
Some checks are pending
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
2026-04-16 19:49:11 +00:00
Mutasem Aldmour
c97c3b4d12
fix(editor): Resolve nodes stuck on loading after execution in instance-ai preview (#28450)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-16 19:07:19 +00:00
Garrit Franke
fb2bc1ca5f
feat: Add require-community-node-keyword ESLint rule (no-changelog) (#28395) 2026-04-16 17:26:11 +00:00
Rob Hough
04860d5cd7
fix(editor): Fix styles on disabled Publish button (no-changelog) (#28531) 2026-04-16 16:15:11 +00:00
Stephen Wright
c6534fa0b3
feat: Add Prometheus counters for token exchange (#28453)
Some checks are pending
CI: Master (Build, Test, Lint) / Unit tests (push) Waiting to run
CI: Master (Build, Test, Lint) / Build for Github Cache (push) Waiting to run
CI: Master (Build, Test, Lint) / Lint (push) Waiting to run
CI: Master (Build, Test, Lint) / Performance (push) Waiting to run
CI: Master (Build, Test, Lint) / Notify Slack on failure (push) Blocked by required conditions
2026-04-16 12:20:38 +00:00
Declan Carroll
bb9bec3ba4
revert: Make Wait node fully durable by removing in-memory execution path (#28538) 2026-04-16 11:42:22 +00:00
Stephen Wright
56f36a6d19
fix: Disable axios built-in proxy for OAuth2 token requests (#28513) 2026-04-16 09:35:15 +00:00
Luca Mattiazzi
e4fc753967
fix(core): Fix dev:ai script in package.json (no-changelog) (#28402) 2026-04-15 17:11:51 +00:00
Milorad FIlipović
1ecc290107
fix(core): Add strict input validation for workflow() (no-changelog) (#28517) 2026-04-15 14:57:43 +00:00
Jaakko Husso
6bb271d83c
fix(core): Position workflow correctly if opened while on a background tab (no-changelog) (#28421) 2026-04-15 13:26:07 +00:00
Michael Kret
d012346c77
feat: AI Gateway credentials endpoint instance url (#28520) 2026-04-15 12:12:19 +00:00
Tuukka Kantola
6739856aa3
fix(editor): Center sub-node icons and refresh triggers panel icons (#28515)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-15 10:35:12 +00:00
Suguru Inoue
b3e56437c8
refactor(editor): Migrate usages of workflowObject in canvas operations (#28128) 2026-04-15 10:34:00 +00:00
Milorad FIlipović
e5aaeb53a9
fix(core): Implement data tables name collision detection on pull (#26416)
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Nikhil Kuriakose <nikhilkuria@gmail.com>
Co-authored-by: Nikhil Kuriakose <nikhil.kuriakose@n8n.io>
2026-04-15 09:38:08 +00:00
Marcus
8b105cc0cf
feat(core): Support npm registry token authentication to install private community node packages (#28228)
Co-authored-by: Sandra Zollner <sandra.zollner@n8n.io>
2026-04-15 09:28:55 +00:00
Sandra Zollner
34430aedb1
fix(core): Fix public API package update process (#28475) 2026-04-15 09:04:39 +00:00
RomanDavydchuk
30128c9254
fix(Google Drive Node): Continue on error support for download file operation (#28276)
Co-authored-by: Shireen Missi <94372015+ShireenMissi@users.noreply.github.com>
2026-04-15 08:41:15 +00:00
Albert Alises
e20f8e91ce
feat(editor): Add admin toggle for computer use in AI settings (no-changelog) (#28452) 2026-04-15 08:08:39 +00:00
Charlie Kolb
f216fda511
fix(editor): Refine resource dependency badge (#28087) 2026-04-15 07:54:26 +00:00
Suguru Inoue
5368851506
refactor(editor): Migrate Workflow class usages in Vue props and function arguments (#28393) 2026-04-15 07:30:49 +00:00
Matsu
80de266be4
ci: Account for pnpm-workspace changes in bump-versions.mjs (#28503) 2026-04-15 07:02:10 +00:00
Matsu
57af37fc61
chore: Migrate @n8n/stylelint-config to Vitest (#28405) 2026-04-15 06:06:03 +00:00
Matsu
229256ee7c
chore: Migrate @n8n/api-types to Vitest (#28394) 2026-04-15 06:05:11 +00:00
Albert Alises
bb7d137cf7
fix(editor): Display placeholder sentinels as hint text in setup wizard (#28482) 2026-04-14 16:36:28 +00:00
Milorad FIlipović
62dc073b3d
fix(core): Fix workflow-sdk validation for plain workflow objects (#28416) 2026-04-14 16:29:20 +00:00
Dawid Myslak
3f57f1cc19
refactor(core): Rename AI Gateway credits to wallet with USD amounts (#28436) 2026-04-14 15:29:13 +00:00
Dimitri Lavrenük
819e707a61
feat: Simplify user consent flow for computer-use (no-changelog) (#28266) 2026-04-14 15:13:08 +00:00
Albert Alises
04d57c5fd6
fix(editor): Prevent setup wizard disappearing on requestId-driven remount (#28473) 2026-04-14 14:58:39 +00:00
Dawid Myslak
bd927d9350
feat(MiniMax Chat Model Node): Add MiniMax Chat Model sub-node (#28305) 2026-04-14 14:29:50 +00:00
Csaba Tuncsik
1042350f4e
fix(editor): Reset OIDC form dirty state after saving IdP settings (#28388) 2026-04-14 14:21:49 +00:00
Albert Alises
f54608e6e4
refactor(ai-builder): Consolidate native tools into 10 action families (no-changelog) (#28140) 2026-04-14 14:00:41 +00:00
Csaba Tuncsik
9c97931ca0
fix(editor): Only show role assignment warning modal when value actually changed (#28387) 2026-04-14 13:32:44 +00:00
Stephen Wright
ac41112731
fix(core): Enforce credential access checks in dynamic node parameter requests (#28446) 2026-04-14 13:23:41 +00:00
Bernhard Wittmann
2959b4dc2a
fix(core): Skip npm outdated check for verified-only community packages (#28335) 2026-04-14 13:09:13 +00:00
James Gee
36261fbe7a
feat(core): Configure OIDC settings via env vars (#28185)
Signed-off-by: James Gee <1285296+geemanjs@users.noreply.github.com>
Co-authored-by: Irénée <irenee.ajeneza@n8n.io>
Co-authored-by: Ali Elkhateeb <ali.elkhateeb@n8n.io>
2026-04-14 13:06:22 +00:00
Jaakko Husso
e849041c11
fix(core): Make workflow preview refresh after setup completes (no-changelog) (#28468) 2026-04-14 12:41:20 +00:00
Ali Elkhateeb
fa3299d042
fix(core): Handle git fetch failure during source control startup (#28422)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-14 12:40:06 +00:00
Sandra Zollner
24015b3449
feat(core): Project based data table creation and transfer (#28323) 2026-04-14 12:38:44 +00:00
Stephen Wright
59edd6ae54
feat: Add deployment_key table, entity, repository, and migration (#28329) 2026-04-14 12:20:22 +00:00
krisn0x
ca871cc10a
feat(core): Support npm dist-tags in community node installation (#28067)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-14 12:18:28 +00:00
yehorkardash
39189c3985
fix: Update working memory using tools (#28467) 2026-04-14 11:45:57 +00:00
Jaakko Husso
9ef55ca4f9
feat(core): Instance AI preview tags and command bar improvements (no-changelog) (#28383) 2026-04-14 11:38:00 +00:00
Charlie Kolb
90a3f460f1
feat(editor): Support showing full label in tooltip on hover of dropdown menu items (no-changelog) (#28231)
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-14 11:33:13 +00:00
Svetoslav Dekov
00b0558c2b
fix(editor): Hide setup parameter issue icons until user interacts with input (#28010)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-14 11:14:37 +00:00
Jaakko Husso
094a5b403e
fix(core): Switch to latest artifact when it updates / new one is created (no-changelog) (#28461) 2026-04-14 11:04:02 +00:00
Elias Meire
c9cab112f9
fix(editor): Show relevant node in workflow activation errors (#26691) 2026-04-14 11:03:50 +00:00
Matsu
dcbc3f14bd
chore: Bump axios to 1.15.0 (#28460) 2026-04-14 10:49:05 +00:00
Charlie Kolb
69a62e0906
docs: Add migration timestamp guidance to @n8n/db AGENTS.md (no-changelog) (#28444)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-14 10:40:42 +00:00
RomanDavydchuk
357fb7210a
fix(GraphQL Node): Improve error response handling (#28209) 2026-04-14 10:12:48 +00:00
Danny Martini
98b833a07d
fix(core): Resolve additional keys lazily in VM expression engine (#28430)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Co-authored-by: Iván Ovejero <ivov.src@gmail.com>
2026-04-14 09:10:20 +00:00
Charlie Kolb
b1a075f760
feat(editor): Add favoriting for projects, folders, workflows and data tables (#26228)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
Co-authored-by: aikido-pr-checks[bot] <169896070+aikido-pr-checks[bot]@users.noreply.github.com>
2026-04-14 09:09:00 +00:00
Matsu
d6fbe5f847
ci: Run lint:styles as a part of reusable linting workflow (#28449) 2026-04-14 08:44:48 +00:00
Matsu
d496f6f1bd
ci: Replace docker/login-action with retry-wrapped docker login for DockerHub (#28442)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-14 08:44:12 +00:00
oleg
bd9713bd67
feat(instance-ai): Add Brave Search and Daytona credential types (no-changelog) (#28420)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2026-04-14 08:15:36 +00:00
Luca Mattiazzi
9078bb2306
feat(ai-builder): Add a binary check to avoid code import in code blocks (no-changelog) (#28382)
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-14 08:02:41 +00:00
Mutasem Aldmour
433370dc2f
test: Add isolated local Playwright runner (#28427)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-14 07:57:24 +00:00
Jaakko Husso
bbc3230dcf
chore: Suppress warning from style lint (#28426) 2026-04-14 07:54:05 +00:00
Albert Alises
3c850f2711
fix(ai-builder): Increase orchestrator max steps from default 5 to 60 (#28429) 2026-04-14 07:51:51 +00:00
Dimitri Lavrenük
b48aeef1f2
fix: Block concurrent connection requests in computer use (no-changelog) (#28312) 2026-04-14 07:29:25 +00:00
Andreas Fitzek
e8360a497d
feat(core): Add instance registry service (no-changelog) (#27731) 2026-04-14 06:57:35 +00:00
Bernhard Wittmann
5f8ab01f9b
fix(Schedule Node): Use elapsed-time check to self-heal after missed triggers (#28423) 2026-04-13 15:44:42 +00:00
James Gee
9a22fe5a25
feat(core): Workflow tracing - add workflow version id (#28424) 2026-04-13 15:25:44 +00:00
Jon
ca71d89d88
fix(core): Handle invalid percent sequences and equals signs in HTTP response headers (#27691)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-13 15:17:33 +00:00
Garrit Franke
550409923a
feat(core): Add require-node-description-fields ESLint rule for icon and subtitle (#28400) 2026-04-13 14:55:17 +00:00
n8n-release-tag-merge[bot]
60503b60b1 Merge tag 'n8n@2.17.0' 2026-04-13 15:10:41 +00:00
Mutasem Aldmour
df5855d4c6
test(editor): Add comprehensive instance AI e2e tests (#28326)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-13 14:40:15 +00:00
Irénée
1108467f44
feat: Enable security policy settings via env vars (#28321) 2026-04-13 14:09:06 +00:00
n8n-assistant[bot]
56d336b877
🚀 Release 2.17.0 (#28418)
Co-authored-by: Matsuuu <16068444+Matsuuu@users.noreply.github.com>
2026-04-13 13:53:22 +00:00
Albert Alises
39c6217109
fix(ai-builder): Use placeholders for user-provided values instead of hardcoding fake addresses (#28407) 2026-04-13 13:29:31 +00:00
Ali Elkhateeb
6217d08ce9
fix(core): Skip disabled Azure Key Vault secrets and handle partial fetch failures (#28325) 2026-04-13 13:23:38 +00:00
Declan Carroll
837652d14a
ci: Increase stale image limit (#28413) 2026-04-13 12:57:08 +00:00
Sandra Zollner
8cd75d2f2d
feat(core): Enable credential creation per project in public API (#28240) 2026-04-13 12:22:52 +00:00
oleg
a9950c182a
refactor(instance-ai): Harmonize prompting between builders (no-changelog) (#28338)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2026-04-13 11:59:50 +00:00
Jaakko Husso
4a14840d54
fix(core): Refresh the ai assistant token if it's about to expire (no-changelog) (#28340) 2026-04-13 11:59:28 +00:00
Danny Martini
3d8da49ee4
fix(core): Use closure-scoped evaluation contexts in VM expression bridge (#28337)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-13 11:53:44 +00:00
Guillaume Jacquart
21c0bf3048
feat(core): Add telemetry for data redaction settings and reveal data (#28396)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-13 11:48:13 +00:00
Claire
ebd279f88c
fix(core): Add projectId and projectName to log streaming events (#28310)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-13 11:43:14 +00:00
Andreas Fitzek
05554abf16
feat(core): Make token exchange rate limits configurable via env vars (no-changelog) (#28328) 2026-04-13 11:40:17 +00:00
Jaakko Husso
3033d9e0eb
fix(core): Make it possible to run workflows with event based triggers on instance AI (no-changelog) (#28398) 2026-04-13 11:20:53 +00:00
Jaakko Husso
5f3dc64cb6
fix(core): More accurate definition of form triggers on system prompt (no-changelog) (#28252) 2026-04-13 11:20:17 +00:00
Declan Carroll
738d42cb54
test: Fix flaky unit tests across three packages (no-changelog) (#28336)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-13 10:58:24 +00:00
phyllis-noester
06a666aaa0
chore: Add scoped JWT strategy for public API (no-changelog) (#28333) 2026-04-13 10:54:27 +00:00
Romeo Balta
22afd80759
feat(editor): Add Instance AI prompt suggestions (#27984) 2026-04-13 10:50:43 +00:00
Albert Alises
e78f144e8e
feat(ai-builder): Improve sub-agent context passing with structured briefings and debriefings (#28317) 2026-04-13 10:11:27 +00:00
Guillaume Jacquart
0ce81461ab
feat(core): Add audit logging for expression-based role assignments (#28018)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-13 10:11:17 +00:00
Elias Meire
dab714f961
fix: Prohibit tool access to gateway settings directory (#28320) 2026-04-13 09:31:11 +00:00
Irénée
346d4f1597
feat(core): Add userRole and feature-enabled telemetry for external secrets (#27431) 2026-04-13 09:17:17 +00:00
Jaakko Husso
bb310661ce
fix(core): Show data table artifact after row mutations (no-changelog) (#28314) 2026-04-13 09:10:40 +00:00
Raúl Gómez Morales
316d5bda80
feat(editor): Add response grouping and thinking UI for instance AI (no-changelog) (#28236)
Co-authored-by: Tuukka Kantola <tuukka@n8n.io>
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-13 08:47:06 +00:00
Matsu
a12d368482
chore: Remove dependency to yamljs in favor of yaml (#28307) 2026-04-13 08:06:36 +00:00
Joco-95
98be0ad452
feat: Implement opt-in flow for n8n Agent enrolment (#28006) 2026-04-13 08:00:32 +00:00
Garrit Franke
882dd9ce53
fix(core): Drain webhook close functions to prevent MCP connection leaks (#28384) 2026-04-13 07:48:52 +00:00
krisn0x
72d0f9b98c
fix(Gitlab Node): Handle binary data in all storage modes (#28363)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-13 06:21:04 +00:00
Albert Alises
ff99c84b88
feat(core): Add parse-file tool for structured attachments (no-changelog) (#28251) 2026-04-10 19:11:50 +00:00
Albert Alises
153fae251c
fix(ai-builder): Prevent orchestrator and planner from assuming resource identifiers (no-changelog) (#28342) 2026-04-10 19:11:30 +00:00
Michael Kret
2c4b9749c7
feat: AI Gateway Top Up Flow (#28113)
Co-authored-by: Dawid Myslak <dawid.myslak@gmail.com>
2026-04-10 16:13:06 +00:00
Marc Littlemore
9ab974b7b0
fix(core): Improve audit queries to avoid PostgreSQL bind parameter limits (#27985)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-10 16:07:52 +00:00
José Braulio González Valido
9072365bdb
fix(ai-builder): Handle data table name conflict gracefully instead of looping (no-changelog) (#28279)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-10 15:19:21 +00:00
Marc Littlemore
dbe3f022f1
fix(core): Improve audit queries to avoid PostgreSQL bind parameter limits (#27985)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-10 15:12:16 +00:00
Matsu
aaa2599378
chore: Bump version and remove unnecessary js-base64 patch (#28324) 2026-04-10 15:04:49 +00:00
Dawid Myslak
e30d2eee60
feat(Moonshot Kimi Node): Add new node (#28189) 2026-04-10 14:48:12 +00:00
Csaba Tuncsik
4c3a1501fe
feat(editor): Refactor role provisioning to two-dropdown layout (#28024) 2026-04-10 14:27:44 +00:00
José Braulio González Valido
ac922fa38c
feat(ai-builder): Improve eval verifier and mock handler reliability (no-changelog) (#28255)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-10 13:57:32 +00:00
Jean Ibarz
2d22c65e50
fix(core): Propagate formidable parse errors in Form Trigger (#28217) 2026-04-10 13:23:47 +00:00
Declan Carroll
095bfc00f6
ci: Fix flaky e2e tests (#28306) 2026-04-10 12:54:25 +00:00
Dimitri Lavrenük
25e90ffde3
feat: Limit computer use connections to only cloud instances (#28304)
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-10 12:52:59 +00:00
Andreas Fitzek
8810097604
feat(core): Wire up embed login end-to-end with cookie overrides and audit events (no-changelog) (#28303) 2026-04-10 12:52:46 +00:00
Charlie Kolb
b353143543
fix(core): Increase timing delays in workflow publish history tests (#28301)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-10 12:17:16 +00:00
Andreas Fitzek
87e3f1877e
feat(core): Wire TokenExchangeService.exchange() end-to-end (no-changelog) (#28293) 2026-04-10 11:49:17 +00:00
Matsu
872fc671bb
ci: Add exclude patterns for test files in PR size check (#28316) 2026-04-10 11:46:07 +00:00
Jon
4b0519167f
fix(Box Node): Fix issue where Box trigger node was not paginating correctly (#27415) 2026-04-10 11:32:18 +00:00
Andreas Fitzek
f913ec7a59
feat(core): Add JWKS resolver for fetching and parsing JWK Set URLs (no-changelog) (#28027) 2026-04-10 11:27:22 +00:00
phyllis-noester
99e5f1578d
chore: Add tokengrant for JWT permission context (no-changelog) (#28295) 2026-04-10 10:40:06 +00:00
Jake Ranallo
9a8631da38
fix(editor): Move save button to credential modal header (#28287)
Co-authored-by: Elias Meire <elias@meire.dev>
2026-04-10 10:37:55 +00:00
Chris Z
4c2f90539a
docs: Fix design system path in contributing guide (#27998)
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-10 10:25:30 +00:00
Albert Alises
7614712a15
fix(core): Remove LocalFilesystemProvider, require computer use for filesystem access (no-changelog) (#28297) 2026-04-10 10:14:20 +00:00
Jon
d7d18a04c8
fix(HubSpot Trigger Node): Add missing tickets scope to OAuth credentials (#27599)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-10 10:07:47 +00:00
Albert Alises
483b41221a
fix(editor): Address high-severity issues in parity of Instance AI setup wizard (no-changelog) (#28239) 2026-04-10 10:06:43 +00:00
Matsu
54881e189d
chore: Update create-pr skill and PR template (no-changelog) (#28302) 2026-04-10 09:58:59 +00:00
Tim Berndt
290005e0e8
fix(Facebook Lead Ads Node): Add missing pages_read_engagement scope (#27379)
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-10 09:57:25 +00:00
Alon Kolyakov
be45c085fb
fix(AWS ELB Node): Fix spelling typo 'sucess' → 'success' in RemoveListenerCertificates (#27703)
Co-authored-by: BloodShop <bloodshop@users.noreply.github.com>
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-10 09:46:29 +00:00
Iván Ovejero
69cb927761
ci: Upgrade to turbo 2.9 (#28292) 2026-04-10 09:44:24 +00:00
oleg
ab8e9a6764
fix(core): Reduce planner workflow bias and fix data-table task routing (no-changelog) (#28299) 2026-04-10 09:42:43 +00:00
oleg
320a4b244d
fix(core): Release LS trace clients on run finalization (no-changelog) (#28254)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2026-04-10 09:42:00 +00:00
Declan Carroll
98534c6db9
ci: Fix Node 25 build failure and benchmark OOM on master (no-changelog) (#28262)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-10 09:05:14 +00:00
Tuukka Kantola
ea5b874a8c
feat(editor): Update built-in node icons to custom SVGs (#28104)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-10 08:53:41 +00:00
Charlie Kolb
8cc0c77775
fix(editor): Fix dependency pill alignment in data table details header (no-changelog) (#27903)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-10 08:50:35 +00:00
Charlie Kolb
8509074cb6
fix(core): Migrate workflow publish history version id foreign key to set null instead (no-changelog) (#27434) 2026-04-10 08:24:11 +00:00
Albert Alises
8f8b70a301
fix(ai-builder): Unify post-build credential setup into single setup-workflow flow (#28273) 2026-04-10 08:06:09 +00:00
Matsu
733812b1a1
ci: Add PR ownership and size guardrails (#28103)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-10 06:53:18 +00:00
Bernhard Wittmann
4b06720c8b
fix(AWS DynamoDB Node): Add option to disable auto-parsing of numeric strings (#28093)
Co-authored-by: umut-polat <52835619+umut-polat@users.noreply.github.com>
Co-authored-by: RomanDavydchuk <roman.davydchuk@n8n.io>
2026-04-10 06:29:36 +00:00
Sandra Zollner
dfdc6d2c75
feat(core): Add 'verify' option to installPackage handler and update … (#28257) 2026-04-09 16:39:07 +00:00
krisn0x
769c21946c
chore: Enable n8n plugin in Claude GitHub Actions workflows (no-changelog) (#28258)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-09 15:21:27 +00:00
Bernhard Wittmann
b964ec9588
fix(Google Drive Node): Fix infinite pagination loop in v1 API request (#28244) 2026-04-09 15:03:04 +00:00
Bernhard Wittmann
5fb777e14e
fix(HTTP Request Node): Fix multipart/form-data file upload with binary streams (#28233) 2026-04-09 15:02:34 +00:00
Bernhard Wittmann
c1b5c96f62
fix: Add credential auth and test for PostHog, NASA, Peekalink, Clearbit, Uptime Robot (#27957) 2026-04-09 15:02:17 +00:00
Declan Carroll
09c9b11fff
fix: Update lodash, lodash-es, and xmldom to latest stable versions (#28121)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-09 14:52:23 +00:00
Declan Carroll
bf25fad7df
test: Resolve 43 janitor violations and update baseline (no-changelog) (#28173)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-09 14:34:18 +00:00
Andreas Fitzek
12e660a1a8
feat(core): Add DB-backed TrustedKeyService with leader refresh and crypto cache (no-changelog) (#28136) 2026-04-09 14:24:50 +00:00
Rob Hough
5a01bb308e
fix(editor): Improve popover positioning defaults and animations (#27919) 2026-04-09 14:23:45 +00:00
Garrit Franke
af90581c45
test: Harden polling trigger test helper against flaky OAuth2 failures (no-changelog) (#28247) 2026-04-09 14:10:23 +00:00
Arvin A
df8e795c3f
fix(core): Sanitize request data sent to LLM in eval mock handler (no-changelog) (#28200)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-09 13:16:35 +00:00
Jaakko Husso
42fde1e369
fix(core): Run snapshot pruning correctly and adjust logs (no-changelog) (#28250) 2026-04-09 13:12:12 +00:00
Albert Alises
a407c70841
fix(ai-builder): Allow non-admin users to access Instance AI preferences (no-changelog) (#28243) 2026-04-09 13:11:26 +00:00
Hyuncheol Park
752a4e47d4
fix(MCP Client Node): Ensure MCP connections close when MCP Client node execution ends (#25742)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-09 12:51:28 +00:00
Csaba Tuncsik
0f4d558b36
chore: Bump n8n Claude Code plugin version to 0.2.0 (no-changelog) (#28249) 2026-04-09 12:49:20 +00:00
oleg
8793ca6386
fix(API): Disable response compression on instance-ai SSE connections (no-changelog) (#28246) 2026-04-09 12:22:30 +00:00
Stephen Wright
8f2da63871
fix: Handle normalization of JSON for SQLite / postgres (#28242) 2026-04-09 12:02:54 +00:00
Dawid Myslak
5cbc9734a4
feat(Moonshot Kimi Chat Model Node): Add Moonshot Kimi Chat Model sub-node (#28156) 2026-04-09 11:59:45 +00:00
oleg
7d834835f3
chore(instance-ai): Disable working memory and remove related code (no-changelog) (#28234)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2026-04-09 11:55:11 +00:00
Dimitri Lavrenük
d3e6519730
feat: Implement session based permission modes in Computer Use (#28184) 2026-04-09 11:54:54 +00:00
Dimitri Lavrenük
6f722efef3
fix: Update computer use branding and readme (no-changelog) (#28241) 2026-04-09 10:35:38 +00:00
Dawid Myslak
1148d27725
feat(Alibaba Cloud Model Studio Node): Add new node (#27928)
Co-authored-by: Daniel Molenaars <daniel.molenaars@alibaba-inc.com>
Co-authored-by: Roman Davydchuk <roman.davydchuk@n8n.io>
2026-04-09 10:32:59 +00:00
Declan Carroll
7399af34c9
ci: Defer Chromatic visual regression tests (#28235) 2026-04-09 09:53:20 +00:00
Iván Ovejero
569ad497b7
fix(core): Align VM expression engine error handler with legacy engine (#28166)
Co-authored-by: Danny Martini <danny@n8n.io>
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-09 09:14:57 +00:00
Eugene
126983283e
chore: Add skill usage tracking hook (no-changelog) (#28183) 2026-04-09 09:14:56 +00:00
Declan Carroll
85144aa6bc
test: Add simple baseline instance AI memory test (#28170) 2026-04-09 08:47:25 +00:00
Iván Ovejero
4ccd72716e
feat(core): Emit audit events for workflow activation on bootup (#28126) 2026-04-09 08:41:54 +00:00
Rob Hough
72ebb430f4
fix(editor): Keep Back before Continue in MFA login footer (#27911) 2026-04-09 08:33:13 +00:00
Stephen Wright
3db52dca22
fix(core): Omit empty scope from OAuth2 client credentials token request and improve error messaging (#28159) 2026-04-09 08:11:25 +00:00
Raúl Gómez Morales
aa6c322059
fix(editor): UI tweaks for instance AI components (#28155) 2026-04-09 07:48:39 +00:00
Nikhil Kuriakose
5e60272632
fix(editor): Remove default for api params (#27914) 2026-04-09 07:39:39 +00:00
Declan Carroll
b7d6b6ea17
ci: Make PR metrics comment non-blocking (no-changelog) (#28232)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-09 07:49:32 +00:00
Stephen Wright
26d578dfc8
feat: Disable manual role management when expression-based mapping is enabled (#28105) 2026-04-09 07:29:32 +00:00
Tuukka Kantola
cc32c507c5
refactor(editor): Centralize instance AI confirmation dialog styling (#28195)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-09 07:25:22 +00:00
Andreas Fitzek
a95cbfb429
feat(core): Add DB infrastructure for trusted keys and key sources (no-changelog) (#28097) 2026-04-09 02:26:53 +00:00
Arvin A
09bb743bdb
fix(core): Preserve config credential properties in eval mock layer (no-changelog) (#28198)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 21:34:09 +00:00
Albert Alises
4b3b40e238
fix(ai-builder): Improve post-build flow: setup, test, then publish (#28125) 2026-04-08 16:45:56 +00:00
Jaakko Husso
eb5e89055a
feat(core): Persist open artifact preview on instance AI threads (no-changelog) (#28202) 2026-04-08 16:40:33 +00:00
Luca Mattiazzi
1e22e0ad51
fix(If Node): Patches IF node when fields are missing (#28014) 2026-04-08 16:35:57 +00:00
Muhammad Osama
1899a4e284
fix(Http Request Node): Handle empty JSON responses (#27793)
Co-authored-by: Dimitri Lavrenük <20122620+dlavrenuek@users.noreply.github.com>
2026-04-08 16:35:53 +00:00
Jaakko Husso
f1bb47e6a2
fix(core): Mark tool calls cancelled when cancelling main agent on instance AI (no-changelog) (#28192) 2026-04-08 16:17:06 +00:00
Albert Alises
d1a4fa99cc
fix(core): Unblock builder agent when correction arrives during HITL confirmation (no-changelog) (#28203) 2026-04-08 16:16:48 +00:00
phyllis-noester
f23d4ced40
fix(core): Resolver settings page is only visible to authorized users (no-changelog) (#28201) 2026-04-08 16:11:34 +00:00
Iván Ovejero
f8c21276cb
feat(core): Make VM expression bridge timeout and memory limit configurable (#27962)
Co-authored-by: Danny Martini <danny@n8n.io>
2026-04-08 16:07:16 +00:00
md
bd5a70215d
fix(Oracle Node): Resolve 'Maximum call stack size exceeded' on large datasets (#27037)
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-08 15:17:02 +00:00
Rayan Salhab
294868de5a
fix(ICalendar Node): Fix Convert to ICS failing when File Name option is set (#27712)
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-08 15:15:17 +00:00
Jaakko Husso
7e1bebdae6
feat(core): Make instance AI aware of read-only environments (no-changelog) (#28120) 2026-04-08 15:10:31 +00:00
Ian Gallagher
a93ae81fa4
fix(MQTT Trigger Node): Fix typo (no-changelog) (#9304)
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-08 15:09:53 +00:00
Dimitri Lavrenük
468c9c4f8f
fix: Increase timeout for computer use tool calls to 60 seconds (no-changelog) (#28196) 2026-04-08 15:08:47 +00:00
AndyHazz
c0c0f8397c
fix(Pushover Node): Replace duplicate Pushover Timestamp field with the missing TTL field (#11287)
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-08 14:56:35 +00:00
Jaakko Husso
59a9f016cb
fix(core): Save cancellation status on cancelled background sub-agent snapshots (no-changelog) (#28175) 2026-04-08 14:56:28 +00:00
oleg
5014d205f1
feat(instance-ai): Add planner sub-agent with progressive plan rendering (no-changelog) (#27889)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2026-04-08 14:56:09 +00:00
Stephen Wright
0d078c75f0
fix: Truncate long custom role names and add hover tooltip (#28191) 2026-04-08 14:49:14 +00:00
Bernhard Wittmann
853a74044f
fix(Microsoft Outlook Node): Prevent poll from skipping messages after API errors (#28157) 2026-04-08 14:47:27 +00:00
José Braulio González Valido
91b01d27b9
feat(ai-builder): Fix IF/Switch/Filter node misconfiguration in builder (no-changelog) (#28172)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 14:35:43 +00:00
RomanDavydchuk
4fab655cc5
fix(Microsoft Teams Node): Block requests from Microsoft Preview Service to prevent accidental approvals for "Send and Wait" (#28085) 2026-04-08 14:25:54 +00:00
Nikhil Kuriakose
33282dbeb9
fix(editor): Removing redundant stop of key propogation (#23464)
Co-authored-by: Garrit Franke <32395585+garritfra@users.noreply.github.com>
2026-04-08 14:03:26 +00:00
Garrit Franke
4f725dab1b
feat(core): Add no-forbidden-lifecycle-scripts lint rule for community nodes (#28176)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 14:00:47 +00:00
Albert Alises
c2fbf9d643
fix(ai-builder): Expose credential account context to prevent prompt/credential mismatch (#28100) 2026-04-08 13:22:10 +00:00
Garrit Franke
b39fc5d612
test: Fix flaky e2e tests in CI shards 2, 5, and 7 (no-changelog) (#28182)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 13:12:27 +00:00
Irénée
1b995cde18
feat(core): Enable instance owner setup via environment variables (#27859)
Co-authored-by: James Gee <1285296+geemanjs@users.noreply.github.com>
2026-04-08 13:03:50 +00:00
Elias Meire
6bb90d43b6
feat: Rename extension to "Browser Use" and prepare for publishing (#27898)
Co-authored-by: Dimitri Lavrenük <dimitri.lavrenuek@n8n.io>
Co-authored-by: Dimitri Lavrenük <20122620+dlavrenuek@users.noreply.github.com>
2026-04-08 12:58:31 +00:00
Garrit Franke
6d110fa29b
feat: Add require-continue-on-fail ESLint rule for community nodes (no-changelog) (#28163)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 12:14:22 +00:00
krisn0x
8c52c5177a
chore: Refine linear-issue skill (no-changelog) (#28074)
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-08 12:06:09 +00:00
Iván Ovejero
94b463e2a0
fix(core): Avoid permanent deactivation on transient isolate errors (#28117) 2026-04-08 12:00:02 +00:00
Csaba Tuncsik
91fec345b1
feat(editor): Add project rules, fallback role, remove mapping, save flow (#27689) 2026-04-08 11:23:00 +00:00
Mutasem Aldmour
8cdcab3cc8
feat(core): Add telemetry events for AI builder journey (#28116)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 11:10:42 +00:00
Garrit Franke
e282fcdf0f
feat(core): Add missing-paired-item lint rule for community nodes (#28118)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 10:54:35 +00:00
Declan Carroll
65b878221a
test: Resolve 45 janitor violations and update baseline (no-changelog) (#28161)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-08 10:34:00 +00:00
Ali Elkhateeb
1253888174
chore(n8n Node): Add insights summary endpoint to API coverage manifest (no-changelog) (#28160) 2026-04-08 10:32:13 +00:00
krisn0x
7983a41132
chore: Add new entries to .gitignore (no-changelog) (#28168) 2026-04-08 10:21:22 +00:00
Garrit Franke
8f25ce57f2
chore(core): Add skill for creating community node lint rules (no-changelog) (#28165)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 10:17:35 +00:00
Jaakko Husso
1506afba91
fix(core): Avoid markdown in instance AI titles (no-changelog) (#28167) 2026-04-08 10:05:46 +00:00
Jaakko Husso
b68f843f31
refactor(core): Miscellaneous instance AI cleanup (#28162) 2026-04-08 10:05:26 +00:00
Matsu
2597669a5d
chore: Improve linear-issue claude skills (no-changelog) (#27970) 2026-04-08 09:53:12 +00:00
Matsu
69526c6795
ci: Run test:local on community PR's (#28164) 2026-04-08 09:40:56 +00:00
Jaakko Husso
4c3dc92c52
fix(core): Ensure SSE is connected on initial instance AI message (no-changelog) (#28131) 2026-04-08 08:54:54 +00:00
Daria
d6e7923ca6
fix: Validate workflow size for workflows with pinned data on backend (no-changelog) (#27356) 2026-04-08 08:33:01 +00:00
Declan Carroll
b13495227e
test: Resolve 68 janitor scope-lockdown violations in NodeDetailsViewPage (#27993)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-08 08:27:19 +00:00
Elias Meire
94f0a4db5d
fix: Improve browser use Chrome extension connection stability (#27846)
Co-authored-by: Dimitri Lavrenük <dimitri.lavrenuek@n8n.io>
Co-authored-by: Dimitri Lavrenük <20122620+dlavrenuek@users.noreply.github.com>
2026-04-08 08:16:45 +00:00
Csaba Tuncsik
a00dd19c43
chore: Move Claude Code skills, agents and commands under n8n plugin (no-changelog) (#28020) 2026-04-08 07:57:39 +00:00
yehorkardash
8d4e355241
feat: Add agent schema introspection (no-changelog) (#28015)
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-08 07:53:51 +00:00
Andreas Fitzek
4eb99b9c88
feat(core): Add in-process mutex for SQLite advisory lock parity (#28135) 2026-04-08 07:24:36 +00:00
Bernhard Wittmann
f5402dd7f7
feat(MCP Client Tool Node): Prefix MCP tool names with server name (#28094) 2026-04-08 07:23:40 +00:00
Declan Carroll
a23fc0a867
ci: Tag Docker images (#28088) 2026-04-08 06:12:02 +00:00
José Braulio González Valido
fef91c97dd
feat(ai-builder): Add --keep-workflows flag and fix eval execution errors (no-changelog) (#28129)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-07 17:35:04 +00:00
Csaba Tuncsik
a6b051bfe3
feat(editor): Add instance rules editor with drag-to-reorder (#27688) 2026-04-07 16:55:50 +00:00
Iván Ovejero
2ed3f9c336
fix(core): Fix retry activation in multi-main bypassing exponential backoff (#28110) 2026-04-07 16:40:37 +00:00
Alex Grozav
205ae77c9e
refactor(editor): Migrate description field to workflowDocument store (no-changelog) (#28064) 2026-04-07 15:28:52 +00:00
Ali Elkhateeb
13d153ef1e
feat(API): Add insights summary endpoint to public API (#28099)
Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-07 15:24:02 +00:00
Dimitri Lavrenük
b841c736df
feat: Update naming of local gateway to computer use (#28111) 2026-04-07 15:22:32 +00:00
James Gee
309a739271
fix(core): Improve performance of the push/pull modal getStatus (#27188)
Co-authored-by: Irénée <irenee.ajeneza@n8n.io>
Co-authored-by: Ali Elkhateeb <ali.elkhateeb@n8n.io>
2026-04-07 15:19:50 +00:00
Suguru Inoue
c5969b1952
refactor(editor): Migrate workflowObject usages in NDV components (#27982)
Co-authored-by: r00gm <raul00gm@gmail.com>
2026-04-07 15:09:52 +00:00
Suguru Inoue
2aec493852
refactor(editor): Migrate workflowObject usages in NDV store (#28115) 2026-04-07 15:09:33 +00:00
yehorkardash
5b2c221ffe
fix: Ensure monotonic message timestamps for agent (no-changelog) (#27624) 2026-04-07 14:14:28 +00:00
Luka Zivkovic
2e56ba137d
fix(core): MCP tools called after workflow execution failure (#28021) 2026-04-07 13:37:36 +00:00
José Braulio González Valido
2383749980
feat(ai-builder): Workflow evaluation framework with LLM mock execution (#27818)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Co-authored-by: Arvin A <51036481+DeveloperTheExplorer@users.noreply.github.com>
Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
2026-04-07 13:31:16 +00:00
Declan Carroll
7ed34d7f85
ci: Add automated QA metrics reporting to PRs (#28003) 2026-04-07 13:17:01 +00:00
Alex Grozav
14e0c10f4d
refactor(editor): Migrate versionId to workflowDocument store (no-changelog) (#28063) 2026-04-07 13:08:46 +00:00
Albert Alises
91a1282db6
fix(editor): Skip only current step when clicking Later in workflow setup (#27929) 2026-04-07 12:58:16 +00:00
Matsu
b646105028
ci: Install script dependencies before detecting new packages (#28112) 2026-04-07 12:56:28 +00:00
Albert Alises
4a3fc7d27c
fix(ai-builder): Paginate list-credentials tool and drop unused fields (#28108) 2026-04-07 12:52:49 +00:00
Arvin A
b0484a1555
fix(core): Add streaming keepalive to prevent proxy timeout during long agent executions (#27853)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-07 12:42:06 +00:00
Sandra Zollner
524166e0f1
feat(core): Support projectId when creating workflow via public API (#27884) 2026-04-07 12:14:24 +00:00
Csaba Tuncsik
4e6b4fc3be
feat(editor): Add expression-based role mapping plumbing (#27686) 2026-04-07 11:52:46 +00:00
Andreas Fitzek
d9a5defe88
feat(core): Add identity resolution for token exchange (no-changelog) (#28009) 2026-04-07 11:28:41 +00:00
Charlie Kolb
a82de1dd8f
feat(core): Track instance version history (no-changelog) (#27428) 2026-04-07 11:20:31 +00:00
Guillaume Jacquart
7c156062d1
refactor(core): Extract API key auth into AuthStrategy pattern (no-changelog) (#28008)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Phyllis Noester <phyllis.noester@n8n.io>
2026-04-07 11:00:38 +00:00
Raúl Gómez Morales
91ce8ea93c
fix(editor): UI tweaks for instance AI components (#27917)
Co-authored-by: Tuukka Kantola <tuukka@n8n.io>
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-07 10:52:24 +00:00
Romeo Balta
a9bc92f83d
fix(core): Scope deferred tool processors per run (#28068) 2026-04-07 10:36:54 +00:00
Raúl Gómez Morales
29e4248cc3
refactor(editor): Add workflowObject facade methods + ESLint guards (no-changelog) (#27556)
Co-authored-by: Suguru Inoue <suguru@n8n.io>
2026-04-07 09:16:36 +00:00
Michael Kret
6e2d35644f
feat: Add AI Gateway support for AI nodes (#27593)
Co-authored-by: Alexander Gekov <40495748+alexander-gekov@users.noreply.github.com>
2026-04-07 09:11:24 +00:00
Albert Alises
9b94862dc7
feat: N8n Agent admin settings page with enable toggle and permissions (#27913) 2026-04-07 08:56:50 +00:00
Matsu
e64408a0d2
ci: Migrate from codecov/test-results-action to codecov-action (#28086) 2026-04-07 08:48:24 +00:00
oleg
dc249ad5ee
fix(instance-ai): reduce memory footprint (no-changelog) (#27967)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2026-04-07 07:50:30 +00:00
yehorkardash
2ec98687d4
chore: Make scripts in local-gateway platform-agnostic (#28019) 2026-04-07 07:45:35 +00:00
Charlie Kolb
ccd4fd0fc8
fix(core): Decrease workflow history compaction retention periods (#27763) 2026-04-07 07:21:46 +00:00
Svetoslav Dekov
aca249e856
fix(editor): AI builder setup wizard positioning and popover collision (#27821)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-07 07:20:49 +00:00
Charlie Kolb
c6a98b036e
feat(editor): Track telemetry when user clicks dependency pill item (no-changelog) (#27902)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-07 06:31:12 +00:00
2127 changed files with 116161 additions and 35474 deletions

View file

@ -2,6 +2,10 @@
This directory contains shared Claude Code configuration for the n8n team.
All skills, agents, and commands live under the `n8n` plugin at
`.claude/plugins/n8n/` for `n8n:` namespacing. See
[plugin README](plugins/n8n/README.md) for full details.
## Setup
### Linear MCP Server
@ -33,31 +37,10 @@ To auto-approve Linear MCP tools, add to your global settings:
**Note:** For GitHub/git operations, we use `gh` CLI and `git` commands instead of GitHub MCP.
## Available Commands
## Plugin
- `/n8n-triage PAY-XXX` - Analyze and triage a Linear issue
- `/n8n-plan PAY-XXX` - Create implementation plan
All skills, commands, and agents are auto-discovered from
`.claude/plugins/n8n/`. They get the `n8n:` namespace prefix automatically
(e.g. `n8n:create-pr`, `/n8n:plan`, `n8n:developer`).
## Quick Reference
- `/n8n-conventions` - Load detailed conventions guide (optional - agents already know n8n patterns)
## Workflow
**Recommended approach:**
1. `/n8n-triage PAY-123` → Investigate root cause and severity (optional)
2. `/n8n-plan PAY-123` → Create detailed implementation plan
3. Review the plan in chat
4. Say "implement it" or "go ahead" → I'll launch n8n-developer agent
5. Implementation proceeds with full context from the plan
## Agents
- **n8n-developer** - Full-stack n8n development (frontend/backend/nodes)
- **n8n-linear-issue-triager** - Issue investigation and analysis
## Skills
- **n8n-conventions** - Quick reference pointing to /AGENTS.md (optional - agents have embedded knowledge)
- Use `/n8n-conventions` when you need detailed patterns
- References root docs instead of duplicating (~95 lines)
See [plugin README](plugins/n8n/README.md) for structure and design decisions.

View file

@ -1,4 +1,5 @@
{
"name": "n8n",
"version": "0.2.0",
"description": "n8n Claude Code plugin — shared skills, commands, and agents for n8n development"
}

View file

@ -1,51 +1,19 @@
# n8n Claude Code Plugin
Shared skills, commands, and agents for n8n development.
Shared skills, commands, and agents for n8n development. All items are
namespaced under `n8n:` to avoid collisions with personal or third-party
plugins.
## Skills
## Usage
### `n8n:setup-mcps`
Skills, commands, and agents are auto-discovered by Claude Code from this
plugin directory. Everything gets the `n8n:` namespace prefix automatically.
Configures commonly used MCP servers for n8n engineers.
**Usage:**
```
/n8n:setup-mcps
```
**What it does:**
1. Checks which MCPs are already configured (matches by URL, not name)
2. Presents a multi-select menu of available MCPs (Linear, Notion, Context7, Figma)
3. For each selected MCP, asks which scope to install in:
- **user** (recommended) — available across all projects
- **local** — only in this project (`settings.local.json`)
4. Installs using official recommended commands
**Note:** Project scope is intentionally not offered since `.claude/settings.json` is tracked in git.
## Design Decisions
### Why a plugin instead of standalone skills?
To get the `n8n:` namespace prefix for all n8n-specific skills, avoiding name
collisions with built-in or personal skills. Claude Code only supports
colon-namespaced skills (`n8n:setup-mcps`) through the plugin system —
standalone `.claude/skills/` entries cannot be namespaced. This also provides a
home for future n8n skills, commands, and agents under the same `n8n:` prefix.
### Why only user and local scope (no project scope)?
Project scope writes MCP config to `.claude/settings.json`, which is tracked in
git. Since MCP credentials are personal (OAuth tokens, API keys), they should
not end up in version control. User scope makes MCPs available across all
projects; local scope (`settings.local.json`) keeps them project-specific but
gitignored.
### Why ask scope per MCP instead of once for all?
Engineers may want different scopes for different MCPs. For example, Context7
and Figma are useful across all projects (user scope), while Linear or Notion
might only be needed for this project (local scope).
| Type | Example | Invocation |
|------|---------|------------|
| Skill | `skills/create-pr/SKILL.md` | `n8n:create-pr` |
| Command | `commands/plan.md` | `/n8n:plan PAY-XXX` |
| Agent | `agents/developer.md` | `n8n:developer` |
## Plugin Structure
@ -54,13 +22,24 @@ might only be needed for this project (local scope).
├── .claude-plugin/
│ ├── marketplace.json # Marketplace manifest
│ └── plugin.json # Plugin identity
├── agents/
│ └── <name>.md # → n8n:<name> agent
├── commands/
│ └── <name>.md # → /n8n:<name> command
├── skills/
│ └── sample-skill/
│ └── SKILL.md
│ └── <name>/SKILL.md # → n8n:<name> skill
└── README.md
```
## Known Issues
## Design Decisions
### Why a plugin instead of standalone skills?
To get the `n8n:` namespace prefix, avoiding collisions with personal or
third-party plugins. Claude Code only supports colon-namespaced items through
the plugin system — standalone `.claude/skills/` entries cannot be namespaced.
### Known Issues
- Plugin skill namespacing requires omitting the `name` field from SKILL.md
frontmatter due to a [Claude Code bug](https://github.com/anthropics/claude-code/issues/17271).

View file

@ -1,6 +1,6 @@
---
name: n8n-developer
description: Use this agent for any n8n development task - frontend (Vue 3), backend (Node.js/TypeScript), workflow engine, node creation, or full-stack features. The agent automatically applies n8n conventions and best practices. Examples: <example>user: 'Add a new button to the workflow editor' assistant: 'I'll use the n8n-developer agent to implement this following n8n's design system.'</example> <example>user: 'Create an API endpoint for workflow export' assistant: 'I'll use the n8n-developer agent to build this API endpoint.'</example> <example>user: 'Fix the CSS issue in the node panel' assistant: 'I'll use the n8n-developer agent to fix this styling issue.'</example>
name: developer
description: Use this agent for any n8n development task - frontend (Vue 3), backend (Node.js/TypeScript), workflow engine, node creation, or full-stack features. The agent automatically applies n8n conventions and best practices. Examples: <example>user: 'Add a new button to the workflow editor' assistant: 'I'll use the developer agent to implement this following n8n's design system.'</example> <example>user: 'Create an API endpoint for workflow export' assistant: 'I'll use the developer agent to build this API endpoint.'</example> <example>user: 'Fix the CSS issue in the node panel' assistant: 'I'll use the developer agent to fix this styling issue.'</example>
model: inherit
color: blue
---

View file

@ -1,5 +1,5 @@
---
name: n8n-linear-issue-triager
name: linear-issue-triager
description: Use this agent proactively when a Linear issue is created, updated, or needs comprehensive analysis. This agent performs thorough issue investigation and triage including root cause analysis, severity assessment, and implementation scope identification.
model: inherit
color: red

View file

@ -4,4 +4,4 @@ argument-hint: [PAY-XXXX | DEV-XXXX | ENG-XXXX]
allowed-tools: Task
---
Use the n8n-linear-issue-triager agent to triage Linear issue $ARGUMENTS.
Use the n8n:linear-issue-triager agent to triage Linear issue $ARGUMENTS.

View file

@ -0,0 +1,55 @@
#!/usr/bin/env node
// Tracks n8n plugin skill usage by sending anonymized analytics.
// Called as a PostToolUse hook for the Skill tool.
// Receives JSON on stdin: { "tool_name": "Skill", "tool_input": { "skill": "n8n:foo", ... }, "tool_response": ... }
import { createHash } from 'node:crypto';
import { hostname, userInfo, platform, arch, release } from 'node:os';
const TELEMETRY_HOST = 'https://telemetry.n8n.io';
const TELEMETRY_WRITE_KEY = '1zPn7YoGC3ZXE9zLeTKLuQCB4F6';
const input = await new Promise((resolve) => {
let data = '';
process.stdin.on('data', (chunk) => (data += chunk));
process.stdin.on('end', () => resolve(data));
});
const { tool_input: toolInput } = JSON.parse(input);
const skillName = toolInput?.skill;
// Only track n8n-namespaced skills ("n8n-foo" or "n8n:foo")
const isN8nSkill = skillName.startsWith('n8n:') || skillName.startsWith('n8n-');
if (!skillName || !isN8nSkill) {
process.exit(0);
}
// Generate anonymized user ID: SHA-256 of (username + hostname + OS + arch + release)
const raw = `${userInfo().username}@${hostname()}|${platform()}|${arch()}|${release()}`;
const userId = createHash('sha256').update(raw).digest('hex');
const payload = JSON.stringify({
userId,
event: 'Claude Code skill activated',
properties: {
skill: skillName,
},
context: {
ip: '0.0.0.0',
},
});
// Send to telemetry HTTP Track API (fire-and-forget, never block the user)
try {
await fetch(`${TELEMETRY_HOST}/v1/track`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Basic ${Buffer.from(`${TELEMETRY_WRITE_KEY}:`).toString('base64')}`,
},
body: payload,
});
} catch {
// Silently ignore network errors
}

View file

@ -1,5 +1,4 @@
---
name: content-design
description: >
Product content designer for UI copy. Use when writing, reviewing, or auditing
user-facing text: button labels, error messages, tooltips, empty states, modal copy,

View file

@ -1,5 +1,4 @@
---
name: n8n-conventions
description: Quick reference for n8n patterns. Full docs /AGENTS.md
---

View file

@ -0,0 +1,217 @@
---
description: >-
Create new ESLint rules for the @n8n/eslint-plugin-community-nodes package.
Use when adding a lint rule, creating a community node lint, or working on
eslint-plugin-community-nodes. Guides rule implementation, tests, docs, and
plugin registration.
---
# Create Community Node Lint Rule
Guide for adding new ESLint rules to `packages/@n8n/eslint-plugin-community-nodes/`.
All paths below are relative to `packages/@n8n/eslint-plugin-community-nodes/`.
## Step 1: Understand the Rule
Before writing code, clarify:
- **What** does the rule detect? (missing property, wrong pattern, bad value)
- **Where** does it apply? (`.node.ts` files, credential classes, both)
- **Severity**: `error` (must fix) or `warn` (should fix)?
- **Fixable?** Can it be auto-fixed safely, or only suggest?
- **Scope**: Both `recommended` configs, or exclude from `recommendedWithoutN8nCloudSupport`?
## Step 2: Implement the Rule
Create `src/rules/<rule-name>.ts`:
```typescript
import { AST_NODE_TYPES } from '@typescript-eslint/utils';
import {
isNodeTypeClass, // or isCredentialTypeClass
findClassProperty,
findObjectProperty,
createRule,
} from '../utils/index.js';
export const YourRuleNameRule = createRule({
name: 'rule-name',
meta: {
type: 'problem', // or 'suggestion'
docs: {
description: 'One-line description of what the rule enforces',
},
messages: {
messageId: 'Human-readable message. Use {{placeholder}} for dynamic data.',
},
fixable: 'code', // omit if not auto-fixable
hasSuggestions: true, // omit if no suggestions
schema: [], // add options schema if configurable
},
defaultOptions: [],
create(context) {
return {
ClassDeclaration(node) {
if (!isNodeTypeClass(node)) return;
const descriptionProperty = findClassProperty(node, 'description');
if (!descriptionProperty) return;
const descriptionValue = descriptionProperty.value;
if (descriptionValue?.type !== AST_NODE_TYPES.ObjectExpression) return;
// Rule logic here — use findObjectProperty(), getLiteralValue(), etc.
context.report({
node: targetNode,
messageId: 'messageId',
data: { /* template vars */ },
fix(fixer) {
return fixer.replaceText(targetNode, 'replacement');
},
});
},
};
},
});
```
**Naming**: Export as `PascalCaseRule` (e.g. `MissingPairedItemRule`). The `name` field is kebab-case.
**Available AST helpers** — see [reference.md](reference.md) for the full catalog of `ast-utils` and `file-utils` exports.
## Step 3: Write Tests
Create `src/rules/<rule-name>.test.ts`:
```typescript
import { RuleTester } from '@typescript-eslint/rule-tester';
import { YourRuleNameRule } from './rule-name.js';
const ruleTester = new RuleTester();
// Helper to generate test code — keeps test cases readable
function createNodeCode(/* parameterize the varying parts */): string {
return `
import type { INodeType, INodeTypeDescription } from 'n8n-workflow';
export class TestNode implements INodeType {
description: INodeTypeDescription = {
displayName: 'Test Node',
name: 'testNode',
group: ['input'],
version: 1,
description: 'A test node',
defaults: { name: 'Test Node' },
inputs: [],
outputs: [],
properties: [],
};
}`;
}
ruleTester.run('rule-name', YourRuleNameRule, {
valid: [
{ name: 'class that does not implement INodeType', code: '...' },
{ name: 'node with correct pattern', code: createNodeCode(/* correct */) },
],
invalid: [
{
name: 'descriptive case name',
code: createNodeCode(/* incorrect */),
errors: [{ messageId: 'messageId', data: { /* expected template vars */ } }],
output: createNodeCode(/* expected after fix */), // or `output: null` if no fix
},
],
});
```
**Test guidelines:**
- Always test that non-INodeType classes are skipped (valid case)
- Test both the error message and the fixed output for fixable rules
- For rules with options, test each option combination
- For rules using filesystem, mock with `vi.mock('../utils/file-utils.js')`
- For suggestion-only rules, use `errors: [{ messageId, suggestions: [...] }]`
## Step 4: Register the Rule
### 4a. Add to `src/rules/index.ts`
```typescript
import { YourRuleNameRule } from './rule-name.js';
// Add to the rules object:
export const rules = {
// ... existing rules
'rule-name': YourRuleNameRule,
} satisfies Record<string, AnyRuleModule>;
```
### 4b. Add to `src/plugin.ts` configs
Add to **both** config objects (unless the rule depends on n8n cloud features):
```typescript
'@n8n/community-nodes/rule-name': 'error', // or 'warn'
```
- Use `error` for rules that catch bugs or required patterns
- Use `warn` for style/convention rules (like `options-sorted-alphabetically`)
- If the rule uses `no-restricted-globals` or `no-restricted-imports` patterns,
only add to `recommended` (not `recommendedWithoutN8nCloudSupport`)
## Step 5: Write Documentation
Create `docs/rules/<rule-name>.md`:
```markdown
# Description of what the rule does (`@n8n/community-nodes/rule-name`)
<!-- end auto-generated rule header -->
## Rule Details
Explain why this rule exists and what problem it prevents.
## Examples
### Incorrect
\`\`\`typescript
// code that triggers the rule
\`\`\`
### Correct
\`\`\`typescript
// code that passes the rule
\`\`\`
```
The header above `<!-- end auto-generated rule header -->` will be regenerated by `pnpm build:docs`. Write a reasonable first version — it gets overwritten.
## Step 6: Verify
Run from `packages/@n8n/eslint-plugin-community-nodes/`:
```bash
pushd packages/@n8n/eslint-plugin-community-nodes
pnpm test <rule-name>.test.ts # tests pass
pnpm typecheck # types are clean
pnpm build # compiles
pnpm build:docs # regenerates doc headers and README table
pnpm lint:docs # docs match schema
popd
```
## Checklist
- [ ] Rule file: `src/rules/<rule-name>.ts`
- [ ] Test file: `src/rules/<rule-name>.test.ts`
- [ ] Registered in `src/rules/index.ts`
- [ ] Added to configs in `src/plugin.ts`
- [ ] Doc file: `docs/rules/<rule-name>.md`
- [ ] README table updated via `pnpm build:docs`
- [ ] All verification commands pass

View file

@ -0,0 +1,85 @@
# AST & File Utilities Reference
Helpers available from `../utils/index.js`. Use these instead of writing custom AST traversal.
## ast-utils.ts
### Class/Interface detection
| Function | Returns | Use when |
|----------|---------|----------|
| `isNodeTypeClass(node)` | `boolean` | Check if class implements `INodeType` or extends `Node` |
| `isCredentialTypeClass(node)` | `boolean` | Check if class implements `ICredentialType` |
### Property finding
| Function | Returns | Use when |
|----------|---------|----------|
| `findClassProperty(node, name)` | `PropertyDefinition \| null` | Find a property on a class (e.g. `description`, `icon`) |
| `findObjectProperty(obj, name)` | `Property \| null` | Find a property in an object literal (Identifier key) |
| `findJsonProperty(obj, name)` | `Property \| null` | Find a property with a Literal key (JSON-style `"key"`) |
| `findArrayLiteralProperty(obj, name)` | `Property \| null` | Find a property whose value is an ArrayExpression |
### Value extraction
| Function | Returns | Use when |
|----------|---------|----------|
| `getLiteralValue(node)` | `string \| boolean \| number \| null` | Extract primitive from a Literal node |
| `getStringLiteralValue(node)` | `string \| null` | Extract string specifically |
| `getBooleanLiteralValue(node)` | `boolean \| null` | Extract boolean specifically |
| `getModulePath(node)` | `string \| null` | Get import path from string literal or template literal |
### Array operations
| Function | Returns | Use when |
|----------|---------|----------|
| `hasArrayLiteralValue(arr, value)` | `boolean` | Check if array contains a specific string literal |
| `extractCredentialInfoFromArray(element)` | `{ name, testedBy } \| null` | Parse credential object from array element |
| `extractCredentialNameFromArray(element)` | `string \| null` | Get just the credential name from array element |
### Method matching
| Function | Returns | Use when |
|----------|---------|----------|
| `isThisHelpersAccess(node)` | `boolean` | Match `this.helpers` member expression |
| `isThisMethodCall(node, method)` | `boolean` | Match `this.methodName(...)` calls |
| `isThisHelpersMethodCall(node, method)` | `boolean` | Match `this.helpers.methodName(...)` calls |
### Similarity
| Function | Returns | Use when |
|----------|---------|----------|
| `findSimilarStrings(target, candidates, maxDistance?)` | `string[]` | Suggest similar names (Levenshtein distance) |
## file-utils.ts
### Path operations
| Function | Use when |
|----------|----------|
| `isContainedWithin(child, parent)` | Check path is within a directory |
| `safeJoinPath(base, ...parts)` | Join paths with traversal prevention |
### Package.json
| Function | Returns | Use when |
|----------|---------|----------|
| `findPackageJson(startDir)` | `string \| null` | Walk up to find nearest package.json |
| `readPackageJsonN8n(startDir)` | `N8nPackageJson \| null` | Parse n8n config section |
| `readPackageJsonCredentials(startDir)` | `Set<string>` | Get credential names from package.json |
| `readPackageJsonNodes(startDir)` | `string[]` | Get resolved node file paths |
### File system
| Function | Use when |
|----------|----------|
| `validateIconPath(filePath, iconValue)` | Check icon file exists and is SVG |
| `extractCredentialNameFromFile(filePath)` | Parse credential class name from file |
| `fileExistsWithCaseSync(filePath)` | Case-sensitive existence check |
| `findSimilarSvgFiles(dir, name)` | Suggest similar SVG filenames |
### Credential verification
| Function | Use when |
|----------|----------|
| `areAllCredentialUsagesTestedByNodes(startDir)` | Check all credentials have testedBy |

View file

@ -1,5 +1,4 @@
---
name: create-issue
description: Create Linear tickets or GitHub issues following n8n conventions. Use when the user asks to create a ticket, file a bug, open an issue, or says /create-issue.
argument-hint: "[linear|github] <description of the issue>"
compatibility:

View file

@ -1,5 +1,4 @@
---
name: create-pr
description: Creates GitHub pull requests with properly formatted titles that pass the check-pr-title CI validation. Use when creating PRs, submitting changes for review, or when the user says /pr or asks to create a pull request.
allowed-tools: Bash(git:*), Bash(gh:*), Read, Grep, Glob
---
@ -73,24 +72,17 @@ Creates GitHub PRs with titles that pass n8n's `check-pr-title` CI validation.
git push -u origin HEAD
```
6. **Create PR** using gh CLI with the template from `.github/pull_request_template.md`:
6. **Create PR** using gh CLI. Read `.github/pull_request_template.md` as the
body structure, then populate each section with actual content before
creating the PR:
- **Summary**: describe what the PR does and how to test it
- **Related tickets**: add the Linear ticket URL (`https://linear.app/n8n/issue/[TICKET-ID]`) and any GitHub issue links
- **Checklist**: keep as-is from the template
- Add a "🤖 PR Summary generated by AI" at the end of the body
```bash
gh pr create --draft --title "<type>(<scope>): <summary>" --body "$(cat <<'EOF'
## Summary
<Describe what the PR does and how to test. Photos and videos are recommended.>
## Related Linear tickets, Github issues, and Community forum posts
<!-- Link to Linear ticket: https://linear.app/n8n/issue/[TICKET-ID] -->
<!-- Use "closes #<issue-number>", "fixes #<issue-number>", or "resolves #<issue-number>" to automatically close issues -->
## Review / Merge checklist
- [ ] PR title and summary are descriptive. ([conventions](../blob/master/.github/pull_request_title_conventions.md))
- [ ] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created.
- [ ] Tests included.
- [ ] PR Labeled with `release/backport` (if the PR is an urgent fix that needs to be backported)
<populated body based on pull_request_template.md>
EOF
)"
```
@ -112,6 +104,7 @@ Based on `.github/pull_request_template.md`:
### Checklist
All items should be addressed before merging:
- The human author of the PR has checked the "I have seen this code, I have run this code, and I take responsibility for this code." checkbox
- PR title follows conventions
- Docs updated or follow-up ticket created
- Tests included (bugs need regression tests, features need coverage)
@ -192,5 +185,8 @@ Describe **what the code does**, not what threat it prevents.
| Linear ref | URL with slug (leaks title) | URL without slug or ticket ID only |
| Test name | `'should prevent SQL injection'` | `'should sanitize query parameters'` |
**Before pushing a security fix, verify:** no branch name, commit, PR title,
PR body, Linear URL, test name, or code comment hints at the vulnerability.
**When in doubt, check the Linear issue for possible extra precautions.**

View file

@ -1,5 +1,4 @@
---
name: create-skill
description: >-
Guides users through creating effective Agent Skills. Use when you want to
create, write, or author a new skill, or asks about skill structure, best
@ -13,13 +12,13 @@ Skills are markdown (plus optional scripts) that teach the agent a focused workf
| Location | When to use |
|----------|-------------|
| **`.claude/skills/<name>/` in this repo** | Default for n8n: team-shared, versioned. **Cursor picks up project skills from here** when working in the repo (same idea as Claude Code). |
| **`.claude/plugins/n8n/skills/<name>/`** | Default for n8n: team-shared, versioned, namespaced under `n8n:`. |
| `~/.claude/skills/<name>/` | Personal skill for Claude Code across all projects. |
| `~/.cursor/skills/<name>/` | Optional personal skill for Cursor only, global to your machine. |
**Do not** put custom skills in `~/.cursor/skills-cursor/`—that is reserved for Cursor's built-in skills.
Prefer **repo `.claude/skills/`** for anything that should match how the rest of the team works.
Prefer **plugin `.claude/plugins/n8n/skills/`** for anything that should match how the rest of the team works.
## Before you write: gather requirements
@ -29,7 +28,7 @@ Ask (or infer) briefly:
2. **Triggers** — when should the agent apply this skill?
3. **Gaps** — what does the agent *not* already know (project rules, URLs, formats)?
4. **Outputs** — templates, checklists, or strict formats?
5. **Examples** — follow an existing skill in `.claude/skills/` if one fits.
5. **Examples** — follow an existing skill in `.claude/plugins/n8n/skills/` if one fits.
Ask the user in plain language when you need more detail.
@ -80,7 +79,7 @@ description: >- # max 1024 chars, non-empty — see below
- **MCPs are optional per user** — not everyone has the same servers enabled. If a skill **requires** a specific MCP to work as written, say so explicitly:
- Put a hint in the **frontmatter description** (e.g. “Requires Linear MCP for …”) so mismatches are obvious early.
- Add a short **Prerequisites** (or **Requirements**) block near the top: which integration, what it is used for, and a **fallback** (e.g. web UI, `gh`, or “ask the user to paste …”) when it is missing.
- **Referencing other skills** — give the path from the **repository root** (e.g. `.claude/skills/create-issue/SKILL.md`) so humans and tools can resolve it. From a sibling folder, a relative link works too: `[create-issue](../create-issue/SKILL.md)`. Name the skill and the task; parent skills should delegate steps instead of duplicating long procedures.
- **Referencing other skills** — use the namespaced invocation name (e.g. `n8n:create-issue`) so the agent resolves the plugin skill. For human-readable links, give the path from the repo root (e.g. `.claude/plugins/n8n/skills/create-issue/SKILL.md`). From a sibling folder, a relative link works too: `[create-issue](../create-issue/SKILL.md)`. Parent skills should delegate steps instead of duplicating long procedures.
## Patterns (pick what fits)

View file

@ -1,5 +1,4 @@
---
name: linear-issue
description: Fetch and analyze Linear issue with all related context. Use when starting work on a Linear ticket, analyzing issues, or gathering context about a Linear issue.
disable-model-invocation: true
argument-hint: "[issue-id]"
@ -51,7 +50,25 @@ Use the Linear MCP tools to fetch the issue details and comments together:
Both calls should be made together in the same step to gather the complete context upfront.
### 2. Analyze Attachments and Media (MANDATORY)
### 2. Check for Private/Security Issues (MANDATORY — do this before anything else)
After fetching the issue, immediately check its labels:
1. Look at the labels returned with the issue.
2. If any label is **`n8n-private`**:
a. Run `git remote -v` (via Bash) to list all configured remotes.
b. If **any** remote URL contains `n8n-io/n8n` without the `-private` suffix (i.e. matches the public repo), **stop immediately** and tell the user:
> **This issue is marked `n8n-private` and must be developed in a clean clone of the private repository.**
>
> One or more of your remotes point to the **public** `n8n-io/n8n` repo. Mixed remotes are not allowed — you must work in a **separate local clone** of `n8n-io/n8n-private` with no references to the public repo.
> For the full process, see: https://www.notion.so/n8n/Processing-critical-high-security-bugs-vulnerabilities-in-private-2f45b6e0c94f803da806f472111fb1a5
Do **not** continue with any further steps — return after showing this message.
3. If the label is not present, or all remotes point exclusively to `n8n-io/n8n-private`, continue normally.
### 3. Analyze Attachments and Media (MANDATORY)
**IMPORTANT:** This step is NOT optional. You MUST scan and fetch all visual content from BOTH the issue description AND all comments.
@ -75,7 +92,7 @@ Both calls should be made together in the same step to gather the complete conte
- Summarize key points, timestamps, and any demonstrated issues
3. Loom videos often contain crucial reproduction steps and context that text alone cannot convey
### 3. Fetch Related Context
### 4. Fetch Related Context
**Related Linear Issues:**
- Use `mcp__linear__get_issue` for any issues mentioned in relations (blocking, blocked by, related, duplicates)
@ -91,14 +108,14 @@ Both calls should be made together in the same step to gather the complete conte
- If Notion links are present, use `mcp__notion__notion-fetch` with the Notion URL or page ID to retrieve document content
- Summarize relevant documentation
### 4. Review Comments
### 5. Review Comments
Comments were already fetched in Step 1. Review them for:
- Additional context and discussion history
- Any attachments or media linked in comments (process in Step 2)
- Any attachments or media linked in comments (process in Step 3)
- Clarifications or updates to the original issue description
### 5. Identify Affected Node (if applicable)
### 6. Identify Affected Node (if applicable)
Determine whether this issue is specific to a particular n8n node (e.g. a trigger, action, or tool node). Look for clues in:
- The issue title (e.g. "Linear trigger", "Slack node", "HTTP Request")
@ -114,7 +131,11 @@ If the issue is node-specific:
- Tool variants: `n8n-nodes-base.<name>Tool` (e.g. "Google Sheets Tool" → `n8n-nodes-base.googleSheetsTool`)
- LangChain/AI nodes: `@n8n/n8n-nodes-langchain.<camelCaseName>` (e.g. "OpenAI Chat Model" → `@n8n/n8n-nodes-langchain.lmChatOpenAi`)
2. **Look up the node's popularity score** from `packages/frontend/editor-ui/data/node-popularity.json`. Use `Grep` to search for the node ID in that file. The popularity score is a log-scale value between 0 and 1. Use these thresholds to classify:
2. **Look up the node's popularity score** — first check for a Flaky assessment (see below), otherwise use the popularity file:
**Primary: Check for Flaky's assessment in Linear comments.** Flaky is an auto-triage agent that posts issue analysis as a comment. Search the comments already fetched in Step 1 for a comment from a user named "Flaky" (or containing "Flaky" in the author name) — do not re-fetch comments. If found, extract the popularity score and level directly from Flaky's analysis and use those values.
**Fallback (if no Flaky comment exists):** Look up the node's popularity score from `packages/frontend/editor-ui/data/node-popularity.json`. Use `Grep` to search for the node ID in that file. The popularity score is a log-scale value between 0 and 1. Use these thresholds to classify:
| Score | Level | Description | Examples |
|-------|-------|-------------|----------|
@ -122,13 +143,15 @@ If the issue is node-specific:
| 0.4–0.8 | **Medium** | Regularly used integrations | Slack (0.78), GitHub (0.64), Jira (0.65), MongoDB (0.63) |
| < 0.4 | **Low** | Niche or rarely used nodes | Amqp (0.34), Wise (0.36), CraftMyPdf (0.33) |
Include the raw score and the level (high/medium/low) in the summary.
Include the raw score and the level (high/medium/low) in the summary, and note whether it came from Flaky or the popularity file.
3. If the node is **not found** in the popularity file, note that it may be a community node or a very new/niche node.
3. If the node is **not found** in the popularity file (and no Flaky comment exists), note that it may be a community node or a very new/niche node.
### 6. Assess Effort/Complexity
### 7. Assess Effort/Complexity
After gathering all context, assess the effort required to fix/implement the issue. Use the following T-shirt sizes:
**Primary: Check for Flaky's effort estimate in Linear comments.** Search the comments already fetched in Step 1 for a Flaky comment — do not re-fetch. If found, extract the effort/complexity estimate directly from it and use that as your assessment.
**Fallback (if no Flaky comment exists):** After gathering all context, assess the effort required to fix/implement the issue. Use the following T-shirt sizes:
| Size | Approximate effort |
|------|--------------------|
@ -146,9 +169,9 @@ To make this assessment, consider:
- **Dependencies**: Are there external API changes, new packages, or cross-team coordination needed?
- **Documentation**: Does this require docs updates, migration guides, or changelog entries?
Provide the T-shirt size along with a brief justification explaining the key factors that drove the estimate.
Provide the T-shirt size along with a brief justification explaining the key factors that drove the estimate. Note whether it came from Flaky or your own assessment.
### 7. Present Summary
### 8. Present Summary
**Before presenting, verify you have completed:**
- [ ] Downloaded and viewed ALL images in the description AND comments

View file

@ -1,5 +1,4 @@
---
name: loom-transcript
description: Fetch and display the full transcript from a Loom video URL. Use when the user wants to get or read a Loom transcript.
argument-hint: [loom-url]
---

View file

@ -1,5 +1,4 @@
---
name: node-add-oauth
description: Add OAuth2 credential support to an existing n8n node — creates the credential file, updates the node, adds tests, and keeps the CLI constant in sync. Use when the user says /node-add-oauth.
argument-hint: "[node-name] [optional: custom-scopes flag or scope list]"
---

View file

@ -1,7 +1,5 @@
---
name: reproduce-bug
description: Reproduce a bug from a Linear ticket with a failing test. Expects the full ticket context (title, description, comments) to be provided as input.
user_invocable: true
---
# Bug Reproduction Framework

View file

@ -1,5 +1,4 @@
---
name: spec-driven-development
description: Keeps implementation and specs in sync. Use when working on a feature that has a spec in .claude/specs/, when the user says /spec, or when starting implementation of a documented feature. Also use when the user asks to verify implementation against a spec or update a spec after changes.
---

View file

@ -15,6 +15,21 @@
"Write(.claude/plans/*)"
]
},
"hooks": {
"PostToolUse": [
{
"matcher": "Skill",
"hooks": [
{
"type": "command",
"command": "node .claude/plugins/n8n/scripts/track-skill-usage.mjs",
"timeout": 10,
"async": true
}
]
}
]
},
"extraKnownMarketplaces": {
"n8n": {
"source": {

View file

@ -39,10 +39,13 @@ runs:
- name: Login to DockerHub
if: inputs.login-dockerhub == 'true'
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0
with:
username: ${{ inputs.dockerhub-username }}
password: ${{ inputs.dockerhub-password }}
shell: bash
env:
DOCKER_USER: ${{ inputs.dockerhub-username }}
DOCKER_PASS: ${{ inputs.dockerhub-password }}
run: |
node .github/scripts/retry.mjs --attempts 3 --delay 10 \
'echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin'
- name: Login to DHI Registry
if: inputs.login-dhi == 'true'

View file

@ -10,13 +10,14 @@ Photos and videos are recommended.
<!--
Include links to **Linear ticket** or Github issue or Community forum post.
Important in order to close *automatically* and provide context to reviewers.
https://linear.app/n8n/issue/
https://linear.app/n8n/issue/[TICKET-ID]
-->
<!-- Use "closes #<issue-number>", "fixes #<issue-number>", or "resolves #<issue-number>" to automatically close issues when the PR is merged. -->
## Review / Merge checklist
- [ ] I have seen this code, I have run this code, and I take responsibility for this code.
- [ ] PR title and summary are descriptive. ([conventions](../blob/master/.github/pull_request_title_conventions.md)) <!--
**Remember, the title automatically goes into the changelog.
Use `(no-changelog)` otherwise.**

View file

@ -1,4 +1,5 @@
import semver from 'semver';
import { parse } from 'yaml';
import { writeFile, readFile } from 'fs/promises';
import { resolve } from 'path';
import child_process from 'child_process';
@ -7,14 +8,19 @@ import assert from 'assert';
const exec = promisify(child_process.exec);
/**
* @param {string | semver.SemVer} currentVersion
*/
function generateExperimentalVersion(currentVersion) {
const parsed = semver.parse(currentVersion);
if (!parsed) throw new Error(`Invalid version: ${currentVersion}`);
// Check if it's already an experimental version
if (parsed.prerelease.length > 0 && parsed.prerelease[0] === 'exp') {
const minor = parsed.prerelease[1] || 0;
const minorInt = typeof minor === 'string' ? parseInt(minor) : minor;
// Increment the experimental minor version
const expMinor = (parsed.prerelease[1] || 0) + 1;
const expMinor = minorInt + 1;
return `${parsed.major}.${parsed.minor}.${parsed.patch}-exp.${expMinor}`;
}
@ -23,7 +29,10 @@ function generateExperimentalVersion(currentVersion) {
}
const rootDir = process.cwd();
const releaseType = process.env.RELEASE_TYPE;
const releaseType = /** @type { import('semver').ReleaseType | "experimental" } */ (
process.env.RELEASE_TYPE
);
assert.match(releaseType, /^(patch|minor|major|experimental|premajor)$/, 'Invalid RELEASE_TYPE');
// TODO: if releaseType is `auto` determine release type based on the changelog
@ -39,8 +48,12 @@ const packages = JSON.parse(
const packageMap = {};
for (let { name, path, version, private: isPrivate } of packages) {
if (isPrivate && path !== rootDir) continue;
if (path === rootDir) name = 'monorepo-root';
if (isPrivate && path !== rootDir) {
continue;
}
if (path === rootDir) {
name = 'monorepo-root';
}
const isDirty = await exec(`git diff --quiet HEAD ${lastTag} -- ${path}`)
.then(() => false)
@ -57,11 +70,94 @@ assert.ok(
// Propagate isDirty transitively: if a package's dependency will be bumped,
// that package also needs a bump (e.g. design-system → editor-ui → cli).
// Detect root-level changes that affect resolved dep versions without touching individual
// package.json files: pnpm.overrides (applies to all specifiers)
// and pnpm-workspace.yaml catalog entries (applies only to deps using a "catalog:…" specifier).
const rootPkgJson = JSON.parse(await readFile(resolve(rootDir, 'package.json'), 'utf-8'));
const rootPkgJsonAtTag = await exec(`git show ${lastTag}:package.json`)
.then(({ stdout }) => JSON.parse(stdout))
.catch(() => ({}));
const getOverrides = (pkg) => ({ ...pkg.pnpm?.overrides, ...pkg.overrides });
const currentOverrides = getOverrides(rootPkgJson);
const previousOverrides = getOverrides(rootPkgJsonAtTag);
const changedOverrides = new Set(
Object.keys({ ...currentOverrides, ...previousOverrides }).filter(
(k) => currentOverrides[k] !== previousOverrides[k],
),
);
const parseWorkspaceYaml = (content) => {
try {
return /** @type {Record<string, unknown>} */ (parse(content) ?? {});
} catch {
return {};
}
};
const workspaceYaml = parseWorkspaceYaml(
await readFile(resolve(rootDir, 'pnpm-workspace.yaml'), 'utf-8').catch(() => ''),
);
const workspaceYamlAtTag = parseWorkspaceYaml(
await exec(`git show ${lastTag}:pnpm-workspace.yaml`)
.then(({ stdout }) => stdout)
.catch(() => ''),
);
const getCatalogs = (ws) => {
const result = new Map();
if (ws.catalog) {
result.set('default', /** @type {Record<string,string>} */ (ws.catalog));
}
for (const [name, entries] of Object.entries(ws.catalogs ?? {})) {
result.set(name, entries);
}
return result;
};
// changedCatalogEntries: Map<catalogName, Set<depName>>
const currentCatalogs = getCatalogs(workspaceYaml);
const previousCatalogs = getCatalogs(workspaceYamlAtTag);
const changedCatalogEntries = new Map();
for (const catalogName of new Set([...currentCatalogs.keys(), ...previousCatalogs.keys()])) {
const current = currentCatalogs.get(catalogName) ?? {};
const previous = previousCatalogs.get(catalogName) ?? {};
const changedDeps = new Set(
Object.keys({ ...current, ...previous }).filter((dep) => current[dep] !== previous[dep]),
);
if (changedDeps.size > 0) {
changedCatalogEntries.set(catalogName, changedDeps);
}
}
// Store full dep objects (with specifiers) so we can inspect "catalog:…" values below.
const depsByPackage = {};
for (const packageName in packageMap) {
const packageFile = resolve(packageMap[packageName].path, 'package.json');
const packageJson = JSON.parse(await readFile(packageFile, 'utf-8'));
depsByPackage[packageName] = Object.keys(packageJson.dependencies || {});
depsByPackage[packageName] = /** @type {Record<string,string>} */ (
packageJson.dependencies ?? {}
);
}
// Mark packages dirty if any dep had a root-level override or catalog version change.
for (const [packageName, deps] of Object.entries(depsByPackage)) {
if (packageMap[packageName].isDirty) continue;
for (const [dep, specifier] of Object.entries(deps)) {
if (changedOverrides.has(dep)) {
packageMap[packageName].isDirty = true;
break;
}
if (typeof specifier === 'string' && specifier.startsWith('catalog:')) {
const catalogName = specifier === 'catalog:' ? 'default' : specifier.slice(8);
if (changedCatalogEntries.get(catalogName)?.has(dep)) {
packageMap[packageName].isDirty = true;
break;
}
}
}
}
let changed = true;
@ -69,7 +165,7 @@ while (changed) {
changed = false;
for (const packageName in packageMap) {
if (packageMap[packageName].isDirty) continue;
if (depsByPackage[packageName].some((dep) => packageMap[dep]?.isDirty)) {
if (Object.keys(depsByPackage[packageName]).some((dep) => packageMap[dep]?.isDirty)) {
packageMap[packageName].isDirty = true;
changed = true;
}

View file

@ -9,7 +9,7 @@ class TagGenerator {
this.githubOutput = process.env.GITHUB_OUTPUT || null;
}
generate({ image, version, platform, includeDockerHub = false }) {
generate({ image, version, platform, includeDockerHub = false, sha = '' }) {
let imageName = image;
let versionSuffix = '';
@ -27,6 +27,21 @@ class TagGenerator {
};
tags.all = [...tags.ghcr, ...tags.docker];
// Generate additional SHA-based tags for immutable references
if (sha) {
const shaVersion = `${version}-${sha}`;
const shaPlatformTag = `${shaVersion}${versionSuffix}${platformSuffix}`;
const shaGhcr = [`ghcr.io/${this.githubOwner}/${imageName}:${shaPlatformTag}`];
const shaDocker = includeDockerHub
? [`${this.dockerUsername}/${imageName}:${shaPlatformTag}`]
: [];
tags.all = [...tags.all, ...shaGhcr, ...shaDocker];
tags.ghcr = [...tags.ghcr, ...shaGhcr];
tags.docker = [...tags.docker, ...shaDocker];
tags.shaPrimaryTag = shaGhcr[0].replace(/-amd64$|-arm64$/, '');
}
return tags;
}
@ -40,18 +55,21 @@ class TagGenerator {
`${prefixStr}docker_tag=${tags.docker[0] || ''}`,
`${prefixStr}primary_tag=${primaryTag}`,
];
if (tags.shaPrimaryTag) {
outputs.push(`${prefixStr}sha_primary_tag=${tags.shaPrimaryTag}`);
}
appendFileSync(this.githubOutput, outputs.join('\n') + '\n');
} else {
console.log(JSON.stringify(tags, null, 2));
}
}
generateAll({ version, platform, includeDockerHub = false }) {
generateAll({ version, platform, includeDockerHub = false, sha = '' }) {
const images = ['n8n', 'runners', 'runners-distroless'];
const results = {};
for (const image of images) {
const tags = this.generate({ image, version, platform, includeDockerHub });
const tags = this.generate({ image, version, platform, includeDockerHub, sha });
const prefix = image.replace('-distroless', '_distroless');
results[prefix] = tags;
@ -86,6 +104,7 @@ if (import.meta.url === `file://${process.argv[1]}`) {
version,
platform: getArg('platform'),
includeDockerHub: hasFlag('include-docker'),
sha: getArg('sha') || '',
});
if (!generator.githubOutput) {
console.log(JSON.stringify(results, null, 2));
@ -101,6 +120,7 @@ if (import.meta.url === `file://${process.argv[1]}`) {
version,
platform: getArg('platform'),
includeDockerHub: hasFlag('include-docker'),
sha: getArg('sha') || '',
});
generator.output(tags);
}

View file

@ -1,7 +1,7 @@
{
"name": "workflow-scripts",
"scripts": {
"test": "node --test --experimental-test-module-mocks ./*.test.mjs"
"test": "node --test --experimental-test-module-mocks ./*.test.mjs ./quality/*.test.mjs"
},
"dependencies": {
"@actions/github": "9.0.0",
@ -9,8 +9,10 @@
"conventional-changelog": "7.2.0",
"debug": "4.4.3",
"glob": "13.0.6",
"minimatch": "10.2.4",
"semver": "7.7.4",
"tempfile": "6.0.1"
"tempfile": "6.0.1",
"yaml": "^2.8.3"
},
"devDependencies": {
"conventional-changelog-angular": "8.3.0"

View file

@ -23,12 +23,18 @@ importers:
glob:
specifier: 13.0.6
version: 13.0.6
minimatch:
specifier: 10.2.4
version: 10.2.4
semver:
specifier: 7.7.4
version: 7.7.4
tempfile:
specifier: 6.0.1
version: 6.0.1
yaml:
specifier: ^2.8.3
version: 2.8.3
devDependencies:
conventional-changelog-angular:
specifier: 8.3.0
@ -289,6 +295,11 @@ packages:
wordwrap@1.0.0:
resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==}
yaml@2.8.3:
resolution: {integrity: sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg==}
engines: {node: '>= 14.6'}
hasBin: true
snapshots:
'@actions/github@9.0.0':
@ -537,3 +548,5 @@ snapshots:
walk-up-path@4.0.0: {}
wordwrap@1.0.0: {}
yaml@2.8.3: {}

25
.github/scripts/pnpm-utils.mjs vendored Normal file
View file

@ -0,0 +1,25 @@
import child_process from 'child_process';
import { promisify } from 'node:util';
const exec = promisify(child_process.exec);
/**
 * @typedef PnpmPackage
 * @property { string } name
 * @property { string } version
 * @property { string } path
 * @property { boolean } private
 * */
/**
 * Lists all projects in the pnpm workspace.
 *
 * Runs `pnpm ls -r --only-projects --json` and projects each entry down to
 * the fields we care about in plain JS. (Previously this piped through `jq`,
 * which added a shell pipeline and required jq to be installed on the runner
 * for a trivial field selection.)
 *
 * @returns { Promise<PnpmPackage[]> }
 * */
export async function getMonorepoProjects() {
	const { stdout } = await exec('pnpm ls -r --only-projects --json');
	/** @type {Array<{ name: string, version: string, path: string, private: boolean }>} */
	const projects = JSON.parse(stdout);
	return projects.map(({ name, version, path, private: isPrivate }) => ({
		name,
		version,
		path,
		private: isPrivate,
	}));
}

View file

@ -0,0 +1,143 @@
#!/usr/bin/env node
/**
* Fetches QA metric comparisons and posts/updates a PR comment.
*
* Usage:
* node .github/scripts/post-qa-metrics-comment.mjs --metrics memory-heap-used-baseline
* node .github/scripts/post-qa-metrics-comment.mjs --metrics memory-heap-used-baseline --pr 27880 --dry-run
*
* Env:
* QA_METRICS_COMMENT_WEBHOOK_URL - n8n workflow webhook (required)
* QA_METRICS_WEBHOOK_USER/PASSWORD - Basic auth for webhook
* GITHUB_TOKEN - For posting comments (not needed with --dry-run)
* GITHUB_REF, GITHUB_REPOSITORY, GITHUB_SHA - Auto-set in CI
*/
import { parseArgs } from 'node:util';
// Marker embedded in our bot comment so later runs can find and update it
// instead of posting duplicates.
const MARKER = '<!-- n8n-qa-metrics-comparison -->';
// --- CLI arguments ---
const { values } = parseArgs({
	options: {
		metrics: { type: 'string' },
		pr: { type: 'string' },
		'baseline-days': { type: 'string', default: '14' },
		'dry-run': { type: 'boolean', default: false },
	},
	strict: true,
});
// Comma-separated metric names, e.g. "memory-heap-used-baseline".
const metrics = values.metrics?.split(',').map((m) => m.trim());
if (!metrics?.length) {
	console.error('--metrics is required (comma-separated metric names)');
	process.exit(1);
}
// Explicit --pr wins; otherwise derive the number from GITHUB_REF (refs/pull/<n>/...).
const pr = parseInt(values.pr ?? inferPr(), 10);
if (!pr) {
	console.error('--pr is required (or set GITHUB_REF)');
	process.exit(1);
}
const webhookUrl = process.env.QA_METRICS_COMMENT_WEBHOOK_URL;
if (!webhookUrl) {
	console.error('QA_METRICS_COMMENT_WEBHOOK_URL is required');
	process.exit(1);
}
const repo = process.env.GITHUB_REPOSITORY ?? 'n8n-io/n8n';
// Short 8-char SHA; only used for display in the webhook payload.
const sha = process.env.GITHUB_SHA?.slice(0, 8) ?? '';
const baselineDays = parseInt(values['baseline-days'], 10);
// --- Fetch ---
const headers = { 'Content-Type': 'application/json' };
const user = process.env.QA_METRICS_WEBHOOK_USER;
const pass = process.env.QA_METRICS_WEBHOOK_PASSWORD;
if (user && pass) {
	headers.Authorization = `Basic ${Buffer.from(`${user}:${pass}`).toString('base64')}`;
}
console.log(`PR #${pr}: fetching ${metrics.join(', ')} (${baselineDays}-day baseline)`);
let res;
try {
	res = await fetch(webhookUrl, {
		method: 'POST',
		headers,
		body: JSON.stringify({
			pr_number: pr,
			github_repo: repo,
			git_sha: sha,
			baseline_days: baselineDays,
			metric_names: metrics,
		}),
		signal: AbortSignal.timeout(60_000),
	});
} catch (err) {
	// Metrics are informational: an unreachable webhook must not fail the CI job.
	console.warn(`Webhook unreachable, skipping metrics comment: ${err.message}`);
	process.exit(0);
}
if (!res.ok) {
	// Same soft-fail policy as above: log and exit 0.
	const text = await res.text().catch(() => '');
	console.warn(`Webhook failed: ${res.status} ${res.statusText}\n${text}`);
	console.warn('Skipping metrics comment.');
	process.exit(0);
}
// The webhook returns pre-rendered markdown plus a has_data flag.
const { markdown, has_data } = await res.json();
if (!has_data || !markdown) {
	console.log('No metric data available, skipping.');
	process.exit(0);
}
if (values['dry-run']) {
	console.log('\n--- DRY RUN ---\n');
	console.log(markdown);
	process.exit(0);
}
// --- Post comment ---
const token = process.env.GITHUB_TOKEN;
if (!token) {
	console.error('GITHUB_TOKEN is required to post comments');
	process.exit(1);
}
const [owner, repoName] = repo.split('/');
const ghHeaders = {
	Accept: 'application/vnd.github+json',
	Authorization: `Bearer ${token}`,
	'Content-Type': 'application/json',
};
// Scan existing comments for MARKER so we update in place.
// NOTE(review): only the first 100 comments are scanned — confirm this is
// acceptable for very long PR threads.
const comments = await fetch(
	`https://api.github.com/repos/${owner}/${repoName}/issues/${pr}/comments?per_page=100`,
	{ headers: ghHeaders },
).then((r) => r.json());
const existing = Array.isArray(comments)
	? comments.find((c) => c.body?.includes(MARKER))
	: null;
// NOTE(review): the posted body is the webhook's markdown verbatim — this
// assumes the webhook already embeds MARKER in it; otherwise the next run
// cannot find this comment to update it. Confirm against the workflow.
if (existing) {
	await fetch(
		`https://api.github.com/repos/${owner}/${repoName}/issues/comments/${existing.id}`,
		{ method: 'PATCH', headers: ghHeaders, body: JSON.stringify({ body: markdown }) },
	);
	console.log(`Updated comment ${existing.id}`);
} else {
	const created = await fetch(
		`https://api.github.com/repos/${owner}/${repoName}/issues/${pr}/comments`,
		{ method: 'POST', headers: ghHeaders, body: JSON.stringify({ body: markdown }) },
	).then((r) => r.json());
	console.log(`Created comment ${created.id}`);
}
// Derive the PR number from GITHUB_REF (e.g. "refs/pull/123/merge" -> "123").
// Returns undefined when the ref is absent or not a pull-request ref.
function inferPr() {
	const ref = process.env.GITHUB_REF ?? '';
	const prMatch = /refs\/pull\/(\d+)/.exec(ref);
	return prMatch?.[1];
}

View file

@ -0,0 +1,81 @@
/**
* Checks that the PR description contains a checked ownership acknowledgement checkbox.
*
* Exit codes:
* 0 Checkbox is present and checked
* 1 Checkbox is missing or unchecked
*/
import { initGithub, getEventFromGithubEventPath } from '../github-helpers.mjs';
const BOT_MARKER = '<!-- pr-ownership-check -->';
/**
 * Returns true if the PR body contains a checked ownership acknowledgement checkbox.
 *
 * Matching is case-insensitive and tolerates arbitrary whitespace between the
 * checkbox marker and the sentence, and between its clauses.
 *
 * @param {string | null | undefined} body
 * @returns {boolean}
 */
export function isOwnershipCheckboxChecked(body) {
	const text = body ?? '';
	const ownershipPattern =
		/\[x\]\s+I have seen this code,\s+I have run this code,\s+and I take responsibility for this code/i;
	return ownershipPattern.test(text);
}
/**
 * Entry point for the ownership-acknowledgement check.
 *
 * Reads the pull_request event payload, verifies the ownership checkbox in the
 * PR body, and manages a single bot comment: posted/updated (and the process
 * exits 1) while the checkbox is missing or unchecked, deleted once it passes.
 */
async function main() {
	const event = getEventFromGithubEventPath();
	const pr = event.pull_request;
	const { octokit, owner, repo } = initGithub();
	// Look for a previous bot comment so we update/delete instead of duplicating.
	const { data: comments } = await octokit.rest.issues.listComments({
		owner,
		repo,
		issue_number: pr.number,
		per_page: 100,
	});
	// Optional chaining: a comment body can be empty/undefined in API payloads.
	// This also matches the guard used in check-pr-size.mjs.
	const botComment = comments.find((c) => c.body?.includes(BOT_MARKER));
	if (!isOwnershipCheckboxChecked(pr.body)) {
		const message = [
			BOT_MARKER,
			'## ⚠️ Ownership acknowledgement required',
			'',
			'Please add or check the following item in your PR description before this can be merged:',
			'',
			'```',
			'- [x] I have seen this code, I have run this code, and I take responsibility for this code.',
			'```',
		].join('\n');
		if (botComment) {
			await octokit.rest.issues.updateComment({
				owner,
				repo,
				comment_id: botComment.id,
				body: message,
			});
		} else {
			await octokit.rest.issues.createComment({
				owner,
				repo,
				issue_number: pr.number,
				body: message,
			});
		}
		console.log(
			'::error::Ownership checkbox is not checked. Add it to your PR description and check it.',
		);
		process.exit(1);
	} else if (botComment) {
		// Checkbox is checked: clean up any stale failure comment.
		await octokit.rest.issues.deleteComment({
			owner,
			repo,
			comment_id: botComment.id,
		});
	}
}
if (import.meta.url === `file://${process.argv[1]}`) {
await main();
}

View file

@ -0,0 +1,85 @@
import { describe, it, before, mock } from 'node:test';
import assert from 'node:assert/strict';
/**
* Run with:
* node --test --experimental-test-module-mocks .github/scripts/quality/check-ownership-checkbox.test.mjs
*/
mock.module('../github-helpers.mjs', {
namedExports: {
initGithub: () => {},
getEventFromGithubEventPath: () => {},
},
});
let isOwnershipCheckboxChecked;
before(async () => {
({ isOwnershipCheckboxChecked } = await import('./check-ownership-checkbox.mjs'));
});
describe('isOwnershipCheckboxChecked', () => {
it('returns true for a checked checkbox with exact text', () => {
const body =
'- [x] I have seen this code, I have run this code, and I take responsibility for this code.';
assert.ok(isOwnershipCheckboxChecked(body));
});
it('returns true for uppercase [X]', () => {
const body =
'- [X] I have seen this code, I have run this code, and I take responsibility for this code.';
assert.ok(isOwnershipCheckboxChecked(body));
});
it('returns false for an unchecked checkbox [ ]', () => {
const body =
'- [ ] I have seen this code, I have run this code, and I take responsibility for this code.';
assert.equal(isOwnershipCheckboxChecked(body), false);
});
it('returns false when the checkbox is absent', () => {
const body = '## Summary\n\nThis PR does some things.\n';
assert.equal(isOwnershipCheckboxChecked(body), false);
});
it('returns false for null body', () => {
assert.equal(isOwnershipCheckboxChecked(null), false);
});
it('returns false for undefined body', () => {
assert.equal(isOwnershipCheckboxChecked(undefined), false);
});
it('returns false for empty body', () => {
assert.equal(isOwnershipCheckboxChecked(''), false);
});
it('returns true when checkbox appears among other content', () => {
const body = [
'## Summary',
'',
'Some description here.',
'',
'## Checklist',
'- [x] Tests included',
'- [x] I have seen this code, I have run this code, and I take responsibility for this code.',
'- [ ] Docs updated',
].join('\n');
assert.ok(isOwnershipCheckboxChecked(body));
});
it('returns false when only other checkboxes are checked', () => {
const body = [
'- [x] Tests included',
'- [x] Docs updated',
'- [ ] I have seen this code, I have run this code, and I take responsibility for this code.',
].join('\n');
assert.equal(isOwnershipCheckboxChecked(body), false);
});
it('is case-insensitive for the checkbox marker', () => {
const lower =
'- [x] i have seen this code, i have run this code, and i take responsibility for this code.';
assert.ok(isOwnershipCheckboxChecked(lower));
});
});

View file

@ -0,0 +1,172 @@
/**
* Checks that the PR does not exceed the line addition limit.
*
* Files matching any pattern in EXCLUDE_PATTERNS are not counted toward the
* limit (e.g. test files, snapshots).
*
* A maintainer (write access or above) can override by commenting `/size-limit-override`
* on the PR. The override takes effect on the next pull_request event (push, reopen, etc.).
*
* Exit codes:
* 0 PR is within the limit, or a valid override comment exists
* 1 PR exceeds the limit with no valid override
*/
import { minimatch } from 'minimatch';
import { initGithub, getEventFromGithubEventPath } from '../github-helpers.mjs';
// Maximum counted line additions a PR may have without an override.
export const SIZE_LIMIT = 1000;
// Comment command a maintainer posts to bypass the limit.
export const OVERRIDE_COMMAND = '/size-limit-override';
// Files matching these globs do not count toward the limit.
export const EXCLUDE_PATTERNS = [
	// Test files (by extension)
	'**/*.test.ts',
	'**/*.test.js',
	'**/*.test.mjs',
	'**/*.spec.ts',
	'**/*.spec.js',
	'**/*.spec.mjs',
	// Test directories
	'**/test/**',
	'**/tests/**',
	'**/__tests__/**',
	// Snapshots
	'**/__snapshots__/**',
	'**/*.snap',
	// Fixtures and mocks
	'**/fixtures/**',
	'**/__mocks__/**',
	// Dedicated testing package
	'packages/testing/**',
	// Lock file (can produce massive diffs on dependency changes)
	'pnpm-lock.yaml',
];
// Marker embedded in the bot comment so later runs can find/update/delete it.
const BOT_MARKER = '<!-- pr-size-check -->';
/**
 * Returns true if any comment in the list is a valid `/size-limit-override` from a
 * user with write access or above.
 *
 * @param {Array<{ body?: string, user: { login: string } | null }>} comments
 * @param {(username: string) => Promise<string>} getPermission - returns the permission level string
 * @returns {Promise<boolean>}
 */
export async function hasValidOverride(comments, getPermission) {
	for (const comment of comments) {
		if (!comment.body?.startsWith(OVERRIDE_COMMAND)) {
			continue;
		}
		// Skip comments whose author is unavailable (e.g. a deleted account)
		// instead of aborting the scan: a later comment may still be a valid
		// override. (Previously this returned false immediately.)
		if (!comment.user) {
			continue;
		}
		const perm = await getPermission(comment.user.login);
		if (['admin', 'write', 'maintain'].includes(perm)) {
			return true;
		}
	}
	return false;
}
/**
 * Returns the total additions across all files, excluding those matching any exclude pattern.
 *
 * @param {Array<{ filename: string, additions: number }>} files
 * @param {string[]} excludePatterns
 * @returns {number}
 */
export function countFilteredAdditions(files, excludePatterns) {
	let total = 0;
	for (const file of files) {
		const isExcluded = excludePatterns.some((pattern) => minimatch(file.filename, pattern));
		if (!isExcluded) {
			total += file.additions;
		}
	}
	return total;
}
/**
 * Entry point for the PR size-limit check.
 *
 * Counts line additions (excluding EXCLUDE_PATTERNS), scans comments for a
 * maintainer override, and manages a single bot comment. Exits 1 when the
 * limit is exceeded with no valid override.
 */
async function main() {
	const event = getEventFromGithubEventPath();
	const pr = event.pull_request;
	const { octokit, owner, repo } = initGithub();
	// Paginate so PRs touching more than 100 files are fully counted.
	const files = await octokit.paginate(octokit.rest.pulls.listFiles, {
		owner,
		repo,
		pull_number: pr.number,
		per_page: 100,
	});
	const additions = countFilteredAdditions(files, EXCLUDE_PATTERNS);
	// NOTE(review): 'sort'/'direction' are documented for the repo-level
	// comments endpoint; the per-issue listComments endpoint may ignore them —
	// confirm. Only the first 100 comments are scanned either way.
	const { data: comments } = await octokit.rest.issues.listComments({
		owner,
		repo,
		issue_number: pr.number,
		per_page: 100,
		sort: 'created',
		direction: 'desc',
	});
	// Permission lookup is done lazily per override comment author.
	const overrideFound = await hasValidOverride(comments, async (username) => {
		const { data: perm } = await octokit.rest.repos.getCollaboratorPermissionLevel({
			owner,
			repo,
			username,
		});
		return perm.permission;
	});
	const botComment = comments.find((c) => c.body?.includes(BOT_MARKER));
	if (additions > SIZE_LIMIT && !overrideFound) {
		const message = [
			BOT_MARKER,
			`## ! PR exceeds size limit (${additions.toLocaleString()} lines added)`,
			'',
			`This PR adds **${additions.toLocaleString()} lines**, exceeding the ${SIZE_LIMIT.toLocaleString()}-line limit (test files excluded).`,
			'',
			'Large PRs are harder to review and increase the risk of bugs going unnoticed. Please consider:',
			'- Breaking this into smaller, logically separate PRs',
			'- Moving unrelated changes to a follow-up PR',
			'',
			`If the size is genuinely justified (e.g. generated code, large migrations, test fixtures), a maintainer can override by commenting \`${OVERRIDE_COMMAND}\` and then pushing a new commit or re-running this check.`,
		].join('\n');
		// Update the existing bot comment in place rather than spamming the PR.
		if (botComment) {
			await octokit.rest.issues.updateComment({
				owner,
				repo,
				comment_id: botComment.id,
				body: message,
			});
		} else {
			await octokit.rest.issues.createComment({
				owner,
				repo,
				issue_number: pr.number,
				body: message,
			});
		}
		console.log(
			`::error::PR adds ${additions.toLocaleString()} lines (test files excluded), exceeding the ${SIZE_LIMIT.toLocaleString()}-line limit. Reduce PR size or ask a maintainer to comment \`${OVERRIDE_COMMAND}\`.`,
		);
		process.exit(1);
	} else {
		// Passing (within limit or overridden): remove any stale failure comment.
		if (botComment) {
			await octokit.rest.issues.deleteComment({
				owner,
				repo,
				comment_id: botComment.id,
			});
		}
		if (overrideFound && additions > SIZE_LIMIT) {
			console.log(
				`PR size limit overridden. ${additions.toLocaleString()} lines added (limit: ${SIZE_LIMIT.toLocaleString()}, test files excluded).`,
			);
		}
	}
}
if (import.meta.url === `file://${process.argv[1]}`) {
await main();
}

View file

@ -0,0 +1,206 @@
import { describe, it, before, mock } from 'node:test';
import assert from 'node:assert/strict';
/**
* Run with:
* node --test --experimental-test-module-mocks .github/scripts/quality/check-pr-size.test.mjs
*/
mock.module('../github-helpers.mjs', {
namedExports: {
initGithub: () => {},
getEventFromGithubEventPath: () => {},
},
});
let hasValidOverride, countFilteredAdditions, SIZE_LIMIT, OVERRIDE_COMMAND, EXCLUDE_PATTERNS;
before(async () => {
({ hasValidOverride, countFilteredAdditions, SIZE_LIMIT, OVERRIDE_COMMAND, EXCLUDE_PATTERNS } =
await import('./check-pr-size.mjs'));
});
/** @param {string} permission */
const permissionGetter = (permission) => async (_username) => permission;
describe('SIZE_LIMIT', () => {
it('is 1000', () => {
assert.equal(SIZE_LIMIT, 1000);
});
});
describe('hasValidOverride', () => {
it('returns false when there are no comments', async () => {
const result = await hasValidOverride([], permissionGetter('write'));
assert.equal(result, false);
});
it('returns false when no comment starts with the override command', async () => {
const comments = [
{ body: 'Looks good to me!', user: { login: 'reviewer' } },
{ body: 'Please split this PR.', user: { login: 'maintainer' } },
];
const result = await hasValidOverride(comments, permissionGetter('write'));
assert.equal(result, false);
});
it('returns true when a write-access user has posted the override command', async () => {
const comments = [{ body: OVERRIDE_COMMAND, user: { login: 'maintainer' } }];
const result = await hasValidOverride(comments, permissionGetter('write'));
assert.ok(result);
});
it('returns true for maintain permission', async () => {
const comments = [{ body: OVERRIDE_COMMAND, user: { login: 'lead' } }];
const result = await hasValidOverride(comments, permissionGetter('maintain'));
assert.ok(result);
});
it('returns true for admin permission', async () => {
const comments = [{ body: OVERRIDE_COMMAND, user: { login: 'admin' } }];
const result = await hasValidOverride(comments, permissionGetter('admin'));
assert.ok(result);
});
it('returns false when the override commenter only has read access', async () => {
const comments = [{ body: OVERRIDE_COMMAND, user: { login: 'outsider' } }];
const result = await hasValidOverride(comments, permissionGetter('read'));
assert.equal(result, false);
});
it('returns false when the override commenter only has triage access', async () => {
const comments = [{ body: OVERRIDE_COMMAND, user: { login: 'triager' } }];
const result = await hasValidOverride(comments, permissionGetter('triage'));
assert.equal(result, false);
});
it('returns false when the override command appears mid-comment, not at the start', async () => {
const comments = [
{
body: `Please note: ${OVERRIDE_COMMAND} should only be used when justified.`,
user: { login: 'maintainer' },
},
];
const result = await hasValidOverride(comments, permissionGetter('write'));
assert.equal(result, false);
});
it('returns true when one of several comments is a valid override', async () => {
const comments = [
{ body: 'Looks good!', user: { login: 'reviewer' } },
{ body: OVERRIDE_COMMAND, user: { login: 'maintainer' } },
{ body: 'Please add tests.', user: { login: 'other' } },
];
const result = await hasValidOverride(comments, permissionGetter('write'));
assert.ok(result);
});
it('returns false when override comment exists but all posters lack write access', async () => {
const comments = [
{ body: OVERRIDE_COMMAND, user: { login: 'user1' } },
{ body: OVERRIDE_COMMAND, user: { login: 'user2' } },
];
const result = await hasValidOverride(comments, permissionGetter('read'));
assert.equal(result, false);
});
it('checks permissions per commenter independently', async () => {
const permissions = { writer: 'write', reader: 'read' };
const getPermission = async (username) => permissions[username] ?? 'read';
const comments = [
{ body: OVERRIDE_COMMAND, user: { login: 'reader' } },
{ body: OVERRIDE_COMMAND, user: { login: 'writer' } },
];
const result = await hasValidOverride(comments, getPermission);
assert.ok(result);
});
});
describe('countFilteredAdditions', () => {
it('sums additions across all files when no patterns are given', () => {
const files = [
{ filename: 'src/foo.ts', additions: 100 },
{ filename: 'src/bar.ts', additions: 200 },
];
assert.equal(countFilteredAdditions(files, []), 300);
});
it('excludes files matching a glob pattern', () => {
const files = [
{ filename: 'src/foo.ts', additions: 100 },
{ filename: 'src/foo.test.ts', additions: 500 },
];
assert.equal(countFilteredAdditions(files, ['**/*.test.ts']), 100);
});
it('excludes files matching any of multiple patterns', () => {
const files = [
{ filename: 'src/foo.ts', additions: 100 },
{ filename: 'src/foo.test.ts', additions: 200 },
{ filename: 'src/foo.spec.ts', additions: 300 },
{ filename: 'src/__tests__/bar.ts', additions: 400 },
];
assert.equal(
countFilteredAdditions(files, ['**/*.test.ts', '**/*.spec.ts', '**/__tests__/**']),
100,
);
});
it('returns 0 when all files are excluded', () => {
const files = [
{ filename: 'src/foo.test.ts', additions: 100 },
{ filename: 'src/bar.test.ts', additions: 200 },
];
assert.equal(countFilteredAdditions(files, ['**/*.test.ts']), 0);
});
it('returns 0 for an empty file list', () => {
assert.equal(countFilteredAdditions([], EXCLUDE_PATTERNS), 0);
});
it('applies EXCLUDE_PATTERNS to common test file extensions', () => {
const files = [
{ filename: 'src/service.ts', additions: 50 },
{ filename: 'src/service.test.ts', additions: 100 },
{ filename: 'src/service.spec.ts', additions: 100 },
{ filename: 'src/service.test.mjs', additions: 100 },
{ filename: 'src/service.spec.mjs', additions: 100 },
{ filename: 'src/service.test.js', additions: 100 },
{ filename: 'src/service.spec.js', additions: 100 },
{ filename: 'src/__tests__/helper.ts', additions: 100 },
{ filename: 'src/component.snap', additions: 100 },
];
assert.equal(countFilteredAdditions(files, EXCLUDE_PATTERNS), 50);
});
it('applies EXCLUDE_PATTERNS to test directories (test/, tests/, __tests__)', () => {
const files = [
{ filename: 'packages/cli/src/service.ts', additions: 50 },
{ filename: 'packages/cli/test/unit/service.test.ts', additions: 100 },
{ filename: 'packages/cli/test/integration/api.test.ts', additions: 100 },
{ filename: 'packages/nodes-base/nodes/Foo/tests/Foo.test.ts', additions: 100 },
{ filename: 'packages/core/src/__tests__/cipher.test.ts', additions: 100 },
];
assert.equal(countFilteredAdditions(files, EXCLUDE_PATTERNS), 50);
});
it('applies EXCLUDE_PATTERNS to snapshots, fixtures, and mocks', () => {
const files = [
{ filename: 'packages/cli/src/service.ts', additions: 50 },
{ filename: 'packages/editor-ui/src/__snapshots__/Canvas.test.ts.snap', additions: 100 },
{ filename: 'packages/workflow/test/fixtures/workflow.json', additions: 100 },
{ filename: 'packages/core/src/__mocks__/fs.ts', additions: 100 },
];
assert.equal(countFilteredAdditions(files, EXCLUDE_PATTERNS), 50);
});
it('applies EXCLUDE_PATTERNS to packages/testing and pnpm-lock.yaml', () => {
const files = [
{ filename: 'packages/cli/src/service.ts', additions: 50 },
{ filename: 'packages/testing/playwright/tests/workflow.spec.ts', additions: 100 },
{ filename: 'packages/testing/playwright/pages/CanvasPage.ts', additions: 100 },
{ filename: 'pnpm-lock.yaml', additions: 500 },
];
assert.equal(countFilteredAdditions(files, EXCLUDE_PATTERNS), 50);
});
});

View file

@ -0,0 +1,97 @@
/**
* Re-triggers the PR Size Limit check when a maintainer comments `/size-limit-override`.
*
* Finds the latest `PR Size Limit` check run on the PR's HEAD commit and re-requests it.
* The re-run scans comments, finds the override, and passes satisfying branch protection
* without any label manipulation or status API calls.
*
* Exit codes:
* 0 Check run re-requested successfully
* 1 Commenter lacks permission, or no check run found to re-request
*/
import { initGithub, getEventFromGithubEventPath } from '../github-helpers.mjs';
const CHECK_NAME = 'PR Size Limit';
/**
 * Re-requests the latest `PR Size Limit` check run on the PR's HEAD commit,
 * after verifying the commenter has write access or above. Exits 1 on
 * insufficient permission or when no check run exists to re-request.
 *
 * @param {{
 *  octokit: import('../github-helpers.mjs').GitHubInstance,
 *  owner: string,
 *  repo: string,
 *  prNumber: number,
 *  commenter: string,
 *  commentId: number,
 * }} params
 */
export async function run({ octokit, owner, repo, prNumber, commenter, commentId }) {
	// Only users with write/maintain/admin may trigger the override re-run.
	const { data: perm } = await octokit.rest.repos.getCollaboratorPermissionLevel({
		owner,
		repo,
		username: commenter,
	});
	if (!['admin', 'write', 'maintain'].includes(perm.permission)) {
		console.log(
			`::error::@${commenter} does not have permission to override the PR size limit (requires write access).`,
		);
		process.exit(1);
	}
	// Resolve the PR's HEAD SHA: check runs are attached to commits, not PRs.
	const { data: pr } = await octokit.rest.pulls.get({
		owner,
		repo,
		pull_number: prNumber,
	});
	const headSha = pr.head.sha;
	// per_page: 1 — the API returns the latest run for the named check first.
	const {
		data: { check_runs },
	} = await octokit.rest.checks.listForRef({
		owner,
		repo,
		ref: headSha,
		check_name: CHECK_NAME,
		per_page: 1,
	});
	if (check_runs.length === 0) {
		console.log(
			`::error::No '${CHECK_NAME}' check run found for ${headSha}. Push a new commit to trigger it.`,
		);
		process.exit(1);
	}
	await octokit.rest.checks.rerequestRun({
		owner,
		repo,
		check_run_id: check_runs[0].id,
	});
	// React with 👍 on the override comment as user-visible acknowledgement.
	await octokit.rest.reactions.createForIssueComment({
		owner,
		repo,
		comment_id: commentId,
		content: '+1',
	});
	console.log(`Re-requested '${CHECK_NAME}' check run (${check_runs[0].id}) for ${headSha}`);
}
// Wires the GitHub issue_comment event payload into run().
async function main() {
	const event = getEventFromGithubEventPath();
	const { octokit, owner, repo } = initGithub();
	await run({
		octokit,
		owner,
		repo,
		// issue_comment events carry the PR number on event.issue.
		prNumber: event.issue.number,
		commenter: event.sender.login,
		commentId: event.comment.id,
	});
}
// Only run when executed directly, not when imported (e.g. by tests).
if (import.meta.url === `file://${process.argv[1]}`) {
	await main();
}

View file

@ -18,6 +18,18 @@ import { existsSync, readFileSync } from 'node:fs';
import { sendMetrics, metric } from './send-metrics.mjs';
/** Parse human-readable sizes (e.g. "1.5G", "500M", "12K") to MB. */
function parseSizeToMB(val) {
if (typeof val === 'number') return val / (1024 * 1024);
if (typeof val !== 'string') return null;
const match = val.match(/^([\d.]+)\s*([KMGT]?)i?B?$/i);
if (!match) return null;
const num = parseFloat(match[1]);
const suffix = match[2].toUpperCase();
const toMB = { '': 1 / (1024 * 1024), 'K': 1 / 1024, 'M': 1, 'G': 1024, 'T': 1024 * 1024 };
return Math.round(num * (toMB[suffix] ?? 1) * 100) / 100;
}
const buildManifestPath = 'compiled/build-manifest.json';
const dockerManifestPath = 'docker-build-manifest.json';
@ -37,11 +49,13 @@ const dockerManifest = existsSync(dockerManifestPath)
const metrics = [];
if (buildManifest) {
if (buildManifest.artifactSize != null) {
metrics.push(metric('artifact-size', buildManifest.artifactSize, 'bytes', { artifact: 'compiled' }));
const sizeMB = parseSizeToMB(buildManifest.artifactSize);
if (sizeMB != null) {
metrics.push(metric('artifact-size', sizeMB, 'MB', { artifact: 'compiled' }));
}
if (buildManifest.buildDuration != null) {
metrics.push(metric('build-duration', buildManifest.buildDuration / 1000, 's', { artifact: 'compiled' }));
const duration = buildManifest.buildDuration;
if (duration?.total != null) {
metrics.push(metric('build-duration', duration.total / 1000, 's', { artifact: 'compiled' }));
}
}
@ -49,12 +63,12 @@ if (dockerManifest) {
const platform = dockerManifest.platform ?? 'unknown';
for (const image of dockerManifest.images ?? []) {
if (image.sizeBytes != null) {
const imageSizeMB = parseSizeToMB(image.size ?? image.sizeBytes);
const imageName = image.imageName ?? image.name ?? 'unknown';
const shortName = imageName.replace(/^n8nio\//, '').replace(/:.*$/, '');
if (imageSizeMB != null) {
metrics.push(
metric('docker-image-size', image.sizeBytes, 'bytes', {
image: image.name ?? 'unknown',
platform,
}),
metric(`docker-image-size-${shortName}`, imageSizeMB, 'MB', { platform }),
);
}
}

View file

@ -0,0 +1,28 @@
import { trySh } from './github-helpers.mjs';
import { getMonorepoProjects } from './pnpm-utils.mjs';
// Tags the current version of every published (non-private, versioned)
// workspace package as `latest` on npm via `npm dist-tag add`.
async function setLatestForMonorepoPackages() {
	const packages = await getMonorepoProjects();
	const publishedPackages = packages //
		.filter((pkg) => !pkg.private)
		.filter((pkg) => pkg.version);
	for (const pkg of publishedPackages) {
		const versionName = `${pkg.name}@${pkg.version}`;
		// NOTE(review): trySh is called without await — assumes it is synchronous
		// and returns { ok } directly; if it returns a Promise, `res.ok` would
		// always be undefined. Confirm against github-helpers.mjs.
		const res = trySh('npm', ['dist-tag', 'add', versionName, 'latest']);
		if (res.ok) {
			console.log(`Set ${versionName} as latest`);
		} else {
			// Best-effort: a failed dist-tag update is logged but does not abort the loop.
			console.warn(`Update failed for ${versionName}`);
		}
	}
}
// only run when executed directly, not when imported by tests
if (import.meta.url === `file://${process.argv[1]}`) {
setLatestForMonorepoPackages().catch((err) => {
console.error(err);
process.exit(1);
});
}

File diff suppressed because it is too large Load diff

101
.github/workflows/ci-pr-quality.yml vendored Normal file
View file

@ -0,0 +1,101 @@
name: 'CI: PR Quality Checks'
on:
pull_request:
types:
- opened
- edited
- synchronize
branches:
- master
issue_comment:
types:
- created
jobs:
handle-size-override:
name: Handle /size-limit-override
# Re-requests the PR Size Limit check run on the PR's HEAD commit, so it re-runs
# in the original PR context and picks up the override comment.
if: |
github.event_name == 'issue_comment' &&
github.event.issue.pull_request &&
startsWith(github.event.comment.body, '/size-limit-override')
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
checks: write
issues: write
pull-requests: read
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Node.js
uses: ./.github/actions/setup-nodejs
with:
build-command: ''
install-command: pnpm install --frozen-lockfile --dir ./.github/scripts --ignore-workspace
- name: Re-request PR Size Limit check
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: node .github/scripts/quality/handle-size-override.mjs
check-ownership-checkbox:
name: Ownership Acknowledgement
# Checks that the author has acknowledged the ownership of their code
# by checking the checkbox in the PR summary.
if: |
github.event_name == 'pull_request' &&
github.event.pull_request.head.repo.full_name == github.repository &&
!contains(github.event.pull_request.labels.*.name, 'automation:backport') &&
!contains(github.event.pull_request.title, '(backport to')
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
issues: write
pull-requests: write
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Node.js
uses: ./.github/actions/setup-nodejs
with:
build-command: ''
install-command: pnpm install --frozen-lockfile --dir ./.github/scripts --ignore-workspace
- name: Check ownership checkbox
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: node .github/scripts/quality/check-ownership-checkbox.mjs
check-pr-size:
name: PR Size Limit
# Checks that the PR size doesn't exceed the limit (currently 1000 lines)
# Allows for override via '/size-limit-override' comment
if: |
github.event_name == 'pull_request' &&
github.event.pull_request.head.repo.full_name == github.repository &&
!contains(github.event.pull_request.labels.*.name, 'automation:backport') &&
!contains(github.event.pull_request.title, '(backport to')
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
issues: write
pull-requests: write
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup Node.js
uses: ./.github/actions/setup-nodejs
with:
build-command: ''
install-command: pnpm install --frozen-lockfile --dir ./.github/scripts --ignore-workspace
- name: Check PR size
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: node .github/scripts/quality/check-pr-size.mjs

View file

@ -0,0 +1,66 @@
# Runs follow-up CI (Chromatic visual tests) when a PR review is approved.
name: 'CI: Pull Request Review'
on:
pull_request_review:
types: [submitted]
# One run per PR; a newer approval cancels an in-flight run.
concurrency:
group: ci-review-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
# Detects whether design-system-related paths changed; exposes the merge-ref
# commit SHA so downstream jobs test the exact same commit.
filter:
name: Check Changes
if: >-
github.event.review.state == 'approved' &&
github.repository == 'n8n-io/n8n'
runs-on: ubuntu-slim
outputs:
design_system: ${{ fromJSON(steps.ci-filter.outputs.results)['design-system'] == true }}
commit_sha: ${{ steps.commit-sha.outputs.sha }}
steps:
# Check out the PR's merge ref (PR merged into base), not the head branch.
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- name: Capture commit SHA
id: commit-sha
run: echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT"
- name: Check for relevant changes
uses: ./.github/actions/ci-filter
id: ci-filter
with:
mode: filter
filters: |
design-system:
packages/frontend/@n8n/design-system/**
packages/frontend/@n8n/storybook/**
.github/workflows/test-visual-chromatic.yml
# Runs Chromatic visual regression tests only when design-system paths changed.
chromatic:
name: Chromatic
needs: filter
if: needs.filter.outputs.design_system == 'true'
uses: ./.github/workflows/test-visual-chromatic.yml
with:
ref: ${{ needs.filter.outputs.commit_sha }}
secrets: inherit
# Required by GitHub branch protection rules.
# PRs cannot be merged unless this job passes.
required-review-checks:
name: Required Review Checks
needs: [filter, chromatic]
if: always()
runs-on: ubuntu-slim
steps:
# Sparse checkout: only the ci-filter action is needed for validation.
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
sparse-checkout: .github/actions/ci-filter
sparse-checkout-cone-mode: false
- name: Validate required checks
uses: ./.github/actions/ci-filter
with:
mode: validate
job-results: ${{ toJSON(needs) }}

View file

@ -25,8 +25,9 @@ jobs:
workflows: ${{ fromJSON(steps.ci-filter.outputs.results).workflows == true }}
workflow_scripts: ${{ fromJSON(steps.ci-filter.outputs.results)['workflow-scripts'] == true }}
db: ${{ fromJSON(steps.ci-filter.outputs.results).db == true }}
design_system: ${{ fromJSON(steps.ci-filter.outputs.results)['design-system'] == true }}
performance: ${{ fromJSON(steps.ci-filter.outputs.results).performance == true }}
e2e_performance: ${{ fromJSON(steps.ci-filter.outputs.results)['e2e-performance'] == true }}
instance_ai_workflow_eval: ${{ fromJSON(steps.ci-filter.outputs.results)['instance-ai-workflow-eval'] == true }}
commit_sha: ${{ steps.commit-sha.outputs.sha }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@ -60,15 +61,22 @@ jobs:
packages/testing/containers/**
workflows: .github/**
workflow-scripts: .github/scripts/**
design-system:
packages/frontend/@n8n/design-system/**
packages/frontend/@n8n/chat/**
packages/frontend/@n8n/storybook/**
.github/workflows/test-visual-chromatic.yml
performance:
packages/testing/performance/**
packages/workflow/src/**
packages/@n8n/expression-runtime/src/**
.github/workflows/test-bench-reusable.yml
e2e-performance:
packages/testing/playwright/tests/performance/**
packages/testing/playwright/utils/performance-helper.ts
packages/testing/containers/**
.github/workflows/test-e2e-performance-reusable.yml
instance-ai-workflow-eval:
packages/@n8n/instance-ai/src/**
packages/@n8n/instance-ai/evaluations/**
packages/cli/src/modules/instance-ai/**
packages/core/src/execution-engine/eval-mock-helpers.ts
.github/workflows/test-evals-instance-ai*.yml
db:
packages/cli/src/databases/**
packages/cli/src/modules/*/database/**
@ -167,6 +175,16 @@ jobs:
with:
ref: ${{ needs.install-and-build.outputs.commit_sha }}
e2e-performance:
name: E2E Performance
needs: install-and-build
if: >-
(needs.install-and-build.outputs.ci == 'true' || needs.install-and-build.outputs.e2e_performance == 'true') &&
github.event_name == 'pull_request' &&
github.repository == 'n8n-io/n8n'
uses: ./.github/workflows/test-e2e-performance-reusable.yml
secrets: inherit
security-checks:
name: Security Checks
needs: install-and-build
@ -185,13 +203,16 @@ jobs:
ref: ${{ needs.install-and-build.outputs.commit_sha }}
secrets: inherit
chromatic:
name: Chromatic
instance-ai-workflow-evals:
name: Instance AI Workflow Evals
needs: install-and-build
if: needs.install-and-build.outputs.design_system == 'true' && github.event_name == 'pull_request' && github.repository == 'n8n-io/n8n'
uses: ./.github/workflows/test-visual-chromatic.yml
if: >-
needs.install-and-build.outputs.instance_ai_workflow_eval == 'true' &&
github.repository == 'n8n-io/n8n' &&
(github.event_name != 'pull_request' || !github.event.pull_request.head.repo.fork)
uses: ./.github/workflows/test-evals-instance-ai.yml
with:
ref: ${{ needs.install-and-build.outputs.commit_sha }}
branch: ${{ needs.install-and-build.outputs.commit_sha }}
secrets: inherit
# This job is required by GitHub branch protection rules.
@ -210,7 +231,6 @@ jobs:
performance,
security-checks,
workflow-scripts,
chromatic,
]
if: always()
runs-on: ubuntu-slim
@ -224,3 +244,14 @@ jobs:
with:
mode: validate
job-results: ${{ toJSON(needs) }}
# Posts a QA metrics comparison comment on the PR.
# Runs after all checks so any job can emit metrics before this reports.
post-qa-metrics-comment:
name: QA Metrics
needs: [required-checks, e2e-performance]
if: always()
uses: ./.github/workflows/util-qa-metrics-comment-reusable.yml
with:
metrics: memory-heap-used-baseline,memory-rss-baseline,instance-ai-heap-used-baseline,instance-ai-rss-baseline,docker-image-size-n8n,docker-image-size-runners
secrets: inherit

View file

@ -4,6 +4,7 @@ on:
pull_request:
branches:
- master
- 1.x
permissions:
pull-requests: write
@ -46,7 +47,7 @@ jobs:
`${marker}\n` +
`🚫 **Merge blocked**: PRs into \`${base}\` are only allowed from branches named \`bundle/*\`.\n\n` +
`Current source branch: \`${head}\`\n\n` +
`Merge your developments into a bundle branch instead of directly merging to master.`;
`Merge your developments into a bundle branch instead of directly merging to master or 1.x.`;
// Find an existing marker comment (to update instead of spamming)
const { data: comments } = await github.rest.issues.listComments({
@ -79,7 +80,7 @@ jobs:
env:
HEAD_REF: ${{ github.head_ref }}
run: |
echo "::error::You can only merge to master from a bundle/* branch. Got '$HEAD_REF'."
echo "::error::You can only merge to master and 1.x from a bundle/* branch. Got '$HEAD_REF'."
exit 1
- name: Allowed

View file

@ -79,6 +79,9 @@ jobs:
primary_ghcr_manifest_tag: ${{ steps.determine-tags.outputs.n8n_primary_tag }}
runners_primary_ghcr_manifest_tag: ${{ steps.determine-tags.outputs.runners_primary_tag }}
runners_distroless_primary_ghcr_manifest_tag: ${{ steps.determine-tags.outputs.runners_distroless_primary_tag }}
n8n_sha_manifest_tag: ${{ steps.determine-tags.outputs.n8n_sha_primary_tag }}
runners_sha_manifest_tag: ${{ steps.determine-tags.outputs.runners_sha_primary_tag }}
runners_distroless_sha_manifest_tag: ${{ steps.determine-tags.outputs.runners_distroless_sha_primary_tag }}
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@ -101,6 +104,7 @@ jobs:
--all \
--version "${{ needs.determine-build-context.outputs.n8n_version }}" \
--platform "${{ matrix.docker_platform }}" \
--sha "${GITHUB_SHA::7}" \
${{ needs.determine-build-context.outputs.push_to_docker == 'true' && '--include-docker' || '' }}
echo "=== Generated Docker Tags ==="
@ -228,6 +232,11 @@ jobs:
create_manifest "runners" "${{ needs.build-and-push-docker.outputs.runners_primary_ghcr_manifest_tag }}"
create_manifest "runners-distroless" "${{ needs.build-and-push-docker.outputs.runners_distroless_primary_ghcr_manifest_tag }}"
# Create SHA-tagged manifests (immutable references for deployments)
create_manifest "n8n (sha)" "${{ needs.build-and-push-docker.outputs.n8n_sha_manifest_tag }}"
create_manifest "runners (sha)" "${{ needs.build-and-push-docker.outputs.runners_sha_manifest_tag }}"
create_manifest "runners-distroless (sha)" "${{ needs.build-and-push-docker.outputs.runners_distroless_sha_manifest_tag }}"
- name: Create Docker Hub manifests
if: needs.determine-build-context.outputs.push_to_docker == 'true'
run: |
@ -241,6 +250,8 @@ jobs:
["runners-distroless"]="${VERSION}-distroless"
)
SHORT_SHA="${GITHUB_SHA::7}"
for image in "${!images[@]}"; do
TAG_SUFFIX="${images[$image]}"
IMAGE_NAME="${image//-distroless/}" # Remove -distroless from image name
@ -250,6 +261,20 @@ jobs:
--tag "${DOCKER_BASE}/${IMAGE_NAME}:${TAG_SUFFIX}" \
"${DOCKER_BASE}/${IMAGE_NAME}:${TAG_SUFFIX}-amd64" \
"${DOCKER_BASE}/${IMAGE_NAME}:${TAG_SUFFIX}-arm64"
# Create SHA-tagged manifest (immutable reference)
# For distroless, insert SHA between version and -distroless suffix
# to match docker-tags.mjs format: nightly-abc1234-distroless (not nightly-distroless-abc1234)
if [[ "$image" == *"-distroless"* ]]; then
SHA_SUFFIX="${VERSION}-${SHORT_SHA}-distroless"
else
SHA_SUFFIX="${TAG_SUFFIX}-${SHORT_SHA}"
fi
echo "Creating Docker Hub SHA manifest for $image: ${SHA_SUFFIX}"
docker buildx imagetools create \
--tag "${DOCKER_BASE}/${IMAGE_NAME}:${SHA_SUFFIX}" \
"${DOCKER_BASE}/${IMAGE_NAME}:${SHA_SUFFIX}-amd64" \
"${DOCKER_BASE}/${IMAGE_NAME}:${SHA_SUFFIX}-arm64"
done
- name: Get manifest digests for attestation

View file

@ -3,7 +3,7 @@ name: 'Release: Create Minor Release PR'
on:
workflow_dispatch:
schedule:
- cron: 0 13 * * 1 # 2pm CET (UTC+1), Monday
- cron: 0 8 * * 2 # 9am CET (UTC+1), Tuesday
jobs:
create-release-pr:

View file

@ -66,6 +66,14 @@ jobs:
uses: ./.github/workflows/util-ensure-release-candidate-branches.yml
secrets: inherit
ensure-correct-latest-version-on-npm:
name: Ensure correct latest version on npm
if: |
inputs.bump == 'minor' ||
inputs.track == 'stable'
uses: ./.github/workflows/release-set-stable-npm-packages-to-latest.yml
secrets: inherit
populate-cloud-with-releases:
name: 'Populate cloud database with releases'
uses: ./.github/workflows/release-populate-cloud-with-releases.yml

View file

@ -58,6 +58,9 @@ jobs:
N8N_FAIL_ON_POPULARITY_FETCH_ERROR: true
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
- name: Install script dependencies
run: pnpm install --frozen-lockfile --dir ./.github/scripts --ignore-workspace
- name: Check for new unpublished packages
run: node .github/scripts/detect-new-packages.mjs
@ -81,7 +84,7 @@ jobs:
- name: Publish other packages to NPM
env:
PUBLISH_BRANCH: ${{ github.event.pull_request.base.ref }}
PUBLISH_TAG: ${{ needs.determine-version-info.outputs.track == 'stable' && 'latest' || needs.determine-version-info.outputs.track }}
PUBLISH_TAG: ${{ needs.determine-version-info.outputs.track }}
run: |
# Prefix version-like track names (e.g. "1", "v1") to avoid npm rejecting them as semver ranges
if [[ "$PUBLISH_TAG" =~ ^v?[0-9] ]]; then

View file

@ -3,7 +3,7 @@ name: 'Release: Schedule Patch Release PRs'
on:
workflow_dispatch:
schedule:
- cron: '0 8 * * 2-5' # 9am CET (UTC+1), Tuesday–Friday
- cron: '0 8 * * 3-5,1' # 9am CET (UTC+1), Wednesday–Friday and Monday (minor release on Tuesday)
jobs:
create-patch-prs:

View file

@ -0,0 +1,35 @@
name: 'Release: Set stable npm packages to latest'
on:
workflow_call:
workflow_dispatch:
permissions:
contents: write
jobs:
promote-github-releases:
name: Promote current stable releases as latest
runs-on: ubuntu-slim
environment: release
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: refs/tags/stable
fetch-depth: 1
- name: Setup NodeJS
uses: ./.github/actions/setup-nodejs
with:
build-command: ''
install-command: pnpm install --frozen-lockfile --dir ./.github/scripts --ignore-workspace
# Remove after https://github.com/npm/cli/issues/8547 gets resolved
- run: echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Set npm packages to latest
run: node ./.github/scripts/set-latest-for-monorepo-packages.mjs

View file

@ -16,6 +16,9 @@ on:
type: string
default: ''
env:
NODE_OPTIONS: --max-old-space-size=6144
jobs:
bench:
name: Benchmarks

View file

@ -144,7 +144,7 @@ jobs:
with:
branch: ${{ inputs.branch }}
test-mode: local
test-command: pnpm --filter=n8n-playwright test:local:e2e-only
test-command: pnpm --filter=n8n-playwright test:local
shards: 7
runner: ubuntu-latest
workers: '1'

View file

@ -5,9 +5,6 @@ on:
workflow_dispatch:
schedule:
- cron: '0 0 * * *' # Runs daily at midnight
pull_request:
paths:
- '.github/workflows/test-e2e-performance-reusable.yml'
jobs:
build-and-test-performance:

View file

@ -130,6 +130,9 @@ jobs:
enable-docker-cache: ${{ inputs.test-mode == 'docker-build' }}
env:
INCLUDE_TEST_CONTROLLER: ${{ inputs.test-mode == 'docker-build' && 'true' || '' }}
QA_METRICS_WEBHOOK_URL: ${{ secrets.QA_METRICS_WEBHOOK_URL }}
QA_METRICS_WEBHOOK_USER: ${{ secrets.QA_METRICS_WEBHOOK_USER }}
QA_METRICS_WEBHOOK_PASSWORD: ${{ secrets.QA_METRICS_WEBHOOK_PASSWORD }}
- name: Install Browsers
run: pnpm turbo run install-browsers --filter=n8n-playwright

View file

@ -0,0 +1,141 @@
name: 'Test: Instance AI Exec Evals'
on:
workflow_call:
inputs:
branch:
description: 'GitHub branch to test'
required: false
type: string
default: 'master'
filter:
description: 'Filter test cases by name (e.g. "contact-form")'
required: false
type: string
default: ''
workflow_dispatch:
inputs:
branch:
description: 'GitHub branch to test'
required: false
default: 'master'
filter:
description: 'Filter test cases by name (e.g. "contact-form")'
required: false
default: ''
jobs:
run-evals:
name: 'Run Evals'
runs-on: blacksmith-4vcpu-ubuntu-2204
timeout-minutes: 45
permissions:
contents: read
pull-requests: write
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ inputs.branch || github.ref }}
fetch-depth: 1
- name: Setup Environment
uses: ./.github/actions/setup-nodejs
with:
build-command: 'pnpm build'
- name: Build Docker image
run: pnpm build:docker
env:
INCLUDE_TEST_CONTROLLER: 'true'
- name: Start n8n container
run: |
docker run -d --name n8n-eval \
-e E2E_TESTS=true \
-e N8N_ENABLED_MODULES=instance-ai \
-e N8N_AI_ENABLED=true \
-e N8N_INSTANCE_AI_MODEL_API_KEY=${{ secrets.EVALS_ANTHROPIC_KEY }} \
-e N8N_LICENSE_ACTIVATION_KEY=${{ secrets.N8N_LICENSE_ACTIVATION_KEY }} \
-e N8N_LICENSE_CERT=${{ secrets.N8N_LICENSE_CERT }} \
-e N8N_ENCRYPTION_KEY=${{ secrets.N8N_ENCRYPTION_KEY }} \
-p 5678:5678 \
n8nio/n8n:local
echo "Waiting for n8n to be ready..."
for i in $(seq 1 60); do
if curl -s http://localhost:5678/healthz/readiness -o /dev/null -w "%{http_code}" | grep -q 200; then
echo "n8n ready after ${i}s"
exit 0
fi
sleep 1
done
echo "::error::n8n failed to start within 60s"
docker logs n8n-eval --tail 30
exit 1
- name: Create test user
run: |
curl -sf -X POST http://localhost:5678/rest/e2e/reset \
-H "Content-Type: application/json" \
-d '{
"owner":{"email":"nathan@n8n.io","password":"PlaywrightTest123","firstName":"Eval","lastName":"Owner"},
"admin":{"email":"admin@n8n.io","password":"PlaywrightTest123","firstName":"Admin","lastName":"User"},
"members":[],
"chat":{"email":"chat@n8n.io","password":"PlaywrightTest123","firstName":"Chat","lastName":"User"}
}'
- name: Run Instance AI Evals
continue-on-error: true
working-directory: packages/@n8n/instance-ai
run: >-
pnpm eval:instance-ai
--base-url http://localhost:5678
--verbose
${{ inputs.filter && format('--filter "{0}"', inputs.filter) || '' }}
env:
N8N_INSTANCE_AI_MODEL_API_KEY: ${{ secrets.EVALS_ANTHROPIC_KEY }}
- name: Stop n8n container
if: ${{ always() }}
run: docker stop n8n-eval && docker rm n8n-eval || true
- name: Post eval results to PR
if: ${{ always() && github.event_name == 'pull_request' }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
RESULTS_FILE="packages/@n8n/instance-ai/eval-results.json"
if [ ! -f "$RESULTS_FILE" ]; then
echo "No eval results file found"
exit 0
fi
# Build the full comment body with jq
jq -r '
"### Instance AI Workflow Eval Results\n\n" +
"**\(.summary.built)/\(.summary.testCases) built | \(.summary.scenariosPassed)/\(.summary.scenariosTotal) passed (\(.summary.passRate * 100 | floor)%)**\n\n" +
"| Workflow | Build | Passed |\n|---|---|---|\n" +
([.testCases[] | "| \(.name) | \(if .built then "✅" else "❌" end) | \([.scenarios[] | select(.passed)] | length)/\(.scenarios | length) |"] | join("\n")) +
"\n\n<details><summary>Failure details</summary>\n\n" +
([.testCases[].scenarios[] | select(.passed == false) | "**\(.name)** \(if .failureCategory then "[\(.failureCategory)]" else "" end)\n> \(.reasoning | .[0:200])\n"] | join("\n")) +
"\n</details>"
' "$RESULTS_FILE" > /tmp/eval-comment.md
# Find and update existing eval comment, or create new one
COMMENT_ID=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
--jq '.[] | select(.body | startswith("### Instance AI Workflow Eval")) | .id' | tail -1)
if [ -n "$COMMENT_ID" ]; then
gh api "repos/${{ github.repository }}/issues/comments/${COMMENT_ID}" -X PATCH -F body=@/tmp/eval-comment.md
else
gh pr comment "${{ github.event.pull_request.number }}" --body-file /tmp/eval-comment.md
fi
- name: Upload Results
if: ${{ always() }}
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
with:
name: instance-ai-workflow-eval-results
path: packages/@n8n/instance-ai/eval-results.json
retention-days: 14

View file

@ -29,5 +29,5 @@ jobs:
- name: Build and Test
uses: ./.github/actions/setup-nodejs
with:
build-command: pnpm lint
build-command: pnpm lint:ci
node-version: ${{ inputs.nodeVersion }}

View file

@ -52,7 +52,7 @@ jobs:
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
token: ${{ secrets.CODECOV_TOKEN }}
report-type: test_results
report_type: test_results
name: backend-unit
- name: Upload coverage to Codecov
@ -92,9 +92,10 @@ jobs:
- name: Upload test results to Codecov
if: ${{ !cancelled() }}
uses: codecov/test-results-action@0fa95f0e1eeaafde2c782583b36b28ad0d8c77d3 # v1.2.1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
token: ${{ secrets.CODECOV_TOKEN }}
report_type: test_results
name: backend-integration
- name: Upload coverage to Codecov
@ -134,9 +135,10 @@ jobs:
- name: Upload test results to Codecov
if: ${{ !cancelled() }}
uses: codecov/test-results-action@0fa95f0e1eeaafde2c782583b36b28ad0d8c77d3 # v1.2.1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
token: ${{ secrets.CODECOV_TOKEN }}
report_type: test_results
name: nodes-unit
- name: Upload coverage to Codecov
@ -182,9 +184,10 @@ jobs:
- name: Upload test results to Codecov
if: ${{ !cancelled() }}
uses: codecov/test-results-action@0fa95f0e1eeaafde2c782583b36b28ad0d8c77d3 # v1.2.1
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
token: ${{ secrets.CODECOV_TOKEN }}
report_type: test_results
name: frontend-shard-${{ matrix.shard }}
- name: Upload coverage to Codecov

View file

@ -20,7 +20,8 @@ jobs:
cloudflare:
name: Cloudflare Pages
if: |
!contains(github.event.pull_request.labels.*.name, 'community')
!contains(github.event.pull_request.labels.*.name, 'community') &&
github.repository == 'n8n-io/n8n'
runs-on: blacksmith-2vcpu-ubuntu-2204
permissions:
contents: read

View file

@ -149,6 +149,17 @@ jobs:
"mcp__linear__*",
"mcp__notion__*"
]
},
"extraKnownMarketplaces": {
"n8n": {
"source": {
"source": "directory",
"path": "./.claude/plugins/n8n"
}
}
},
"enabledPlugins": {
"n8n@n8n": true
}
}
claude_args: |

View file

@ -36,6 +36,20 @@ jobs:
# Or use OAuth token instead:
# claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
timeout_minutes: '60'
settings: |
{
"extraKnownMarketplaces": {
"n8n": {
"source": {
"source": "directory",
"path": "./.claude/plugins/n8n"
}
}
},
"enabledPlugins": {
"n8n@n8n": true
}
}
# mode: tag # Default: responds to @claude mentions
# Optional: Restrict network access to specific domains only
# experimental_allowed_domains: |

View file

@ -24,4 +24,4 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GHCR_ORG: ${{ github.repository_owner }}
GHCR_REPO: ${{ github.event.repository.name }}
run: node .github/scripts/cleanup-ghcr-images.mjs --stale 1
run: node .github/scripts/cleanup-ghcr-images.mjs --stale 3

View file

@ -0,0 +1,41 @@
name: 'QA: Metrics PR Comment'
on:
workflow_call:
inputs:
metrics:
description: 'Comma-separated list of metric names to report'
required: true
type: string
baseline-days:
description: 'Number of days for the rolling baseline'
required: false
type: number
default: 14
jobs:
post-comment:
name: Post Metrics Comment
if: >-
github.event_name == 'pull_request' &&
!github.event.pull_request.head.repo.fork &&
github.repository == 'n8n-io/n8n'
runs-on: ubuntu-slim
continue-on-error: true
permissions:
pull-requests: write
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
sparse-checkout: .github/scripts/post-qa-metrics-comment.mjs
sparse-checkout-cone-mode: false
- name: Post QA metrics comparison
env:
QA_METRICS_COMMENT_WEBHOOK_URL: ${{ secrets.QA_METRICS_COMMENT_WEBHOOK_URL }}
QA_METRICS_WEBHOOK_USER: ${{ secrets.QA_METRICS_WEBHOOK_USER }}
QA_METRICS_WEBHOOK_PASSWORD: ${{ secrets.QA_METRICS_WEBHOOK_PASSWORD }}
GITHUB_TOKEN: ${{ github.token }}
run: |
node .github/scripts/post-qa-metrics-comment.mjs \
--metrics "${{ inputs.metrics }}" \
--baseline-days "${{ inputs.baseline-days }}"

6
.gitignore vendored
View file

@ -33,6 +33,8 @@ test-results.json
*.0x
packages/testing/playwright/playwright-report
packages/testing/playwright/test-results
packages/testing/playwright/eval-results.json
packages/@n8n/instance-ai/eval-results.json
packages/testing/playwright/.playwright-browsers
packages/testing/playwright/.playwright-cli
test-results/
@ -57,5 +59,9 @@ packages/cli/src/commands/export/outputs
.data/
.claude/settings.local.json
.claude/plans/
.cursor/plans/
.superset
.conductor
.n8n
lefthook-local.yml
.playwright-mcp

View file

@ -20,6 +20,13 @@ frontend, and extensible node-based workflow engine.
Hygiene below)
- Use mermaid diagrams in MD files when you need to visualise something
## Claude Code Plugin
n8n-specific skills, commands, and agents live in `.claude/plugins/n8n/` and
are namespaced under `n8n:`. Use `n8n:` prefix when invoking them
(e.g. `/n8n:create-pr`, `/n8n:plan`, `n8n:developer` agent).
See [plugin README](.claude/plugins/n8n/README.md) for structure and details.
## Essential Commands
### Building
@ -74,6 +81,7 @@ The monorepo is organized into these key packages:
- **`packages/@n8n/i18n`**: Internationalization for UI text
- **`packages/nodes-base`**: Built-in nodes for integrations
- **`packages/@n8n/nodes-langchain`**: AI/LangChain nodes
- **`packages/@n8n/instance-ai`**: "AI Assistant" in the UI, "Instance AI" in code — AI assistant backend. See its `CLAUDE.md` for architecture docs.
- **`@n8n/design-system`**: Vue component library for UI consistency
- **`@n8n/config`**: Centralized configuration management

View file

@@ -1,3 +1,184 @@
# [2.18.0](https://github.com/n8n-io/n8n/compare/n8n@2.17.0...n8n@2.18.0) (2026-04-21)
### Bug Fixes
* **ai-builder:** Increase orchestrator max steps from default 5 to 60 ([#28429](https://github.com/n8n-io/n8n/issues/28429)) ([3c850f2](https://github.com/n8n-io/n8n/commit/3c850f2711d53ded62a3540c67b9ec02143cbb3f))
* **ai-builder:** Scope artifacts panel to resources produced in-thread ([#28678](https://github.com/n8n-io/n8n/issues/28678)) ([7b3696f](https://github.com/n8n-io/n8n/commit/7b3696f3f7d95ab3cbaeb8ca58fdc74264a83b52))
* **ai-builder:** Use placeholders for user-provided values instead of hardcoding fake addresses ([#28407](https://github.com/n8n-io/n8n/issues/28407)) ([39c6217](https://github.com/n8n-io/n8n/commit/39c62171092618149fa67ccb9a384a5a3aadd4e8))
* **Alibaba Cloud Chat Model Node:** Add credential-level url field for AI gateway compatibility ([#28697](https://github.com/n8n-io/n8n/issues/28697)) ([dd6c28c](https://github.com/n8n-io/n8n/commit/dd6c28c6d16274354b83d3cc6a731f2f7a859a14))
* **ClickUp Node:** Unclear error message when using OAuth credentials ([#28584](https://github.com/n8n-io/n8n/issues/28584)) ([19aadf1](https://github.com/n8n-io/n8n/commit/19aadf19f753d64cc2cd80af3c5b3dd957a4ede7))
* **core:** Add required field validation to MCP OAuth client registration ([#28490](https://github.com/n8n-io/n8n/issues/28490)) ([8716316](https://github.com/n8n-io/n8n/commit/87163163e67001f69a2a2d7b4a650e0511614d62))
* **core:** Cascade-cancel dependent planned tasks when a parent task fails ([#28656](https://github.com/n8n-io/n8n/issues/28656)) ([35f9bed](https://github.com/n8n-io/n8n/commit/35f9bed4de39350717192d9f272ad044ad50b323))
* **core:** Enforce credential access checks in dynamic node parameter requests ([#28446](https://github.com/n8n-io/n8n/issues/28446)) ([ac41112](https://github.com/n8n-io/n8n/commit/ac411127314921aaf82b7b97d76eeaa2703b708c))
* **core:** Ensure single zod instance across workspace packages ([#28604](https://github.com/n8n-io/n8n/issues/28604)) ([107c48f](https://github.com/n8n-io/n8n/commit/107c48f65c10d26f8f01d1bee5d2eb77b9d26084))
* **core:** Filter stale credentials from setup wizard requests ([#28478](https://github.com/n8n-io/n8n/issues/28478)) ([657bdf1](https://github.com/n8n-io/n8n/commit/657bdf136fd0fc01cee3629baf65e130ee80840a))
* **core:** Fix public API package update process ([#28475](https://github.com/n8n-io/n8n/issues/28475)) ([34430ae](https://github.com/n8n-io/n8n/commit/34430aedb15fa5305be475582e04f08967415e38))
* **core:** Fix workflow-sdk validation for plain workflow objects ([#28416](https://github.com/n8n-io/n8n/issues/28416)) ([62dc073](https://github.com/n8n-io/n8n/commit/62dc073b3d954dc885359962c02ae8aa84d17c43))
* **core:** Guard against undefined config properties in credential overwrites ([#28573](https://github.com/n8n-io/n8n/issues/28573)) ([77d27bc](https://github.com/n8n-io/n8n/commit/77d27bc826e4e91c2c589a62cbb6b997cacccd16))
* **core:** Handle git fetch failure during source control startup ([#28422](https://github.com/n8n-io/n8n/issues/28422)) ([fa3299d](https://github.com/n8n-io/n8n/commit/fa3299d0425dfa2eaeaca6732dc46e0181e6fd68))
* **core:** Handle invalid percent sequences and equals signs in HTTP response headers ([#27691](https://github.com/n8n-io/n8n/issues/27691)) ([ca71d89](https://github.com/n8n-io/n8n/commit/ca71d89d885d01f8663e29a2a5b1f06c713aede8))
* **core:** Implement data tables name collision detection on pull ([#26416](https://github.com/n8n-io/n8n/issues/26416)) ([e5aaeb5](https://github.com/n8n-io/n8n/commit/e5aaeb53a93c63a04978e2a6eb7aa7255fcf510b))
* **core:** Prevent nodes tool crash on flattened required fields ([#28670](https://github.com/n8n-io/n8n/issues/28670)) ([3e72430](https://github.com/n8n-io/n8n/commit/3e724303c537739319e91f8bcaf7070fe105ffc7))
* **core:** Resolve additional keys lazily in VM expression engine ([#28430](https://github.com/n8n-io/n8n/issues/28430)) ([98b833a](https://github.com/n8n-io/n8n/commit/98b833a07d6d0f705633d7cb48298ee953688bd1))
* **core:** Skip disabled Azure Key Vault secrets and handle partial fetch failures ([#28325](https://github.com/n8n-io/n8n/issues/28325)) ([6217d08](https://github.com/n8n-io/n8n/commit/6217d08ce9b53d6fd5277fa0708ed13d36e0e934))
* **core:** Skip npm outdated check for verified-only community packages ([#28335](https://github.com/n8n-io/n8n/issues/28335)) ([2959b4d](https://github.com/n8n-io/n8n/commit/2959b4dc2a6cfd3733cc83bd6442dddd4cff08d2))
* Disable axios built-in proxy for OAuth2 token requests ([#28513](https://github.com/n8n-io/n8n/issues/28513)) ([56f36a6](https://github.com/n8n-io/n8n/commit/56f36a6d1961d95780fb8258e8876d7d512503c2))
* **editor:** Advance wizard step on Continue instead of applying setup ([#28698](https://github.com/n8n-io/n8n/issues/28698)) ([3b15e47](https://github.com/n8n-io/n8n/commit/3b15e470b54b13e9fe68e81c81a757c06b264783))
* **editor:** Center sub-node icons and refresh triggers panel icons ([#28515](https://github.com/n8n-io/n8n/issues/28515)) ([6739856](https://github.com/n8n-io/n8n/commit/6739856aa32689b43d143ae4909e1f3d85dc4106))
* **editor:** Display placeholder sentinels as hint text in setup wizard ([#28482](https://github.com/n8n-io/n8n/issues/28482)) ([bb7d137](https://github.com/n8n-io/n8n/commit/bb7d137cf735bcdf65bbcf8ff58fa911d83121f5))
* **editor:** Gate Instance AI input while setup wizard is open ([#28685](https://github.com/n8n-io/n8n/issues/28685)) ([db83a95](https://github.com/n8n-io/n8n/commit/db83a95522957c10a3466f0b57944c8b8827347a))
* **editor:** Hide setup parameter issue icons until user interacts with input ([#28010](https://github.com/n8n-io/n8n/issues/28010)) ([00b0558](https://github.com/n8n-io/n8n/commit/00b0558c2b1ed6bc4d47a86cb1bfca8eb55a47bc))
* **editor:** Improve disabled Google sign-in button styling and tooltip alignment ([#28536](https://github.com/n8n-io/n8n/issues/28536)) ([e848230](https://github.com/n8n-io/n8n/commit/e8482309478eed05793dcaa4d82185936439663f))
* **editor:** Improve setup wizard placeholder detection and card completion scoping ([#28474](https://github.com/n8n-io/n8n/issues/28474)) ([d172113](https://github.com/n8n-io/n8n/commit/d17211342e4ee8c8ec89a9c918017884e2de0763))
* **editor:** Only show role assignment warning modal when value actually changed ([#28387](https://github.com/n8n-io/n8n/issues/28387)) ([9c97931](https://github.com/n8n-io/n8n/commit/9c97931ca06d407bec1c6a8bab510d206afba394))
* **editor:** Prevent setup wizard disappearing on requestId-driven remount ([#28473](https://github.com/n8n-io/n8n/issues/28473)) ([04d57c5](https://github.com/n8n-io/n8n/commit/04d57c5fd62a5b9a2e086a3f540b7f50a932b62d))
* **editor:** Re-initialize SSO store after login to populate OIDC redirect URL ([#28386](https://github.com/n8n-io/n8n/issues/28386)) ([21317b8](https://github.com/n8n-io/n8n/commit/21317b8945dec9169e36b7e5fdf867713018661d))
* **editor:** Refine resource dependency badge ([#28087](https://github.com/n8n-io/n8n/issues/28087)) ([f216fda](https://github.com/n8n-io/n8n/commit/f216fda511062a40199b986351693677ebb2919e))
* **editor:** Reset OIDC form dirty state after saving IdP settings ([#28388](https://github.com/n8n-io/n8n/issues/28388)) ([1042350](https://github.com/n8n-io/n8n/commit/1042350f4e0f6ed44b51a1d707de665f71437faa))
* **editor:** Reset remote values on credentials change ([#26282](https://github.com/n8n-io/n8n/issues/26282)) ([5e11197](https://github.com/n8n-io/n8n/commit/5e111975d4086c060ac3d29d07da7c00ea2103a1))
* **editor:** Resolve nodes stuck on loading after execution in instance-ai preview ([#28450](https://github.com/n8n-io/n8n/issues/28450)) ([c97c3b4](https://github.com/n8n-io/n8n/commit/c97c3b4d12e166091be9ea1de969a17d64c36ec2))
* **editor:** Restore WASM file paths for cURL import in HTTP Request node ([#28610](https://github.com/n8n-io/n8n/issues/28610)) ([51bc71e](https://github.com/n8n-io/n8n/commit/51bc71e897e2baaf729963bf0f373a73505aee43))
* **editor:** Show auth type selector in Instance AI workflow setup ([#28707](https://github.com/n8n-io/n8n/issues/28707)) ([1b13d32](https://github.com/n8n-io/n8n/commit/1b13d325f12a5a27d139c75164114ee41583a902))
* **editor:** Show relevant node in workflow activation errors ([#26691](https://github.com/n8n-io/n8n/issues/26691)) ([c9cab11](https://github.com/n8n-io/n8n/commit/c9cab112f99a5da2742012773450bf7721484c28))
* **Google Cloud Firestore Node:** Fix empty array serialization in jsonToDocument ([#28213](https://github.com/n8n-io/n8n/issues/28213)) ([7094395](https://github.com/n8n-io/n8n/commit/7094395cef8e71f767df6fa5e242cf2fa42366ed))
* **Google Drive Node:** Continue on error support for download file operation ([#28276](https://github.com/n8n-io/n8n/issues/28276)) ([30128c9](https://github.com/n8n-io/n8n/commit/30128c9254be2214e746e0158296c1f1bd8ab4d8))
* **Google Gemini Node:** Determine the file extention from MIME type for image and video operations ([#28616](https://github.com/n8n-io/n8n/issues/28616)) ([73659cb](https://github.com/n8n-io/n8n/commit/73659cb3e7eccd48a739829be0a4d7a6557ce4a1))
* **GraphQL Node:** Improve error response handling ([#28209](https://github.com/n8n-io/n8n/issues/28209)) ([357fb72](https://github.com/n8n-io/n8n/commit/357fb7210ab201e13e2d3256a7886cf382656f22))
* **HubSpot Node:** Rename HubSpot "App Token" auth to "Service Key" ([#28479](https://github.com/n8n-io/n8n/issues/28479)) ([8c3e692](https://github.com/n8n-io/n8n/commit/8c3e6921741f0e28ba28f8fb39797d5e19db71c9))
* **HubSpot Trigger Node:** Add missing property selectors ([#28595](https://github.com/n8n-io/n8n/issues/28595)) ([d179f66](https://github.com/n8n-io/n8n/commit/d179f667c0044fd246d8e8535cd3a741d3f96b6f))
* **IMAP Node:** Fix out-of-memory crash after ECONNRESET on reconnect ([#28290](https://github.com/n8n-io/n8n/issues/28290)) ([2d0b231](https://github.com/n8n-io/n8n/commit/2d0b231e31f265f39dd95d6794bd74d9b5592056))
* Link to n8n website broken in n8n forms ([#28627](https://github.com/n8n-io/n8n/issues/28627)) ([ff950e5](https://github.com/n8n-io/n8n/commit/ff950e5840214c515d413b45f174d9638a51dd39))
* **LinkedIn Node:** Update LinkedIn API version in request headers ([#28564](https://github.com/n8n-io/n8n/issues/28564)) ([25e07ca](https://github.com/n8n-io/n8n/commit/25e07cab5a66b04960753055131d355e0323d971))
* **OpenAI Node:** Replace hardcoded models with RLC ([#28226](https://github.com/n8n-io/n8n/issues/28226)) ([4070930](https://github.com/n8n-io/n8n/commit/4070930e4c080c634df9b241175941c48afed9dc))
* **Schedule Node:** Use elapsed-time check to self-heal after missed triggers ([#28423](https://github.com/n8n-io/n8n/issues/28423)) ([5f8ab01](https://github.com/n8n-io/n8n/commit/5f8ab01f9bb26f4d27f6f882fe1024f27caf4d67))
* Update working memory using tools ([#28467](https://github.com/n8n-io/n8n/issues/28467)) ([39189c3](https://github.com/n8n-io/n8n/commit/39189c39859fbb4c1562a03ae3e6cd29195f7d1d))
### Features
* Add deployment_key table, entity, repository, and migration ([#28329](https://github.com/n8n-io/n8n/issues/28329)) ([59edd6a](https://github.com/n8n-io/n8n/commit/59edd6ae5421aa6be34ee009a3024e0ca9843467))
* Add Prometheus counters for token exchange ([#28453](https://github.com/n8n-io/n8n/issues/28453)) ([c6534fa](https://github.com/n8n-io/n8n/commit/c6534fa0b389a394e7591d3fc5ec565409279004))
* AI Gateway credentials endpoint instance url ([#28520](https://github.com/n8n-io/n8n/issues/28520)) ([d012346](https://github.com/n8n-io/n8n/commit/d012346c777455de5bde9cab218f0c4f2d712fa0))
* **API:** Add missing credential endpoints (GET by ID and test) ([#28519](https://github.com/n8n-io/n8n/issues/28519)) ([9a65549](https://github.com/n8n-io/n8n/commit/9a65549575bb201c3f55888d71e04663f622eb5b))
* **core:** Add `require-node-description-fields` ESLint rule for icon and subtitle ([#28400](https://github.com/n8n-io/n8n/issues/28400)) ([5504099](https://github.com/n8n-io/n8n/commit/550409923a3d8d6961648674024eabb0d0749cfc))
* **core:** Add KeyManagerService for encryption key lifecycle management ([#28533](https://github.com/n8n-io/n8n/issues/28533)) ([9dd3e59](https://github.com/n8n-io/n8n/commit/9dd3e59acb6eb94bb38ffe01677ea1c9a108d87b))
* **core:** Configure OIDC settings via env vars ([#28185](https://github.com/n8n-io/n8n/issues/28185)) ([36261fb](https://github.com/n8n-io/n8n/commit/36261fbe7ad55a7b3bcc19809b6decb401b245bb))
* **core:** Persist deployment_key entries for stability across restarts and key rotation ([#28518](https://github.com/n8n-io/n8n/issues/28518)) ([bb96d2e](https://github.com/n8n-io/n8n/commit/bb96d2e50a6b7cd77ea6256bb1446e8b3b348bd2))
* **core:** Support npm dist-tags in community node installation ([#28067](https://github.com/n8n-io/n8n/issues/28067)) ([ca871cc](https://github.com/n8n-io/n8n/commit/ca871cc10aca97de8c0892e0735c9fa2ed16d251))
* **core:** Support npm registry token authentication to install private community node packages ([#28228](https://github.com/n8n-io/n8n/issues/28228)) ([8b105cc](https://github.com/n8n-io/n8n/commit/8b105cc0cf6e84e069f6b7f3a98c334cd44876c1))
* **core:** Track workflow action source for external API and MCP requests ([#28483](https://github.com/n8n-io/n8n/issues/28483)) ([575c34e](https://github.com/n8n-io/n8n/commit/575c34eae1bdf8e9d5d5fe7d31c92f57f27fcc27))
* **core:** Workflow tracing - add workflow version id ([#28424](https://github.com/n8n-io/n8n/issues/28424)) ([9a22fe5](https://github.com/n8n-io/n8n/commit/9a22fe5a255b20be7d0e78fff7e03bf79e50a62f))
* **editor:** Add favoriting for projects, folders, workflows and data tables ([#26228](https://github.com/n8n-io/n8n/issues/26228)) ([b1a075f](https://github.com/n8n-io/n8n/commit/b1a075f7609045620563f86df0e15d27b1176d45))
* **editor:** Enable workflow execution from instance AI preview canvas ([#28412](https://github.com/n8n-io/n8n/issues/28412)) ([5b376cb](https://github.com/n8n-io/n8n/commit/5b376cb12d6331e4e458a1f1880fcddce76d1db9))
* Enable security policy settings via env vars ([#28321](https://github.com/n8n-io/n8n/issues/28321)) ([1108467](https://github.com/n8n-io/n8n/commit/1108467f44bf987c0f5a5a0eafb6396e2745b8ce))
* **Linear Trigger Node:** Add signing secret validation ([#28522](https://github.com/n8n-io/n8n/issues/28522)) ([3b248ee](https://github.com/n8n-io/n8n/commit/3b248eedc289c62f32f16da677c75b25df0fcb9f))
* **MiniMax Chat Model Node:** Add MiniMax Chat Model sub-node ([#28305](https://github.com/n8n-io/n8n/issues/28305)) ([bd927d9](https://github.com/n8n-io/n8n/commit/bd927d93503a65e0be18c4c40e68dcad96f68d82))
* **Slack Node:** Add app_home_opened as a dedicated trigger event ([#28626](https://github.com/n8n-io/n8n/issues/28626)) ([f1dab3e](https://github.com/n8n-io/n8n/commit/f1dab3e29530ee596d68db474024ddbae5fa055a))
### Reverts
* Make Wait node fully durable by removing in-memory execution path ([#28538](https://github.com/n8n-io/n8n/issues/28538)) ([bb9bec3](https://github.com/n8n-io/n8n/commit/bb9bec3ba419d46450122411839f20cd614db920))
# [2.17.0](https://github.com/n8n-io/n8n/compare/n8n@2.16.0...n8n@2.17.0) (2026-04-13)
### Bug Fixes
* Add credential auth and test for PostHog, NASA, Peekalink, Clearbit, Uptime Robot ([#27957](https://github.com/n8n-io/n8n/issues/27957)) ([c1b5c96](https://github.com/n8n-io/n8n/commit/c1b5c96f62ffd4e61d9dc5a46a231f20f4fdc5c7))
* **ai-builder:** Expose credential account context to prevent prompt/credential mismatch ([#28100](https://github.com/n8n-io/n8n/issues/28100)) ([c2fbf9d](https://github.com/n8n-io/n8n/commit/c2fbf9d64322db8bf78b724e6c32c0482181b23a))
* **ai-builder:** Improve post-build flow: setup, test, then publish ([#28125](https://github.com/n8n-io/n8n/issues/28125)) ([4b3b40e](https://github.com/n8n-io/n8n/commit/4b3b40e2385b9e8be68a8f7f05e7aae4dedaf847))
* **ai-builder:** Paginate list-credentials tool and drop unused fields ([#28108](https://github.com/n8n-io/n8n/issues/28108)) ([4a3fc7d](https://github.com/n8n-io/n8n/commit/4a3fc7d27cd32944324ddab098dfacc17ac63e68))
* **ai-builder:** Unify post-build credential setup into single setup-workflow flow ([#28273](https://github.com/n8n-io/n8n/issues/28273)) ([8f8b70a](https://github.com/n8n-io/n8n/commit/8f8b70a301b0ca1d7fdcdecc4fd0d0131b8c9b5a))
* **AWS DynamoDB Node:** Add option to disable auto-parsing of numeric strings ([#28093](https://github.com/n8n-io/n8n/issues/28093)) ([4b06720](https://github.com/n8n-io/n8n/commit/4b06720c8b7dd8b8d435690f7b594340a59f9f8a))
* **AWS ELB Node:** Fix spelling typo 'sucess' → 'success' in RemoveListenerCertificates ([#27703](https://github.com/n8n-io/n8n/issues/27703)) ([be45c08](https://github.com/n8n-io/n8n/commit/be45c085fb26174f04f4387516c65dc1cd547320))
* **Box Node:** Fix issue where Box trigger node was not paginating correctly ([#27415](https://github.com/n8n-io/n8n/issues/27415)) ([4b05191](https://github.com/n8n-io/n8n/commit/4b0519167fd8fcb77ed7bb73c1702be843215c91))
* **core:** Add projectId and projectName to log streaming events ([#28310](https://github.com/n8n-io/n8n/issues/28310)) ([ebd279f](https://github.com/n8n-io/n8n/commit/ebd279f88c532773e569af48aab06a82c1f40cee))
* **core:** Add streaming keepalive to prevent proxy timeout during long agent executions ([#27853](https://github.com/n8n-io/n8n/issues/27853)) ([b0484a1](https://github.com/n8n-io/n8n/commit/b0484a15553a818bbad9226ce5756e125723bbee))
* **core:** Align VM expression engine error handler with legacy engine ([#28166](https://github.com/n8n-io/n8n/issues/28166)) ([569ad49](https://github.com/n8n-io/n8n/commit/569ad497b7bd2cbb61b5fc256da3dae2976fddff))
* **core:** Avoid permanent deactivation on transient isolate errors ([#28117](https://github.com/n8n-io/n8n/issues/28117)) ([94b463e](https://github.com/n8n-io/n8n/commit/94b463e2a0e11979dae4197838eb03f1ef0c7a4a))
* **core:** Decrease workflow history compaction retention periods ([#27763](https://github.com/n8n-io/n8n/issues/27763)) ([ccd4fd0](https://github.com/n8n-io/n8n/commit/ccd4fd0fc881af83ecfcfb8ff8f393be83dd7dc8))
* **core:** Drain webhook close functions to prevent MCP connection leaks ([#28384](https://github.com/n8n-io/n8n/issues/28384)) ([882dd9c](https://github.com/n8n-io/n8n/commit/882dd9ce531fcb557d83a568228cc068d398518f))
* **core:** Fix retry activation in multi-main bypassing exponential backoff ([#28110](https://github.com/n8n-io/n8n/issues/28110)) ([2ed3f9c](https://github.com/n8n-io/n8n/commit/2ed3f9c336bb8285f748b1b082c1609ad8444094))
* **core:** Improve audit queries to avoid PostgreSQL bind parameter limits ([#27985](https://github.com/n8n-io/n8n/issues/27985)) ([9ab974b](https://github.com/n8n-io/n8n/commit/9ab974b7b0afdb2866d8d7cf191f449ce6fcf49e))
* **core:** Improve audit queries to avoid PostgreSQL bind parameter limits ([#27985](https://github.com/n8n-io/n8n/issues/27985)) ([dbe3f02](https://github.com/n8n-io/n8n/commit/dbe3f022f1364c1c738963280bbeea12833bdd4c))
* **core:** Improve performance of the push/pull modal getStatus ([#27188](https://github.com/n8n-io/n8n/issues/27188)) ([309a739](https://github.com/n8n-io/n8n/commit/309a7392710925cb5c61fd3c59eb87b02b157cf6))
* **core:** Increase timing delays in workflow publish history tests ([#28301](https://github.com/n8n-io/n8n/issues/28301)) ([b353143](https://github.com/n8n-io/n8n/commit/b353143543d98418344f9e6b9233d6bb0d4b778c))
* **core:** MCP tools called after workflow execution failure ([#28021](https://github.com/n8n-io/n8n/issues/28021)) ([2e56ba1](https://github.com/n8n-io/n8n/commit/2e56ba137d450e235bd248e40df99e95d2da2c07))
* **core:** Omit empty scope from OAuth2 client credentials token request and improve error messaging ([#28159](https://github.com/n8n-io/n8n/issues/28159)) ([3db52dc](https://github.com/n8n-io/n8n/commit/3db52dca22c856cb161f88d8a932729a88e80f0a))
* **core:** Propagate formidable parse errors in Form Trigger ([#28217](https://github.com/n8n-io/n8n/issues/28217)) ([2d22c65](https://github.com/n8n-io/n8n/commit/2d22c65e509c54cebb75a65935fe78b85d3f40f5))
* **core:** Scope deferred tool processors per run ([#28068](https://github.com/n8n-io/n8n/issues/28068)) ([a9bc92f](https://github.com/n8n-io/n8n/commit/a9bc92f83de42df9d9e76f5891b9a32433a3161d))
* **core:** Use closure-scoped evaluation contexts in VM expression bridge ([#28337](https://github.com/n8n-io/n8n/issues/28337)) ([3d8da49](https://github.com/n8n-io/n8n/commit/3d8da49ee41f18018c6233ab1af6e0334b4ccae0))
* **editor:** AI builder setup wizard positioning and popover collision ([#27821](https://github.com/n8n-io/n8n/issues/27821)) ([aca249e](https://github.com/n8n-io/n8n/commit/aca249e856cbf2877905594161b0f5a66d83fbf2))
* **editor:** Improve popover positioning defaults and animations ([#27919](https://github.com/n8n-io/n8n/issues/27919)) ([5a01bb3](https://github.com/n8n-io/n8n/commit/5a01bb308e992a3a28f88ad3f477735ffedefb0d))
* **editor:** Keep Back before Continue in MFA login footer ([#27911](https://github.com/n8n-io/n8n/issues/27911)) ([72ebb43](https://github.com/n8n-io/n8n/commit/72ebb430f4c2d80b971f81e902ea7b97e92c4202))
* **editor:** Move save button to credential modal header ([#28287](https://github.com/n8n-io/n8n/issues/28287)) ([9a8631d](https://github.com/n8n-io/n8n/commit/9a8631da38330691fdc2b63be67c7ae2084460c9))
* **editor:** Remove default for api params ([#27914](https://github.com/n8n-io/n8n/issues/27914)) ([5e60272](https://github.com/n8n-io/n8n/commit/5e602726327074434be0116ed52803225dfa5c45))
* **editor:** Removing redundant stop of key propogation ([#23464](https://github.com/n8n-io/n8n/issues/23464)) ([33282db](https://github.com/n8n-io/n8n/commit/33282dbeb99500962b4574245768636b4b729c3e))
* **editor:** Skip only current step when clicking Later in workflow setup ([#27929](https://github.com/n8n-io/n8n/issues/27929)) ([91a1282](https://github.com/n8n-io/n8n/commit/91a1282db6a2439d27bc34b0576adedc0a76cca9))
* **editor:** UI tweaks for instance AI components ([#27917](https://github.com/n8n-io/n8n/issues/27917)) ([91ce8ea](https://github.com/n8n-io/n8n/commit/91ce8ea93cae582a4d6486b7d7ac5fbc1a02390d))
* **editor:** UI tweaks for instance AI components ([#28155](https://github.com/n8n-io/n8n/issues/28155)) ([aa6c322](https://github.com/n8n-io/n8n/commit/aa6c322059a61deb6faf92051bf103bd1455e840))
* **Facebook Lead Ads Node:** Add missing pages_read_engagement scope ([#27379](https://github.com/n8n-io/n8n/issues/27379)) ([290005e](https://github.com/n8n-io/n8n/commit/290005e0e80efe6d51142726a59589e6986657ee))
* **Gitlab Node:** Handle binary data in all storage modes ([#28363](https://github.com/n8n-io/n8n/issues/28363)) ([72d0f9b](https://github.com/n8n-io/n8n/commit/72d0f9b98c065527dddcd74d92cd258e51d80a9c))
* **Google Drive Node:** Fix infinite pagination loop in v1 API request ([#28244](https://github.com/n8n-io/n8n/issues/28244)) ([b964ec9](https://github.com/n8n-io/n8n/commit/b964ec958812ef541a3325e463f5144a3252edf4))
* Handle normalization of JSON for SQLite / postgres ([#28242](https://github.com/n8n-io/n8n/issues/28242)) ([8f2da63](https://github.com/n8n-io/n8n/commit/8f2da63871c6ca3285377b34992624be0f7dfc93))
* **HTTP Request Node:** Fix multipart/form-data file upload with binary streams ([#28233](https://github.com/n8n-io/n8n/issues/28233)) ([5fb777e](https://github.com/n8n-io/n8n/commit/5fb777e14e886fcfa9f640b66b4a30752d7623fa))
* **Http Request Node:** Handle empty JSON responses ([#27793](https://github.com/n8n-io/n8n/issues/27793)) ([1899a4e](https://github.com/n8n-io/n8n/commit/1899a4e2845f25e68455a0fc4124c11ecdf6adfc))
* **HubSpot Trigger Node:** Add missing tickets scope to OAuth credentials ([#27599](https://github.com/n8n-io/n8n/issues/27599)) ([d7d18a0](https://github.com/n8n-io/n8n/commit/d7d18a04c825194d38ad5932a82c6669416bfea3))
* **ICalendar Node:** Fix Convert to ICS failing when File Name option is set ([#27712](https://github.com/n8n-io/n8n/issues/27712)) ([294868d](https://github.com/n8n-io/n8n/commit/294868de5a0d2e01b4569d905a7ea45ab8f30123))
* **If Node:** Patches IF node when fields are missing ([#28014](https://github.com/n8n-io/n8n/issues/28014)) ([1e22e0a](https://github.com/n8n-io/n8n/commit/1e22e0ad511b2dc992fefe8c53bb7f5a845467d8))
* Improve browser use Chrome extension connection stability ([#27846](https://github.com/n8n-io/n8n/issues/27846)) ([94f0a4d](https://github.com/n8n-io/n8n/commit/94f0a4db5d353ff08d5fe8e2da7b8f3e93d7c1b0))
* **MCP Client Node:** Ensure MCP connections close when MCP Client node execution ends ([#25742](https://github.com/n8n-io/n8n/issues/25742)) ([752a4e4](https://github.com/n8n-io/n8n/commit/752a4e47d4581072e6f16159d179fcd13d6178bf))
* **Microsoft Outlook Node:** Prevent poll from skipping messages after API errors ([#28157](https://github.com/n8n-io/n8n/issues/28157)) ([853a740](https://github.com/n8n-io/n8n/commit/853a74044f89a16a05c9306d4005a31a82a9bfff))
* **Microsoft Teams Node:** Block requests from Microsoft Preview Service to prevent accidental approvals for "Send and Wait" ([#28085](https://github.com/n8n-io/n8n/issues/28085)) ([4fab655](https://github.com/n8n-io/n8n/commit/4fab655cc50c8f694b6a9d3dadacd127a02c11f1))
* **Oracle Node:** Resolve 'Maximum call stack size exceeded' on large datasets ([#27037](https://github.com/n8n-io/n8n/issues/27037)) ([bd5a702](https://github.com/n8n-io/n8n/commit/bd5a70215d19e46a53ef8705475616a737d4094f))
* Prohibit tool access to gateway settings directory ([#28320](https://github.com/n8n-io/n8n/issues/28320)) ([dab714f](https://github.com/n8n-io/n8n/commit/dab714f96144b668018b4bc9bade33ab4d83b684))
* **Pushover Node:** Replace duplicate Pushover Timestamp field with the missing TTL field ([#11287](https://github.com/n8n-io/n8n/issues/11287)) ([c0c0f83](https://github.com/n8n-io/n8n/commit/c0c0f8397c02df6745e186b9e205c64cd673130e))
* Truncate long custom role names and add hover tooltip ([#28191](https://github.com/n8n-io/n8n/issues/28191)) ([0d078c7](https://github.com/n8n-io/n8n/commit/0d078c75f00ff6182acb5390b8350c3c5255dbe0))
* Update lodash, lodash-es, and xmldom to latest stable versions ([#28121](https://github.com/n8n-io/n8n/issues/28121)) ([09c9b11](https://github.com/n8n-io/n8n/commit/09c9b11fff069c50c98b7fa750c93d2d5f26b978))
### Features
* Add AI Gateway support for AI nodes ([#27593](https://github.com/n8n-io/n8n/issues/27593)) ([6e2d356](https://github.com/n8n-io/n8n/commit/6e2d35644f99c95bc912c4e00bf938cdc3260f06))
* AI Gateway Top Up Flow ([#28113](https://github.com/n8n-io/n8n/issues/28113)) ([2c4b974](https://github.com/n8n-io/n8n/commit/2c4b9749c76ed96dc547ee546baa4fe37a74fe80))
* **ai-builder:** Improve sub-agent context passing with structured briefings and debriefings ([#28317](https://github.com/n8n-io/n8n/issues/28317)) ([e78f144](https://github.com/n8n-io/n8n/commit/e78f144e8e2c3c534a60d1ade70c4b2e501fd073))
* **ai-builder:** Workflow evaluation framework with LLM mock execution ([#27818](https://github.com/n8n-io/n8n/issues/27818)) ([2383749](https://github.com/n8n-io/n8n/commit/23837499802e8ef31c66f49d664311e2de4df9aa))
* **Alibaba Cloud Model Studio Node:** Add new node ([#27928](https://github.com/n8n-io/n8n/issues/27928)) ([1148d27](https://github.com/n8n-io/n8n/commit/1148d27725b2bf1b3868b67211494e78a82045ab))
* **API:** Add insights summary endpoint to public API ([#28099](https://github.com/n8n-io/n8n/issues/28099)) ([13d153e](https://github.com/n8n-io/n8n/commit/13d153ef1e8498803ff63c53f436530aa5cb60d2))
* **core:** Add 'verify' option to installPackage handler and update … ([#28257](https://github.com/n8n-io/n8n/issues/28257)) ([dfdc6d2](https://github.com/n8n-io/n8n/commit/dfdc6d2c75a562b135c5c0d3e849d57779d55ebe))
* **core:** Add audit logging for expression-based role assignments ([#28018](https://github.com/n8n-io/n8n/issues/28018)) ([0ce8146](https://github.com/n8n-io/n8n/commit/0ce81461abd177d6752a9bad881b08c35fb06051))
* **core:** Add in-process mutex for SQLite advisory lock parity ([#28135](https://github.com/n8n-io/n8n/issues/28135)) ([4eb99b9](https://github.com/n8n-io/n8n/commit/4eb99b9c88401eee8ed2726bad311884cf113ad7))
* **core:** Add missing-paired-item lint rule for community nodes ([#28118](https://github.com/n8n-io/n8n/issues/28118)) ([e282fcd](https://github.com/n8n-io/n8n/commit/e282fcdf0fe006c3d61158f359330ffe71b8fbd7))
* **core:** Add no-forbidden-lifecycle-scripts lint rule for community nodes ([#28176](https://github.com/n8n-io/n8n/issues/28176)) ([4f725da](https://github.com/n8n-io/n8n/commit/4f725dab1bf3220963c825b8b04659bac518447b))
* **core:** Add telemetry events for AI builder journey ([#28116](https://github.com/n8n-io/n8n/issues/28116)) ([8cdcab3](https://github.com/n8n-io/n8n/commit/8cdcab3cc80f8a5b306c33dd70f6e08f80a9953f))
* **core:** Add telemetry for data redaction settings and reveal data ([#28396](https://github.com/n8n-io/n8n/issues/28396)) ([21c0bf3](https://github.com/n8n-io/n8n/commit/21c0bf3048bbe559ad11831773816648adc43bca))
* **core:** Add userRole and feature-enabled telemetry for external secrets ([#27431](https://github.com/n8n-io/n8n/issues/27431)) ([346d4f1](https://github.com/n8n-io/n8n/commit/346d4f1597fafaba2d0c4f768eb358e2921f1d68))
* **core:** Emit audit events for workflow activation on bootup ([#28126](https://github.com/n8n-io/n8n/issues/28126)) ([4ccd727](https://github.com/n8n-io/n8n/commit/4ccd72716e7f5bbe3aa868378d0bdd9cfc7ad349))
* **core:** Enable credential creation per project in public API ([#28240](https://github.com/n8n-io/n8n/issues/28240)) ([8cd75d2](https://github.com/n8n-io/n8n/commit/8cd75d2f2dae70820079fbdc99fe2ad83e048cad))
* **core:** Enable instance owner setup via environment variables ([#27859](https://github.com/n8n-io/n8n/issues/27859)) ([1b995cd](https://github.com/n8n-io/n8n/commit/1b995cde180914bd228fdb8d116594e25095311e))
* **core:** Make VM expression bridge timeout and memory limit configurable ([#27962](https://github.com/n8n-io/n8n/issues/27962)) ([f8c2127](https://github.com/n8n-io/n8n/commit/f8c21276cb769cad9dc302dc5536a609f6ed35ac))
* **core:** Support projectId when creating workflow via public API ([#27884](https://github.com/n8n-io/n8n/issues/27884)) ([524166e](https://github.com/n8n-io/n8n/commit/524166e0f18d428b55f110bde27cb0e18ebe58c1))
* Disable manual role management when expression-based mapping is enabled ([#28105](https://github.com/n8n-io/n8n/issues/28105)) ([26d578d](https://github.com/n8n-io/n8n/commit/26d578dfc8e6f01d364ec30841fad5e7fc344869))
* **editor:** Add expression-based role mapping plumbing ([#27686](https://github.com/n8n-io/n8n/issues/27686)) ([4e6b4fc](https://github.com/n8n-io/n8n/commit/4e6b4fc3be372391e821d88f4173e0945fd959ef))
* **editor:** Add Instance AI prompt suggestions ([#27984](https://github.com/n8n-io/n8n/issues/27984)) ([22afd80](https://github.com/n8n-io/n8n/commit/22afd80759391602a9c60c9e50eef770ecf17fff))
* **editor:** Add instance rules editor with drag-to-reorder ([#27688](https://github.com/n8n-io/n8n/issues/27688)) ([a6b051b](https://github.com/n8n-io/n8n/commit/a6b051bfe310362b7b7882cc12af4f17ece6ffa9))
* **editor:** Add project rules, fallback role, remove mapping, save flow ([#27689](https://github.com/n8n-io/n8n/issues/27689)) ([91fec34](https://github.com/n8n-io/n8n/commit/91fec345b13c8b724de89a69db4b78c9b10d06f4))
* **editor:** Refactor role provisioning to two-dropdown layout ([#28024](https://github.com/n8n-io/n8n/issues/28024)) ([4c3a150](https://github.com/n8n-io/n8n/commit/4c3a1501fe743fc2d2d629732054e49b237a1456))
* **editor:** Update built-in node icons to custom SVGs ([#28104](https://github.com/n8n-io/n8n/issues/28104)) ([ea5b874](https://github.com/n8n-io/n8n/commit/ea5b874a8c88f1bd13c6c1e5633f79ad1b6449d9))
* Implement opt-in flow for n8n Agent enrolment ([#28006](https://github.com/n8n-io/n8n/issues/28006)) ([98be0ad](https://github.com/n8n-io/n8n/commit/98be0ad45253c5fd9a45bb5b47cd41c4062b2a3a))
* Implement session based permission modes in Computer Use ([#28184](https://github.com/n8n-io/n8n/issues/28184)) ([d3e6519](https://github.com/n8n-io/n8n/commit/d3e65197309e513bb1897046c747fd07b8eaf391))
* Limit computer use connections to only cloud instances ([#28304](https://github.com/n8n-io/n8n/issues/28304)) ([25e90ff](https://github.com/n8n-io/n8n/commit/25e90ffde32d72f38189bf8ae7a4f45f68ca9a67))
* **MCP Client Tool Node:** Prefix MCP tool names with server name ([#28094](https://github.com/n8n-io/n8n/issues/28094)) ([f5402dd](https://github.com/n8n-io/n8n/commit/f5402dd7f7e28fb000296906c49ab396fa22aaf8))
* **Moonshot Kimi Chat Model Node:** Add Moonshot Kimi Chat Model sub-node ([#28156](https://github.com/n8n-io/n8n/issues/28156)) ([5cbc973](https://github.com/n8n-io/n8n/commit/5cbc9734a42d579d75e12c731bc2fd57fdb178de))
* **Moonshot Kimi Node:** Add new node ([#28189](https://github.com/n8n-io/n8n/issues/28189)) ([e30d2ee](https://github.com/n8n-io/n8n/commit/e30d2eee60efd43ebbb89e5aab59554908468176))
* N8n Agent admin settings page with enable toggle and permissions ([#27913](https://github.com/n8n-io/n8n/issues/27913)) ([9b94862](https://github.com/n8n-io/n8n/commit/9b94862dc782c087f4e63e59fb81614d440e7afe))
* Rename extension to "Browser Use" and prepare for publishing ([#27898](https://github.com/n8n-io/n8n/issues/27898)) ([6bb90d4](https://github.com/n8n-io/n8n/commit/6bb90d43b6587380123bf4f65a382cca4b5b67e3))
* Update naming of local gateway to computer use ([#28111](https://github.com/n8n-io/n8n/issues/28111)) ([b841c73](https://github.com/n8n-io/n8n/commit/b841c736df676d5b132e551ecd4afe073ba40435))
# [2.16.0](https://github.com/n8n-io/n8n/compare/n8n@2.15.0...n8n@2.16.0) (2026-04-07)

View file

@ -54,7 +54,7 @@ The most important directories:
execution, active webhooks and
workflows. **Contact n8n before
starting on any changes here**
- [/packages/frontend/@n8n/design-system](/packages/design-system) - Vue frontend components
- [/packages/frontend/@n8n/design-system](/packages/frontend/@n8n/design-system) - Vue frontend components
- [/packages/frontend/editor-ui](/packages/editor-ui) - Vue frontend workflow editor
- [/packages/node-dev](/packages/node-dev) - CLI to create new n8n-nodes
- [/packages/nodes-base](/packages/nodes-base) - Base n8n nodes

View file

@ -53,7 +53,7 @@ n8n is [fair-code](https://faircode.io) distributed under the [Sustainable Use L
- **Self-Hostable**: Deploy anywhere
- **Extensible**: Add your own nodes and functionality
[Enterprise licenses](mailto:license@n8n.io) available for additional features and support.
[Enterprise Licenses](mailto:license@n8n.io) available for additional features and support.
Additional information about the license model can be found in the [docs](https://docs.n8n.io/sustainable-use-license/).

View file

@ -16,7 +16,8 @@
"**/CHANGELOG.md",
"**/cl100k_base.json",
"**/o200k_base.json",
"**/*.generated.ts"
"**/*.generated.ts",
"**/expectations/**"
]
},
"formatter": {

View file

@ -1,6 +1,6 @@
{
"name": "n8n-monorepo",
"version": "2.16.0",
"version": "2.18.0",
"private": true,
"engines": {
"node": ">=22.16",
@ -21,7 +21,7 @@
"typecheck": "turbo typecheck",
"dev": "turbo run dev --parallel --env-mode=loose --filter=!@n8n/design-system --filter=!@n8n/chat --filter=!@n8n/task-runner",
"dev:be": "turbo run dev --parallel --env-mode=loose --filter=!@n8n/design-system --filter=!@n8n/chat --filter=!@n8n/task-runner --filter=!n8n-editor-ui",
"dev:ai": "turbo run dev --parallel --env-mode=loose --filter=@n8n/nodes-langchain --filter=n8n --filter=n8n-core",
"dev:ai": "turbo run dev --parallel --env-mode=loose --filter=@n8n/n8n-nodes-langchain --filter=n8n --filter=n8n-core",
"dev:fe": "run-p start \"dev:fe:editor --filter=@n8n/design-system\"",
"dev:fe:editor": "turbo run dev --parallel --env-mode=loose --filter=n8n-editor-ui",
"dev:e2e": "pnpm --filter=n8n-playwright dev --ui",
@ -34,6 +34,7 @@
"lint:styles:fix": "turbo run lint:styles:fix",
"lint:affected": "turbo run lint --affected",
"lint:fix": "turbo run lint:fix",
"lint:ci": "turbo run lint lint:styles",
"optimize-svg": "find ./packages -name '*.svg' ! -name 'pipedrive.svg' -print0 | xargs -0 -P16 -L20 npx svgo",
"generate:third-party-licenses": "node scripts/generate-third-party-licenses.mjs",
"setup-backend-module": "node scripts/ensure-zx.mjs && zx scripts/backend-module/setup.mjs",
@ -50,8 +51,8 @@
"watch": "turbo run watch --concurrency=64",
"webhook": "./packages/cli/bin/n8n webhook",
"worker": "./packages/cli/bin/n8n worker",
"dev:fs-proxy": "pnpm --filter @n8n/fs-proxy build && node packages/@n8n/fs-proxy/dist/cli.js serve",
"stop:fs-proxy": "lsof -ti :7655 | xargs kill 2>/dev/null; echo 'fs-proxy stopped'"
"dev:computer-use": "pnpm --filter @n8n/computer-use build && node packages/@n8n/computer-use/dist/cli.js serve",
"stop:computer-use": "lsof -ti :7655 | xargs kill 2>/dev/null; echo 'computer-use stopped'"
},
"devDependencies": {
"@babel/preset-env": "^7.26.0",
@ -81,7 +82,7 @@
"ts-jest": "^29.1.1",
"tsc-alias": "^1.8.10",
"tsc-watch": "^6.2.0",
"turbo": "2.8.9",
"turbo": "2.9.4",
"typescript": "*",
"zx": "^8.8.5"
},
@ -102,7 +103,7 @@
"@mistralai/mistralai": "^1.10.0",
"@n8n/typeorm>@sentry/node": "catalog:sentry",
"@types/node": "^20.17.50",
"axios": "1.13.5",
"axios": "1.15.0",
"chokidar": "4.0.3",
"esbuild": "^0.25.0",
"expr-eval@2.0.2": "npm:expr-eval-fork@3.0.0",
@ -148,7 +149,9 @@
"bn.js@4": "5.2.3",
"bn.js@5": "5.2.3",
"langsmith": ">=0.4.6",
"lodash-es": "4.17.23",
"lodash": "4.18.1",
"lodash-es": "4.18.1",
"@xmldom/xmldom": "0.8.12",
"word-wrap@<=1.2.4": "1.2.4",
"minimatch@<=5.1.8": "5.1.8",
"minimatch@10": "10.2.3",
@ -162,7 +165,8 @@
"path-to-regexp@<0.1.13": "0.1.13",
"picomatch@2": "2.3.2",
"picomatch@4": "4.0.4",
"brace-expansion@5": "5.0.5"
"brace-expansion@5": "5.0.5",
"avsc": "5.7.9"
},
"patchedDependencies": {
"bull@4.16.4": "patches/bull@4.16.4.patch",
@ -173,7 +177,6 @@
"@types/uuencode@0.0.3": "patches/@types__uuencode@0.0.3.patch",
"vue-tsc@2.2.8": "patches/vue-tsc@2.2.8.patch",
"element-plus@2.4.3": "patches/element-plus@2.4.3.patch",
"js-base64": "patches/js-base64.patch",
"ics": "patches/ics.patch",
"minifaker": "patches/minifaker.patch",
"z-vue-scan": "patches/z-vue-scan.patch",

View file

@ -39,8 +39,7 @@ src/
telemetry.ts # Telemetry builder (OTel, redaction)
tool.ts # Tool builder
verify.ts # Verification utilities
evals/ # Built-in eval scorers; exported as namespace `evals` from index
runtime/ # Internal — never exported from index.ts
runtime/ # Internal — never exported
agent-runtime.ts # Core agent execution engine (AI SDK)
tool-adapter.ts # Tool execution, branded suspend detection
stream.ts # Streaming helpers
@ -112,7 +111,7 @@ class EngineAgent extends Agent {
## Testing
- Unit tests live in `src/__tests__/`, integration tests in `src/__tests__/integration/`
- Unit tests use Jest (`pnpm test` / `pnpm test:unit`)
- Unit tests use Jest (`pnpm test`)
- Integration tests use Vitest (`pnpm test:integration`) with real LLM calls
- A `.env` file at the package root is loaded automatically by the vitest config.
Always assume it exists when running integration tests. Never commit it.

View file

@ -253,6 +253,13 @@ correct `turnDelta()` after suspend/resume.
`stripOrphanedToolMessages` runs on loaded history and inside `forLlm()` so
incomplete tool pairs do not reach the model.
**Ordering note:** The in-memory list is append-only; LLM context follows array
order. Persisted threads, however, are loaded with **`ORDER BY createdAt`** (and
a `seq` tiebreaker in SQL backends). Every message therefore needs a
**unique, monotonically increasing `createdAt`** while it flows through
`AgentMessageList` so reloads and `before`-filtered fetches match the turn's
true sequence. See [Monotonic `createdAt`](#monotonic-createdat-for-persisted-order).
---
## Agentic loop
@ -360,10 +367,11 @@ At end of turn, `saveToMemory()` uses `list.turnDelta()` and
`saveMessagesToThread`. If **semantic recall** is configured with an embedder
and `memory.saveEmbeddings`, new messages are embedded and stored.
**Working memory:** when configured, the runtime parses `<working_memory>`
`</working_memory>` regions from assistant text, validates structured JSON if a
schema exists, strips the tags from the visible message, and asynchronously
persists via `memory.saveWorkingMemory`.
**Working memory:** when configured, the runtime injects an `updateWorkingMemory`
tool into the agent's tool set. The current state is included in the system prompt
so the model can read it; when new information should be persisted the model calls
the tool, which validates the input and asynchronously persists via
`memory.saveWorkingMemory`.
**Thread titles:** `titleGeneration` triggers `generateThreadTitle` (fire-and-forget)
after a successful save when persistence and memory are present.
@ -407,7 +415,7 @@ src/
tool-adapter.ts — buildToolMap, executeTool, toAiSdkTools, suspend / agent-result guards
stream.ts — convertChunk, toTokenUsage
runtime-helpers.ts — normalizeInput, usage merge, stream error helpers, …
working-memory.ts — instruction text, parse/filter for working_memory tags
working-memory.ts — instruction text, updateWorkingMemory tool builder
strip-orphaned-tool-messages.ts
title-generation.ts
logger.ts
@ -449,3 +457,26 @@ The bus is shared between `Agent` and `AgentRuntime` so `on()` registrations and
Signals cancel HTTP immediately in the AI SDK and compose with caller-provided
`abortSignal` via `resetAbort`.
### Monotonic `createdAt` for persisted order
**Problem.** Live messages often used `Date.now()` (or no timestamp). Several
messages added in the same millisecond (multi-part input, batched tool results,
fast loops) produced **identical `createdAt` values**. SQL stores mitigate ties
with a `seq` column, but ordering was still ambiguous for consumers that sort
only by time, and **in-memory `BuiltMemory`** (`InMemoryMemory`) keyed ordering
off the stored timestamp. Duplicate timestamps made **pagination windows** (`before`,
`limit`) and reload order **non-deterministic** relative to insertion order —
message history could appear to **shuffle** between turns or after resume.
**Approach.** `AgentMessageList` tracks `lastCreatedAt` and assigns each **live**
message (`input` / `response`) a `createdAt` of
`max(hint, lastCreatedAt + 1)`, where `hint` is any existing timestamp or
`Date.now()`. **`history` messages** keep the database timestamp exactly;
`lastCreatedAt` advances to `max` so new live rows stay strictly later (handles
clock skew and prior monotonic runs). **`deserialize()`** recomputes
`lastCreatedAt` from all restored rows so suspend/resume continues the sequence.
**Downstream.** `saveMessages` / Postgres / SQLite persist the message-owned
`createdAt`, and in-memory storage uses that same value for filtering so
`getMessages` stays aligned with `AgentMessageList`'s ordering guarantees.

View file

@ -1,6 +1,6 @@
{
"name": "@n8n/agents",
"version": "0.3.0",
"version": "0.5.0",
"description": "AI agent SDK for n8n's code-first execution engine",
"main": "dist/index.js",
"module": "dist/index.js",

View file

@ -0,0 +1,405 @@
import { z } from 'zod';
import { Agent } from '../sdk/agent';
import { McpClient } from '../sdk/mcp-client';
import { Telemetry } from '../sdk/telemetry';
import { Tool } from '../sdk/tool';
import type { BuiltEval, BuiltGuardrail, BuiltMemory, BuiltProviderTool } from '../types';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
function makeMockMemory(): BuiltMemory {
return {
getThread: jest.fn(),
saveThread: jest.fn(),
deleteThread: jest.fn(),
getMessages: jest.fn(),
saveMessages: jest.fn(),
deleteMessages: jest.fn(),
} as unknown as BuiltMemory;
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
// Verifies that Agent.describe() serializes every configurable aspect of an
// agent (model, credential, tools, provider tools, MCP, memory, guardrails,
// evals, telemetry, checkpoint, config flags) into an AgentSchema, and that
// anything left unconfigured surfaces as null / empty defaults.
describe('Agent.describe()', () => {
it('returns null/empty fields for an unconfigured agent', () => {
const agent = new Agent('test-agent');
const schema = agent.describe();
expect(schema.model).toEqual({ provider: null, name: null });
expect(schema.credential).toBeNull();
expect(schema.instructions).toBeNull();
expect(schema.description).toBeNull();
expect(schema.tools).toEqual([]);
expect(schema.providerTools).toEqual([]);
expect(schema.memory).toBeNull();
expect(schema.evaluations).toEqual([]);
expect(schema.guardrails).toEqual([]);
expect(schema.mcp).toBeNull();
expect(schema.telemetry).toBeNull();
expect(schema.checkpoint).toBeNull();
expect(schema.config.structuredOutput).toEqual({ enabled: false, schemaSource: null });
expect(schema.config.thinking).toBeNull();
expect(schema.config.toolCallConcurrency).toBeNull();
expect(schema.config.requireToolApproval).toBe(false);
});
// --- Model parsing ---
it('parses two-arg model (provider, name)', () => {
const agent = new Agent('test-agent').model('anthropic', 'claude-sonnet-4-5');
const schema = agent.describe();
expect(schema.model).toEqual({ provider: 'anthropic', name: 'claude-sonnet-4-5' });
});
it('parses single-arg model with slash', () => {
const agent = new Agent('test-agent').model('anthropic/claude-sonnet-4-5');
const schema = agent.describe();
expect(schema.model).toEqual({ provider: 'anthropic', name: 'claude-sonnet-4-5' });
});
it('parses model without slash', () => {
const agent = new Agent('test-agent').model('gpt-4o');
const schema = agent.describe();
expect(schema.model).toEqual({ provider: null, name: 'gpt-4o' });
});
it('handles object model config', () => {
const agent = new Agent('test-agent').model({
id: 'anthropic/claude-sonnet-4-5',
apiKey: 'sk-test',
});
const schema = agent.describe();
// Object model configs are not parsed into provider/name; the schema
// carries a raw: 'object' marker instead.
expect(schema.model).toEqual({ provider: null, name: null, raw: 'object' });
});
// --- Credential ---
it('returns credential name', () => {
const agent = new Agent('test-agent').credential('my-anthropic-key');
const schema = agent.describe();
expect(schema.credential).toBe('my-anthropic-key');
});
// --- Instructions ---
it('returns instructions text', () => {
const agent = new Agent('test-agent').instructions('You are helpful.');
const schema = agent.describe();
expect(schema.instructions).toBe('You are helpful.');
});
// --- Custom tool ---
it('describes a custom tool with handler, input schema, and suspend/resume', () => {
const suspendSchema = z.object({ reason: z.string() });
const resumeSchema = z.object({ approved: z.boolean() });
const tool = new Tool('danger')
.description('A dangerous action')
.input(z.object({ target: z.string() }))
.output(z.object({ result: z.string() }))
.suspend(suspendSchema)
.resume(resumeSchema)
.handler(async ({ target }) => await Promise.resolve({ result: target }))
.build();
const agent = new Agent('test-agent').tool(tool);
const schema = agent.describe();
expect(schema.tools).toHaveLength(1);
const ts = schema.tools[0];
expect(ts.name).toBe('danger');
expect(ts.editable).toBe(true);
expect(ts.hasSuspend).toBe(true);
expect(ts.hasResume).toBe(true);
expect(ts.hasToMessage).toBe(false);
expect(ts.inputSchema).toBeTruthy();
expect(ts.outputSchema).toBeTruthy();
// handlerSource is a fallback (compiled JS), CLI overrides with real TypeScript
expect(ts.handlerSource).toContain('target');
// Source string fields are null — CLI patches with original TypeScript
expect(ts.inputSchemaSource).toBeNull();
expect(ts.outputSchemaSource).toBeNull();
expect(ts.suspendSchemaSource).toBeNull();
expect(ts.resumeSchemaSource).toBeNull();
expect(ts.toMessageSource).toBeNull();
expect(ts.requireApproval).toBe(false);
expect(ts.needsApprovalFnSource).toBeNull();
expect(ts.providerOptions).toBeNull();
});
// --- Provider tool ---
it('describes a provider tool in providerTools array', () => {
const providerTool: BuiltProviderTool = {
name: 'anthropic.web_search_20250305',
args: { maxResults: 5 },
};
const agent = new Agent('test-agent').providerTool(providerTool);
const schema = agent.describe();
// Provider tools are now in a separate array
expect(schema.tools).toHaveLength(0);
expect(schema.providerTools).toHaveLength(1);
expect(schema.providerTools[0].name).toBe('anthropic.web_search_20250305');
expect(schema.providerTools[0].source).toBe('');
});
// --- MCP servers ---
it('describes MCP servers in mcp field', () => {
const client = new McpClient([
{ name: 'browser', url: 'http://localhost:9222/mcp', transport: 'streamableHttp' },
{ name: 'fs', command: 'echo', args: ['test'] },
]);
const agent = new Agent('test-agent').mcp(client);
const schema = agent.describe();
// MCP servers are now in a separate mcp field
expect(schema.tools).toHaveLength(0);
expect(schema.mcp).toHaveLength(2);
expect(schema.mcp![0].name).toBe('browser');
expect(schema.mcp![0].configSource).toBe('');
expect(schema.mcp![1].name).toBe('fs');
expect(schema.mcp![1].configSource).toBe('');
});
it('returns null mcp when no clients are configured', () => {
const agent = new Agent('test-agent');
const schema = agent.describe();
expect(schema.mcp).toBeNull();
});
// --- Guardrails ---
// Input and output guardrails are merged into one array; the `position`
// field records which side each one was registered on.
it('describes input and output guardrails', () => {
const inputGuard: BuiltGuardrail = {
name: 'pii-filter',
guardType: 'pii',
strategy: 'redact',
_config: { types: ['email', 'phone'] },
};
const outputGuard: BuiltGuardrail = {
name: 'moderation-check',
guardType: 'moderation',
strategy: 'block',
_config: {},
};
const agent = new Agent('test-agent').inputGuardrail(inputGuard).outputGuardrail(outputGuard);
const schema = agent.describe();
expect(schema.guardrails).toHaveLength(2);
expect(schema.guardrails[0]).toEqual({
name: 'pii-filter',
guardType: 'pii',
strategy: 'redact',
position: 'input',
config: { types: ['email', 'phone'] },
source: '',
});
expect(schema.guardrails[1]).toEqual({
name: 'moderation-check',
guardType: 'moderation',
strategy: 'block',
position: 'output',
config: {},
source: '',
});
});
// --- Telemetry ---
it('returns telemetry schema when telemetry builder is set', () => {
const agent = new Agent('test-agent').telemetry(new Telemetry());
const schema = agent.describe();
expect(schema.telemetry).toEqual({ source: '' });
});
it('returns null telemetry when not configured', () => {
const agent = new Agent('test-agent');
const schema = agent.describe();
expect(schema.telemetry).toBeNull();
});
// --- Checkpoint ---
it('returns memory checkpoint when checkpoint is memory', () => {
const agent = new Agent('test-agent').checkpoint('memory');
const schema = agent.describe();
expect(schema.checkpoint).toBe('memory');
});
it('returns null checkpoint when not configured', () => {
const agent = new Agent('test-agent');
const schema = agent.describe();
expect(schema.checkpoint).toBeNull();
});
// --- Memory ---
it('describes memory configuration', () => {
const agent = new Agent('test-agent').memory({
memory: makeMockMemory(),
lastMessages: 20,
semanticRecall: {
topK: 5,
messageRange: { before: 2, after: 2 },
embedder: 'openai/text-embedding-3-small',
},
workingMemory: {
template: 'Current state: {{state}}',
structured: false,
scope: 'resource' as const,
},
});
const schema = agent.describe();
expect(schema.memory).toBeTruthy();
expect(schema.memory!.source).toBeNull();
expect(schema.memory!.lastMessages).toBe(20);
expect(schema.memory!.semanticRecall).toEqual({
topK: 5,
messageRange: { before: 2, after: 2 },
embedder: 'openai/text-embedding-3-small',
});
// structured: false maps to working-memory type 'freeform'
expect(schema.memory!.workingMemory).toEqual({
type: 'freeform',
template: 'Current state: {{state}}',
});
});
it('describes structured working memory', () => {
const agent = new Agent('test-agent').memory({
memory: makeMockMemory(),
lastMessages: 10,
workingMemory: {
template: '',
structured: true,
schema: z.object({ notes: z.string() }),
scope: 'resource' as const,
},
});
const schema = agent.describe();
expect(schema.memory!.workingMemory!.type).toBe('structured');
expect(schema.memory!.workingMemory!.schema).toBeTruthy();
});
// --- Evaluations ---
it('describes evaluations with evalType, modelId, and handlerSource', () => {
const checkEval: BuiltEval = {
name: 'has-greeting',
description: 'Checks for greeting',
evalType: 'check',
modelId: null,
credentialName: null,
_run: jest.fn(),
};
const judgeEval: BuiltEval = {
name: 'quality-judge',
description: undefined,
evalType: 'judge',
modelId: 'anthropic/claude-haiku-4-5',
credentialName: 'anthropic-key',
_run: jest.fn(),
};
const agent = new Agent('test-agent').eval(checkEval).eval(judgeEval);
const schema = agent.describe();
expect(schema.evaluations).toHaveLength(2);
expect(schema.evaluations[0]).toEqual({
name: 'has-greeting',
description: 'Checks for greeting',
type: 'check',
modelId: null,
hasCredential: false,
credentialName: null,
handlerSource: null,
});
// undefined description is normalized to null in the schema
expect(schema.evaluations[1]).toEqual({
name: 'quality-judge',
description: null,
type: 'judge',
modelId: 'anthropic/claude-haiku-4-5',
hasCredential: true,
credentialName: 'anthropic-key',
handlerSource: null,
});
});
// --- Thinking config ---
it('describes anthropic thinking config', () => {
const agent = new Agent('test-agent')
.model('anthropic', 'claude-sonnet-4-5')
.thinking('anthropic', { budgetTokens: 10000 });
const schema = agent.describe();
expect(schema.config.thinking).toEqual({
provider: 'anthropic',
budgetTokens: 10000,
});
});
it('describes openai thinking config', () => {
const agent = new Agent('test-agent')
.model('openai', 'o3-mini')
.thinking('openai', { reasoningEffort: 'high' });
const schema = agent.describe();
expect(schema.config.thinking).toEqual({
provider: 'openai',
reasoningEffort: 'high',
});
});
// --- requireToolApproval ---
it('reflects requireToolApproval flag', () => {
const agent = new Agent('test-agent').requireToolApproval();
const schema = agent.describe();
expect(schema.config.requireToolApproval).toBe(true);
});
// --- toolCallConcurrency ---
it('reflects toolCallConcurrency', () => {
const agent = new Agent('test-agent').toolCallConcurrency(5);
const schema = agent.describe();
expect(schema.config.toolCallConcurrency).toBe(5);
});
// --- Structured output ---
it('describes structured output with schemaSource null', () => {
const outputSchema = z.object({ code: z.string(), explanation: z.string() });
const agent = new Agent('test-agent').structuredOutput(outputSchema);
const schema = agent.describe();
expect(schema.config.structuredOutput.enabled).toBe(true);
expect(schema.config.structuredOutput.schemaSource).toBeNull();
});
});

View file

@ -0,0 +1,606 @@
import { z } from 'zod';
import { Agent } from '../sdk/agent';
import { isSuspendResult } from '../sdk/from-schema';
import type { HandlerExecutor } from '../types/sdk/handler-executor';
import type { AgentSchema, ToolSchema } from '../types/sdk/schema';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Creates a HandlerExecutor whose every delegate is a jest mock resolving to
 * a canned value, so tests can spy on how Agent.fromSchema wires execution.
 */
function mockExecutor(): HandlerExecutor {
	const executeTool = jest.fn().mockResolvedValue({ result: 'mocked' });
	const executeToMessage = jest.fn().mockResolvedValue(undefined);
	const executeEval = jest.fn().mockResolvedValue({ score: 1 });
	const evaluateSchema = jest.fn().mockResolvedValue(undefined);
	const evaluateExpression = jest.fn().mockResolvedValue(undefined);
	return { executeTool, executeToMessage, executeEval, evaluateSchema, evaluateExpression };
}
/**
 * Produces a fully-populated AgentSchema with minimal defaults; individual
 * fields can be overridden per test via the (shallow-merged) overrides arg.
 */
function minimalSchema(overrides: Partial<AgentSchema> = {}): AgentSchema {
	const defaults: AgentSchema = {
		model: { provider: 'anthropic', name: 'claude-sonnet-4-5' },
		credential: 'my-credential',
		instructions: 'You are helpful.',
		description: null,
		tools: [],
		providerTools: [],
		memory: null,
		evaluations: [],
		guardrails: [],
		mcp: null,
		telemetry: null,
		checkpoint: null,
		config: {
			structuredOutput: { enabled: false, schemaSource: null },
			thinking: null,
			toolCallConcurrency: null,
			requireToolApproval: false,
		},
	};
	// Shallow merge, same semantics as an object spread of the overrides.
	return Object.assign(defaults, overrides);
}
/**
 * Produces a ToolSchema describing a plain editable custom tool (no suspend,
 * resume, or toMessage); tests override fields via the shallow-merged arg.
 */
function makeToolSchema(overrides: Partial<ToolSchema> = {}): ToolSchema {
	const base: ToolSchema = {
		name: 'test-tool',
		description: 'A test tool',
		type: 'custom',
		editable: true,
		inputSchemaSource: null,
		outputSchemaSource: null,
		handlerSource: null,
		suspendSchemaSource: null,
		resumeSchemaSource: null,
		toMessageSource: null,
		requireApproval: false,
		needsApprovalFnSource: null,
		providerOptions: null,
		inputSchema: { type: 'object', properties: { query: { type: 'string' } } },
		outputSchema: null,
		hasSuspend: false,
		hasResume: false,
		hasToMessage: false,
	};
	// Shallow merge, same semantics as an object spread of the overrides.
	return Object.assign(base, overrides);
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
describe('Agent.fromSchema()', () => {
it('reconstructs basic agent config', async () => {
const schema = minimalSchema();
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.model).toEqual({ provider: 'anthropic', name: 'claude-sonnet-4-5' });
expect(described.credential).toBe('my-credential');
expect(described.instructions).toBe('You are helpful.');
});
it('reconstructs model with only name (no provider)', async () => {
const schema = minimalSchema({
model: { provider: null, name: 'gpt-4o' },
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.model).toEqual({ provider: null, name: 'gpt-4o' });
});
it('reconstructs thinking config with correct provider arg', async () => {
const schema = minimalSchema({
config: {
structuredOutput: { enabled: false, schemaSource: null },
thinking: { provider: 'anthropic', budgetTokens: 10000 },
toolCallConcurrency: null,
requireToolApproval: false,
},
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.config.thinking).toEqual({
provider: 'anthropic',
budgetTokens: 10000,
});
});
it('reconstructs openai thinking config', async () => {
const schema = minimalSchema({
model: { provider: 'openai', name: 'o3-mini' },
config: {
structuredOutput: { enabled: false, schemaSource: null },
thinking: { provider: 'openai', reasoningEffort: 'high' },
toolCallConcurrency: null,
requireToolApproval: false,
},
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.config.thinking).toEqual({
provider: 'openai',
reasoningEffort: 'high',
});
});
it('creates proxy handlers for custom tools', async () => {
const toolSchema = makeToolSchema({
name: 'search',
description: 'Search the web',
});
const schema = minimalSchema({ tools: [toolSchema] });
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.tools).toHaveLength(1);
expect(described.tools[0].name).toBe('search');
expect(described.tools[0].description).toBe('Search the web');
expect(described.tools[0].editable).toBe(true);
});
it('adds WorkflowTool markers for non-editable tools', async () => {
const toolSchema = makeToolSchema({ name: 'Send Email', type: 'workflow', editable: false });
const schema = minimalSchema({ tools: [toolSchema] });
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
// Non-editable tools become WorkflowTool markers in declaredTools
const markers = agent.declaredTools.filter(
(t) => '__workflowTool' in t && (t as Record<string, unknown>).__workflowTool === true,
);
expect(markers).toHaveLength(1);
expect(markers[0].name).toBe('Send Email');
});
it('reconstructs memory from schema fields', async () => {
const schema = minimalSchema({
memory: {
source: null,
storage: 'memory',
lastMessages: 20,
semanticRecall: null,
workingMemory: null,
},
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.memory).toBeTruthy();
expect(described.memory!.lastMessages).toBe(20);
expect(described.memory!.storage).toBe('memory');
});
it('sets toolCallConcurrency when specified', async () => {
const schema = minimalSchema({
config: {
structuredOutput: { enabled: false, schemaSource: null },
thinking: null,
toolCallConcurrency: 5,
requireToolApproval: false,
},
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.config.toolCallConcurrency).toBe(5);
});
it('sets requireToolApproval when true', async () => {
const schema = minimalSchema({
config: {
structuredOutput: { enabled: false, schemaSource: null },
thinking: null,
toolCallConcurrency: null,
requireToolApproval: true,
},
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.config.requireToolApproval).toBe(true);
});
it('sets checkpoint when specified', async () => {
const schema = minimalSchema({ checkpoint: 'memory' });
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.checkpoint).toBe('memory');
});
it('delegates tool execution to handlerExecutor', async () => {
const executor = mockExecutor();
const toolSchema = makeToolSchema({ name: 'my-tool' });
const schema = minimalSchema({ tools: [toolSchema] });
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
// Access the built tool's handler via declaredTools
const tools = agent.declaredTools;
expect(tools).toHaveLength(1);
const result = await tools[0].handler!({ query: 'test' }, { parentTelemetry: undefined });
expect(executor.executeTool).toHaveBeenCalledWith(
'my-tool',
{ query: 'test' },
{ parentTelemetry: undefined },
);
expect(result).toEqual({ result: 'mocked' });
});
it('reconstructs guardrails with correct position', async () => {
const schema = minimalSchema({
guardrails: [
{
name: 'pii-guard',
guardType: 'pii',
strategy: 'redact',
position: 'input',
config: { detectionTypes: ['email', 'phone'] },
source: '',
},
{
name: 'mod-guard',
guardType: 'moderation',
strategy: 'block',
position: 'output',
config: {},
source: '',
},
],
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.guardrails).toHaveLength(2);
expect(described.guardrails[0].name).toBe('pii-guard');
expect(described.guardrails[0].position).toBe('input');
expect(described.guardrails[0].guardType).toBe('pii');
expect(described.guardrails[1].name).toBe('mod-guard');
expect(described.guardrails[1].position).toBe('output');
});
it('reconstructs evals with proxy _run', async () => {
const executor = mockExecutor();
const schema = minimalSchema({
evaluations: [
{
name: 'accuracy',
description: 'Check accuracy',
type: 'check',
modelId: null,
credentialName: null,
hasCredential: false,
handlerSource: null,
},
{
name: 'quality',
description: 'Judge quality',
type: 'judge',
modelId: 'anthropic/claude-sonnet-4-5',
credentialName: 'anthropic',
hasCredential: true,
handlerSource: null,
},
],
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
const described = agent.describe();
expect(described.evaluations).toHaveLength(2);
expect(described.evaluations[0].name).toBe('accuracy');
expect(described.evaluations[0].type).toBe('check');
expect(described.evaluations[1].name).toBe('quality');
expect(described.evaluations[1].type).toBe('judge');
});
it('reconstructs provider tools', async () => {
const schema = minimalSchema({
providerTools: [{ name: 'anthropic.web_search_20250305', source: '' }],
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
const described = agent.describe();
expect(described.providerTools).toHaveLength(1);
expect(described.providerTools[0].name).toBe('anthropic.web_search_20250305');
});
it('evaluates provider tool source via evaluateExpression', async () => {
const executor = mockExecutor();
(executor.evaluateExpression as jest.Mock).mockResolvedValue({
name: 'anthropic.web_search_20250305',
args: { maxUses: 5 },
});
const schema = minimalSchema({
providerTools: [
{
name: 'anthropic.web_search_20250305',
source: 'providerTools.anthropicWebSearch({ maxUses: 5 })',
},
],
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
const described = agent.describe();
expect(executor.evaluateExpression).toHaveBeenCalledWith(
'providerTools.anthropicWebSearch({ maxUses: 5 })',
);
expect(described.providerTools).toHaveLength(1);
expect(described.providerTools[0].name).toBe('anthropic.web_search_20250305');
});
it('evaluates structuredOutput schema via evaluateSchema', async () => {
const zodSchema = z.object({ answer: z.string() });
const executor = mockExecutor();
(executor.evaluateSchema as jest.Mock).mockResolvedValue(zodSchema);
const schema = minimalSchema({
config: {
structuredOutput: { enabled: true, schemaSource: 'z.object({ answer: z.string() })' },
thinking: null,
toolCallConcurrency: null,
requireToolApproval: false,
},
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
const described = agent.describe();
expect(executor.evaluateSchema).toHaveBeenCalledWith('z.object({ answer: z.string() })');
expect(described.config.structuredOutput.enabled).toBe(true);
});
it('handles suspend result detection via isSuspendResult', () => {
const suspendMarker = Symbol.for('n8n.agent.suspend');
const suspendResult = { [suspendMarker]: true, payload: { message: 'approve?' } };
const nonSuspend = { result: 42 };
expect(isSuspendResult(suspendResult)).toBe(true);
expect(isSuspendResult(nonSuspend)).toBe(false);
expect(isSuspendResult(null)).toBe(false);
expect(isSuspendResult(undefined)).toBe(false);
});
it('delegates interruptible tool execution with suspend detection', async () => {
const suspendMarker = Symbol.for('n8n.agent.suspend');
const executor = {
...mockExecutor(),
executeTool: jest.fn().mockResolvedValue({
[suspendMarker]: true,
payload: { message: 'Please approve' },
}),
};
const toolSchema = makeToolSchema({
name: 'suspend-tool',
hasSuspend: true,
});
const schema = minimalSchema({ tools: [toolSchema] });
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
const tools = agent.declaredTools;
expect(tools).toHaveLength(1);
// Call with an interruptible context
let suspendedPayload: unknown;
const ctx = {
parentTelemetry: undefined,
resumeData: undefined,
// eslint-disable-next-line @typescript-eslint/require-await
suspend: jest.fn().mockImplementation(async (payload: unknown) => {
suspendedPayload = payload;
return { suspended: true };
}),
};
await tools[0].handler!({ query: 'test' }, ctx);
expect(ctx.suspend).toHaveBeenCalledWith({ message: 'Please approve' });
expect(suspendedPayload).toEqual({ message: 'Please approve' });
});
it('reconstructs requireApproval on individual tools', async () => {
const toolSchema = makeToolSchema({
name: 'danger-tool',
requireApproval: true,
});
const schema = minimalSchema({
tools: [toolSchema],
checkpoint: 'memory',
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: mockExecutor(),
});
// The tool should be wrapped for approval, which adds suspendSchema
const tools = agent.declaredTools;
expect(tools).toHaveLength(1);
expect(tools[0].suspendSchema).toBeDefined();
});
it('reconstructs MCP servers by evaluating configSource', async () => {
const executor = mockExecutor();
(executor.evaluateExpression as jest.Mock).mockResolvedValue({
name: 'browser',
url: 'http://localhost:9222/mcp',
transport: 'streamableHttp',
});
const schema = minimalSchema({
mcp: [
{
name: 'browser',
configSource:
'({ name: "browser", url: "http://localhost:9222/mcp", transport: "streamableHttp" })',
},
],
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
expect(executor.evaluateExpression).toHaveBeenCalledWith(
'({ name: "browser", url: "http://localhost:9222/mcp", transport: "streamableHttp" })',
);
const described = agent.describe();
expect(described.mcp).toHaveLength(1);
expect(described.mcp![0].name).toBe('browser');
});
it('reconstructs multiple MCP servers', async () => {
const executor = mockExecutor();
(executor.evaluateExpression as jest.Mock)
.mockResolvedValueOnce({
name: 'browser',
url: 'http://localhost:9222/mcp',
transport: 'streamableHttp',
})
.mockResolvedValueOnce({
name: 'fs',
command: 'npx',
args: ['@anthropic/mcp-fs', '/tmp'],
});
const schema = minimalSchema({
mcp: [
{ name: 'browser', configSource: 'browserConfig' },
{ name: 'fs', configSource: 'fsConfig' },
],
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
const described = agent.describe();
expect(described.mcp).toHaveLength(2);
expect(described.mcp![0].name).toBe('browser');
expect(described.mcp![1].name).toBe('fs');
});
it('skips MCP servers with empty configSource', async () => {
const schema = minimalSchema({
mcp: [{ name: 'browser', configSource: '' }],
});
const executor = mockExecutor();
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
expect(executor.evaluateExpression).not.toHaveBeenCalled();
// No MCP configs evaluated means no client is added
const described = agent.describe();
expect(described.mcp).toBeNull();
});
it('reconstructs telemetry by evaluating source', async () => {
const executor = mockExecutor();
(executor.evaluateExpression as jest.Mock).mockResolvedValue({
enabled: true,
functionId: 'my-agent',
recordInputs: true,
recordOutputs: true,
integrations: [],
});
const schema = minimalSchema({
telemetry: { source: 'new Telemetry().functionId("my-agent").build()' },
});
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
expect(executor.evaluateExpression).toHaveBeenCalledWith(
'new Telemetry().functionId("my-agent").build()',
);
const described = agent.describe();
expect(described.telemetry).not.toBeNull();
});
it('does not set telemetry when schema has no telemetry', async () => {
const schema = minimalSchema({ telemetry: null });
const executor = mockExecutor();
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
const described = agent.describe();
expect(described.telemetry).toBeNull();
expect(executor.evaluateExpression).not.toHaveBeenCalled();
});
it('evaluates suspend/resume schemas via evaluateSchema', async () => {
const suspendSchema = z.object({ reason: z.string() });
const resumeSchema = z.object({ approved: z.boolean() });
const executor = mockExecutor();
(executor.evaluateSchema as jest.Mock)
.mockResolvedValueOnce(suspendSchema)
.mockResolvedValueOnce(resumeSchema);
const toolSchema = makeToolSchema({
name: 'interruptible-tool',
hasSuspend: true,
hasResume: true,
suspendSchemaSource: 'z.object({ reason: z.string() })',
resumeSchemaSource: 'z.object({ approved: z.boolean() })',
});
const schema = minimalSchema({ tools: [toolSchema] });
const agent = await Agent.fromSchema(schema, 'test-agent', {
handlerExecutor: executor,
});
const tools = agent.declaredTools;
expect(tools).toHaveLength(1);
expect(tools[0].suspendSchema).toBe(suspendSchema);
expect(tools[0].resumeSchema).toBe(resumeSchema);
});
});

View file

@ -1,4 +1,5 @@
import { InMemoryMemory } from '../runtime/memory-store';
import type { AgentDbMessage } from '../types/sdk/message';
describe('InMemoryMemory working memory', () => {
it('returns null for unknown key', async () => {
@ -55,3 +56,64 @@ describe('InMemoryMemory working memory', () => {
expect(await mem.getWorkingMemory({ threadId: 'thread-2' })).toBe('data for thread 2');
});
});
// ---------------------------------------------------------------------------
// Message persistence — createdAt correctness
// ---------------------------------------------------------------------------
function makeDbMsg(id: string, createdAt: Date, text: string): AgentDbMessage {
return { id, createdAt, role: 'user', content: [{ type: 'text', text }] };
}
describe('InMemoryMemory — message createdAt', () => {
it('before filter uses each message createdAt, not a shared batch timestamp', async () => {
const mem = new InMemoryMemory();
// Use dates clearly in the past so the batch wall-clock time (≈ now)
// never accidentally falls inside the range we're filtering.
const t1 = new Date('2020-01-01T00:00:01.000Z');
const t2 = new Date('2020-01-01T00:00:02.000Z');
const t3 = new Date('2020-01-01T00:00:03.000Z');
await mem.saveMessages({
threadId: 't1',
messages: [
makeDbMsg('m1', t1, 'first'),
makeDbMsg('m2', t2, 'second'),
makeDbMsg('m3', t3, 'third'),
],
});
// before: t3 should return only the two earlier messages
const result = await mem.getMessages('t1', { before: t3 });
// Pre-fix: saveMessages stores StoredMessage.createdAt = new Date() (wall clock,
// much later than t3), so the before filter excludes all messages → length 0.
// Post-fix: each StoredMessage.createdAt = dbMsg.createdAt, so t1 and t2 pass.
expect(result).toHaveLength(2);
expect(result[0].id).toBe('m1');
expect(result[1].id).toBe('m2');
});
it('getMessages returns createdAt from the stored record (consistent with before filter)', async () => {
const mem = new InMemoryMemory();
const t1 = new Date('2020-06-01T10:00:00.000Z');
const t2 = new Date('2020-06-01T10:00:01.000Z');
await mem.saveMessages({
threadId: 't1',
messages: [makeDbMsg('a', t1, 'alpha'), makeDbMsg('b', t2, 'beta')],
});
const loaded = await mem.getMessages('t1');
// Pre-fix: getMessages returns s.message whose createdAt is from toDbMessage
// (correct), but StoredMessage.createdAt is 'now' — the two are inconsistent.
// Post-fix: both use the same authoritative value, so this is always consistent.
expect(loaded[0].createdAt).toBeInstanceOf(Date);
expect(loaded[0].createdAt.getTime()).toBe(t1.getTime());
expect(loaded[1].createdAt).toBeInstanceOf(Date);
expect(loaded[1].createdAt.getTime()).toBe(t2.getTime());
});
});

View file

@ -7,7 +7,7 @@
*/
import { expect, it, beforeEach } from 'vitest';
import { Agent, Memory, toDbMessage, type AgentDbMessage, type AgentMessage } from '../../../index';
import { Agent, Memory, type AgentDbMessage } from '../../../index';
import type { BuiltMemory, Thread } from '../../../types/sdk/memory';
import { describeIf, findLastTextContent, getModel } from '../helpers';
@ -55,16 +55,16 @@ class CustomMapMemory implements BuiltMemory {
if (opts?.limit) {
msgs = msgs.slice(-opts.limit);
}
return msgs.map(toDbMessage);
return msgs;
}
async saveMessages(args: {
threadId: string;
resourceId?: string;
messages: AgentMessage[];
messages: AgentDbMessage[];
}): Promise<void> {
const existing = this.messages.get(args.threadId) ?? [];
this.messages.set(args.threadId, [...existing, ...args.messages.map(toDbMessage)]);
this.messages.set(args.threadId, [...existing, ...args.messages]);
}
async deleteMessages(messageIds: string[]): Promise<void> {
@ -224,7 +224,7 @@ describe('custom BuiltMemory backend', () => {
expect(findLastTextContent(result.messages)?.toLowerCase()).not.toContain('aurora');
// Thread 2 working memory should be independent
expect(store.workingMemory.get(thread2)).not.toContain('aurora');
expect(store.workingMemory.get(thread2)).toBeFalsy();
});
it('thread-scoped working memory allows recall within the same thread when history is truncated', async () => {

View file

@ -32,26 +32,24 @@ describe('freeform working memory', () => {
expect(findLastTextContent(result.messages)?.toLowerCase()).toContain('berlin');
});
it('working memory tags are stripped from visible response', async () => {
it('working memory is updated when new information is provided', async () => {
const memory = new Memory().storage('memory').lastMessages(10).freeform(template);
const agent = new Agent('strip-test')
const agent = new Agent('wm-update-test')
.model(getModel('anthropic'))
.instructions('You are a helpful assistant. Be concise.')
.memory(memory);
const threadId = `strip-${Date.now()}`;
const threadId = `wm-update-${Date.now()}`;
const options = { persistence: { threadId, resourceId: 'test-user' } };
const result = await agent.generate('My name is Bob.', options);
const allText = result.messages
.flatMap((m) => ('content' in m ? m.content : []))
.filter((c) => c.type === 'text')
.map((c) => (c as { text: string }).text)
.join(' ');
expect(allText).not.toContain('<working_memory>');
expect(allText).not.toContain('</working_memory>');
const toolCalls = result.messages.flatMap((m) =>
'content' in m ? m.content.filter((c) => c.type === 'tool-call') : [],
) as Array<{ type: 'tool-call'; toolName: string }>;
const wmToolCall = toolCalls.find((c) => c.toolName === 'updateWorkingMemory');
expect(wmToolCall).toBeDefined();
});
it('working memory persists across threads with same resourceId', async () => {

View file

@ -9,6 +9,7 @@ import { Pool } from 'pg';
import { GenericContainer, Wait, type StartedTestContainer } from 'testcontainers';
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
import type { AgentDbMessage } from '../../../index';
import { Agent, Memory, PostgresMemory } from '../../../index';
import { describeIf, findLastTextContent, getModel } from '../helpers';
@ -144,10 +145,25 @@ describeWithDocker('PostgresMemory unit tests', () => {
await mem.saveThread({ id: 't1', resourceId: 'u1' });
const messages = [
{ role: 'user' as const, content: [{ type: 'text' as const, text: 'Hello' }] },
{ role: 'assistant' as const, content: [{ type: 'text' as const, text: 'Hi there' }] },
{ role: 'user' as const, content: [{ type: 'text' as const, text: 'How are you?' }] },
const messages: AgentDbMessage[] = [
{
id: 'm1',
createdAt: new Date(),
role: 'user' as const,
content: [{ type: 'text' as const, text: 'Hello' }],
},
{
id: 'm2',
createdAt: new Date(),
role: 'assistant' as const,
content: [{ type: 'text' as const, text: 'Hi there' }],
},
{
id: 'm3',
createdAt: new Date(),
role: 'user' as const,
content: [{ type: 'text' as const, text: 'How are you?' }],
},
];
await mem.saveMessages({ threadId: 't1', messages });
@ -307,7 +323,14 @@ describeWithDocker('PostgresMemory unit tests', () => {
await mem.saveThread({ id: 'del-t1', resourceId: 'u1' });
await mem.saveMessages({
threadId: 'del-t1',
messages: [{ role: 'user' as const, content: [{ type: 'text' as const, text: 'test' }] }],
messages: [
{
id: 'm1',
createdAt: new Date(),
role: 'user' as const,
content: [{ type: 'text' as const, text: 'test' }],
},
],
});
await mem.deleteThread('del-t1');
@ -575,7 +598,7 @@ describeWithDockerAndApi('PostgresMemory agent integration', () => {
// Working memory should be stored keyed by threadId
const wmByThread = await store.getWorkingMemory({ threadId, resourceId, scope: 'thread' });
expect(wmByThread).toBeDefined();
expect(wmByThread).toBeTruthy();
expect(wmByThread!.toLowerCase()).toContain('helios');
// resourceId key should be empty — nothing stored there

View file

@ -3,7 +3,7 @@ import { z } from 'zod';
import { describeIf, getModel, createSqliteMemory } from './helpers';
import { Agent, Memory, Tool } from '../../index';
import type { AgentMessage } from '../../index';
import type { AgentDbMessage } from '../../index';
const describe = describeIf('anthropic');
@ -41,13 +41,17 @@ describe('orphaned tool messages in memory', () => {
* 6: tool tool-result(call_2)
* 7: assistant "There are 5 gadgets"
*/
function buildSeedMessages(): AgentMessage[] {
function buildSeedMessages(): AgentDbMessage[] {
return [
{
id: 'm1',
createdAt: new Date(),
role: 'user',
content: [{ type: 'text', text: 'How many widgets do we have?' }],
},
{
id: 'm2',
createdAt: new Date(),
role: 'assistant',
content: [
{ type: 'text', text: 'Let me look that up.' },
@ -55,20 +59,28 @@ describe('orphaned tool messages in memory', () => {
],
},
{
id: 'm3',
createdAt: new Date(),
role: 'tool',
content: [
{ type: 'tool-result', toolCallId: 'call_1', toolName: 'lookup', result: { count: 10 } },
],
},
{
id: 'm4',
createdAt: new Date(),
role: 'assistant',
content: [{ type: 'text', text: 'There are 10 widgets in stock.' }],
},
{
id: 'm5',
createdAt: new Date(),
role: 'user',
content: [{ type: 'text', text: 'What about gadgets?' }],
},
{
id: 'm6',
createdAt: new Date(),
role: 'assistant',
content: [
{ type: 'text', text: 'Let me check.' },
@ -76,12 +88,16 @@ describe('orphaned tool messages in memory', () => {
],
},
{
id: 'm7',
createdAt: new Date(),
role: 'tool',
content: [
{ type: 'tool-result', toolCallId: 'call_2', toolName: 'lookup', result: { count: 5 } },
],
},
{
id: 'm8',
createdAt: new Date(),
role: 'assistant',
content: [{ type: 'text', text: 'There are 5 gadgets in stock.' }],
},
@ -121,16 +137,21 @@ describe('orphaned tool messages in memory', () => {
cleanups.push(cleanup);
const threadId = 'thread-orphan-call';
const now = Date.now();
// Store a conversation where the last saved message is an assistant
// with a tool-call but the tool-result was never persisted (simulating
// a partial save / interrupted turn).
const messages: AgentMessage[] = [
const messages: AgentDbMessage[] = [
{
id: 'm1',
createdAt: new Date(now),
role: 'user',
content: [{ type: 'text', text: 'How many widgets?' }],
},
{
id: 'm2',
createdAt: new Date(now + 1),
role: 'assistant',
content: [
{ type: 'text', text: 'Checking inventory.' },

View file

@ -0,0 +1,176 @@
import { AgentMessageList } from '../runtime/message-list';
import { isLlmMessage } from '../sdk/message';
import type { AgentDbMessage, AgentMessage, Message } from '../types/sdk/message';
function makeUserMsg(text: string): AgentMessage {
return { role: 'user', content: [{ type: 'text', text }] };
}
function makeDbMsg(text: string, createdAt: Date): AgentDbMessage {
return {
id: crypto.randomUUID(),
createdAt,
role: 'user',
content: [{ type: 'text', text }],
};
}
// ---------------------------------------------------------------------------
// Monotonic timestamp assignment
// ---------------------------------------------------------------------------
describe('AgentMessageList — monotonic timestamps', () => {
it('assigns a Date createdAt to every message added via addInput', () => {
const list = new AgentMessageList();
list.addInput([makeUserMsg('hello')]);
const [msg] = list.turnDelta();
expect(msg.createdAt).toBeInstanceOf(Date);
});
it('assigns strictly increasing createdAt to a batch of input messages', () => {
const list = new AgentMessageList();
// Three messages added in the same synchronous call — all would share the
// same Date.now() tick without monotonic enforcement.
list.addInput([makeUserMsg('a'), makeUserMsg('b'), makeUserMsg('c')]);
const [a, b, c] = list.turnDelta();
expect(a.createdAt).toBeInstanceOf(Date);
expect(b.createdAt).toBeInstanceOf(Date);
expect(c.createdAt).toBeInstanceOf(Date);
expect(b.createdAt.getTime()).toBeGreaterThan(a.createdAt.getTime());
expect(c.createdAt.getTime()).toBeGreaterThan(b.createdAt.getTime());
});
it('assigns strictly increasing createdAt to response messages', () => {
const list = new AgentMessageList();
list.addResponse([makeUserMsg('r1'), makeUserMsg('r2'), makeUserMsg('r3')]);
const [r1, r2, r3] = list.responseDelta();
expect(r2.createdAt.getTime()).toBeGreaterThan(r1.createdAt.getTime());
expect(r3.createdAt.getTime()).toBeGreaterThan(r2.createdAt.getTime());
});
it('assigns createdAt that is strictly greater than history timestamps', () => {
const list = new AgentMessageList();
// Simulate a DB-loaded message with a timestamp in the future relative to
// wall clock — the new input message must still be later.
const futureTs = new Date(Date.now() + 60_000);
list.addHistory([makeDbMsg('old', futureTs)]);
list.addInput([makeUserMsg('new')]);
const [inputMsg] = list.turnDelta();
expect(inputMsg.createdAt).toBeInstanceOf(Date);
expect(inputMsg.createdAt.getTime()).toBeGreaterThan(futureTs.getTime());
});
});
// ---------------------------------------------------------------------------
// History messages keep their DB-sourced createdAt
// ---------------------------------------------------------------------------
describe('AgentMessageList — chronological order', () => {
it('reorders addHistory when the batch is not in createdAt order', () => {
const list = new AgentMessageList();
const t1 = new Date('2024-01-01T00:00:01.000Z');
const t2 = new Date('2024-01-01T00:00:02.000Z');
list.addHistory([makeDbMsg('second', t2), makeDbMsg('first', t1)]);
const msgs = list.serialize().messages.filter(isLlmMessage) as Message[];
expect(msgs).toHaveLength(2);
expect(msgs[0].content[0]).toMatchObject({ type: 'text', text: 'first' });
expect(msgs[1].content[0]).toMatchObject({ type: 'text', text: 'second' });
});
});
// ---------------------------------------------------------------------------
// History messages keep their DB-sourced createdAt
// ---------------------------------------------------------------------------
describe('AgentMessageList — preserving DB timestamps', () => {
it('preserves the exact createdAt of history messages loaded from the database', () => {
const list = new AgentMessageList();
const dbTimestamp = new Date('2020-01-01T00:00:00.000Z');
list.addHistory([makeDbMsg('from db', dbTimestamp)]);
const [hist] = list.serialize().messages;
expect(hist.createdAt).toBeInstanceOf(Date);
expect(hist.createdAt.getTime()).toBe(dbTimestamp.getTime());
});
});
// ---------------------------------------------------------------------------
// Input / response messages use existing createdAt as a hint
// ---------------------------------------------------------------------------
describe('AgentMessageList — hint-based monotonicity for input/response', () => {
it('keeps an input message createdAt when it is already later than lastCreatedAt', () => {
const list = new AgentMessageList();
const histTs = new Date('2024-01-01T00:00:01.000Z');
list.addHistory([makeDbMsg('hist', histTs)]);
// freshTs is well after histTs so no bump is needed
const freshTs = new Date('2024-01-01T00:00:10.000Z');
const msg = { ...makeUserMsg('new'), createdAt: freshTs };
list.addInput([msg]);
const [inputMsg] = list.turnDelta();
expect(inputMsg.createdAt.getTime()).toBe(freshTs.getTime());
});
it('bumps an input message createdAt when it collides with or precedes the last timestamp', () => {
const list = new AgentMessageList();
const histTs = new Date('2024-01-01T00:00:05.000Z');
list.addHistory([makeDbMsg('hist', histTs)]);
// staleTs is before histTs — must be bumped to at least histTs + 1 ms
const staleTs = new Date('2024-01-01T00:00:04.000Z');
const msg = { ...makeUserMsg('stale'), createdAt: staleTs };
list.addInput([msg]);
const [inputMsg] = list.turnDelta();
expect(inputMsg.createdAt.getTime()).toBeGreaterThan(histTs.getTime());
});
it('bumps an input message createdAt when it equals the last timestamp', () => {
const list = new AgentMessageList();
const ts = new Date('2024-06-01T12:00:00.000Z');
list.addHistory([makeDbMsg('hist', ts)]);
// sameTs equals histTs — must be bumped by at least 1 ms
const msg = { ...makeUserMsg('same'), createdAt: new Date(ts) };
list.addInput([msg]);
const [inputMsg] = list.turnDelta();
expect(inputMsg.createdAt.getTime()).toBeGreaterThan(ts.getTime());
});
});
// ---------------------------------------------------------------------------
// Deserialization restores lastCreatedAt
// ---------------------------------------------------------------------------
describe('AgentMessageList — deserialize', () => {
it('messages added after deserialize have createdAt later than any restored message', () => {
const list = new AgentMessageList();
// History message with a future timestamp (edge case: e.g. clock skew or
// the previous turn's monotonic assignment ran ahead of wall clock).
const futureTs = new Date(Date.now() + 60_000);
list.addHistory([makeDbMsg('prev', futureTs)]);
// Round-trip through serialization (simulates suspend / resume)
const list2 = AgentMessageList.deserialize(list.serialize());
list2.addInput([makeUserMsg('after resume')]);
const [newMsg] = list2.turnDelta();
expect(newMsg.createdAt).toBeInstanceOf(Date);
expect(newMsg.createdAt.getTime()).toBeGreaterThan(futureTs.getTime());
});
});

View file

@ -0,0 +1,94 @@
import { getCreatedAt } from '../sdk/message';
import type { AgentMessage } from '../types/sdk/message';
function userMessage(partial: Partial<AgentMessage> & { createdAt?: unknown }): AgentMessage {
return partial as AgentMessage;
}
describe('getCreatedAt', () => {
it('returns the Date when createdAt is a valid Date', () => {
const d = new Date('2020-06-15T12:00:00.000Z');
expect(
getCreatedAt(
userMessage({
role: 'user',
content: [{ type: 'text', text: 'hi' }],
createdAt: d,
}),
),
).toBe(d);
});
it('parses a valid ISO string', () => {
const iso = '2021-03-01T08:30:00.000Z';
const got = getCreatedAt(
userMessage({
role: 'user',
content: [{ type: 'text', text: 'hi' }],
createdAt: iso,
}),
);
expect(got).not.toBeNull();
expect(got!.getTime()).toBe(new Date(iso).getTime());
});
it('parses a valid epoch ms number', () => {
const ms = 1_700_000_000_000;
const got = getCreatedAt(
userMessage({
role: 'user',
content: [{ type: 'text', text: 'hi' }],
createdAt: ms,
}),
);
expect(got).not.toBeNull();
expect(got!.getTime()).toBe(ms);
});
it('returns null for a string that does not parse to a date (avoids NaN times)', () => {
expect(
getCreatedAt(
userMessage({
role: 'user',
content: [{ type: 'text', text: 'hi' }],
createdAt: 'not-a-valid-date',
}),
),
).toBeNull();
});
it('returns null for an empty date string (Invalid Date)', () => {
expect(
getCreatedAt(
userMessage({
role: 'user',
content: [{ type: 'text', text: 'hi' }],
createdAt: '',
}),
),
).toBeNull();
});
it('returns null when createdAt is NaN as a number (avoids NaN times)', () => {
expect(
getCreatedAt(
userMessage({
role: 'user',
content: [{ type: 'text', text: 'hi' }],
createdAt: Number.NaN,
}),
),
).toBeNull();
});
it('returns null when the message has no createdAt', () => {
expect(
getCreatedAt(
userMessage({
role: 'user',
content: [{ type: 'text', text: 'hi' }],
}),
),
).toBeNull();
});
});

View file

@ -2,27 +2,56 @@ import type { LanguageModel } from 'ai';
import { createModel } from '../runtime/model-factory';
type ProviderOpts = {
apiKey?: string;
baseURL?: string;
fetch?: typeof globalThis.fetch;
headers?: Record<string, string>;
};
jest.mock('@ai-sdk/anthropic', () => ({
createAnthropic: (opts?: { apiKey?: string; baseURL?: string }) => (model: string) => ({
createAnthropic: (opts?: ProviderOpts) => (model: string) => ({
provider: 'anthropic',
modelId: model,
apiKey: opts?.apiKey,
baseURL: opts?.baseURL,
fetch: opts?.fetch,
headers: opts?.headers,
specificationVersion: 'v3',
}),
}));
jest.mock('@ai-sdk/openai', () => ({
createOpenAI: (opts?: { apiKey?: string; baseURL?: string }) => (model: string) => ({
createOpenAI: (opts?: ProviderOpts) => (model: string) => ({
provider: 'openai',
modelId: model,
apiKey: opts?.apiKey,
baseURL: opts?.baseURL,
fetch: opts?.fetch,
headers: opts?.headers,
specificationVersion: 'v3',
}),
}));
const mockProxyAgent = jest.fn();
jest.mock('undici', () => ({
ProxyAgent: mockProxyAgent,
}));
describe('createModel', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = { ...originalEnv };
delete process.env.HTTPS_PROXY;
delete process.env.HTTP_PROXY;
mockProxyAgent.mockClear();
});
afterAll(() => {
process.env = originalEnv;
});
it('should accept a string config', () => {
const model = createModel('anthropic/claude-sonnet-4-5') as unknown as Record<string, unknown>;
expect(model.provider).toBe('anthropic');
@ -63,4 +92,42 @@ describe('createModel', () => {
expect(model.provider).toBe('openai');
expect(model.modelId).toBe('ft:gpt-4o:my-org:custom:abc123');
});
it('should not pass fetch when no proxy env vars are set', () => {
const model = createModel('anthropic/claude-sonnet-4-5') as unknown as Record<string, unknown>;
expect(model.fetch).toBeUndefined();
});
it('should pass proxy-aware fetch when HTTPS_PROXY is set', () => {
process.env.HTTPS_PROXY = 'http://proxy:8080';
const model = createModel('anthropic/claude-sonnet-4-5') as unknown as Record<string, unknown>;
expect(model.fetch).toBeInstanceOf(Function);
expect(mockProxyAgent).toHaveBeenCalledWith('http://proxy:8080');
});
it('should pass proxy-aware fetch when HTTP_PROXY is set', () => {
process.env.HTTP_PROXY = 'http://proxy:9090';
const model = createModel('openai/gpt-4o') as unknown as Record<string, unknown>;
expect(model.fetch).toBeInstanceOf(Function);
expect(mockProxyAgent).toHaveBeenCalledWith('http://proxy:9090');
});
it('should forward custom headers to the provider factory', () => {
const model = createModel({
id: 'anthropic/claude-sonnet-4-5',
apiKey: 'sk-test',
headers: { 'x-proxy-auth': 'Bearer abc', 'anthropic-beta': 'tools-2024' },
}) as unknown as Record<string, unknown>;
expect(model.headers).toEqual({
'x-proxy-auth': 'Bearer abc',
'anthropic-beta': 'tools-2024',
});
});
it('should prefer HTTPS_PROXY over HTTP_PROXY', () => {
process.env.HTTPS_PROXY = 'http://https-proxy:8080';
process.env.HTTP_PROXY = 'http://http-proxy:9090';
createModel('anthropic/claude-sonnet-4-5');
expect(mockProxyAgent).toHaveBeenCalledWith('http://https-proxy:8080');
});
});

View file

@ -3,7 +3,7 @@ import * as os from 'os';
import * as path from 'path';
import { SqliteMemory } from '../storage/sqlite-memory';
import type { AgentMessage, Message } from '../types/sdk/message';
import type { AgentDbMessage, AgentMessage, Message } from '../types/sdk/message';
// ---------------------------------------------------------------------------
// Helpers
@ -13,8 +13,13 @@ function makeTempDb(): string {
return path.join(os.tmpdir(), `test-${Date.now()}-${Math.random().toString(36).slice(2)}.db`);
}
function makeMsg(role: 'user' | 'assistant', text: string): Message {
return { role, content: [{ type: 'text', text }] };
function makeMsg(role: 'user' | 'assistant', text: string): AgentDbMessage {
return {
id: crypto.randomUUID(),
createdAt: new Date(),
role,
content: [{ type: 'text', text }],
};
}
function textOf(msg: AgentMessage): string {
@ -165,30 +170,10 @@ describe('SqliteMemory — messages', () => {
expect(textOf(msgsB[0])).toBe('thread-b');
});
it('assigns stable IDs — preserves existing, generates for missing', async () => {
const mem = makeMemory(dbPath);
const withId = { ...makeMsg('user', 'has-id'), id: 'custom-id-123' } as unknown as AgentMessage;
const withoutId = makeMsg('assistant', 'no-id');
await mem.saveMessages({ threadId: 't-1', messages: [withId, withoutId] });
const msgs = await mem.getMessages('t-1');
expect(msgs).toHaveLength(2);
// The message with a pre-existing id should keep it
const first = msgs[0] as unknown as { id: string };
expect(first.id).toBe('custom-id-123');
// The message without id should have gotten one assigned
const second = msgs[1] as unknown as { id: string };
expect(typeof second.id).toBe('string');
expect(second.id.length).toBeGreaterThan(0);
});
it('deletes specific messages', async () => {
const mem = makeMemory(dbPath);
const m1 = { ...makeMsg('user', 'keep'), id: 'keep-1' } as unknown as AgentMessage;
const m2 = { ...makeMsg('user', 'delete-me'), id: 'del-1' } as unknown as AgentMessage;
const m1 = { ...makeMsg('user', 'keep'), id: 'keep-1' };
const m2 = { ...makeMsg('user', 'delete-me'), id: 'del-1' };
await mem.saveMessages({ threadId: 't-1', messages: [m1, m2] });
await mem.deleteMessages(['del-1']);
@ -197,6 +182,56 @@ describe('SqliteMemory — messages', () => {
expect(msgs).toHaveLength(1);
expect((msgs[0] as unknown as { id: string }).id).toBe('keep-1');
});
it('createdAt round-trips: saved message createdAt is restored as a Date on load', async () => {
const mem = makeMemory(dbPath);
const fixedDate = new Date('2020-03-15T10:30:00.123Z');
const msg: AgentDbMessage = {
id: 'msg-round-trip',
createdAt: fixedDate,
role: 'user',
content: [{ type: 'text', text: 'hello' }],
};
await mem.saveMessages({ threadId: 't-1', messages: [msg] });
const [loaded] = await mem.getMessages('t-1');
// Pre-fix: saveMessages stores createdAt as new Date() (wall clock), not fixedDate.
// getMessages does not copy createdAt from the DB column back onto the message
// object, leaving it as a JSON string inside the content blob.
// So loaded.createdAt would be a string, failing the instanceof check.
// Post-fix: saveMessages uses msg.createdAt for the DB column, getMessages sets
// msg.createdAt = new Date(row.createdAt), restoring a proper Date.
expect(loaded.createdAt).toBeInstanceOf(Date);
expect(loaded.createdAt.getTime()).toBe(fixedDate.getTime());
});
it('before filter works correctly because saveMessages persists msg.createdAt to the DB column', async () => {
	const mem = makeMemory(dbPath);
	// Three messages one second apart so the `before` cutoff can split them.
	const t1 = new Date('2020-01-01T00:00:01.000Z');
	const t2 = new Date('2020-01-01T00:00:02.000Z');
	const t3 = new Date('2020-01-01T00:00:03.000Z');
	const msgs: AgentDbMessage[] = [
		{ id: 'm1', createdAt: t1, role: 'user', content: [{ type: 'text', text: 'first' }] },
		{ id: 'm2', createdAt: t2, role: 'assistant', content: [{ type: 'text', text: 'second' }] },
		{ id: 'm3', createdAt: t3, role: 'user', content: [{ type: 'text', text: 'third' }] },
	];
	await mem.saveMessages({ threadId: 't-1', messages: msgs });
	// before: t3 should return the first two messages only
	const result = await mem.getMessages('t-1', { before: t3 });
	// Pre-fix: saveMessages stores each row with createdAt = new Date() (wall clock,
	// much later than the 2020 dates), so the before: t3 filter returns nothing.
	// Post-fix: each row gets createdAt from msg.createdAt, so t1 and t2 pass the filter.
	expect(result).toHaveLength(2);
	expect((result[0] as unknown as { id: string }).id).toBe('m1');
	expect((result[1] as unknown as { id: string }).id).toBe('m2');
});
});
// ---------------------------------------------------------------------------

View file

@ -1,14 +1,9 @@
import { stripOrphanedToolMessages } from '../runtime/strip-orphaned-tool-messages';
import { isLlmMessage, toDbMessage } from '../sdk/message';
import type { AgentDbMessage, AgentMessage, Message } from '../types/sdk/message';
function seed(messages: AgentMessage[]): AgentDbMessage[] {
return messages.map(toDbMessage);
}
import type { AgentMessage, Message } from '../types/sdk/message';
describe('stripOrphanedToolMessages', () => {
it('returns messages unchanged when all tool pairs are complete', () => {
const messages = seed([
const messages: AgentMessage[] = [
{ role: 'user', content: [{ type: 'text', text: 'Hello' }] },
{
role: 'assistant',
@ -22,23 +17,23 @@ describe('stripOrphanedToolMessages', () => {
content: [{ type: 'tool-result', toolCallId: 'c1', toolName: 'lookup', result: 42 }],
},
{ role: 'assistant', content: [{ type: 'text', text: 'Done.' }] },
]);
];
const result = stripOrphanedToolMessages(messages);
expect(result).toBe(messages);
});
it('strips orphaned tool-result when matching tool-call is missing', () => {
const messages = seed([
const messages: AgentMessage[] = [
{
role: 'tool',
content: [{ type: 'tool-result', toolCallId: 'c1', toolName: 'lookup', result: 42 }],
},
{ role: 'assistant', content: [{ type: 'text', text: 'There are 42.' }] },
{ role: 'user', content: [{ type: 'text', text: 'Thanks' }] },
]);
];
const result = stripOrphanedToolMessages(messages).filter(isLlmMessage) as Message[];
const result = stripOrphanedToolMessages(messages) as Message[];
expect(result).toHaveLength(2);
expect(result[0].role).toBe('assistant');
@ -46,7 +41,7 @@ describe('stripOrphanedToolMessages', () => {
});
it('strips orphaned tool-call when matching tool-result is missing', () => {
const messages = seed([
const messages: AgentMessage[] = [
{ role: 'user', content: [{ type: 'text', text: 'Check it' }] },
{
role: 'assistant',
@ -55,9 +50,9 @@ describe('stripOrphanedToolMessages', () => {
{ type: 'tool-call', toolCallId: 'c1', toolName: 'lookup', input: {} },
],
},
]);
];
const result = stripOrphanedToolMessages(messages).filter(isLlmMessage) as Message[];
const result = stripOrphanedToolMessages(messages) as Message[];
expect(result).toHaveLength(2);
const assistantMsg = result[1];
@ -67,22 +62,22 @@ describe('stripOrphanedToolMessages', () => {
});
it('drops assistant message entirely if it only contained an orphaned tool-call', () => {
const messages = seed([
const messages: AgentMessage[] = [
{ role: 'user', content: [{ type: 'text', text: 'Do it' }] },
{
role: 'assistant',
content: [{ type: 'tool-call', toolCallId: 'c1', toolName: 'action', input: {} }],
},
]);
];
const result = stripOrphanedToolMessages(messages).filter(isLlmMessage) as Message[];
const result = stripOrphanedToolMessages(messages) as Message[];
expect(result).toHaveLength(1);
expect(result[0].role).toBe('user');
});
it('handles mixed scenario: one complete pair and one orphaned result', () => {
const messages = seed([
const messages: AgentMessage[] = [
{
role: 'tool',
content: [
@ -103,9 +98,9 @@ describe('stripOrphanedToolMessages', () => {
content: [{ type: 'tool-result', toolCallId: 'c2', toolName: 'lookup', result: 99 }],
},
{ role: 'assistant', content: [{ type: 'text', text: '99 items' }] },
]);
];
const result = stripOrphanedToolMessages(messages).filter(isLlmMessage) as Message[];
const result = stripOrphanedToolMessages(messages) as Message[];
expect(result).toHaveLength(5);
expect(result[0].role).toBe('assistant');
@ -122,21 +117,19 @@ describe('stripOrphanedToolMessages', () => {
});
it('preserves custom (non-LLM) messages', () => {
const customMsg: AgentDbMessage = {
const customMsg: AgentMessage = {
id: 'custom-1',
type: 'custom',
messageType: 'notification',
data: { info: 'hello' },
} as unknown as AgentDbMessage;
} as unknown as AgentMessage;
const messages: AgentDbMessage[] = [
const messages: AgentMessage[] = [
customMsg,
...seed([
{
role: 'tool',
content: [{ type: 'tool-result', toolCallId: 'orphan', toolName: 'x', result: null }],
},
]),
{
role: 'tool',
content: [{ type: 'tool-result', toolCallId: 'orphan', toolName: 'x', result: null }],
},
];
const result = stripOrphanedToolMessages(messages);
@ -146,10 +139,10 @@ describe('stripOrphanedToolMessages', () => {
});
it('returns same array reference when no orphans exist (no-op fast path)', () => {
const messages = seed([
const messages: AgentMessage[] = [
{ role: 'user', content: [{ type: 'text', text: 'Hi' }] },
{ role: 'assistant', content: [{ type: 'text', text: 'Hello!' }] },
]);
];
const result = stripOrphanedToolMessages(messages);
expect(result).toBe(messages);

View file

@ -0,0 +1,123 @@
import type * as AiImport from 'ai';
import type { LanguageModel } from 'ai';
import { generateTitleFromMessage } from '../runtime/title-generation';
// Shape of the argument captured from the mocked ai.generateText call.
type GenerateTextCall = {
	messages: Array<{ role: string; content: string }>;
};

// Records every generateText invocation so tests can inspect the prompt sent
// to the model and control the title text the "LLM" returns.
const mockGenerateText = jest.fn<Promise<{ text: string }>, [GenerateTextCall]>();

// Replace only generateText on the 'ai' module; everything else stays real.
// The factory delegates to mockGenerateText so each test can stub behavior.
jest.mock('ai', () => {
	const actual = jest.requireActual<typeof AiImport>('ai');
	return {
		...actual,
		generateText: async (call: GenerateTextCall): Promise<{ text: string }> =>
			await mockGenerateText(call),
	};
});

// The model object is never used by the mocked generateText, so an empty
// stub cast to LanguageModel is sufficient.
const fakeModel = {} as LanguageModel;

describe('generateTitleFromMessage', () => {
	beforeEach(() => {
		mockGenerateText.mockReset();
	});

	// --- Trivial inputs must short-circuit without an LLM call -------------

	it('returns null for empty input without calling the LLM', async () => {
		const result = await generateTitleFromMessage(fakeModel, ' ');
		expect(result).toBeNull();
		expect(mockGenerateText).not.toHaveBeenCalled();
	});

	it('returns null for trivial greetings without calling the LLM', async () => {
		const result = await generateTitleFromMessage(fakeModel, 'hey');
		expect(result).toBeNull();
		expect(mockGenerateText).not.toHaveBeenCalled();
	});

	it('returns null for short multi-word messages without calling the LLM', async () => {
		const result = await generateTitleFromMessage(fakeModel, 'hi there');
		expect(result).toBeNull();
		expect(mockGenerateText).not.toHaveBeenCalled();
	});

	// --- Sanitization of the raw LLM response ------------------------------

	it('strips markdown heading prefixes from the LLM response', async () => {
		mockGenerateText.mockResolvedValue({ text: '# Daily Berlin rain alert' });
		const result = await generateTitleFromMessage(
			fakeModel,
			'Build a daily Berlin rain alert workflow',
		);
		expect(result).toBe('Daily Berlin rain alert');
	});

	it('strips inline emphasis markers from the LLM response', async () => {
		mockGenerateText.mockResolvedValue({ text: 'Your **Berlin** rain alert' });
		const result = await generateTitleFromMessage(
			fakeModel,
			'Build a daily Berlin rain alert workflow',
		);
		expect(result).toBe('Your Berlin rain alert');
	});

	it('strips <think> reasoning blocks from the LLM response', async () => {
		mockGenerateText.mockResolvedValue({
			text: '<think>Let me think about this</think>Deploy release pipeline',
		});
		const result = await generateTitleFromMessage(
			fakeModel,
			'Help me set up an automated deploy pipeline',
		);
		expect(result).toBe('Deploy release pipeline');
	});

	it('strips surrounding quotes from the LLM response', async () => {
		mockGenerateText.mockResolvedValue({ text: '"Build Gmail to Slack workflow"' });
		const result = await generateTitleFromMessage(
			fakeModel,
			'Build a workflow that forwards Gmail to Slack',
		);
		expect(result).toBe('Build Gmail to Slack workflow');
	});

	it('truncates titles longer than 80 characters at a word boundary', async () => {
		mockGenerateText.mockResolvedValue({
			text: 'Create a data table for users, then build a workflow that syncs them to our CRM every hour',
		});
		const result = await generateTitleFromMessage(
			fakeModel,
			'Create a data table for users and sync them to our CRM every hour with error alerting',
		);
		expect(result).not.toBeNull();
		// 80 characters of title plus the appended ellipsis character.
		expect(result!.length).toBeLessThanOrEqual(81);
		expect(result!.endsWith('\u2026')).toBe(true);
	});

	it('returns null when the LLM returns empty text', async () => {
		mockGenerateText.mockResolvedValue({ text: ' ' });
		const result = await generateTitleFromMessage(
			fakeModel,
			'Build a daily Berlin rain alert workflow',
		);
		expect(result).toBeNull();
	});

	// --- Prompt construction ----------------------------------------------

	it('passes the default instructions to the LLM', async () => {
		mockGenerateText.mockResolvedValue({ text: 'Berlin rain alert' });
		await generateTitleFromMessage(fakeModel, 'Build a daily Berlin rain alert workflow');
		const call = mockGenerateText.mock.calls[0][0];
		expect(call.messages[0].role).toBe('system');
		expect(call.messages[0].content).toContain('markdown');
		expect(call.messages[0].content).toContain('sentence case');
	});

	it('accepts custom instructions', async () => {
		mockGenerateText.mockResolvedValue({ text: 'Custom title' });
		await generateTitleFromMessage(fakeModel, 'Build a daily Berlin rain alert workflow', {
			instructions: 'Custom system prompt',
		});
		const call = mockGenerateText.mock.calls[0][0];
		expect(call.messages[0].content).toBe('Custom system prompt');
	});
});

View file

@ -1,62 +1,74 @@
import { z } from 'zod';
import {
parseWorkingMemory,
buildWorkingMemoryInstruction,
buildWorkingMemoryTool,
templateFromSchema,
WorkingMemoryStreamFilter,
UPDATE_WORKING_MEMORY_TOOL_NAME,
WORKING_MEMORY_DEFAULT_INSTRUCTION,
} from '../runtime/working-memory';
import type { StreamChunk } from '../types';
describe('parseWorkingMemory', () => {
it('extracts content between tags at end of text', () => {
const text = 'Hello world.\n<working_memory>\n# Name: Alice\n</working_memory>';
const result = parseWorkingMemory(text);
expect(result.cleanText).toBe('Hello world.');
expect(result.workingMemory).toBe('# Name: Alice');
});
it('extracts content between tags in middle of text', () => {
const text = 'Before.\n<working_memory>\ndata\n</working_memory>\nAfter.';
const result = parseWorkingMemory(text);
expect(result.cleanText).toBe('Before.\nAfter.');
expect(result.workingMemory).toBe('data');
});
it('returns null when no tags present', () => {
const text = 'Just a normal response.';
const result = parseWorkingMemory(text);
expect(result.cleanText).toBe('Just a normal response.');
expect(result.workingMemory).toBeNull();
});
it('handles empty working memory', () => {
const text = 'Response.\n<working_memory>\n</working_memory>';
const result = parseWorkingMemory(text);
expect(result.cleanText).toBe('Response.');
expect(result.workingMemory).toBe('');
});
it('handles multiline content with markdown', () => {
const wm = '# User Context\n- **Name**: Alice\n- **City**: Berlin';
const text = `Response text.\n<working_memory>\n${wm}\n</working_memory>`;
const result = parseWorkingMemory(text);
expect(result.workingMemory).toBe(wm);
});
});
describe('buildWorkingMemoryInstruction', () => {
it('generates freeform instruction', () => {
it('mentions the updateWorkingMemory tool name', () => {
const result = buildWorkingMemoryInstruction('# Context\n- Name:', false);
expect(result).toContain('<working_memory>');
expect(result).toContain('</working_memory>');
expect(result).toContain('# Context\n- Name:');
expect(result).toContain(UPDATE_WORKING_MEMORY_TOOL_NAME);
});
it('generates structured instruction mentioning JSON', () => {
const result = buildWorkingMemoryInstruction('{"userName": ""}', true);
it('instructs the model to call the tool only when something changed', () => {
const result = buildWorkingMemoryInstruction('# Context\n- Name:', false);
expect(result).toContain('Only call it when something has actually changed');
});
it('includes the template in the instruction', () => {
const template = '# Context\n- Name:\n- City:';
const result = buildWorkingMemoryInstruction(template, false);
expect(result).toContain(template);
});
it('mentions JSON for structured variant', () => {
const result = buildWorkingMemoryInstruction('{"name": ""}', true);
expect(result).toContain('JSON');
expect(result).toContain('<working_memory>');
});
describe('custom instruction', () => {
it('replaces the default instruction body when provided', () => {
const custom = 'Always update working memory after every message.';
const result = buildWorkingMemoryInstruction('# Template', false, custom);
expect(result).toContain(custom);
expect(result).not.toContain(WORKING_MEMORY_DEFAULT_INSTRUCTION);
});
it('still includes the ## Working Memory heading', () => {
const result = buildWorkingMemoryInstruction('# Template', false, 'Custom text.');
expect(result).toContain('## Working Memory');
});
it('still includes the template block', () => {
const template = '# Context\n- Name:\n- City:';
const result = buildWorkingMemoryInstruction(template, false, 'Custom text.');
expect(result).toContain(template);
});
it('still includes the format hint for structured memory', () => {
const result = buildWorkingMemoryInstruction('{}', true, 'Custom text.');
expect(result).toContain('JSON');
});
it('still includes the format hint for freeform memory', () => {
const result = buildWorkingMemoryInstruction('# Template', false, 'Custom text.');
expect(result).toContain('Update the template with any new information learned');
});
it('uses the default instruction when undefined is passed explicitly', () => {
const withDefault = buildWorkingMemoryInstruction('# Template', false, undefined);
const withoutArg = buildWorkingMemoryInstruction('# Template', false);
expect(withDefault).toBe(withoutArg);
});
it('WORKING_MEMORY_DEFAULT_INSTRUCTION appears in the output when no custom instruction is set', () => {
const result = buildWorkingMemoryInstruction('# Template', false);
expect(result).toContain(WORKING_MEMORY_DEFAULT_INSTRUCTION);
});
});
});
@ -69,7 +81,6 @@ describe('templateFromSchema', () => {
const result = templateFromSchema(schema);
expect(result).toContain('userName');
expect(result).toContain('favoriteColor');
// Should be valid JSON
let parsed: unknown;
try {
parsed = JSON.parse(result);
@ -80,118 +91,117 @@ describe('templateFromSchema', () => {
});
});
/**
* Helper that feeds chunks through a WorkingMemoryStreamFilter and collects
* the output text and any persisted working memory content.
*/
async function runStreamFilter(
chunks: string[],
): Promise<{ outputText: string; persisted: string[] }> {
const persisted: string[] = [];
const stream = new TransformStream<StreamChunk>();
const writer = stream.writable.getWriter();
// eslint-disable-next-line @typescript-eslint/require-await
const filter = new WorkingMemoryStreamFilter(writer, async (content) => {
persisted.push(content);
describe('buildWorkingMemoryTool — freeform', () => {
it('returns a BuiltTool with the correct name', () => {
const tool = buildWorkingMemoryTool({
structured: false,
persist: async () => {},
});
expect(tool.name).toBe(UPDATE_WORKING_MEMORY_TOOL_NAME);
});
// Read the readable side concurrently to avoid backpressure deadlock
const reader = stream.readable.getReader();
const readAll = (async () => {
let outputText = '';
while (true) {
const result = await reader.read();
if (result.done) break;
const chunk = result.value as StreamChunk;
if (chunk.type === 'text-delta') outputText += chunk.delta;
}
return outputText;
})();
for (const chunk of chunks) {
await filter.write({ type: 'text-delta', delta: chunk });
}
await filter.flush();
await writer.close();
const outputText = await readAll;
return { outputText, persisted };
}
describe('WorkingMemoryStreamFilter with tag split across multiple chunks', () => {
it('handles tag split mid-open-tag', async () => {
const { outputText, persisted } = await runStreamFilter([
'Hello <work',
'ing_memory>state</working_memory>',
]);
expect(outputText).toBe('Hello ');
expect(persisted).toEqual(['state']);
it('has a description', () => {
const tool = buildWorkingMemoryTool({
structured: false,
persist: async () => {},
});
expect(tool.description).toBeTruthy();
});
it('handles tag split mid-close-tag', async () => {
const { outputText, persisted } = await runStreamFilter([
'<working_memory>state</worki',
'ng_memory> after',
]);
expect(persisted).toEqual(['state']);
expect(outputText).toBe(' after');
it('has a freeform input schema with a memory field', () => {
const tool = buildWorkingMemoryTool({
structured: false,
persist: async () => {},
});
expect(tool.inputSchema).toBeDefined();
const schema = tool.inputSchema as z.ZodObject<z.ZodRawShape>;
const result = schema.safeParse({ memory: 'hello' });
expect(result.success).toBe(true);
});
it('handles tag spread across 3+ chunks', async () => {
const { outputText, persisted } = await runStreamFilter([
'<wor',
'king_mem',
'ory>data</working_memory>',
]);
expect(persisted).toEqual(['data']);
expect(outputText).toBe('');
it('rejects input without memory field', () => {
const tool = buildWorkingMemoryTool({
structured: false,
persist: async () => {},
});
const schema = tool.inputSchema as z.ZodObject<z.ZodRawShape>;
const result = schema.safeParse({ other: 'value' });
expect(result.success).toBe(false);
});
it('handles partial < that is not a tag', async () => {
const { outputText, persisted } = await runStreamFilter(['Hello <', 'div>world']);
expect(outputText).toBe('Hello <div>world');
expect(persisted).toEqual([]);
it('handler calls persist with the memory string', async () => {
const persisted: string[] = [];
const tool = buildWorkingMemoryTool({
structured: false,
// eslint-disable-next-line @typescript-eslint/require-await
persist: async (content) => {
persisted.push(content);
},
});
const result = await tool.handler!({ memory: 'test content' }, {} as never);
expect(persisted).toEqual(['test content']);
expect(result).toMatchObject({ success: true });
});
});
describe('parseWorkingMemory with invalid structured content', () => {
it('strips tags and extracts content regardless of JSON validity', () => {
const invalidJson = '{not valid json!!!}';
const text = `Here is my response.\n<working_memory>\n${invalidJson}\n</working_memory>`;
const result = parseWorkingMemory(text);
expect(result.cleanText).toBe('Here is my response.');
expect(result.workingMemory).toBe(invalidJson);
describe('buildWorkingMemoryTool — structured', () => {
const schema = z.object({
userName: z.string().optional().describe("The user's name"),
location: z.string().optional().describe('Where the user lives'),
});
it('strips tags with content that fails Zod schema validation', () => {
// Content is valid JSON but wrong shape for the schema
const wrongShape = '{"unexpected": true}';
const text = `Response text.\n<working_memory>\n${wrongShape}\n</working_memory>`;
const result = parseWorkingMemory(text);
it('uses the Zod schema as input schema', () => {
const tool = buildWorkingMemoryTool({
structured: true,
schema,
persist: async () => {},
});
const inputSchema = tool.inputSchema as typeof schema;
const result = inputSchema.safeParse({ userName: 'Alice', location: 'Berlin' });
expect(result.success).toBe(true);
});
// Tags are stripped from response regardless
expect(result.cleanText).toBe('Response text.');
// Raw content is returned — caller decides whether it passes validation
expect(result.workingMemory).toBe(wrongShape);
it('handler serializes input to JSON and calls persist', async () => {
const persisted: string[] = [];
const tool = buildWorkingMemoryTool({
structured: true,
schema,
// eslint-disable-next-line @typescript-eslint/require-await
persist: async (content) => {
persisted.push(content);
},
});
// Verify the content would indeed fail schema validation
expect(result.workingMemory).not.toBeNull();
const input = { userName: 'Alice', location: 'Berlin' };
await tool.handler!(input, {} as never);
expect(persisted).toHaveLength(1);
let parsed: unknown;
try {
parsed = JSON.parse(result.workingMemory!);
parsed = JSON.parse(persisted[0]) as unknown;
} catch {
parsed = undefined;
}
expect(parsed).toBeDefined();
expect(parsed).toMatchObject(input);
});
it('strips tags even when content is completely non-JSON', () => {
const text =
'My reply.\n<working_memory>\nthis is just plain text, not JSON at all\n</working_memory>';
const result = parseWorkingMemory(text);
it('handler returns success confirmation', async () => {
const tool = buildWorkingMemoryTool({
structured: true,
schema,
persist: async () => {},
});
const result = await tool.handler!({ userName: 'Alice' }, {} as never);
expect(result).toMatchObject({ success: true });
});
expect(result.cleanText).toBe('My reply.');
expect(result.workingMemory).toBe('this is just plain text, not JSON at all');
it('falls back to freeform when no schema provided despite structured:true', () => {
const tool = buildWorkingMemoryTool({
structured: true,
persist: async () => {},
});
const inputSchema = tool.inputSchema as z.ZodObject<z.ZodRawShape>;
const result = inputSchema.safeParse({ memory: 'fallback text' });
expect(result.success).toBe(true);
});
});

View file

@ -0,0 +1,217 @@
import type prettier from 'prettier';
import type {
AgentSchema,
EvalSchema,
GuardrailSchema,
MemorySchema,
ToolSchema,
} from '../types/sdk/schema';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Escape a string for safe embedding inside a generated template literal.
 * Backslashes, backticks, and dollar signs are each prefixed with a
 * backslash so the emitted code reproduces the input verbatim.
 */
function escapeTemplateLiteral(str: string): string {
	return str.replace(/[\\`$]/g, (ch) => `\\${ch}`);
}
/**
 * Escape a string for safe embedding inside a generated single-quoted
 * string literal. Reuses JSON string escaping (backslashes, double quotes,
 * control characters) and additionally escapes single quotes, which JSON
 * leaves untouched.
 */
function escapeSingleQuote(str: string): string {
	const jsonEscaped = JSON.stringify(str);
	const inner = jsonEscaped.substring(1, jsonEscaped.length - 1);
	return inner.replace(/'/g, "\\'");
}
let prettierInstance: typeof prettier | undefined;

/**
 * Format TypeScript source code using Prettier.
 * The prettier module is imported lazily on first use and cached, so code
 * paths that never generate code pay no startup cost.
 */
async function formatCode(code: string): Promise<string> {
	if (prettierInstance === undefined) {
		prettierInstance = await import('prettier');
	}
	return await prettierInstance.format(code, {
		parser: 'typescript',
		singleQuote: true,
		useTabs: true,
		trailingComma: 'all',
		printWidth: 100,
	});
}
/**
 * Compile-time exhaustive check. If a new property is added to AgentSchema
 * but not handled in generateAgentCode(), TypeScript will report an error
 * here because the destructured rest object won't be empty.
 *
 * Accepts `Record<string, never>` so any leftover (unhandled) property in
 * the rest object makes the call site fail to type-check; at runtime this
 * is a no-op.
 */
function assertAllHandled(_: Record<string, never>): void {
	// intentionally empty — this is a compile-time-only check
}
// ---------------------------------------------------------------------------
// Section builders — each returns `.method(...)` chain fragments
// ---------------------------------------------------------------------------
/**
 * Build the `.model(...)` chain fragment.
 * Emits a two-argument call when both provider and name are set, a
 * single-argument call when only the name is set, and nothing otherwise.
 */
function modelParts(model: AgentSchema['model']): string[] {
	if (!model.name) return [];
	const name = escapeSingleQuote(model.name);
	return model.provider
		? [`.model('${escapeSingleQuote(model.provider)}', '${name}')`]
		: [`.model('${name}')`];
}
/**
 * Build the `.tool(...)` chain fragment for a single tool.
 * Non-editable tools are referenced by name through WorkflowTool; editable
 * tools are emitted as a full inline Tool builder chain. The caller uses
 * `usesWorkflowTool` to decide whether WorkflowTool must be imported.
 */
function toolPart(tool: ToolSchema): { part: string; usesWorkflowTool: boolean } {
	if (!tool.editable) {
		const name = escapeSingleQuote(tool.name);
		return { part: `.tool(new WorkflowTool('${name}'))`, usesWorkflowTool: true };
	}

	// Order of chained methods matches the builder's expected usage.
	let chain = `new Tool('${escapeSingleQuote(tool.name)}')`;
	chain += `.description('${escapeSingleQuote(tool.description)}')`;
	if (tool.inputSchemaSource) chain += `.input(${tool.inputSchemaSource})`;
	if (tool.outputSchemaSource) chain += `.output(${tool.outputSchemaSource})`;
	if (tool.suspendSchemaSource) chain += `.suspend(${tool.suspendSchemaSource})`;
	if (tool.resumeSchemaSource) chain += `.resume(${tool.resumeSchemaSource})`;
	if (tool.handlerSource) chain += `.handler(${tool.handlerSource})`;
	if (tool.toMessageSource) chain += `.toMessage(${tool.toMessageSource})`;
	if (tool.requireApproval) chain += '.requireApproval()';
	if (tool.needsApprovalFnSource) chain += `.needsApprovalFn(${tool.needsApprovalFnSource})`;
	return { part: `.tool(${chain})`, usesWorkflowTool: false };
}
/**
 * Build the `.eval(...)` chain fragment for a single evaluation.
 * The handler is attached as `.check(...)` or `.judge(...)` depending on
 * the evaluation type.
 */
function evalPart(ev: EvalSchema): string {
	let chain = `new Eval('${escapeSingleQuote(ev.name)}')`;
	if (ev.description) chain += `.description('${escapeSingleQuote(ev.description)}')`;
	if (ev.modelId) chain += `.model('${escapeSingleQuote(ev.modelId)}')`;
	if (ev.credentialName) chain += `.credential('${escapeSingleQuote(ev.credentialName)}')`;
	if (ev.handlerSource) {
		const method = ev.type === 'check' ? 'check' : 'judge';
		chain += `.${method}(${ev.handlerSource})`;
	}
	return `.eval(${chain})`;
}
/**
 * Build the `.inputGuardrail(...)` / `.outputGuardrail(...)` chain fragment.
 * The guardrail source string is emitted verbatim as the argument.
 */
function guardrailPart(g: GuardrailSchema): string {
	if (g.position === 'input') {
		return `.inputGuardrail(${g.source})`;
	}
	return `.outputGuardrail(${g.source})`;
}
/**
 * Build the `.memory(...)` chain fragment.
 * User-provided source wins; otherwise a default Memory builder with the
 * configured (or default 10) message window is emitted.
 */
function memoryPart(memory: MemorySchema): string {
	if (memory.source) return `.memory(${memory.source})`;
	const windowSize = memory.lastMessages ?? 10;
	return `.memory(new Memory().lastMessages(${windowSize}))`;
}
/**
 * Build the `.thinking(...)` chain fragment.
 * Emits an options object only when at least one option is present.
 * NOTE(review): provider and reasoningEffort are interpolated without
 * escapeSingleQuote, unlike other string fields in this generator — fine if
 * they are constrained literal unions in the schema; confirm.
 */
function thinkingPart(thinking: NonNullable<AgentSchema['config']['thinking']>): string {
	const opts: string[] = [];
	if (thinking.budgetTokens !== undefined) {
		opts.push(`budgetTokens: ${thinking.budgetTokens}`);
	}
	if (thinking.reasoningEffort) {
		opts.push(`reasoningEffort: '${thinking.reasoningEffort}'`);
	}
	return opts.length === 0
		? `.thinking('${thinking.provider}')`
		: `.thinking('${thinking.provider}', { ${opts.join(', ')} })`;
}
/**
 * Build the import statement(s) for the generated module.
 * Collects the names needed from '@n8n/agents' based on which features the
 * schema uses, and appends a zod import when any schema source references
 * the `z.` namespace.
 */
function buildImports(schema: AgentSchema, needsWorkflowTool: boolean): string {
	const names = new Set<string>(['Agent']);
	if (schema.tools.some((t) => t.editable)) names.add('Tool');
	if (needsWorkflowTool) names.add('WorkflowTool');
	if (schema.memory) names.add('Memory');
	if (schema.mcp && schema.mcp.length > 0) names.add('McpClient');
	if (schema.evaluations.length > 0) names.add('Eval');

	const usesZod =
		schema.tools.some(
			(t) =>
				(t.inputSchemaSource?.includes('z.') ?? false) ||
				(t.outputSchemaSource?.includes('z.') ?? false),
		) || (schema.config.structuredOutput.schemaSource?.includes('z.') ?? false);

	const lines = [`import { ${[...names].sort().join(', ')} } from '@n8n/agents';`];
	if (usesZod) lines.push("import { z } from 'zod';");
	return lines.join('\n');
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
 * Generate TypeScript source code for an agent definition from its schema.
 *
 * Produces a single `export default new Agent(...)` builder chain plus the
 * imports it needs, then formats the whole module with Prettier.
 *
 * @param schema - Declarative description of the agent (model, tools, memory, …).
 * @param agentName - Name passed to the generated `Agent` constructor.
 * @returns The formatted module source as a string.
 */
export async function generateAgentCode(schema: AgentSchema, agentName: string): Promise<string> {
	// Destructure every top-level property. If a new property is added to
	// AgentSchema, TypeScript will error on assertAllHandled below until
	// you handle it here AND add it to the destructure.
	const {
		model,
		credential,
		instructions,
		description: _description, // entity-level, not in code
		tools,
		providerTools,
		memory,
		evaluations,
		guardrails,
		mcp,
		telemetry,
		checkpoint,
		config,
		...rest
	} = schema;
	// If this errors, you added a property to AgentSchema but didn't
	// destructure it above. Add it to the destructure and handle it below.
	assertAllHandled(rest);
	// Same exhaustiveness guard for the nested config object.
	const { thinking, toolCallConcurrency, requireToolApproval, structuredOutput, ...configRest } =
		config;
	assertAllHandled(configRest);
	// No manual indentation — Prettier formats at the end.
	const parts: string[] = [];
	let needsWorkflowTool = false;
	parts.push(`export default new Agent('${escapeSingleQuote(agentName)}')`);
	parts.push(...modelParts(model));
	if (credential) parts.push(`.credential('${escapeSingleQuote(credential)}')`);
	// Instructions go into a template literal so multi-line text survives.
	if (instructions) parts.push(`.instructions(\`${escapeTemplateLiteral(instructions)}\`)`);
	for (const tool of tools) {
		const { part, usesWorkflowTool } = toolPart(tool);
		// Track whether any tool requires the WorkflowTool import.
		if (usesWorkflowTool) needsWorkflowTool = true;
		parts.push(part);
	}
	for (const pt of providerTools) {
		parts.push(`.providerTool(${pt.source})`);
	}
	if (memory) parts.push(memoryPart(memory));
	for (const ev of evaluations) {
		parts.push(evalPart(ev));
	}
	for (const g of guardrails) {
		parts.push(guardrailPart(g));
	}
	if (mcp && mcp.length > 0) {
		const configs = mcp.map((s) => s.configSource).join(', ');
		parts.push(`.mcp(new McpClient([${configs}]))`);
	}
	if (telemetry) parts.push(`.telemetry(${telemetry.source})`);
	if (checkpoint) parts.push(`.checkpoint('${escapeSingleQuote(checkpoint)}')`);
	if (thinking) parts.push(thinkingPart(thinking));
	if (toolCallConcurrency) parts.push(`.toolCallConcurrency(${toolCallConcurrency})`);
	if (requireToolApproval) parts.push('.requireToolApproval()');
	// Structured output is emitted only when enabled AND a schema source exists.
	if (structuredOutput.enabled && structuredOutput.schemaSource) {
		parts.push(`.structuredOutput(${structuredOutput.schemaSource})`);
	}
	// Imports are computed last: needsWorkflowTool is only known after the
	// tool loop above has run.
	const imports = buildImports(schema, needsWorkflowTool);
	const raw = `${imports}\n\n${parts.join('')};\n`;
	return await formatCode(raw);
}

View file

@ -55,6 +55,12 @@ export { Telemetry } from './sdk/telemetry';
export { LangSmithTelemetry } from './integrations/langsmith';
export type { LangSmithTelemetryConfig } from './integrations/langsmith';
export { Agent } from './sdk/agent';
export type {
AgentBuilder,
CredentialProvider,
ResolvedCredential,
CredentialListItem,
} from './types';
export { McpClient } from './sdk/mcp-client';
export { Network } from './sdk/network';
export { providerTools } from './sdk/provider-tools';
@ -75,12 +81,23 @@ export type {
CustomAgentMessages,
AgentDbMessage,
} from './types/sdk/message';
export {
toDbMessage,
filterLlmMessages,
isLlmMessage,
} from './sdk/message';
export type { HandlerExecutor } from './types/sdk/handler-executor';
export type {
AgentSchema,
ToolSchema,
MemorySchema,
EvalSchema,
ThinkingSchema,
ProviderToolSchema,
GuardrailSchema,
McpServerSchema,
TelemetrySchema,
} from './types/sdk/schema';
export { generateAgentCode } from './codegen/generate-agent-code';
export { filterLlmMessages, isLlmMessage } from './sdk/message';
export { fetchProviderCatalog } from './sdk/catalog';
export { providerCapabilities } from './sdk/provider-capabilities';
export type { ProviderCapability } from './sdk/provider-capabilities';
export type {
ProviderCatalog,
ProviderInfo,
@ -89,10 +106,17 @@ export type {
ModelLimits,
} from './sdk/catalog';
export { SqliteMemory } from './storage/sqlite-memory';
export {
UPDATE_WORKING_MEMORY_TOOL_NAME,
WORKING_MEMORY_DEFAULT_INSTRUCTION,
} from './runtime/working-memory';
export type { SqliteMemoryConfig } from './storage/sqlite-memory';
export { PostgresMemory } from './storage/postgres-memory';
export type { PostgresMemoryConfig } from './storage/postgres-memory';
export { createModel } from './runtime/model-factory';
export { generateTitleFromMessage } from './runtime/title-generation';
export { Workspace } from './workspace';
export { BaseFilesystem } from './workspace';
export { BaseSandbox } from './workspace';

View file

@ -1,16 +1,17 @@
import type { ProviderOptions } from '@ai-sdk/provider-utils';
import { generateText, streamText, Output } from 'ai';
import { generateText, Output, streamText } from 'ai';
import Ajv from 'ajv';
import type { z } from 'zod';
import { zodToJsonSchema, type JsonSchema7Type } from 'zod-to-json-schema';
import { computeCost, getModelCost, type ModelCost } from '../sdk/catalog';
import { isLlmMessage, toDbMessage } from '../sdk/message';
import { isLlmMessage } from '../sdk/message';
import type {
AgentRunState,
AnthropicThinkingConfig,
BuiltMemory,
BuiltProviderTool,
BuiltTelemetry,
BuiltTool,
CheckpointStore,
FinishReason,
@ -23,20 +24,18 @@ import type {
SerializableAgentState,
StreamChunk,
StreamResult,
SubAgentUsage,
ThinkingConfig,
TitleGenerationConfig,
TokenUsage,
XaiThinkingConfig,
SubAgentUsage,
BuiltTelemetry,
} from '../types';
import { AgentEventBus } from './event-bus';
import { createFilteredLogger } from './logger';
import { saveMessagesToThread } from './memory-store';
import { AgentMessageList, type SerializedMessageList } from './message-list';
import { fromAiFinishReason, fromAiMessages } from './messages';
import { createEmbeddingModel, createModel } from './model-factory';
import { RunStateManager, generateRunId } from './run-state';
import { generateRunId, RunStateManager } from './run-state';
import {
accumulateUsage,
applySubAgentUsage,
@ -50,21 +49,21 @@ import { convertChunk } from './stream';
import { stripOrphanedToolMessages } from './strip-orphaned-tool-messages';
import { generateThreadTitle } from './title-generation';
import {
isAgentToolResult,
isSuspendedToolResult,
buildToolMap,
executeTool,
toAiSdkTools,
isAgentToolResult,
isSuspendedToolResult,
toAiSdkProviderTools,
toAiSdkTools,
} from './tool-adapter';
import { parseWorkingMemory, WorkingMemoryStreamFilter } from './working-memory';
import { buildWorkingMemoryTool } from './working-memory';
import { AgentEvent } from '../types/runtime/event';
import type {
ModelConfig,
AgentPersistenceOptions,
ExecutionOptions,
ModelConfig,
PersistedExecutionOptions,
ToolResultEntry,
AgentPersistenceOptions,
} from '../types/sdk/agent';
import type {
AgentDbMessage,
@ -75,19 +74,6 @@ import type {
import type { JSONObject, JSONValue } from '../types/utils/json';
import { isZodSchema } from '../utils/zod';
const logger = createFilteredLogger();
/**
 * Type guard for text content parts in LLM messages.
 *
 * Accepts only objects shaped `{ type: 'text', text: string }`. The `text`
 * payload is type-checked, not just presence-checked: the predicate promises
 * `text: string` to the compiler, so letting `{ type: 'text', text: 42 }`
 * through would break later string operations on `.text`.
 */
function isTextPart(part: unknown): part is { type: 'text'; text: string } {
	return (
		typeof part === 'object' &&
		part !== null &&
		'type' in part &&
		(part as Record<string, unknown>).type === 'text' &&
		'text' in part &&
		// Fix: verify the value really is a string instead of only checking the key.
		typeof (part as Record<string, unknown>).text === 'string'
	);
}
export interface AgentRuntimeConfig {
name: string;
model: ModelConfig;
@ -102,6 +88,7 @@ export interface AgentRuntimeConfig {
structured: boolean;
schema?: z.ZodObject<z.ZodRawShape>;
scope?: 'resource' | 'thread';
instruction?: string;
};
semanticRecall?: SemanticRecallConfig;
structuredOutput?: z.ZodType;
@ -140,11 +127,11 @@ type ToolCallOutcome =
outcome: 'success';
toolEntry: ToolResultEntry;
subAgentUsage?: SubAgentUsage[];
customMessage?: AgentDbMessage;
message: AgentDbMessage;
customMessage?: AgentMessage;
message: AgentMessage;
}
| { outcome: 'suspended'; payload: unknown; resumeSchema: JsonSchema7Type }
| { outcome: 'error'; error: unknown; message: AgentDbMessage }
| { outcome: 'error'; error: unknown; message: AgentMessage }
| { outcome: 'noop' }; // tool call shouldn't be saved or logged anywhere, usually means that it was executed by the AI SDK
/** A tool call that completed successfully. */
@ -154,8 +141,8 @@ interface ToolCallSuccess {
input: JSONValue;
toolEntry: ToolResultEntry;
subAgentUsage?: SubAgentUsage[];
customMessage?: AgentDbMessage;
message: AgentDbMessage;
customMessage?: AgentMessage;
message: AgentMessage;
}
/** Info about a tool call that suspended (before persistence — no runId yet). */
@ -174,7 +161,7 @@ interface ToolCallError {
toolName: string;
input: JSONValue;
error: unknown;
message: AgentDbMessage;
message: AgentMessage;
}
/** Result of executing a batch of tool calls (before persistence). */
@ -395,7 +382,7 @@ export class AgentRuntime {
* prepends it at every LLM call site.
*/
private async buildMessageList(
input: AgentDbMessage[],
input: AgentMessage[],
options?: RunOptions,
): Promise<AgentMessageList> {
const list = new AgentMessageList();
@ -405,7 +392,7 @@ export class AgentRuntime {
limit: this.config.lastMessages ?? 10,
});
if (memMessages.length > 0) {
list.addHistory(stripOrphanedToolMessages(memMessages.map(toDbMessage)));
list.addHistory(stripOrphanedToolMessages(memMessages));
}
}
@ -432,7 +419,7 @@ export class AgentRuntime {
*/
private async performSemanticRecall(
list: AgentMessageList,
input: AgentDbMessage[],
input: AgentMessage[],
threadId: string,
resourceId?: string,
): Promise<void> {
@ -447,7 +434,7 @@ export class AgentRuntime {
if (!userText) return;
let recalled: AgentMessage[] = [];
let recalled: AgentDbMessage[] = [];
if (this.config.memory.queryEmbeddings && this.config.semanticRecall.embedder) {
// Tier 3: runtime embeds the query, backend does vector search
@ -480,7 +467,7 @@ export class AgentRuntime {
);
} else {
recalled = allMsgs.filter((m) => {
const id = 'id' in m && typeof m.id === 'string' ? m.id : undefined;
const id = m.id;
return id !== undefined && hitIds.has(id);
});
}
@ -501,12 +488,10 @@ export class AgentRuntime {
const { historyIds } = list.serialize();
const historyIdSet = new Set(historyIds);
const newRecalled = recalled
.filter((m) => {
const id = 'id' in m && typeof m.id === 'string' ? m.id : undefined;
return !id || !historyIdSet.has(id);
})
.map(toDbMessage);
const newRecalled = recalled.filter((m) => {
const id = m.id;
return !id || !historyIdSet.has(id);
});
if (newRecalled.length > 0) {
list.addHistory(newRecalled);
@ -515,10 +500,10 @@ export class AgentRuntime {
/** Expand hit IDs by messageRange (before/after) within the ordered message list. */
private expandMessageRange(
allMsgs: AgentMessage[],
allMsgs: AgentDbMessage[],
hitIds: Set<string>,
range: { before: number; after: number },
): AgentMessage[] {
): AgentDbMessage[] {
const expandedIds = new Set<string>();
for (const msg of allMsgs) {
const id = 'id' in msg && typeof msg.id === 'string' ? msg.id : undefined;
@ -630,7 +615,7 @@ export class AgentRuntime {
runId?: string,
): Promise<GenerateResult> {
const { model, toolMap, aiTools, providerOptions, hasTools, outputSpec } =
this.buildLoopContext(options);
this.buildLoopContext({ ...options, persistence: options?.persistence });
let totalUsage: TokenUsage | undefined;
let lastFinishReason: FinishReason = 'stop';
@ -762,19 +747,6 @@ export class AgentRuntime {
);
}
// Extract and strip working memory from assistant response
if (
this.config.workingMemory &&
this.config.memory?.saveWorkingMemory &&
options?.persistence
) {
this.extractAndPersistWorkingMemory(list, {
threadId: options.persistence.threadId,
resourceId: options.persistence.resourceId,
scope: this.config.workingMemory?.scope ?? 'resource',
});
}
await this.saveToMemory(list, options);
await this.flushTelemetry(options);
@ -852,22 +824,10 @@ export class AgentRuntime {
runId?: string,
): Promise<void> {
const { model, toolMap, aiTools, providerOptions, hasTools, outputSpec } =
this.buildLoopContext(options);
// Wrap writer with working memory filter if configured
const wmParamsStream = this.resolveWorkingMemoryParams(options?.persistence);
const wmFilter = wmParamsStream?.persistFn
? new WorkingMemoryStreamFilter(writer, async (content: string) => {
await wmParamsStream.persistFn(content);
})
: undefined;
this.buildLoopContext({ ...options, persistence: options?.persistence });
const writeChunk = async (chunk: StreamChunk): Promise<void> => {
if (wmFilter) {
await wmFilter.write(chunk);
} else {
await writer.write(chunk);
}
await writer.write(chunk);
};
let totalUsage: TokenUsage | undefined;
@ -879,7 +839,6 @@ export class AgentRuntime {
const closeStreamWithError = async (error: unknown, status: AgentRunState): Promise<void> => {
await this.cleanupRun(runId);
this.updateState({ status });
if (wmFilter) await wmFilter.flush();
await writer.write({ type: 'error', error });
await writer.write({ type: 'finish', finishReason: 'error' });
await writer.close();
@ -1067,8 +1026,6 @@ export class AgentRuntime {
this.emitTurnEnd(newMessages, extractToolResults(list.responseDelta()));
}
if (wmFilter) await wmFilter.flush();
const costUsage = this.applyCost(totalUsage);
const parentCost = costUsage?.cost ?? 0;
const subCost = collectedSubAgentUsage.reduce((sum, s) => sum + (s.usage.cost ?? 0), 0);
@ -1085,19 +1042,6 @@ export class AgentRuntime {
});
try {
// Extract and strip working memory from assistant response
if (
this.config.workingMemory &&
this.config.memory?.saveWorkingMemory &&
options?.persistence
) {
this.extractAndPersistWorkingMemory(list, {
threadId: options.persistence.threadId,
resourceId: options.persistence.resourceId,
scope: this.config.workingMemory?.scope ?? 'resource',
});
}
await this.saveToMemory(list, options);
if (this.config.titleGeneration && options?.persistence && this.config.memory) {
@ -1189,43 +1133,6 @@ export class AgentRuntime {
});
}
/**
* Extract <working_memory> tags from the last assistant message in the turn delta,
* strip them from the message, and persist the working memory content.
*/
private extractAndPersistWorkingMemory(
list: AgentMessageList,
params: { threadId: string; resourceId: string; scope: 'resource' | 'thread' },
): void {
const delta = list.responseDelta();
for (let i = delta.length - 1; i >= 0; i--) {
const msg = delta[i];
if (!isLlmMessage(msg) || msg.role !== 'assistant') continue;
for (const part of msg.content) {
if (!isTextPart(part)) continue;
const { cleanText, workingMemory } = parseWorkingMemory(part.text);
if (workingMemory !== null) {
// Validate structured working memory if schema is configured
if (this.config.workingMemory?.structured && this.config.workingMemory.schema) {
try {
this.config.workingMemory.schema.parse(JSON.parse(workingMemory));
} catch {
// Validation failed — keep previous state, still strip tags
part.text = cleanText;
return;
}
}
part.text = cleanText;
// Fire-and-forget persist
this.config.memory!.saveWorkingMemory!(params, workingMemory).catch((error: unknown) => {
logger.warn('Failed to persist working memory', { error });
});
}
return;
}
}
}
/** Build the providerOptions object for thinking/reasoning config. */
private buildThinkingProviderOptions(): Record<string, Record<string, unknown>> | undefined {
if (!this.config.thinking) return undefined;
@ -1673,10 +1580,8 @@ export class AgentRuntime {
const toolResultMsg = makeToolResultMessage(toolCallId, toolName, modelResult);
list.addResponse([toolResultMsg]);
const customToolMessage = builtTool?.toMessage?.(actualResult);
let customMessage: AgentDbMessage | undefined;
if (customToolMessage) {
customMessage = toDbMessage(customToolMessage);
const customMessage = builtTool?.toMessage?.(actualResult);
if (customMessage) {
list.addResponse([customMessage]);
}
@ -1695,13 +1600,19 @@ export class AgentRuntime {
}
/** Build common LLM call dependencies shared by both the generate and stream loops. */
private buildLoopContext(execOptions?: ExecutionOptions) {
const aiTools = toAiSdkTools(this.config.tools);
private buildLoopContext(
execOptions?: ExecutionOptions & { persistence?: AgentPersistenceOptions },
) {
const wmTool = this.buildWorkingMemoryToolForRun(execOptions?.persistence);
const allUserTools = wmTool
? [...(this.config.tools ?? []), wmTool]
: (this.config.tools ?? []);
const aiTools = toAiSdkTools(allUserTools);
const aiProviderTools = toAiSdkProviderTools(this.config.providerTools);
const allTools = { ...aiTools, ...aiProviderTools };
return {
model: createModel(this.config.model),
toolMap: buildToolMap(this.config.tools),
toolMap: buildToolMap(allUserTools),
aiTools: allTools,
providerOptions: this.buildCallProviderOptions(execOptions?.providerOptions),
hasTools: Object.keys(allTools).length > 0,
@ -1711,6 +1622,20 @@ export class AgentRuntime {
};
}
/**
* Build the updateWorkingMemory BuiltTool for the current run.
* Returns undefined when working memory is not configured or persistence is unavailable.
*/
private buildWorkingMemoryToolForRun(persistence: AgentPersistenceOptions | undefined) {
const wmParams = this.resolveWorkingMemoryParams(persistence);
if (!wmParams) return undefined;
return buildWorkingMemoryTool({
structured: wmParams.structured,
schema: wmParams.schema,
persist: wmParams.persistFn,
});
}
/**
* Persist a suspended run state and update the current state snapshot.
* Returns the runId (reuses existingRunId when resuming to prevent dangling runs).
@ -1750,7 +1675,7 @@ export class AgentRuntime {
}
/** Emit a TurnEnd event when an assistant message is present in `newMessages`. */
private emitTurnEnd(newMessages: AgentDbMessage[], toolResults: ContentToolResult[]): void {
private emitTurnEnd(newMessages: AgentMessage[], toolResults: ContentToolResult[]): void {
const assistantMsg = newMessages.find((m) => 'role' in m && m.role === 'assistant');
if (assistantMsg) {
this.eventBus.emit({ type: AgentEvent.TurnEnd, message: assistantMsg, toolResults });
@ -1808,6 +1733,7 @@ export class AgentRuntime {
template: wmParams.template,
structured: wmParams.structured,
state: wmState,
...(wmParams.instruction !== undefined && { instruction: wmParams.instruction }),
};
}
@ -1836,6 +1762,7 @@ export class AgentRuntime {
template: this.config.workingMemory.template,
structured: this.config.workingMemory.structured,
schema: this.config.workingMemory.schema,
instruction: this.config.workingMemory.instruction,
};
}
}

View file

@ -1,6 +1,5 @@
import { toDbMessage } from '../sdk/message';
import type { BuiltMemory, Thread } from '../types';
import type { AgentDbMessage, AgentMessage } from '../types/sdk/message';
import type { AgentDbMessage } from '../types/sdk/message';
interface StoredMessage {
message: AgentDbMessage;
@ -73,7 +72,7 @@ export class InMemoryMemory implements BuiltMemory {
stored = stored.filter((s) => s.createdAt.getTime() < cutoff);
}
if (opts?.limit) stored = stored.slice(-opts.limit);
return stored.map((s) => s.message);
return stored.map((s) => ({ ...s.message, createdAt: s.createdAt }));
}
/**
@ -84,12 +83,11 @@ export class InMemoryMemory implements BuiltMemory {
async saveMessages(args: {
threadId: string;
resourceId?: string;
messages: AgentMessage[];
messages: AgentDbMessage[];
}): Promise<void> {
const existing = this.messagesByThread.get(args.threadId) ?? [];
const now = new Date();
for (const msg of args.messages) {
existing.push({ message: toDbMessage(msg), createdAt: now });
existing.push({ message: msg, createdAt: msg.createdAt });
}
this.messagesByThread.set(args.threadId, existing);
}
@ -115,7 +113,7 @@ export async function saveMessagesToThread(
memory: BuiltMemory,
threadId: string,
resourceId: string,
messages: AgentMessage[],
messages: AgentDbMessage[],
): Promise<void> {
await memory.saveThread({ id: threadId, resourceId });
await memory.saveMessages({ threadId, resourceId, messages });

View file

@ -4,21 +4,25 @@ import type { ModelMessage } from 'ai';
import { toAiMessages } from './messages';
import { stripOrphanedToolMessages } from './strip-orphaned-tool-messages';
import { buildWorkingMemoryInstruction } from './working-memory';
import { filterLlmMessages } from '../sdk/message';
import { filterLlmMessages, getCreatedAt } from '../sdk/message';
import type { SerializedMessageList } from '../types/runtime/message-list';
import type { AgentDbMessage } from '../types/sdk/message';
import type { AgentDbMessage, AgentMessage } from '../types/sdk/message';
export type { SerializedMessageList };
type MessageSource = 'history' | 'input' | 'response';
export interface WorkingMemoryContext {
template: string;
structured: boolean;
/** The current persisted state, or null if not yet loaded. Falls back to template. */
state: string | null;
/** Custom instruction text. When absent the default instruction is used. */
instruction?: string;
}
/**
* Append-only message container with Set-based source tracking.
* Message container with Set-based source tracking.
*
* Three named sources:
* history — messages loaded from memory at the start of the turn.
@ -26,6 +30,9 @@ export interface WorkingMemoryContext {
* input — the caller's raw input for this turn (custom messages preserved).
* response — LLM replies, tool results, and custom tool messages from this turn.
*
* After each `addHistory` / `addInput` / `addResponse` batch, `all` is sorted by
* `createdAt` ascending, then `id`, so transcript order matches timestamps.
*
* Serialization stores the flat message array plus the IDs of each set so
* the full three-way source distinction survives a round-trip.
*/
@ -38,28 +45,93 @@ export class AgentMessageList {
private responseSet = new Set<AgentDbMessage>();
private lastCreatedAt: number = 0;
/**
 * Normalize an AgentMessage into an AgentDbMessage and append it to `this.all`,
 * guaranteeing that `lastCreatedAt` only ever moves forward.
 *
 * History messages (source === 'history') that already carry a createdAt are
 * DB-loaded and authoritative: the stored timestamp is preserved verbatim, and
 * `lastCreatedAt` is raised to at least that value so subsequent live messages
 * land strictly later.
 *
 * Every other message is live: any existing createdAt is treated only as a
 * hint (falling back to Date.now() when absent) and bumped to
 * max(hint, lastCreatedAt + 1), so each entry gets a unique, strictly
 * increasing timestamp.
 */
private addMessage(message: AgentMessage, source: MessageSource): AgentDbMessage {
	const id = 'id' in message && typeof message.id === 'string' ? message.id : crypto.randomUUID();
	const known = getCreatedAt(message);
	let createdAt: Date;
	if (source === 'history' && known !== null) {
		// Authoritative DB timestamp — keep it exactly, only advance the high-water mark.
		createdAt = known;
		if (known.getTime() > this.lastCreatedAt) {
			this.lastCreatedAt = known.getTime();
		}
	} else {
		// Live message — use the hint, then force it past the last assigned timestamp.
		const hinted = known?.getTime() ?? Date.now();
		const stamped = hinted > this.lastCreatedAt ? hinted : this.lastCreatedAt + 1;
		this.lastCreatedAt = stamped;
		createdAt = new Date(stamped);
	}
	const normalized: AgentDbMessage = { ...message, id, createdAt };
	this.all.push(normalized);
	return normalized;
}
/** Numeric sort key for chronological ordering; unparseable/non-finite times sort last. */
private createdAtSortKey(m: AgentDbMessage): number {
	const raw = m.createdAt;
	const time = raw instanceof Date ? raw.getTime() : new Date(raw).getTime();
	if (Number.isFinite(time)) {
		return time;
	}
	return Number.POSITIVE_INFINITY;
}
/**
 * Re-order `this.all` chronologically (stable: `createdAt` ascending, ties
 * broken by `id`), then refresh `lastCreatedAt` from the newest finite
 * timestamp actually present in the list.
 */
private sortAllByCreatedAt(): void {
	this.all.sort((left, right) => {
		const leftKey = this.createdAtSortKey(left);
		const rightKey = this.createdAtSortKey(right);
		if (leftKey < rightKey) return -1;
		if (leftKey > rightKey) return 1;
		return left.id.localeCompare(right.id);
	});
	let newest = 0;
	for (const msg of this.all) {
		const time =
			msg.createdAt instanceof Date ? msg.createdAt.getTime() : new Date(msg.createdAt).getTime();
		if (Number.isFinite(time) && time > newest) {
			newest = time;
		}
	}
	this.lastCreatedAt = newest;
}
/** Working memory context for this run. Set by buildMessageList / resume. */
workingMemory: WorkingMemoryContext | undefined;
addHistory(messages: AgentDbMessage[]): void {
addHistory(messages: AgentMessage[] | AgentDbMessage[]): void {
for (const m of messages) {
this.all.push(m);
this.historySet.add(m);
const dbMsg = this.addMessage(m, 'history');
this.historySet.add(dbMsg);
}
this.sortAllByCreatedAt();
}
addInput(messages: AgentDbMessage[]): void {
addInput(messages: AgentMessage[] | AgentDbMessage[]): void {
for (const m of messages) {
this.all.push(m);
this.inputSet.add(m);
const dbMsg = this.addMessage(m, 'input');
this.inputSet.add(dbMsg);
}
this.sortAllByCreatedAt();
}
addResponse(messages: AgentDbMessage[]): void {
addResponse(messages: AgentMessage[] | AgentDbMessage[]): void {
for (const m of messages) {
this.all.push(m);
this.responseSet.add(m);
const dbMsg = this.addMessage(m, 'response');
this.responseSet.add(dbMsg);
}
this.sortAllByCreatedAt();
}
/**
@ -74,10 +146,11 @@ export class AgentMessageList {
const wmInstruction = buildWorkingMemoryInstruction(
this.workingMemory.template,
this.workingMemory.structured,
this.workingMemory.instruction,
);
const wmState = this.workingMemory.state ?? this.workingMemory.template;
systemPrompt +=
wmInstruction + '\n\nCurrent working memory state:\n```\n' + wmState + '\n```';
wmInstruction + '\n\nCurrent working memory state:\n```\n' + wmState + '\n```\n';
}
const systemMessage: ModelMessage = instructionProviderOptions
@ -123,6 +196,7 @@ export class AgentMessageList {
if (inputIdSet.has(m.id)) list.inputSet.add(m);
if (responseIdSet.has(m.id)) list.responseSet.add(m);
}
list.sortAllByCreatedAt();
return list;
}
}

View file

@ -10,10 +10,8 @@ import type {
FinishReason as AiFinishReason,
} from 'ai';
import { toDbMessage } from '../sdk/message';
import type { FinishReason } from '../types';
import type {
AgentDbMessage,
AgentMessage,
ContentFile,
ContentReasoning,
@ -263,7 +261,7 @@ export function toAiMessages(messages: Message[]): ModelMessage[] {
}
/** Convert a single AI SDK ModelMessage to an n8n AgentDbMessage (with a generated id). */
export function fromAiMessage(msg: ModelMessage): AgentDbMessage {
export function fromAiMessage(msg: ModelMessage): AgentMessage {
const rawContent = msg.content;
const content: MessageContent[] =
typeof rawContent === 'string'
@ -273,11 +271,11 @@ export function fromAiMessage(msg: ModelMessage): AgentDbMessage {
if ('providerOptions' in msg && msg.providerOptions) {
message.providerOptions = msg.providerOptions;
}
return toDbMessage(message);
return message;
}
/** Convert AI SDK ModelMessages to n8n AgentDbMessages (each with a generated id). */
export function fromAiMessages(messages: ModelMessage[]): AgentDbMessage[] {
export function fromAiMessages(messages: ModelMessage[]): AgentMessage[] {
return messages.map(fromAiMessage);
}

View file

@ -1,11 +1,15 @@
/* eslint-disable @typescript-eslint/no-require-imports */
import type { EmbeddingModel, LanguageModel } from 'ai';
import type * as Undici from 'undici';
import type { ModelConfig } from '../types/sdk/agent';
type FetchFn = typeof globalThis.fetch;
type CreateProviderFn = (opts?: {
apiKey?: string;
baseURL?: string;
fetch?: FetchFn;
headers?: Record<string, string>;
}) => (model: string) => LanguageModel;
type CreateEmbeddingProviderFn = (opts?: { apiKey?: string }) => {
embeddingModel(model: string): EmbeddingModel;
@ -15,6 +19,26 @@ function isLanguageModel(config: unknown): config is LanguageModel {
return typeof config === 'object' && config !== null && 'doGenerate' in config;
}
/**
 * Build a fetch implementation that honors HTTP_PROXY / HTTPS_PROXY (e.g. in
 * e2e tests running against MockServer). Node's built-in globalThis.fetch
 * ignores these env vars, so without this wrapper AI SDK providers would
 * bypass the proxy. Returns undefined when no proxy is configured.
 */
function getProxyFetch(): FetchFn | undefined {
	const proxy = process.env.HTTPS_PROXY ?? process.env.HTTP_PROXY;
	if (!proxy) {
		return undefined;
	}
	// Load undici lazily — only needed when a proxy is actually configured.
	const { ProxyAgent } = require('undici') as typeof Undici;
	const agent = new ProxyAgent(proxy);
	const proxiedFetch = (async (input, init) => {
		return await globalThis.fetch(input, {
			...init,
			// @ts-expect-error dispatcher is a valid undici option for Node.js fetch
			dispatcher: agent,
		});
	}) as FetchFn;
	return proxiedFetch;
}
/**
* Provider packages are loaded dynamically via require() so only the
* provider needed at runtime must be installed.
@ -33,6 +57,7 @@ export function createModel(config: ModelConfig): LanguageModel {
const modelId = stripEmpty(typeof config === 'string' ? config : config.id);
const apiKey = stripEmpty(typeof config === 'string' ? undefined : config.apiKey);
const baseURL = stripEmpty(typeof config === 'string' ? undefined : config.url);
const headers = typeof config === 'string' ? undefined : config.headers;
if (!modelId) {
throw new Error('Model ID is required');
@ -40,31 +65,32 @@ export function createModel(config: ModelConfig): LanguageModel {
const [provider, ...rest] = modelId.split('/');
const modelName = rest.join('/');
const fetch = getProxyFetch();
switch (provider) {
case 'anthropic': {
const { createAnthropic } = require('@ai-sdk/anthropic') as {
createAnthropic: CreateProviderFn;
};
return createAnthropic({ apiKey, baseURL })(modelName);
return createAnthropic({ apiKey, baseURL, fetch, headers })(modelName);
}
case 'openai': {
const { createOpenAI } = require('@ai-sdk/openai') as {
createOpenAI: CreateProviderFn;
};
return createOpenAI({ apiKey, baseURL })(modelName);
return createOpenAI({ apiKey, baseURL, fetch, headers })(modelName);
}
case 'google': {
const { createGoogleGenerativeAI } = require('@ai-sdk/google') as {
createGoogleGenerativeAI: CreateProviderFn;
};
return createGoogleGenerativeAI({ apiKey, baseURL })(modelName);
return createGoogleGenerativeAI({ apiKey, baseURL, fetch, headers })(modelName);
}
case 'xai': {
const { createXai } = require('@ai-sdk/xai') as {
createXai: CreateProviderFn;
};
return createXai({ apiKey, baseURL })(modelName);
return createXai({ apiKey, baseURL, fetch, headers })(modelName);
}
default:
throw new Error(

View file

@ -2,18 +2,20 @@
* Pure utility functions used by AgentRuntime that require no class context.
* These are extracted here to keep agent-runtime.ts focused on orchestration logic.
*/
import { toDbMessage } from '../sdk/message';
import type { GenerateResult, StreamChunk, TokenUsage } from '../types';
import { toTokenUsage } from './stream';
import type { AgentDbMessage, AgentMessage, ContentToolResult } from '../types/sdk/message';
import type { AgentMessage, ContentToolResult } from '../types/sdk/message';
import type { JSONValue } from '../types/utils/json';
/** Normalize a string input to an AgentDbMessage array, assigning ids where missing. */
export function normalizeInput(input: AgentMessage[] | string): AgentDbMessage[] {
/**
* Normalize caller input to `AgentMessage[]` for the runtime. String input becomes a
* single user message.
*/
export function normalizeInput(input: AgentMessage[] | string): AgentMessage[] {
if (typeof input === 'string') {
return [toDbMessage({ role: 'user', content: [{ type: 'text', text: input }] })];
return [{ role: 'user', content: [{ type: 'text', text: input }] }];
}
return input.map(toDbMessage);
return input;
}
/** Build an AI SDK tool ModelMessage for a tool execution result. */
@ -21,8 +23,8 @@ export function makeToolResultMessage(
toolCallId: string,
toolName: string,
result: unknown,
): AgentDbMessage {
return toDbMessage({
): AgentMessage {
return {
role: 'tool',
content: [
{
@ -32,7 +34,7 @@ export function makeToolResultMessage(
result: result as JSONValue,
},
],
});
};
}
/**
@ -44,9 +46,9 @@ export function makeErrorToolResultMessage(
toolCallId: string,
toolName: string,
error: unknown,
): AgentDbMessage {
): AgentMessage {
const message = error instanceof Error ? `${error.name}: ${error.message}` : String(error);
return toDbMessage({
return {
role: 'tool',
content: [
{
@ -57,11 +59,11 @@ export function makeErrorToolResultMessage(
isError: true,
},
],
});
};
}
/** Extract all tool-result content parts from a flat list of agent messages. */
export function extractToolResults(messages: AgentDbMessage[]): ContentToolResult[] {
export function extractToolResults(messages: AgentMessage[]): ContentToolResult[] {
return messages
.flatMap((m) => ('content' in m ? m.content : []))
.filter((c): c is ContentToolResult => c.type === 'tool-result');

View file

@ -1,5 +1,5 @@
import { isLlmMessage } from '../sdk/message';
import type { AgentDbMessage, MessageContent } from '../types/sdk/message';
import type { AgentMessage, MessageContent } from '../types/sdk/message';
/**
* Strip orphaned tool-call and tool-result content from a message list.
@ -17,7 +17,7 @@ import type { AgentDbMessage, MessageContent } from '../types/sdk/message';
* whose only content was the orphaned result).
* 5. Preserves non-tool content (text, reasoning, files) in mixed messages.
*/
export function stripOrphanedToolMessages(messages: AgentDbMessage[]): AgentDbMessage[] {
export function stripOrphanedToolMessages<T extends AgentMessage>(messages: T[]): T[] {
const callIds = new Set<string>();
const resultIds = new Set<string>();
@ -39,7 +39,7 @@ export function stripOrphanedToolMessages(messages: AgentDbMessage[]): AgentDbMe
return messages;
}
const result: AgentDbMessage[] = [];
const result: T[] = [];
for (const msg of messages) {
if (!isLlmMessage(msg)) {

View file

@ -1,4 +1,4 @@
import { generateText } from 'ai';
import { generateText, type LanguageModel } from 'ai';
import type { BuiltMemory, TitleGenerationConfig } from '../types';
import { createFilteredLogger } from './logger';
@ -10,12 +10,83 @@ const logger = createFilteredLogger();
const DEFAULT_TITLE_INSTRUCTIONS = [
'- you will generate a short title based on the first message a user begins a conversation with',
'- ensure it is not more than 80 characters long',
"- the title should be a summary of the user's message",
'- do not use quotes or colons',
'- the entire text you return will be used as the title',
'- the title should describe what the user asked for, not what an assistant might reply',
'- 1 to 5 words, no more than 80 characters',
'- use sentence case (e.g. "Conversation title" instead of "Conversation Title")',
'- do not use quotes, colons, or markdown formatting',
'- the entire text you return will be used directly as the title, so respond with the title only',
].join('\n');
const TRIVIAL_MESSAGE_MAX_CHARS = 15;
const TRIVIAL_MESSAGE_MAX_WORDS = 3;
const MAX_TITLE_LENGTH = 80;
/**
* Whether a user message has too little substance to title a conversation
* (e.g. "hey", "hello"). For these, the LLM tends to hallucinate an
* assistant-voice reply as the title better to signal "defer, not enough
* signal yet" so the caller can retry once more context accumulates.
*/
function isTrivialMessage(message: string): boolean {
	// Thresholds mirror TRIVIAL_MESSAGE_MAX_CHARS / TRIVIAL_MESSAGE_MAX_WORDS.
	const maxChars = 15;
	const maxWords = 3;
	const trimmed = message.trim();
	if (trimmed.length <= maxChars) {
		return true;
	}
	const words = trimmed.split(/\s+/).filter(Boolean);
	return words.length <= maxWords;
}
function sanitizeTitle(raw: string): string {
	// Length cap mirrors MAX_TITLE_LENGTH; minimum word-break position avoids stub titles.
	const maxLength = 80;
	const minWordBreak = 20;
	// Remove <think>…</think> reasoning blocks (emitted by e.g. DeepSeek R1).
	const withoutThink = raw.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
	// Drop markdown heading prefixes and all emphasis asterisks.
	const withoutMarkdown = withoutThink
		.replace(/^#{1,6}\s+/, '')
		.replace(/\*+/g, '')
		.trim();
	// Peel off any wrapping quotes.
	let title = withoutMarkdown.replace(/^["']|["']$/g, '').trim();
	if (title.length > maxLength) {
		const head = title.slice(0, maxLength);
		const breakAt = head.lastIndexOf(' ');
		// Prefer truncating on a word boundary unless it would leave very little text.
		title = (breakAt > minWordBreak ? head.slice(0, breakAt) : head) + '\u2026';
	}
	return title;
}
/**
 * Generate a sanitized thread title from a user message using an LLM.
 *
 * Returns `null` on empty input, on empty LLM output (after sanitization),
 * and for trivial messages (e.g. bare greetings) — for those the LLM tends to
 * hallucinate an assistant-voice reply as the title, so we defer titling until
 * more context accumulates instead of calling the model at all.
 *
 * @param model - The language model used to produce the title.
 * @param userMessage - The first user message of the conversation.
 * @param opts.instructions - Optional system-prompt override; defaults to
 *   DEFAULT_TITLE_INSTRUCTIONS.
 */
export async function generateTitleFromMessage(
	model: LanguageModel,
	userMessage: string,
	opts?: { instructions?: string },
): Promise<string | null> {
	const trimmed = userMessage.trim();
	if (!trimmed) return null;
	// Too little signal to title — defer rather than invite a hallucinated reply.
	if (isTrivialMessage(trimmed)) {
		return null;
	}
	const result = await generateText({
		model,
		messages: [
			{ role: 'system', content: opts?.instructions ?? DEFAULT_TITLE_INSTRUCTIONS },
			{ role: 'user', content: trimmed },
		],
	});
	const raw = result.text?.trim();
	if (!raw) return null;
	const title = sanitizeTitle(raw);
	return title || null;
}
/**
* Generate a title for a thread if it doesn't already have one.
*
@ -48,21 +119,9 @@ export async function generateThreadTitle(opts: {
const titleModelId = opts.titleConfig.model ?? opts.agentModel;
const titleModel = createModel(titleModelId);
const instructions = opts.titleConfig.instructions ?? DEFAULT_TITLE_INSTRUCTIONS;
const result = await generateText({
model: titleModel,
messages: [
{ role: 'system', content: instructions },
{ role: 'user', content: userText },
],
const title = await generateTitleFromMessage(titleModel, userText, {
instructions: opts.titleConfig.instructions,
});
let title = result.text?.trim();
if (!title) return;
// Strip <think>...</think> blocks (e.g. from DeepSeek R1)
title = title.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
if (!title) return;
await opts.memory.saveThread({

View file

@ -1,58 +1,48 @@
import type { z } from 'zod';
import { z } from 'zod';
import type { StreamChunk } from '../types';
import { createFilteredLogger } from './logger';
const logger = createFilteredLogger();
import type { BuiltTool } from '../types';
type ZodObjectSchema = z.ZodObject<z.ZodRawShape>;
const OPEN_TAG = '<working_memory>';
const CLOSE_TAG = '</working_memory>';
export const UPDATE_WORKING_MEMORY_TOOL_NAME = 'updateWorkingMemory';
/**
* Extract working memory content from an LLM response.
* Returns the clean text (tags stripped) and the extracted working memory (or null).
* The default instruction block injected into the system prompt when working memory
* is configured. Exported so callers can reference it when building custom instructions.
*/
/**
 * Extract working memory content from an LLM response.
 * Returns the clean text (tags stripped) and the extracted working memory (or null).
 */
export function parseWorkingMemory(text: string): {
	cleanText: string;
	workingMemory: string | null;
} {
	const openTag = '<working_memory>';
	const closeTag = '</working_memory>';

	const start = text.indexOf(openTag);
	const end = start === -1 ? -1 : text.indexOf(closeTag, start);
	if (start === -1 || end === -1) {
		// No complete tag pair — pass the text through untouched.
		return { cleanText: text, workingMemory: null };
	}

	// Content between the tags, minus a single leading/trailing newline.
	const inner = text.slice(start + openTag.length, end);
	const workingMemory = inner.replace(/^\n/, '').replace(/\n$/, '');

	// Stitch the surrounding text back together without the tag block.
	const head = text.slice(0, start).replace(/\n$/, '');
	const tail = text.slice(end + closeTag.length).replace(/^\n/, '');
	const cleanText = (head + (tail ? '\n' + tail : '')).trim();

	return { cleanText, workingMemory };
}
export const WORKING_MEMORY_DEFAULT_INSTRUCTION = [
'You have persistent working memory that survives across conversations.',
'Your current working memory state is shown below.',
`When you learn new information about the user or conversation that should be remembered, call the \`${UPDATE_WORKING_MEMORY_TOOL_NAME}\` tool.`,
'Only call it when something has actually changed — do NOT call it if nothing new was learned.',
].join('\n');
/**
* Generate the system prompt instruction for working memory.
* Tells the LLM to call the updateWorkingMemory tool when it has new information to persist.
*
* @param template - The working memory template or schema.
* @param structured - Whether the working memory is structured (JSON schema).
* @param instruction - Custom instruction text to replace the default. Defaults to
* {@link WORKING_MEMORY_DEFAULT_INSTRUCTION}.
*/
export function buildWorkingMemoryInstruction(template: string, structured: boolean): string {
export function buildWorkingMemoryInstruction(
template: string,
structured: boolean,
instruction?: string,
): string {
const format = structured
? 'Emit the updated state as valid JSON matching the schema'
? 'The memory argument must be valid JSON matching the schema'
: 'Update the template with any new information learned';
const body = instruction ?? WORKING_MEMORY_DEFAULT_INSTRUCTION;
return [
'',
'## Working Memory',
'',
'You have persistent working memory that survives across conversations.',
'The current state will be shown to you in a system message.',
'IMPORTANT: Always respond to the user first with your normal reply.',
`Then, at the very end of your response, emit your updated working memory inside ${OPEN_TAG}...${CLOSE_TAG} tags on a new line.`,
`${format}. If nothing changed, emit the current state unchanged.`,
'The working memory block must be the last thing in your response, after your reply to the user.',
body,
`${format}.`,
'',
'Current template:',
'```',
@ -73,111 +63,51 @@ export function templateFromSchema(schema: ZodObjectSchema): string {
return JSON.stringify(obj, null, 2);
}
type PersistFn = (content: string) => Promise<void>;
export interface WorkingMemoryToolConfig {
/** Whether this is structured (Zod-schema-driven) working memory. */
structured: boolean;
/** Zod schema for structured working memory input validation. */
schema?: ZodObjectSchema;
/** Called with the serialized working memory string to persist it. */
persist: (content: string) => Promise<void>;
}
/**
* Wraps a stream writer to intercept <working_memory> tags from text-delta chunks.
* All non-text-delta chunks pass through unchanged.
* Text inside the tags is buffered and persisted when the closing tag is detected.
* Build the updateWorkingMemory BuiltTool that the agent calls to persist working memory.
*
* For freeform working memory the input schema is `{ memory: string }`.
* For structured working memory the input schema is the configured Zod object schema,
* whose values are serialized to JSON before persisting.
*/
export class WorkingMemoryStreamFilter {
private writer: WritableStreamDefaultWriter<StreamChunk>;
private persist: PersistFn;
private state: 'normal' | 'inside' = 'normal';
private buffer = '';
private pendingText = '';
constructor(writer: WritableStreamDefaultWriter<StreamChunk>, persist: PersistFn) {
this.writer = writer;
this.persist = persist;
export function buildWorkingMemoryTool(config: WorkingMemoryToolConfig): BuiltTool {
if (config.structured && config.schema) {
const schema = config.schema;
return {
name: UPDATE_WORKING_MEMORY_TOOL_NAME,
description:
'Update your persistent working memory with new information about the user or conversation. Only call this when something has actually changed.',
inputSchema: schema,
handler: async (input: unknown) => {
const content = JSON.stringify(input, null, 2);
await config.persist(content);
return { success: true, message: 'Working memory updated.' };
},
};
}
async write(chunk: StreamChunk): Promise<void> {
if (chunk.type !== 'text-delta') {
await this.writer.write(chunk);
return;
}
const freeformSchema = z.object({
memory: z.string().describe('The updated working memory content.'),
});
this.pendingText += chunk.delta;
while (this.pendingText.length > 0) {
if (this.state === 'normal') {
const openIdx = this.pendingText.indexOf(OPEN_TAG);
if (openIdx === -1) {
// No full open tag found. Check if the tail is a valid prefix of OPEN_TAG.
const lastLt = this.pendingText.lastIndexOf('<');
if (
lastLt !== -1 &&
this.pendingText.length - lastLt < OPEN_TAG.length &&
OPEN_TAG.startsWith(this.pendingText.slice(lastLt))
) {
// Potential partial tag at end — forward everything before it, hold the rest
if (lastLt > 0) {
await this.writer.write({
type: 'text-delta',
delta: this.pendingText.slice(0, lastLt),
});
}
this.pendingText = this.pendingText.slice(lastLt);
} else {
// No partial tag concern — forward everything
await this.writer.write({ type: 'text-delta', delta: this.pendingText });
this.pendingText = '';
}
break;
}
// Forward text before the tag
if (openIdx > 0) {
await this.writer.write({
type: 'text-delta',
delta: this.pendingText.slice(0, openIdx),
});
}
this.state = 'inside';
this.pendingText = this.pendingText.slice(openIdx + OPEN_TAG.length);
this.buffer = '';
} else {
// Inside tag — look for closing tag
const closeIdx = this.pendingText.indexOf(CLOSE_TAG);
if (closeIdx === -1) {
// Check if the tail is a valid prefix of CLOSE_TAG — hold it back
const lastLt = this.pendingText.lastIndexOf('<');
if (
lastLt !== -1 &&
this.pendingText.length - lastLt < CLOSE_TAG.length &&
CLOSE_TAG.startsWith(this.pendingText.slice(lastLt))
) {
this.buffer += this.pendingText.slice(0, lastLt);
this.pendingText = this.pendingText.slice(lastLt);
} else {
this.buffer += this.pendingText;
this.pendingText = '';
}
break;
}
this.buffer += this.pendingText.slice(0, closeIdx);
this.pendingText = this.pendingText.slice(closeIdx + CLOSE_TAG.length);
this.state = 'normal';
const content = this.buffer.replace(/^\n/, '').replace(/\n$/, '');
this.persist(content).catch((error: unknown) => {
logger.warn('Failed to persist working memory', { error });
});
this.buffer = '';
}
}
}
async flush(): Promise<void> {
if (this.state === 'normal' && this.pendingText.length > 0) {
await this.writer.write({ type: 'text-delta', delta: this.pendingText });
}
// Reset all state so the filter is clean for reuse after abort/completion.
this.pendingText = '';
this.buffer = '';
this.state = 'normal';
}
return {
name: UPDATE_WORKING_MEMORY_TOOL_NAME,
description:
'Update your persistent working memory with new information about the user or conversation. Only call this when something has actually changed.',
inputSchema: freeformSchema,
handler: async (input: unknown) => {
const { memory } = input as z.infer<typeof freeformSchema>;
await config.persist(memory);
return { success: true, message: 'Working memory updated.' };
},
};
}

View file

@ -2,12 +2,14 @@ import type { ProviderOptions } from '@ai-sdk/provider-utils';
import { z } from 'zod';
import type { Eval } from './eval';
import { fromSchema, type FromSchemaOptions } from './from-schema';
import type { McpClient } from './mcp-client';
import { Memory } from './memory';
import { Telemetry } from './telemetry';
import { Tool, wrapToolForApproval } from './tool';
import { AgentRuntime } from '../runtime/agent-runtime';
import { AgentEventBus } from '../runtime/event-bus';
import { InMemoryMemory } from '../runtime/memory-store';
import { createAgentToolResult } from '../runtime/tool-adapter';
import type {
AgentEvent,
@ -34,7 +36,20 @@ import type {
ThinkingConfigFor,
ResumeOptions,
} from '../types';
import type { AgentBuilder } from '../types/sdk/agent-builder';
import type { CredentialProvider } from '../types/sdk/credential-provider';
import type { AgentMessage } from '../types/sdk/message';
import type {
AgentSchema,
EvalSchema,
GuardrailSchema,
McpServerSchema,
MemorySchema,
ProviderToolSchema,
ThinkingSchema,
ToolSchema,
} from '../types/sdk/schema';
import { zodToJsonSchema } from '../utils/zod';
import type { Workspace } from '../workspace/workspace';
const DEFAULT_LAST_MESSAGES = 10;
@ -56,7 +71,7 @@ type ToolParameter = BuiltTool | { build(): BuiltTool };
* ```
*/
export class Agent implements BuiltAgent {
export class Agent implements BuiltAgent, AgentBuilder {
readonly name: string;
private modelId?: string;
@ -89,6 +104,8 @@ export class Agent implements BuiltAgent {
private credentialName?: string;
private credProvider?: CredentialProvider;
private resolvedKey?: string;
private runtime?: AgentRuntime;
@ -115,6 +132,30 @@ export class Agent implements BuiltAgent {
this.name = name;
}
/**
 * Reconstruct a live Agent from an AgentSchema JSON.
 * Custom tool handlers are proxied through the injected HandlerExecutor.
 *
 * This is the inverse of `Agent.describe()`.
 *
 * @param schema - Serialized agent configuration (as produced by `describe()`).
 * @param name - Name for the reconstructed agent.
 * @param options - Executor (and optional credential provider) used to proxy handlers.
 * @returns The fully configured Agent instance.
 */
static async fromSchema(
	schema: AgentSchema,
	name: string,
	options: FromSchemaOptions,
): Promise<Agent> {
	const agent = new Agent(name);
	await fromSchema(agent, schema, options);
	return agent;
}
/** Whether a checkpoint store has been configured on this agent. */
hasCheckpointStorage(): boolean {
	return this.checkpointStore !== undefined;
}
/** Whether a memory configuration has been set on this agent. */
hasMemory(): boolean {
	return this.memoryConfig !== undefined;
}
/**
* Set the model with provider type information.
*
@ -164,6 +205,11 @@ export class Agent implements BuiltAgent {
return this;
}
/** @internal Read the declared tools (used by the compile step to detect workflow tool markers). */
get declaredTools(): BuiltTool[] {
return this.tools;
}
/** Set the memory configuration. Accepts a MemoryConfig, Memory builder, or bare BuiltMemory. */
memory(m: MemoryConfig | Memory | BuiltMemory): this {
if (m instanceof Memory) {
@ -172,9 +218,20 @@ export class Agent implements BuiltAgent {
} else if ('memory' in m && 'lastMessages' in m) {
// MemoryConfig — use directly
this.memoryConfig = m;
} else {
} else if (
typeof m === 'object' &&
m !== null &&
typeof m.getMessages === 'function' &&
typeof m.saveMessages === 'function'
) {
// Bare BuiltMemory — wrap in minimal config
this.memoryConfig = { memory: m, lastMessages: DEFAULT_LAST_MESSAGES };
} else {
throw new Error(
'Invalid memory configuration. Use: new Memory().lastMessages(N) for in-process memory, ' +
'or new Memory().storage(new SqliteMemory(path)).lastMessages(N) for persistent storage. ' +
'See the Memory class documentation for all options.',
);
}
return this;
}
@ -244,6 +301,26 @@ export class Agent implements BuiltAgent {
return this;
}
/**
 * Attach a credential provider that resolves credential identifiers to
 * decrypted API keys at build time. When both `.credential()` and
 * `.credentialProvider()` are set, the provider resolves the credential
 * before model creation — no subclassing required.
 *
 * @example
 * ```typescript
 * const agent = new Agent('assistant')
 *   .model('anthropic', 'claude-sonnet-4')
 *   .credential('credential-id-123')
 *   .credentialProvider(myProvider)
 *   .instructions('You are helpful.');
 * ```
 */
credentialProvider(provider: CredentialProvider): this {
	this.credProvider = provider;
	return this;
}
/** @internal Read the declared credential name (used by the execution engine). */
protected get declaredCredential(): string | undefined {
return this.credentialName;
@ -286,12 +363,12 @@ export class Agent implements BuiltAgent {
* // Anthropic — budgetTokens
* new Agent('thinker')
* .model('anthropic', 'claude-sonnet-4-5')
* .thinking({ budgetTokens: 10000 })
* .thinking('anthropic', { budgetTokens: 5000 })
*
* // OpenAI — reasoningEffort
* new Agent('thinker')
* .model('openai', 'o3-mini')
* .thinking({ reasoningEffort: 'high' })
* .thinking('openai', { reasoningEffort: 'high' })
* ```
*/
thinking<P extends Provider>(_provider: P, config?: ThinkingConfigFor<P>): this {
@ -447,6 +524,208 @@ export class Agent implements BuiltAgent {
return tool.build();
}
/**
 * Return a schema object describing the agent's declared configuration.
 * This is a synchronous introspection method — it does not build the agent
 * or connect to any external services.
 */
describe(): AgentSchema {
	// --- Model ---
	let model: AgentSchema['model'];
	if (this.modelConfigObj) {
		// Model supplied as a raw config object — provider/name are not recoverable.
		model = { provider: null, name: null, raw: 'object' };
	} else if (this.modelId) {
		// Model ids use the "provider/name" form; a bare id has no provider part.
		const slashIdx = this.modelId.indexOf('/');
		if (slashIdx === -1) {
			model = { provider: null, name: this.modelId };
		} else {
			model = {
				provider: this.modelId.slice(0, slashIdx),
				name: this.modelId.slice(slashIdx + 1),
			};
		}
	} else {
		model = { provider: null, name: null };
	}
	// --- Tools (custom / workflow) ---
	const toolSchemas: ToolSchema[] = this.tools.map((tool) => {
		const isWorkflow = '__workflowTool' in tool && Boolean(tool.__workflowTool);
		return {
			name: tool.name,
			description: tool.description,
			type: isWorkflow ? ('workflow' as const) : ('custom' as const),
			editable: !isWorkflow,
			// Source strings — null, CLI patches with original TypeScript
			inputSchemaSource: null,
			outputSchemaSource: null,
			handlerSource: tool.handler?.toString() ?? null,
			suspendSchemaSource: null,
			resumeSchemaSource: null,
			toMessageSource: null,
			requireApproval: tool.withDefaultApproval ?? false,
			needsApprovalFnSource: null,
			providerOptions: tool.providerOptions ?? null,
			// Display fields — JSON Schema for UI rendering
			inputSchema: zodToJsonSchema(tool.inputSchema),
			outputSchema: zodToJsonSchema(tool.outputSchema),
			// UI badge indicators — for approval-wrapped tools, hasSuspend/hasResume
			// reflect the approval mechanism, not user-declared suspend/resume
			hasSuspend: Boolean(tool.suspendSchema),
			hasResume: Boolean(tool.resumeSchema),
			hasToMessage: Boolean(tool.toMessage),
		};
	});
	// --- Provider tools ---
	const providerToolSchemas: ProviderToolSchema[] = this.providerTools.map((pt) => ({
		name: pt.name,
		source: '',
	}));
	// --- Guardrails --- input and output guardrails flattened into one list,
	// distinguished by the `position` discriminator.
	const guardrails: GuardrailSchema[] = [
		...this.inputGuardrails.map((g) => ({
			name: g.name,
			guardType: g.guardType,
			strategy: g.strategy,
			position: 'input' as const,
			config: g._config,
			source: '',
		})),
		...this.outputGuardrails.map((g) => ({
			name: g.name,
			guardType: g.guardType,
			strategy: g.strategy,
			position: 'output' as const,
			config: g._config,
			source: '',
		})),
	];
	// --- MCP servers --- one entry per server name across all clients.
	let mcp: McpServerSchema[] | null = null;
	if (this.mcpClients.length > 0) {
		mcp = [];
		for (const client of this.mcpClients) {
			for (const serverName of client.serverNames) {
				mcp.push({
					name: serverName,
					configSource: '',
				});
			}
		}
	}
	// --- Telemetry --- presence flag only; the source is patched in externally.
	const telemetry = this.telemetryBuilder || this.telemetryConfig ? { source: '' } : null;
	// --- Checkpoint ---
	const checkpoint = this.checkpointStore === 'memory' ? 'memory' : null;
	// --- Memory ---
	let memory: MemorySchema | null = null;
	if (this.memoryConfig) {
		const mc = this.memoryConfig;
		let semanticRecall: MemorySchema['semanticRecall'] = null;
		if (mc.semanticRecall) {
			semanticRecall = {
				topK: mc.semanticRecall.topK,
				messageRange: mc.semanticRecall.messageRange
					? {
							before: mc.semanticRecall.messageRange.before,
							after: mc.semanticRecall.messageRange.after,
						}
					: null,
				embedder: mc.semanticRecall.embedder ?? null,
			};
		}
		let workingMemory: MemorySchema['workingMemory'] = null;
		if (mc.workingMemory) {
			workingMemory = {
				type: mc.workingMemory.structured ? 'structured' : 'freeform',
				...(mc.workingMemory.schema
					? { schema: zodToJsonSchema(mc.workingMemory.schema) ?? undefined }
					: {}),
				...(mc.workingMemory.template ? { template: mc.workingMemory.template } : {}),
			};
		}
		memory = {
			// TODO: each BuiltMemory should have describe() method to return a config showing connection params and other metadata
			// this config must have enough information to rebuild the memory instance
			source: null,
			storage: mc.memory instanceof InMemoryMemory ? 'memory' : 'custom',
			lastMessages: mc.lastMessages ?? null,
			semanticRecall,
			workingMemory,
		};
	}
	// --- Evaluations ---
	const evaluations: EvalSchema[] = this.agentEvals.map((e) => ({
		name: e.name,
		description: e.description ?? null,
		type: e.evalType,
		modelId: e.modelId ?? null,
		hasCredential: e.credentialName !== null,
		credentialName: e.credentialName,
		handlerSource: null,
	}));
	// --- Structured output ---
	// TODO: define structured output schema handling better
	const structuredOutput = {
		enabled: Boolean(this.outputSchema),
		schemaSource: null as string | null,
	};
	// --- Thinking ---
	let thinking: ThinkingSchema | null = null;
	if (this.thinkingConfig) {
		// NOTE(review): provider is derived from the model id, so a configured
		// thinkingConfig is silently dropped for providers other than
		// anthropic/openai (or when modelId has no provider prefix) — confirm intended.
		const provider = this.modelId?.split('/')[0];
		if (provider === 'anthropic') {
			thinking = {
				provider: 'anthropic',
				budgetTokens:
					'budgetTokens' in this.thinkingConfig
						? (this.thinkingConfig as { budgetTokens?: number }).budgetTokens
						: undefined,
			};
		} else if (provider === 'openai') {
			thinking = {
				provider: 'openai',
				reasoningEffort:
					'reasoningEffort' in this.thinkingConfig
						? String((this.thinkingConfig as { reasoningEffort?: string }).reasoningEffort)
						: undefined,
			};
		}
	}
	return {
		model,
		credential: this.credentialName ?? null,
		instructions: this.instructionsText ?? null,
		description: null,
		tools: toolSchemas,
		providerTools: providerToolSchemas,
		memory,
		evaluations,
		guardrails,
		mcp,
		telemetry,
		checkpoint,
		config: {
			structuredOutput,
			thinking,
			toolCallConcurrency: this.concurrencyValue ?? null,
			requireToolApproval: this.requireToolApprovalValue,
		},
	};
}
/** Return the latest state snapshot of the agent. Returns `{ status: 'idle' }` before first run. */
getState(): SerializableAgentState {
if (!this.runtime) {
@ -626,6 +905,12 @@ export class Agent implements BuiltAgent {
);
}
// Resolve credential via provider before building the model config.
if (this.credProvider && this.credentialName) {
const resolved = await this.credProvider.resolve(this.credentialName);
this.resolvedKey = resolved.apiKey;
}
let modelConfig: ModelConfig;
if (this.modelConfigObj) {
if (

View file

@ -137,6 +137,9 @@ export class Eval {
return {
name,
description: desc,
evalType: 'check' as const,
modelId: this.modelId ?? null,
credentialName: this.credentialName ?? null,
_run: async (input: EvalInput) => await checkFn(input),
};
}
@ -163,6 +166,9 @@ export class Eval {
return {
name,
description: desc,
evalType: 'judge' as const,
modelId: this.modelId ?? null,
credentialName: this.credentialName ?? null,
_run: async (input: EvalInput) => await judgeFn({ ...input, llm }),
};
}

View file

@ -0,0 +1,364 @@
import type { JSONSchema7 } from 'json-schema';
import type { ZodType } from 'zod';
import type { BuiltEval, BuiltGuardrail, BuiltTelemetry, BuiltTool } from '../types';
import { McpClient } from './mcp-client';
import { Memory } from './memory';
import { wrapToolForApproval } from './tool';
import type { AgentBuilder } from '../types/sdk/agent-builder';
import type { CredentialProvider } from '../types/sdk/credential-provider';
import type { EvalInput, EvalScore, JudgeInput } from '../types/sdk/eval';
import type { HandlerExecutor } from '../types/sdk/handler-executor';
import type { McpServerConfig } from '../types/sdk/mcp';
import type { AgentMessage } from '../types/sdk/message';
import type {
AgentSchema,
EvalSchema,
GuardrailSchema,
McpServerSchema,
ProviderToolSchema,
TelemetrySchema,
ToolSchema,
} from '../types/sdk/schema';
import type { InterruptibleToolContext, ToolContext } from '../types/sdk/tool';
import type { JSONObject } from '../types/utils/json';
/** Dependencies required to rebuild a live Agent from a serialized AgentSchema. */
export interface FromSchemaOptions {
	/** Executes proxied tool/eval handlers and evaluates schema/expression sources. */
	handlerExecutor: HandlerExecutor;
	/** Optional provider that resolves credential identifiers at build time. */
	credentialProvider?: CredentialProvider;
}
/** Sentinel used to signal that a sandboxed handler called ctx.suspend(). */
const SUSPEND_MARKER = Symbol.for('n8n.agent.suspend');

interface SuspendResult {
	[key: symbol]: true;
	payload: unknown;
}

/** True when `value` is an object carrying the suspend sentinel set to `true`. */
export function isSuspendResult(value: unknown): value is SuspendResult {
	if (typeof value !== 'object' || value === null) return false;
	return (value as Record<symbol, unknown>)[SUSPEND_MARKER] === true;
}
/**
* Reconstruct a live Agent from an AgentSchema JSON.
*
* This is the inverse of `Agent.describe()` it takes a serialised schema
* (produced by `describe()` or stored in the database) and rebuilds a
* fully-configured Agent instance with proxy handlers that delegate tool
* execution to the provided `HandlerExecutor`.
*
* All source expressions in the schema (provider tools, MCP configs,
* telemetry, structured output, suspend/resume schemas) are evaluated
* via `HandlerExecutor.evaluateExpression()` / `evaluateSchema()`.
*
* The `agent` parameter is the Agent instance to configure (avoids circular import).
*/
export async function fromSchema(
	agent: AgentBuilder,
	schema: AgentSchema,
	options: FromSchemaOptions,
): Promise<void> {
	const { handlerExecutor } = options;
	// Core scalar fields: model, credential id, instructions.
	applyModel(agent, schema.model);
	if (schema.credential !== null) {
		agent.credential(schema.credential);
	}
	if (schema.instructions !== null) {
		agent.instructions(schema.instructions);
	}
	// Tools and provider tools may evaluate recorded sources via the executor.
	await applyTools(agent, schema.tools, handlerExecutor);
	await applyProviderTools(agent, schema.providerTools, handlerExecutor);
	applyConfig(agent, schema.config);
	applyMemory(agent, schema);
	applyGuardrails(agent, schema.guardrails);
	applyEvals(agent, schema.evaluations, handlerExecutor);
	await applyStructuredOutput(agent, schema.config.structuredOutput, handlerExecutor);
	// Attach the credential provider so build-time credential resolution works.
	if (options.credentialProvider) {
		agent.credentialProvider(options.credentialProvider);
	}
	await applyMcpServers(agent, schema.mcp, handlerExecutor);
	await applyTelemetry(agent, schema.telemetry, handlerExecutor);
}
// ---------------------------------------------------------------------------
// Helpers each handles one section of the AgentSchema
// ---------------------------------------------------------------------------
/** Apply the serialized model selection to the agent, if any was recorded. */
function applyModel(agent: AgentBuilder, model: AgentSchema['model']): void {
	if (!model.name) return;
	if (model.provider) {
		agent.model(model.provider, model.name);
	} else {
		agent.model(model.name);
	}
}
/** Recreate each declared tool on the agent, proxying editable handlers through the executor. */
async function applyTools(
	agent: AgentBuilder,
	tools: ToolSchema[],
	executor: HandlerExecutor,
): Promise<void> {
	const seen = new Set<string>();
	for (const ts of tools) {
		// Reject duplicate tool names up front — the schema must be unambiguous.
		if (seen.has(ts.name)) {
			throw new Error(`Schema has multiple definitions of tool ${ts.name}`);
		}
		seen.add(ts.name);

		// Non-editable tools are workflow markers — re-attach the marker verbatim.
		if (!ts.editable) {
			agent.tool({
				name: ts.name,
				description: ts.description,
				__workflowTool: true,
				workflowName: ts.name,
			} as unknown as BuiltTool);
			continue;
		}

		// Editable tools may carry suspend/resume schema sources to evaluate.
		const schemas: { suspend?: ZodType; resume?: ZodType } = {};
		if (ts.suspendSchemaSource) {
			schemas.suspend = await executor.evaluateSchema(ts.suspendSchemaSource);
		}
		if (ts.resumeSchemaSource) {
			schemas.resume = await executor.evaluateSchema(ts.resumeSchemaSource);
		}
		agent.tool(buildToolFromSchema(ts, executor, schemas));
	}
}
/** Recreate provider tools; entries with a recorded source are evaluated via the executor. */
async function applyProviderTools(
	agent: AgentBuilder,
	providerTools: ProviderToolSchema[],
	executor: HandlerExecutor,
): Promise<void> {
	for (const pt of providerTools) {
		if (!pt.source) {
			// No source recorded — reattach by name with empty args.
			agent.providerTool({ name: pt.name as `${string}.${string}`, args: {} });
			continue;
		}
		const evaluated = (await executor.evaluateExpression(pt.source)) as {
			name: `${string}.${string}`;
			args?: Record<string, unknown>;
		};
		agent.providerTool({ name: evaluated.name, args: evaluated.args ?? {} });
	}
}
/** Apply runtime config: thinking, tool-call concurrency, and the global approval flag. */
function applyConfig(agent: AgentBuilder, config: AgentSchema['config']): void {
	const { thinking, toolCallConcurrency, requireToolApproval } = config;
	if (thinking !== null) {
		// Split the provider discriminator off the provider-specific options.
		const { provider, ...rest } = thinking;
		agent.thinking(provider, rest);
	}
	if (toolCallConcurrency !== null) {
		agent.toolCallConcurrency(toolCallConcurrency);
	}
	if (requireToolApproval) {
		agent.requireToolApproval();
	}
}
/** Rebuild memory and checkpoint settings from the schema. */
function applyMemory(agent: AgentBuilder, schema: AgentSchema): void {
	const memorySchema = schema.memory;
	if (memorySchema !== null) {
		const memory = new Memory();
		if (memorySchema.lastMessages !== null) {
			memory.lastMessages(memorySchema.lastMessages);
		}
		agent.memory(memory);
	}
	if (schema.checkpoint !== null) {
		agent.checkpoint(schema.checkpoint);
	}
}
/** Reattach guardrails, routing each to the input or output side per its recorded position. */
function applyGuardrails(agent: AgentBuilder, guardrails: GuardrailSchema[]): void {
	for (const g of guardrails) {
		const built: BuiltGuardrail = {
			name: g.name,
			guardType: g.guardType,
			strategy: g.strategy,
			_config: g.config,
		};
		const attach = g.position === 'input' ? agent.inputGuardrail : agent.outputGuardrail;
		attach.call(agent, built);
	}
}
/** Reattach evaluations, each with a proxy runner that delegates to the executor. */
function applyEvals(
	agent: AgentBuilder,
	evaluations: EvalSchema[],
	executor: HandlerExecutor,
): void {
	for (const evalSchema of evaluations) {
		agent.eval(buildEvalFromSchema(evalSchema, executor));
	}
}
/** Rebuild structured output by evaluating the recorded schema source, when enabled. */
async function applyStructuredOutput(
	agent: AgentBuilder,
	structuredOutput: AgentSchema['config']['structuredOutput'],
	executor: HandlerExecutor,
): Promise<void> {
	if (!structuredOutput.enabled || !structuredOutput.schemaSource) return;
	agent.structuredOutput(await executor.evaluateSchema(structuredOutput.schemaSource));
}
/** Reattach MCP servers whose config sources were recorded, via a single McpClient. */
async function applyMcpServers(
	agent: AgentBuilder,
	mcp: McpServerSchema[] | null,
	executor: HandlerExecutor,
): Promise<void> {
	if (!mcp?.length) return;
	const configs: McpServerConfig[] = [];
	// Evaluate sequentially to preserve the recorded server order.
	for (const entry of mcp) {
		if (!entry.configSource) continue;
		configs.push((await executor.evaluateExpression(entry.configSource)) as McpServerConfig);
	}
	if (configs.length > 0) {
		agent.mcp(new McpClient(configs));
	}
}
/** Rebuild telemetry by evaluating its recorded source expression, if present. */
async function applyTelemetry(
	agent: AgentBuilder,
	telemetry: TelemetrySchema | null,
	executor: HandlerExecutor,
): Promise<void> {
	const source = telemetry?.source;
	if (!source) return;
	agent.telemetry((await executor.evaluateExpression(source)) as BuiltTelemetry);
}
// ---------------------------------------------------------------------------
// Tool & Eval builders
// ---------------------------------------------------------------------------
/**
* Build a `BuiltTool` from a `ToolSchema` with a proxy handler that
* delegates execution to the `HandlerExecutor`.
*
* For interruptible tools (hasSuspend), the proxy handles ctx.suspend at
* the host level: the sandbox receives a stub suspend that records the
* payload, and the proxy calls the real ctx.suspend on the host.
*/
function buildToolFromSchema(
	toolSchema: ToolSchema,
	executor: HandlerExecutor,
	preEvaluated?: { suspend?: ZodType; resume?: ZodType },
): BuiltTool {
	// Proxy handler: every invocation is forwarded to the executor by tool name.
	const handler = async (
		input: unknown,
		ctx: ToolContext | InterruptibleToolContext,
	): Promise<unknown> => {
		if (toolSchema.hasSuspend && 'suspend' in ctx) {
			// Interruptible tool: the real ctx.suspend is a host-side function.
			// We pass serialisable ctx data into the sandbox, and the sandbox
			// returns a marker if suspend was called. Then we call the real
			// ctx.suspend on the host.
			const interruptCtx = ctx;
			const result = await executor.executeTool(toolSchema.name, input, {
				resumeData: interruptCtx.resumeData,
				parentTelemetry: ctx.parentTelemetry,
			});
			if (isSuspendResult(result)) {
				return await interruptCtx.suspend(result.payload);
			}
			return result;
		}
		// Non-interruptible tool: pass ctx through directly (only serialisable
		// fields like parentTelemetry).
		return await executor.executeTool(toolSchema.name, input, {
			parentTelemetry: ctx.parentTelemetry,
		});
	};
	// toMessage: the runtime calls toMessage synchronously (agent-runtime.ts),
	// so a sync executor variant (executeToMessageSync) is required. There is
	// no async fallback — executors without the sync variant cannot support
	// toMessage, and we fail fast at build time rather than mid-run.
	let toMessage: ((output: unknown) => AgentMessage | undefined) | undefined;
	if (toolSchema.hasToMessage) {
		if (executor.executeToMessageSync) {
			const syncExecutor = executor.executeToMessageSync.bind(executor);
			toMessage = (output: unknown): AgentMessage | undefined => {
				return syncExecutor(toolSchema.name, output);
			};
		} else {
			throw new Error('Executor does not support executeToMessageSync');
		}
	}
	const built: BuiltTool = {
		name: toolSchema.name,
		description: toolSchema.description,
		inputSchema: (toolSchema.inputSchema as JSONSchema7) ?? undefined,
		handler,
		toMessage,
		suspendSchema: preEvaluated?.suspend,
		resumeSchema: preEvaluated?.resume,
		providerOptions: toolSchema.providerOptions
			? (toolSchema.providerOptions as Record<string, JSONObject>)
			: undefined,
	};
	// If the tool requires approval, wrap it with the approval gate.
	// This re-applies the same wrapping that Tool.build() does at define time.
	if (toolSchema.requireApproval) {
		return wrapToolForApproval(built, { requireApproval: true });
	}
	return built;
}
/**
 * Build a `BuiltEval` from an `EvalSchema` whose `_run` delegates
 * execution to the `HandlerExecutor` instead of running in-process.
 */
function buildEvalFromSchema(evalSchema: EvalSchema, executor: HandlerExecutor): BuiltEval {
	const run = async (evalInput: EvalInput): Promise<EvalScore> => {
		// For judge evals, the llm function is bound inside the module
		// when the full module runs in the sandbox. The executor passes
		// the input to _run() which already has llm in its closure.
		return await executor.executeEval(evalSchema.name, evalInput as EvalInput | JudgeInput);
	};
	return {
		name: evalSchema.name,
		description: evalSchema.description ?? undefined,
		evalType: evalSchema.type,
		modelId: evalSchema.modelId ?? null,
		credentialName: evalSchema.credentialName ?? null,
		_run: run,
	};
}

View file

@ -66,6 +66,14 @@ export class McpClient {
this.connections = configs.map((cfg) => new McpConnection(cfg, requireToolApproval));
}
/**
 * Returns the names of all configured MCP servers. Does NOT require a
 * network connection — safe to call before `listTools()` or `connect()`.
 */
get serverNames(): string[] {
	return this.configs.map((cfg) => cfg.name);
}
/**
* Explicitly connect to all servers without listing tools.
* Optional `listTools()` connects implicitly.

View file

@ -37,6 +37,8 @@ export class Memory {
private workingMemoryScope: 'resource' | 'thread' = 'resource';
private workingMemoryInstruction?: string;
private memoryBackend?: BuiltMemory;
private titleGenerationConfig?: TitleGenerationConfig;
@ -102,6 +104,26 @@ export class Memory {
return this;
}
/**
* Override the default instruction text injected into the system prompt for working memory.
*
* The instruction tells the model when and how to call the `updateWorkingMemory` tool.
* When omitted, `WORKING_MEMORY_DEFAULT_INSTRUCTION` is used.
*
* Example:
* ```typescript
* import { WORKING_MEMORY_DEFAULT_INSTRUCTION } from '@n8n/agents';
*
* memory.instruction(
* WORKING_MEMORY_DEFAULT_INSTRUCTION + '\nAlways update after every user message.',
* );
* ```
*/
instruction(text: string): this {
	// Stored verbatim; forwarded as `instruction` in the working-memory config when built.
	this.workingMemoryInstruction = text;
	return this;
}
/**
* Enable automatic title generation for new threads.
*
@ -167,12 +189,18 @@ export class Memory {
structured: true,
schema: this.workingMemorySchema,
scope: this.workingMemoryScope,
...(this.workingMemoryInstruction !== undefined && {
instruction: this.workingMemoryInstruction,
}),
};
} else if (this.workingMemoryTemplate !== undefined) {
workingMemory = {
template: this.workingMemoryTemplate,
structured: false,
scope: this.workingMemoryScope,
...(this.workingMemoryInstruction !== undefined && {
instruction: this.workingMemoryInstruction,
}),
};
}

View file

@ -1,14 +1,19 @@
import type { AgentDbMessage, AgentMessage, Message } from '../types/sdk/message';
import type { AgentMessage, Message } from '../types/sdk/message';
/**
* Wrap an AgentMessage with a stable id. If the message already carries an id
* (i.e. it is already an AgentDbMessage), it is returned unchanged.
*/
export function toDbMessage(message: AgentMessage): AgentDbMessage {
if ('id' in message && typeof message.id === 'string') {
return message as AgentDbMessage;
/**
 * Best-effort extraction of a message's creation time.
 *
 * Accepts a `Date` as-is and parses string/number timestamps; returns `null`
 * when the field is absent, of an unsupported type, or unparseable.
 *
 * Fix: removed a stray leftover statement from the deleted `toDbMessage`
 * (`return { ...message, id: crypto.randomUUID() }`) that returned a
 * non-Date object for unsupported `createdAt` types, violating the
 * declared `Date | null` return type.
 */
export function getCreatedAt(message: AgentMessage): Date | null {
	if ('createdAt' in message) {
		if (message.createdAt instanceof Date) {
			return message.createdAt;
		}
		if (typeof message.createdAt === 'string' || typeof message.createdAt === 'number') {
			const date = new Date(message.createdAt);
			// Reject unparseable inputs (e.g. malformed date strings).
			if (isNaN(date.getTime())) {
				return null;
			}
			return date;
		}
	}
	return null;
}
export function isLlmMessage(message: AgentMessage): message is Message {

Some files were not shown because too many files have changed in this diff Show more