fix(core): Better titles on instance AI, use common title logic on n8n agents sdk (no-changelog) (#28686)

This commit is contained in:
Jaakko Husso 2026-04-20 15:27:33 +02:00 committed by GitHub
parent 9f71e12e5f
commit 73d93d4edf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
22 changed files with 254 additions and 75 deletions

View file

@ -2,7 +2,12 @@ import type { LanguageModel } from 'ai';
import { createModel } from '../runtime/model-factory';
type ProviderOpts = { apiKey?: string; baseURL?: string; fetch?: typeof globalThis.fetch };
type ProviderOpts = {
apiKey?: string;
baseURL?: string;
fetch?: typeof globalThis.fetch;
headers?: Record<string, string>;
};
jest.mock('@ai-sdk/anthropic', () => ({
createAnthropic: (opts?: ProviderOpts) => (model: string) => ({
@ -11,6 +16,7 @@ jest.mock('@ai-sdk/anthropic', () => ({
apiKey: opts?.apiKey,
baseURL: opts?.baseURL,
fetch: opts?.fetch,
headers: opts?.headers,
specificationVersion: 'v3',
}),
}));
@ -22,6 +28,7 @@ jest.mock('@ai-sdk/openai', () => ({
apiKey: opts?.apiKey,
baseURL: opts?.baseURL,
fetch: opts?.fetch,
headers: opts?.headers,
specificationVersion: 'v3',
}),
}));
@ -105,6 +112,18 @@ describe('createModel', () => {
expect(mockProxyAgent).toHaveBeenCalledWith('http://proxy:9090');
});
it('should forward custom headers to the provider factory', () => {
const model = createModel({
id: 'anthropic/claude-sonnet-4-5',
apiKey: 'sk-test',
headers: { 'x-proxy-auth': 'Bearer abc', 'anthropic-beta': 'tools-2024' },
}) as unknown as Record<string, unknown>;
expect(model.headers).toEqual({
'x-proxy-auth': 'Bearer abc',
'anthropic-beta': 'tools-2024',
});
});
it('should prefer HTTPS_PROXY over HTTP_PROXY', () => {
process.env.HTTPS_PROXY = 'http://https-proxy:8080';
process.env.HTTP_PROXY = 'http://http-proxy:9090';

View file

@ -0,0 +1,123 @@
import type * as AiImport from 'ai';
import type { LanguageModel } from 'ai';
import { generateTitleFromMessage } from '../runtime/title-generation';

// Shape of the single argument object we capture from intercepted
// `generateText` calls so tests can inspect the messages sent to the LLM.
type GenerateTextCall = {
  messages: Array<{ role: string; content: string }>;
};

// The `mock` prefix is required so jest's hoisted `jest.mock` factory below
// is allowed to reference this variable.
const mockGenerateText = jest.fn<Promise<{ text: string }>, [GenerateTextCall]>();

// Replace only `generateText` from the 'ai' package with our spy, keeping
// every other real export of the module intact.
jest.mock('ai', () => {
  const actual = jest.requireActual<typeof AiImport>('ai');
  return {
    ...actual,
    generateText: async (call: GenerateTextCall): Promise<{ text: string }> =>
      await mockGenerateText(call),
  };
});

// The model is never invoked directly by these tests (generateText is
// mocked), so an empty stub cast to LanguageModel suffices.
const fakeModel = {} as LanguageModel;

describe('generateTitleFromMessage', () => {
  beforeEach(() => {
    mockGenerateText.mockReset();
  });

  it('returns null for empty input without calling the LLM', async () => {
    const result = await generateTitleFromMessage(fakeModel, ' ');
    expect(result).toBeNull();
    expect(mockGenerateText).not.toHaveBeenCalled();
  });

  it('returns the message itself for trivial greetings without calling the LLM', async () => {
    const result = await generateTitleFromMessage(fakeModel, 'hey');
    expect(result).toBe('hey');
    expect(mockGenerateText).not.toHaveBeenCalled();
  });

  it('skips the LLM for short multi-word messages', async () => {
    const result = await generateTitleFromMessage(fakeModel, 'hi there');
    expect(result).toBe('hi there');
    expect(mockGenerateText).not.toHaveBeenCalled();
  });

  it('strips markdown heading prefixes from the LLM response', async () => {
    mockGenerateText.mockResolvedValue({ text: '# Daily Berlin rain alert' });
    const result = await generateTitleFromMessage(
      fakeModel,
      'Build a daily Berlin rain alert workflow',
    );
    expect(result).toBe('Daily Berlin rain alert');
  });

  it('strips inline emphasis markers from the LLM response', async () => {
    mockGenerateText.mockResolvedValue({ text: 'Your **Berlin** rain alert' });
    const result = await generateTitleFromMessage(
      fakeModel,
      'Build a daily Berlin rain alert workflow',
    );
    expect(result).toBe('Your Berlin rain alert');
  });

  it('strips <think> reasoning blocks from the LLM response', async () => {
    mockGenerateText.mockResolvedValue({
      text: '<think>Let me think about this</think>Deploy release pipeline',
    });
    const result = await generateTitleFromMessage(
      fakeModel,
      'Help me set up an automated deploy pipeline',
    );
    expect(result).toBe('Deploy release pipeline');
  });

  it('strips surrounding quotes from the LLM response', async () => {
    mockGenerateText.mockResolvedValue({ text: '"Build Gmail to Slack workflow"' });
    const result = await generateTitleFromMessage(
      fakeModel,
      'Build a workflow that forwards Gmail to Slack',
    );
    expect(result).toBe('Build Gmail to Slack workflow');
  });

  it('truncates titles longer than 80 characters at a word boundary', async () => {
    mockGenerateText.mockResolvedValue({
      text: 'Create a data table for users, then build a workflow that syncs them to our CRM every hour',
    });
    const result = await generateTitleFromMessage(
      fakeModel,
      'Create a data table for users and sync them to our CRM every hour with error alerting',
    );
    expect(result).not.toBeNull();
    // 81 = the 80-char cap plus the appended ellipsis character.
    expect(result!.length).toBeLessThanOrEqual(81);
    expect(result!.endsWith('\u2026')).toBe(true);
  });

  it('returns null when the LLM returns empty text', async () => {
    mockGenerateText.mockResolvedValue({ text: ' ' });
    const result = await generateTitleFromMessage(
      fakeModel,
      'Build a daily Berlin rain alert workflow',
    );
    expect(result).toBeNull();
  });

  it('passes the default instructions to the LLM', async () => {
    mockGenerateText.mockResolvedValue({ text: 'Berlin rain alert' });
    await generateTitleFromMessage(fakeModel, 'Build a daily Berlin rain alert workflow');
    const call = mockGenerateText.mock.calls[0][0];
    expect(call.messages[0].role).toBe('system');
    expect(call.messages[0].content).toContain('markdown');
    expect(call.messages[0].content).toContain('sentence case');
  });

  it('accepts custom instructions', async () => {
    mockGenerateText.mockResolvedValue({ text: 'Custom title' });
    await generateTitleFromMessage(fakeModel, 'Build a daily Berlin rain alert workflow', {
      instructions: 'Custom system prompt',
    });
    const call = mockGenerateText.mock.calls[0][0];
    expect(call.messages[0].content).toBe('Custom system prompt');
  });
});

View file

@ -114,6 +114,9 @@ export type { SqliteMemoryConfig } from './storage/sqlite-memory';
export { PostgresMemory } from './storage/postgres-memory';
export type { PostgresMemoryConfig } from './storage/postgres-memory';
export { createModel } from './runtime/model-factory';
export { generateTitleFromMessage } from './runtime/title-generation';
export { Workspace } from './workspace';
export { BaseFilesystem } from './workspace';
export { BaseSandbox } from './workspace';

View file

@ -9,6 +9,7 @@ type CreateProviderFn = (opts?: {
apiKey?: string;
baseURL?: string;
fetch?: FetchFn;
headers?: Record<string, string>;
}) => (model: string) => LanguageModel;
type CreateEmbeddingProviderFn = (opts?: { apiKey?: string }) => {
embeddingModel(model: string): EmbeddingModel;
@ -56,6 +57,7 @@ export function createModel(config: ModelConfig): LanguageModel {
const modelId = stripEmpty(typeof config === 'string' ? config : config.id);
const apiKey = stripEmpty(typeof config === 'string' ? undefined : config.apiKey);
const baseURL = stripEmpty(typeof config === 'string' ? undefined : config.url);
const headers = typeof config === 'string' ? undefined : config.headers;
if (!modelId) {
throw new Error('Model ID is required');
@ -70,25 +72,25 @@ export function createModel(config: ModelConfig): LanguageModel {
const { createAnthropic } = require('@ai-sdk/anthropic') as {
createAnthropic: CreateProviderFn;
};
return createAnthropic({ apiKey, baseURL, fetch })(modelName);
return createAnthropic({ apiKey, baseURL, fetch, headers })(modelName);
}
case 'openai': {
const { createOpenAI } = require('@ai-sdk/openai') as {
createOpenAI: CreateProviderFn;
};
return createOpenAI({ apiKey, baseURL, fetch })(modelName);
return createOpenAI({ apiKey, baseURL, fetch, headers })(modelName);
}
case 'google': {
const { createGoogleGenerativeAI } = require('@ai-sdk/google') as {
createGoogleGenerativeAI: CreateProviderFn;
};
return createGoogleGenerativeAI({ apiKey, baseURL, fetch })(modelName);
return createGoogleGenerativeAI({ apiKey, baseURL, fetch, headers })(modelName);
}
case 'xai': {
const { createXai } = require('@ai-sdk/xai') as {
createXai: CreateProviderFn;
};
return createXai({ apiKey, baseURL, fetch })(modelName);
return createXai({ apiKey, baseURL, fetch, headers })(modelName);
}
default:
throw new Error(

View file

@ -1,4 +1,4 @@
import { generateText } from 'ai';
import { generateText, type LanguageModel } from 'ai';
import type { BuiltMemory, TitleGenerationConfig } from '../types';
import { createFilteredLogger } from './logger';
@ -10,13 +10,83 @@ const logger = createFilteredLogger();
// Default system prompt for title generation. The line content is part of the
// runtime contract (tests and HTTP mocks match on it) and must not change.
const DEFAULT_TITLE_INSTRUCTIONS = [
  '- you will generate a short title based on the first message a user begins a conversation with',
  "- the title should be a summary of the user's message",
  '- the title should describe what the user asked for, not what an assistant might reply',
  '- 1 to 5 words, no more than 80 characters',
  '- use sentence case (e.g. "Conversation title" instead of "Conversation Title")',
  '- do not use quotes, colons, or markdown formatting',
  '- the entire text you return will be used directly as the title, so respond with the title only',
].join('\n');

const TRIVIAL_MESSAGE_MAX_CHARS = 15;
const TRIVIAL_MESSAGE_MAX_WORDS = 3;
const MAX_TITLE_LENGTH = 80;

/**
 * Decide whether a user message is too short to justify an LLM call for
 * title generation (e.g. "hey", "hello"). For such prompts the model tends
 * to hallucinate an assistant-voice reply instead of summarizing, so the
 * raw message itself makes a better title.
 */
function isTrivialMessage(message: string): boolean {
  const trimmed = message.trim();
  const isShort = trimmed.length <= TRIVIAL_MESSAGE_MAX_CHARS;
  const words = trimmed.split(/\s+/).filter(Boolean);
  return isShort || words.length <= TRIVIAL_MESSAGE_MAX_WORDS;
}

/**
 * Normalize raw LLM output into a clean title: drop <think> blocks,
 * markdown markers, and wrapping quotes, then cap the length at a word
 * boundary with an ellipsis.
 */
function sanitizeTitle(raw: string): string {
  // Remove <think>...</think> reasoning blocks (emitted by e.g. DeepSeek R1).
  const withoutReasoning = raw.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
  // Remove a markdown heading prefix and any emphasis asterisks.
  const withoutMarkdown = withoutReasoning
    .replace(/^#{1,6}\s+/, '')
    .replace(/\*+/g, '')
    .trim();
  // Remove a single wrapping quote character on either side.
  const title = withoutMarkdown.replace(/^["']|["']$/g, '').trim();

  if (title.length <= MAX_TITLE_LENGTH) return title;

  const head = title.slice(0, MAX_TITLE_LENGTH);
  const cut = head.lastIndexOf(' ');
  // Break at the last space only when it leaves a meaningful prefix.
  return (cut > 20 ? head.slice(0, cut) : head) + '\u2026';
}
/**
 * Produce a sanitized thread title for a user's first message via an LLM.
 *
 * Returns `null` when the input (or the LLM output) is empty after
 * trimming. Trivial messages such as greetings are returned as-is
 * (sanitized) without an LLM round-trip, because asking the model about
 * them tends to yield an assistant-voice reply rather than a summary.
 *
 * @param model - Language model used for the title request.
 * @param userMessage - The user's first message in the thread.
 * @param opts - Optional `instructions` string overriding the default system prompt.
 */
export async function generateTitleFromMessage(
  model: LanguageModel,
  userMessage: string,
  opts?: { instructions?: string },
): Promise<string | null> {
  const trimmed = userMessage.trim();
  if (!trimmed) return null;

  if (isTrivialMessage(trimmed)) {
    return sanitizeTitle(trimmed) || null;
  }

  const systemPrompt = opts?.instructions ?? DEFAULT_TITLE_INSTRUCTIONS;
  const response = await generateText({
    model,
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: trimmed },
    ],
  });

  const text = response.text?.trim();
  if (!text) return null;
  return sanitizeTitle(text) || null;
}
/**
* Generate a title for a thread if it doesn't already have one.
*
@ -49,28 +119,9 @@ export async function generateThreadTitle(opts: {
const titleModelId = opts.titleConfig.model ?? opts.agentModel;
const titleModel = createModel(titleModelId);
const instructions = opts.titleConfig.instructions ?? DEFAULT_TITLE_INSTRUCTIONS;
const result = await generateText({
model: titleModel,
messages: [
{ role: 'system', content: instructions },
{ role: 'user', content: userText },
],
const title = await generateTitleFromMessage(titleModel, userText, {
instructions: opts.titleConfig.instructions,
});
let title = result.text?.trim();
if (!title) return;
// Strip <think>...</think> blocks (e.g. from DeepSeek R1)
title = title.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
if (!title) return;
// Strip markdown heading prefixes and inline formatting
title = title
.replace(/^#{1,6}\s+/, '')
.replace(/\*+/g, '')
.trim();
if (!title) return;
await opts.memory.saveThread({

View file

@ -27,8 +27,12 @@ export type TokenUsage<T extends Record<string, unknown> = Record<string, unknow
additionalMetadata?: T;
};
// eslint-disable-next-line @typescript-eslint/no-redundant-type-constituents -- LanguageModel is semantically distinct from string
export type ModelConfig = string | { id: string; apiKey?: string; url?: string } | LanguageModel;
/* eslint-disable @typescript-eslint/no-redundant-type-constituents -- LanguageModel is semantically distinct from string */
export type ModelConfig =
| string
| { id: string; apiKey?: string; url?: string; headers?: Record<string, string> }
| LanguageModel;
/* eslint-enable @typescript-eslint/no-redundant-type-constituents */
export interface AgentResult {
id?: string;

View file

@ -50,7 +50,7 @@ export type {
ThreadPatch,
WorkflowLoopWorkItemRecord,
} from './storage';
export { truncateToTitle, generateThreadTitle } from './memory/title-utils';
export { truncateToTitle, generateTitleForRun } from './memory/title-utils';
export { McpClientManager } from './mcp/mcp-client-manager';
export { mapMastraChunkToEvent } from './stream/map-chunk';
export { isRecord, parseSuspension, asResumable } from './utils/stream-helpers';

View file

@ -1,10 +1,3 @@
jest.mock('@mastra/core/agent', () => {
const MockAgent = jest.fn();
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
MockAgent.prototype.generate = jest.fn().mockResolvedValue({ text: '' });
return { Agent: MockAgent };
});
import { truncateToTitle } from '../title-utils';
describe('truncateToTitle', () => {

View file

@ -1,4 +1,4 @@
import { Agent } from '@mastra/core/agent';
import { createModel, generateTitleFromMessage } from '@n8n/agents';
import type { ModelConfig } from '../types';
@ -13,36 +13,20 @@ export function truncateToTitle(message: string): string {
return (lastSpace > 20 ? truncated.slice(0, lastSpace) : truncated) + '\u2026';
}
const TITLE_SYSTEM_PROMPT = [
'Generate a concise title (max 60 chars) summarizing what the user wants.',
'Return ONLY the title text. No quotes, colons, or explanation.',
'Focus on the user intent, not what the assistant might reply.',
'Examples: "Build Gmail to Slack workflow", "Debug failed execution", "Show project files"',
].join('\n');
/**
* Generate a polished thread title via a lightweight LLM call.
* Returns the cleaned title string or null on failure.
*
* Wraps @n8n/agents' title generation so callers don't have to build a
* LanguageModel themselves. Fails soft any error returns null.
*/
export async function generateThreadTitle(
export async function generateTitleForRun(
modelId: ModelConfig,
userMessage: string,
): Promise<string | null> {
try {
const agent = new Agent({
id: 'thread-title-generator',
name: 'Thread Title Generator',
instructions: {
role: 'system' as const,
content: TITLE_SYSTEM_PROMPT,
},
model: modelId,
});
const result = await agent.generate(userMessage, { maxSteps: 1 });
const title = result.text.trim().replace(/^["']|["']$/g, '');
if (!title) return null;
return title.length > MAX_TITLE_LENGTH ? truncateToTitle(title) : title;
const model = createModel(modelId);
return await generateTitleFromMessage(model, userMessage);
} catch {
return null;
}

View file

@ -47,7 +47,7 @@ import {
startResearchAgentTask,
streamAgentRun,
truncateToTitle,
generateThreadTitle,
generateTitleForRun,
patchThread,
type ConfirmationData,
type DomainAccessTracker,
@ -2466,7 +2466,7 @@ export class InstanceAiService {
? firstUserMsg.content
: JSON.stringify(firstUserMsg.content);
const llmTitle = await generateThreadTitle(modelId, userText);
const llmTitle = await generateTitleForRun(modelId, userText);
if (!llmTitle) return;
await patchThread(memory, {

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},

View file

@ -4,7 +4,7 @@
"path": "/v1/messages",
"body": {
"type": "STRING",
"string": "[{\"type\":\"text\",\"text\":\"Generate a concise title (max 60 chars) summarizing what",
"string": "you will generate a short title based on the first message",
"subString": true
}
},