feat(core): Land AgentHistoryProvider. (#23978)

This commit is contained in:
joshualitt 2026-03-27 12:22:35 -07:00 committed by GitHub
parent e7dccabf14
commit 320c8aba4c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 593 additions and 17 deletions

View file

@ -155,17 +155,21 @@ they appear in the UI.
### Experimental
| UI Label | Setting | Description | Default |
| -------------------------- | ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| Enable Tool Output Masking | `experimental.toolOutputMasking.enabled` | Enables tool output masking to save tokens. | `true` |
| Enable Git Worktrees | `experimental.worktrees` | Enable automated Git worktree management for parallel work. | `false` |
| Use OSC 52 Paste | `experimental.useOSC52Paste` | Use OSC 52 for pasting. This may be more robust than the default system when using remote terminal sessions (if your terminal is configured to allow it). | `false` |
| Use OSC 52 Copy | `experimental.useOSC52Copy` | Use OSC 52 for copying. This may be more robust than the default system when using remote terminal sessions (if your terminal is configured to allow it). | `false` |
| Plan | `experimental.plan` | Enable Plan Mode. | `true` |
| Model Steering | `experimental.modelSteering` | Enable model steering (user hints) to guide the model during tool execution. | `false` |
| Direct Web Fetch | `experimental.directWebFetch` | Enable web fetch behavior that bypasses LLM summarization. | `false` |
| Memory Manager Agent | `experimental.memoryManager` | Replace the built-in save_memory tool with a memory manager subagent that supports adding, removing, de-duplicating, and organizing memories. | `false` |
| Topic & Update Narration | `experimental.topicUpdateNarration` | Enable the experimental Topic & Update communication model for reduced chattiness and structured progress reporting. | `false` |
| UI Label | Setting | Description | Default |
| ---------------------------------- | ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| Enable Tool Output Masking | `experimental.toolOutputMasking.enabled` | Enables tool output masking to save tokens. | `true` |
| Enable Git Worktrees | `experimental.worktrees` | Enable automated Git worktree management for parallel work. | `false` |
| Use OSC 52 Paste | `experimental.useOSC52Paste` | Use OSC 52 for pasting. This may be more robust than the default system when using remote terminal sessions (if your terminal is configured to allow it). | `false` |
| Use OSC 52 Copy | `experimental.useOSC52Copy` | Use OSC 52 for copying. This may be more robust than the default system when using remote terminal sessions (if your terminal is configured to allow it). | `false` |
| Plan | `experimental.plan` | Enable Plan Mode. | `true` |
| Model Steering | `experimental.modelSteering` | Enable model steering (user hints) to guide the model during tool execution. | `false` |
| Direct Web Fetch | `experimental.directWebFetch` | Enable web fetch behavior that bypasses LLM summarization. | `false` |
| Memory Manager Agent | `experimental.memoryManager` | Replace the built-in save_memory tool with a memory manager subagent that supports adding, removing, de-duplicating, and organizing memories. | `false` |
| Agent History Truncation | `experimental.agentHistoryTruncation` | Enable truncation window logic for the Agent History Provider. | `false` |
| Agent History Truncation Threshold | `experimental.agentHistoryTruncationThreshold` | The maximum number of messages before history is truncated. | `30` |
| Agent History Retained Messages | `experimental.agentHistoryRetainedMessages` | The number of recent messages to retain after truncation. | `15` |
| Agent History Summarization | `experimental.agentHistorySummarization` | Enable summarization of truncated content via a small model for the Agent History Provider. | `false` |
| Topic & Update Narration | `experimental.topicUpdateNarration` | Enable the experimental Topic & Update communication model for reduced chattiness and structured progress reporting. | `false` |
### Skills

View file

@ -670,6 +670,11 @@ their corresponding top-level category object in your `settings.json` file.
"modelConfig": {
"model": "gemini-3-pro-preview"
}
},
"agent-history-provider-summarizer": {
"modelConfig": {
"model": "gemini-3-flash-preview"
}
}
}
```
@ -1677,6 +1682,28 @@ their corresponding top-level category object in your `settings.json` file.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.agentHistoryTruncation`** (boolean):
- **Description:** Enable truncation window logic for the Agent History
Provider.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.agentHistoryTruncationThreshold`** (number):
- **Description:** The maximum number of messages before history is truncated.
- **Default:** `30`
- **Requires restart:** Yes
- **`experimental.agentHistoryRetainedMessages`** (number):
- **Description:** The number of recent messages to retain after truncation.
- **Default:** `15`
- **Requires restart:** Yes
- **`experimental.agentHistorySummarization`** (boolean):
- **Description:** Enable summarization of truncated content via a small model
for the Agent History Provider.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.topicUpdateNarration`** (boolean):
- **Description:** Enable the experimental Topic & Update communication model
for reduced chattiness and structured progress reporting.

View file

@ -109,6 +109,12 @@ export function createMockConfig(
enableEnvironmentVariableRedaction: false,
},
}),
isExperimentalAgentHistoryTruncationEnabled: vi.fn().mockReturnValue(false),
getExperimentalAgentHistoryTruncationThreshold: vi.fn().mockReturnValue(50),
getExperimentalAgentHistoryRetainedMessages: vi.fn().mockReturnValue(30),
isExperimentalAgentHistorySummarizationEnabled: vi
.fn()
.mockReturnValue(false),
...overrides,
} as unknown as Config;

View file

@ -975,6 +975,14 @@ export async function loadCliConfig(
disabledSkills: settings.skills?.disabled,
experimentalJitContext: settings.experimental?.jitContext,
experimentalMemoryManager: settings.experimental?.memoryManager,
experimentalAgentHistoryTruncation:
settings.experimental?.agentHistoryTruncation,
experimentalAgentHistoryTruncationThreshold:
settings.experimental?.agentHistoryTruncationThreshold,
experimentalAgentHistoryRetainedMessages:
settings.experimental?.agentHistoryRetainedMessages,
experimentalAgentHistorySummarization:
settings.experimental?.agentHistorySummarization,
modelSteering: settings.experimental?.modelSteering,
topicUpdateNarration: settings.experimental?.topicUpdateNarration,
toolOutputMasking: settings.experimental?.toolOutputMasking,

View file

@ -2141,6 +2141,46 @@ const SETTINGS_SCHEMA = {
'Replace the built-in save_memory tool with a memory manager subagent that supports adding, removing, de-duplicating, and organizing memories.',
showInDialog: true,
},
agentHistoryTruncation: {
type: 'boolean',
label: 'Agent History Truncation',
category: 'Experimental',
requiresRestart: true,
default: false,
description:
'Enable truncation window logic for the Agent History Provider.',
showInDialog: true,
},
agentHistoryTruncationThreshold: {
type: 'number',
label: 'Agent History Truncation Threshold',
category: 'Experimental',
requiresRestart: true,
default: 30,
description:
'The maximum number of messages before history is truncated.',
showInDialog: true,
},
agentHistoryRetainedMessages: {
type: 'number',
label: 'Agent History Retained Messages',
category: 'Experimental',
requiresRestart: true,
default: 15,
description:
'The number of recent messages to retain after truncation.',
showInDialog: true,
},
agentHistorySummarization: {
type: 'boolean',
label: 'Agent History Summarization',
category: 'Experimental',
requiresRestart: true,
default: false,
description:
'Enable summarization of truncated content via a small model for the Agent History Provider.',
showInDialog: true,
},
topicUpdateNarration: {
type: 'boolean',
label: 'Topic & Update Narration',

View file

@ -681,6 +681,10 @@ export interface ConfigParameters {
adminSkillsEnabled?: boolean;
experimentalJitContext?: boolean;
experimentalMemoryManager?: boolean;
experimentalAgentHistoryTruncation?: boolean;
experimentalAgentHistoryTruncationThreshold?: number;
experimentalAgentHistoryRetainedMessages?: number;
experimentalAgentHistorySummarization?: boolean;
topicUpdateNarration?: boolean;
toolOutputMasking?: Partial<ToolOutputMaskingConfig>;
disableLLMCorrection?: boolean;
@ -909,6 +913,10 @@ export class Config implements McpContext, AgentLoopContext {
private readonly experimentalJitContext: boolean;
private readonly experimentalMemoryManager: boolean;
private readonly experimentalAgentHistoryTruncation: boolean;
private readonly experimentalAgentHistoryTruncationThreshold: number;
private readonly experimentalAgentHistoryRetainedMessages: number;
private readonly experimentalAgentHistorySummarization: boolean;
private readonly topicUpdateNarration: boolean;
private readonly disableLLMCorrection: boolean;
private readonly planEnabled: boolean;
@ -1118,6 +1126,14 @@ export class Config implements McpContext, AgentLoopContext {
this.experimentalJitContext = params.experimentalJitContext ?? true;
this.experimentalMemoryManager = params.experimentalMemoryManager ?? false;
this.experimentalAgentHistoryTruncation =
params.experimentalAgentHistoryTruncation ?? false;
this.experimentalAgentHistoryTruncationThreshold =
params.experimentalAgentHistoryTruncationThreshold ?? 30;
this.experimentalAgentHistoryRetainedMessages =
params.experimentalAgentHistoryRetainedMessages ?? 15;
this.experimentalAgentHistorySummarization =
params.experimentalAgentHistorySummarization ?? false;
this.topicUpdateNarration = params.topicUpdateNarration ?? false;
this.modelSteering = params.modelSteering ?? false;
this.injectionService = new InjectionService(() =>
@ -2298,6 +2314,22 @@ export class Config implements McpContext, AgentLoopContext {
return this.experimentalMemoryManager;
}
isExperimentalAgentHistoryTruncationEnabled(): boolean {
return this.experimentalAgentHistoryTruncation;
}
getExperimentalAgentHistoryTruncationThreshold(): number {
return this.experimentalAgentHistoryTruncationThreshold;
}
getExperimentalAgentHistoryRetainedMessages(): number {
return this.experimentalAgentHistoryRetainedMessages;
}
isExperimentalAgentHistorySummarizationEnabled(): boolean {
return this.experimentalAgentHistorySummarization;
}
isTopicUpdateNarrationEnabled(): boolean {
return this.topicUpdateNarration;
}

View file

@ -243,6 +243,11 @@ export const DEFAULT_MODEL_CONFIGS: ModelConfigServiceConfig = {
model: 'gemini-3-pro-preview',
},
},
'agent-history-provider-summarizer': {
modelConfig: {
model: 'gemini-3-flash-preview',
},
},
},
overrides: [
{

View file

@ -279,6 +279,16 @@ describe('Gemini Client (client.ts)', () => {
getActiveModel: vi.fn().mockReturnValue('test-model'),
setActiveModel: vi.fn(),
resetTurn: vi.fn(),
isExperimentalAgentHistoryTruncationEnabled: vi
.fn()
.mockReturnValue(false),
getExperimentalAgentHistoryTruncationThreshold: vi
.fn()
.mockReturnValue(30),
getExperimentalAgentHistoryRetainedMessages: vi.fn().mockReturnValue(15),
isExperimentalAgentHistorySummarizationEnabled: vi
.fn()
.mockReturnValue(false),
getModelAvailabilityService: vi
.fn()
.mockReturnValue(createAvailabilityServiceMock()),
@ -704,6 +714,43 @@ describe('Gemini Client (client.ts)', () => {
});
describe('sendMessageStream', () => {
it('calls AgentHistoryProvider.manageHistory when history truncation is enabled', async () => {
// Arrange
mockConfig.isExperimentalAgentHistoryTruncationEnabled = vi
.fn()
.mockReturnValue(true);
const manageHistorySpy = vi
.spyOn(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(client as any).agentHistoryProvider,
'manageHistory',
)
.mockResolvedValue([
{ role: 'user', parts: [{ text: 'preserved message' }] },
]);
mockTurnRunFn.mockReturnValue(
(async function* () {
yield { type: 'content', value: 'Hello' };
})(),
);
// Act
const stream = client.sendMessageStream(
[{ text: 'Hi' }],
new AbortController().signal,
'prompt-id-1',
);
await fromAsync(stream);
// Assert
expect(manageHistorySpy).toHaveBeenCalledWith(
expect.any(Array),
expect.any(AbortSignal),
);
});
it('emits a compression event when the context was automatically compressed', async () => {
// Arrange
mockTurnRunFn.mockReturnValue(

View file

@ -44,6 +44,7 @@ import type {
import type { ContentGenerator } from './contentGenerator.js';
import { LoopDetectionService } from '../services/loopDetectionService.js';
import { ChatCompressionService } from '../services/chatCompressionService.js';
import { AgentHistoryProvider } from '../services/agentHistoryProvider.js';
import { ideContextStore } from '../ide/ideContext.js';
import {
logContentRetryFailure,
@ -98,6 +99,7 @@ export class GeminiClient {
private readonly loopDetector: LoopDetectionService;
private readonly compressionService: ChatCompressionService;
private readonly agentHistoryProvider: AgentHistoryProvider;
private readonly toolOutputMaskingService: ToolOutputMaskingService;
private lastPromptId: string;
private currentSequenceModel: string | null = null;
@ -113,6 +115,12 @@ export class GeminiClient {
constructor(private readonly context: AgentLoopContext) {
this.loopDetector = new LoopDetectionService(this.config);
this.compressionService = new ChatCompressionService();
this.agentHistoryProvider = new AgentHistoryProvider(this.config, {
truncationThreshold:
this.config.getExperimentalAgentHistoryTruncationThreshold(),
retainedMessages:
this.config.getExperimentalAgentHistoryRetainedMessages(),
});
this.toolOutputMaskingService = new ToolOutputMaskingService();
this.lastPromptId = this.config.getSessionId();
@ -613,10 +621,20 @@ export class GeminiClient {
// Check for context window overflow
const modelForLimitCheck = this._getActiveModelForCurrentTurn();
const compressed = await this.tryCompressChat(prompt_id, false, signal);
if (this.config.isExperimentalAgentHistoryTruncationEnabled()) {
const newHistory = await this.agentHistoryProvider.manageHistory(
this.getHistory(),
signal,
);
if (newHistory.length !== this.getHistory().length) {
this.getChat().setHistory(newHistory);
}
} else {
const compressed = await this.tryCompressChat(prompt_id, false, signal);
if (compressed.compressionStatus === CompressionStatus.COMPRESSED) {
yield { type: GeminiEventType.ChatCompressed, value: compressed };
if (compressed.compressionStatus === CompressionStatus.COMPRESSED) {
yield { type: GeminiEventType.ChatCompressed, value: compressed };
}
}
const remainingTokenCount =

View file

@ -0,0 +1,17 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
exports[`AgentHistoryProvider > should handle summarizer failures gracefully 1`] = `
{
"parts": [
{
"text": "[System Note: Prior conversation history was truncated. The most recent user message before truncation was:]
Message 18",
},
{
"text": "Message 20",
},
],
"role": "user",
}
`;

View file

@ -0,0 +1,138 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { AgentHistoryProvider } from './agentHistoryProvider.js';
import type { Content, GenerateContentResponse } from '@google/genai';
import type { Config } from '../config/config.js';
import type { BaseLlmClient } from '../core/baseLlmClient.js';
// Unit tests for AgentHistoryProvider truncation and summarization behavior.
// NOTE: the `it(...)` titles double as snapshot keys (see the recorded
// snapshot "AgentHistoryProvider > should handle summarizer failures
// gracefully 1"); renaming them invalidates stored snapshots.
describe('AgentHistoryProvider', () => {
  let config: Config;
  let provider: AgentHistoryProvider;
  // Mock for BaseLlmClient.generateContent, kept in a variable so tests can
  // assert whether the summarizer was invoked and control success/failure.
  let generateContentMock: ReturnType<typeof vi.fn>;
  beforeEach(() => {
    // Minimal Config stub: both experimental flags start disabled and are
    // re-enabled per-test via vi.spyOn.
    config = {
      isExperimentalAgentHistoryTruncationEnabled: vi
        .fn()
        .mockReturnValue(false),
      isExperimentalAgentHistorySummarizationEnabled: vi
        .fn()
        .mockReturnValue(false),
      getBaseLlmClient: vi.fn(),
    } as unknown as Config;
    // Default summarizer response: resolves with a fixed summary string.
    generateContentMock = vi.fn().mockResolvedValue({
      candidates: [{ content: { parts: [{ text: 'Mock intent summary' }] } }],
    } as unknown as GenerateContentResponse);
    // Reassigned AFTER the stub is built so the client wraps the mock above.
    config.getBaseLlmClient = vi.fn().mockReturnValue({
      generateContent: generateContentMock,
    } as unknown as BaseLlmClient);
    // Thresholds chosen so a 35-message history truncates down to 15.
    provider = new AgentHistoryProvider(config, {
      truncationThreshold: 30,
      retainedMessages: 15,
    });
  });
  // Builds an alternating history: even indices are 'user', odd are 'model'.
  const createMockHistory = (count: number): Content[] =>
    Array.from({ length: count }).map((_, i) => ({
      role: i % 2 === 0 ? 'user' : 'model',
      parts: [{ text: `Message ${i}` }],
    }));
  it('should return history unchanged if truncation is disabled', async () => {
    vi.spyOn(
      config,
      'isExperimentalAgentHistoryTruncationEnabled',
    ).mockReturnValue(false);
    const history = createMockHistory(40);
    const result = await provider.manageHistory(history);
    // Same reference, not a copy: the provider must short-circuit entirely.
    expect(result).toBe(history);
    expect(result.length).toBe(40);
  });
  it('should return history unchanged if length is under threshold', async () => {
    vi.spyOn(
      config,
      'isExperimentalAgentHistoryTruncationEnabled',
    ).mockReturnValue(true);
    const history = createMockHistory(20); // Threshold is 30
    const result = await provider.manageHistory(history);
    expect(result).toBe(history);
    expect(result.length).toBe(20);
  });
  it('should truncate mechanically to RETAINED_MESSAGES without summarization when sum flag is off', async () => {
    vi.spyOn(
      config,
      'isExperimentalAgentHistoryTruncationEnabled',
    ).mockReturnValue(true);
    vi.spyOn(
      config,
      'isExperimentalAgentHistorySummarizationEnabled',
    ).mockReturnValue(false);
    const history = createMockHistory(35); // Above 30 threshold, should truncate to 15
    const result = await provider.manageHistory(history);
    expect(result.length).toBe(15);
    // With summarization off, the LLM must never be called.
    expect(generateContentMock).not.toHaveBeenCalled();
    // Check fallback message logic
    // Messages 20 to 34 are retained. Message 20 is 'user'.
    expect(result[0].role).toBe('user');
    expect(result[0].parts![0].text).toContain(
      'System Note: Prior conversation history was truncated',
    );
  });
  it('should call summarizer and prepend summary when summarization is enabled', async () => {
    vi.spyOn(
      config,
      'isExperimentalAgentHistoryTruncationEnabled',
    ).mockReturnValue(true);
    vi.spyOn(
      config,
      'isExperimentalAgentHistorySummarizationEnabled',
    ).mockReturnValue(true);
    const history = createMockHistory(35);
    const result = await provider.manageHistory(history);
    expect(generateContentMock).toHaveBeenCalled();
    expect(result.length).toBe(15); // retained messages
    // Summary is merged into the first retained (user) message, wrapped in
    // <intent_summary> tags.
    expect(result[0].role).toBe('user');
    expect(result[0].parts![0].text).toContain('<intent_summary>');
    expect(result[0].parts![0].text).toContain('Mock intent summary');
  });
  it('should handle summarizer failures gracefully', async () => {
    vi.spyOn(
      config,
      'isExperimentalAgentHistoryTruncationEnabled',
    ).mockReturnValue(true);
    vi.spyOn(
      config,
      'isExperimentalAgentHistorySummarizationEnabled',
    ).mockReturnValue(true);
    // Force the summarizer to reject; the provider must fall back to the
    // static note instead of throwing (verified against the snapshot).
    generateContentMock.mockRejectedValue(new Error('API Error'));
    const history = createMockHistory(35);
    const result = await provider.manageHistory(history);
    expect(generateContentMock).toHaveBeenCalled();
    expect(result.length).toBe(15);
    expect(result[0]).toMatchSnapshot();
  });
});

View file

@ -0,0 +1,185 @@
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import type { Content } from '@google/genai';
import type { Config } from '../config/config.js';
import { getResponseText } from '../utils/partUtils.js';
import { LlmRole } from '../telemetry/llmRole.js';
import { debugLogger } from '../utils/debugLogger.js';
/** Tunables for {@link AgentHistoryProvider}. */
export interface AgentHistoryProviderConfig {
  /** History length (in messages) above which truncation kicks in. */
  truncationThreshold: number;
  /** Number of most-recent messages kept after truncation. */
  retainedMessages: number;
}
/**
 * Keeps an agent's conversation history within a configurable size budget.
 *
 * Once the history grows past `truncationThreshold` messages, only the last
 * `retainedMessages` are kept. The dropped prefix is replaced with either an
 * LLM-generated intent summary (when the summarization experiment is enabled)
 * or a static fallback note, prepended to the retained history.
 */
export class AgentHistoryProvider {
  constructor(
    private readonly config: Config,
    private readonly providerConfig: AgentHistoryProviderConfig,
  ) {}

  /**
   * Evaluates the chat history and performs truncation and summarization if
   * necessary.
   *
   * @param history The current conversation history (never mutated).
   * @param abortSignal Optional signal forwarded to the summarizer request.
   * @returns A new array of Content if truncation occurred, otherwise the
   *   original array (same reference).
   */
  async manageHistory(
    history: readonly Content[],
    abortSignal?: AbortSignal,
  ): Promise<readonly Content[]> {
    if (!this.shouldTruncate(history)) {
      return history;
    }
    const { messagesToKeep, messagesToTruncate } =
      this.splitHistoryForTruncation(history);
    debugLogger.log(
      `AgentHistoryProvider: Truncating ${messagesToTruncate.length} messages, retaining ${messagesToKeep.length} messages.`,
    );
    const summaryText = await this.getSummaryText(
      messagesToTruncate,
      abortSignal,
    );
    return this.mergeSummaryWithHistory(summaryText, messagesToKeep);
  }

  /**
   * True only when the experiment flag is on AND the history exceeds the
   * configured threshold.
   */
  private shouldTruncate(history: readonly Content[]): boolean {
    if (!this.config.isExperimentalAgentHistoryTruncationEnabled()) {
      return false;
    }
    return history.length > this.providerConfig.truncationThreshold;
  }

  /**
   * Splits the history into the suffix to keep and the prefix to truncate.
   *
   * NOTE(review): the split is purely positional; it can separate a
   * functionCall from its functionResponse at the boundary — confirm the
   * downstream API tolerates that.
   */
  private splitHistoryForTruncation(history: readonly Content[]): {
    messagesToKeep: readonly Content[];
    messagesToTruncate: readonly Content[];
  } {
    // Clamp and compute an explicit split index. The previous
    // history.slice(-retainedMessages) was wrong for retainedMessages === 0:
    // slice(-0) === slice(0) returns the ENTIRE history, which would place
    // every message in BOTH halves. retainedMessages is user-configurable
    // via settings, so 0 is reachable.
    const retainedCount = Math.max(0, this.providerConfig.retainedMessages);
    const splitIndex = Math.max(0, history.length - retainedCount);
    return {
      messagesToKeep: history.slice(splitIndex),
      messagesToTruncate: history.slice(0, splitIndex),
    };
  }

  /**
   * Builds the static replacement text used when summarization is disabled
   * or fails: the most recent non-empty user message from the truncated
   * prefix, or a generic note when none exists.
   */
  private getFallbackSummaryText(
    messagesToTruncate: readonly Content[],
  ): string {
    const defaultNote =
      'System Note: Prior conversation history was truncated to maintain performance and focus. Important context should have been saved to memory.';
    let lastUserText = '';
    // Walk backwards to find the most recent user message with actual text.
    for (let i = messagesToTruncate.length - 1; i >= 0; i--) {
      const msg = messagesToTruncate[i];
      if (msg.role === 'user') {
        lastUserText =
          msg.parts
            ?.map((p) => p.text || '')
            .join('')
            .trim() || '';
        if (lastUserText) {
          break;
        }
      }
    }
    if (lastUserText) {
      return `[System Note: Prior conversation history was truncated. The most recent user message before truncation was:]\n\n${lastUserText}`;
    }
    return defaultNote;
  }

  /**
   * Produces the text that replaces the truncated prefix: an LLM intent
   * summary when the summarization experiment is enabled, falling back to
   * the static note when disabled or on any summarizer error (errors are
   * logged, never rethrown).
   */
  private async getSummaryText(
    messagesToTruncate: readonly Content[],
    abortSignal?: AbortSignal,
  ): Promise<string> {
    if (!this.config.isExperimentalAgentHistorySummarizationEnabled()) {
      debugLogger.log(
        'AgentHistoryProvider: Summarization disabled, using fallback note.',
      );
      return this.getFallbackSummaryText(messagesToTruncate);
    }
    try {
      const summary = await this.generateIntentSummary(
        messagesToTruncate,
        abortSignal,
      );
      debugLogger.log('AgentHistoryProvider: Summarization successful.');
      return summary;
    } catch (error) {
      debugLogger.log('AgentHistoryProvider: Summarization failed.', error);
      return this.getFallbackSummaryText(messagesToTruncate);
    }
  }

  /**
   * Prepends the summary to the retained history. To preserve the strict
   * user/model role alternation required by the Gemini API, the summary is
   * merged into the first retained message when it is from the 'user';
   * otherwise it becomes its own leading 'user' message.
   */
  private mergeSummaryWithHistory(
    summaryText: string,
    messagesToKeep: readonly Content[],
  ): readonly Content[] {
    if (messagesToKeep.length === 0) {
      return [{ role: 'user', parts: [{ text: summaryText }] }];
    }
    // To ensure strict user/model alternating roles required by the Gemini API,
    // we merge the summary into the first retained message if it's from the 'user'.
    const firstRetainedMessage = messagesToKeep[0];
    if (firstRetainedMessage.role === 'user') {
      const mergedParts = [
        { text: summaryText },
        ...(firstRetainedMessage.parts || []),
      ];
      const mergedMessage: Content = {
        role: 'user',
        parts: mergedParts,
      };
      return [mergedMessage, ...messagesToKeep.slice(1)];
    } else {
      const summaryMessage: Content = {
        role: 'user',
        parts: [{ text: summaryText }],
      };
      return [summaryMessage, ...messagesToKeep];
    }
  }

  /**
   * Asks the configured summarizer model for an intent summary of the
   * truncated prefix and wraps the result in <intent_summary> tags. Any such
   * tags the model itself emitted are stripped first to avoid nesting.
   *
   * @throws Propagates any error from the LLM client; callers are expected
   *   to fall back (see getSummaryText).
   */
  private async generateIntentSummary(
    messagesToTruncate: readonly Content[],
    abortSignal?: AbortSignal,
  ): Promise<string> {
    const prompt = `Create a succinct, agent-continuity focused intent summary of the truncated conversation history.
Distill the essence of the ongoing work by capturing:
- The Original Mandate: What the user (or calling agent) originally requested and why.
- The Agent's Strategy: How you (the agent) are approaching the task and where the work is taking place (e.g., specific files, directories, or architectural layers).
- Evolving Context: Any significant shifts in the user's intent or the agent's technical approach over the course of the truncated history.
Write this summary to orient the active agent. Do NOT predict next steps or summarize the current task state, as those are covered by the active history. Focus purely on foundational context and strategic continuity.`;
    const summaryResponse = await this.config
      .getBaseLlmClient()
      .generateContent({
        modelConfigKey: { model: 'agent-history-provider-summarizer' },
        contents: [
          ...messagesToTruncate,
          {
            role: 'user',
            parts: [{ text: prompt }],
          },
        ],
        promptId: 'agent-history-provider',
        abortSignal: abortSignal ?? new AbortController().signal,
        role: LlmRole.UTILITY_COMPRESSOR,
      });
    let summary = getResponseText(summaryResponse) ?? '';
    summary = summary.replace(/<\/?intent_summary>/g, '').trim();
    return `<intent_summary>\n${summary}\n</intent_summary>`;
  }
}

View file

@ -256,5 +256,9 @@
"chat-compression-default": {
"model": "gemini-3-pro-preview",
"generateContentConfig": {}
},
"agent-history-provider-summarizer": {
"model": "gemini-3-flash-preview",
"generateContentConfig": {}
}
}

View file

@ -256,5 +256,9 @@
"chat-compression-default": {
"model": "gemini-3-pro-preview",
"generateContentConfig": {}
},
"agent-history-provider-summarizer": {
"model": "gemini-3-flash-preview",
"generateContentConfig": {}
}
}

View file

@ -1151,8 +1151,11 @@ describe('loggers', () => {
getQuestion: () => 'test-question',
getToolRegistry: () =>
new ToolRegistry(cfg1, {} as unknown as MessageBus),
getUserMemory: () => 'user-memory',
isExperimentalAgentHistoryTruncationEnabled: () => false,
getExperimentalAgentHistoryTruncationThreshold: () => 30,
getExperimentalAgentHistoryRetainedMessages: () => 15,
isExperimentalAgentHistorySummarizationEnabled: () => false,
} as unknown as Config;
(cfg2 as unknown as { config: Config; promptId: string }).config = cfg2;

File diff suppressed because one or more lines are too long