feat(editor): implement AI input auto-completion (#13458)

* ✨ feat: implement AI input auto-completion with ReactAutoCompletePlugin

Adds GitHub Copilot-style ghost text completion to the chat input,
powered by a configurable system agent (disabled by default).

Key changes:
- Add `inputCompletion` system agent config (type, default, selector, i18n)
- Create `chainInputCompletion` prompt chain (V2 few-shot, benchmarked)
- Mount `ReactAutoCompletePlugin` in InputEditor when enabled
- Wire `getMessages` through ChatInput store for conversation context
- Add settings UI in Service Model page with enable toggle

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* ✅ test: update systemAgent snapshot for inputCompletion

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* 🐛 fix: restrict auto-complete context to visible user/assistant turns

Filter getMessages to use displayMessages (active visible thread)
instead of dbMessages (raw DB records including tool messages and
inactive branches). Also limit to last 10 user/assistant turns to
keep payload small and relevant.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* ✨ feat: enable input completion by default

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* ⚡️ perf: use non-streaming for input completion requests

Autocomplete needs the full result before displaying ghost text,
so streaming adds unnecessary overhead. Setting stream: false
reduces latency by avoiding SSE chunking.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* 🐛 fix: revert stream:false for input completion

fetchPresetTaskResult uses fetchSSE internally which cannot handle
non-streaming JSON responses, causing the editor to freeze after
receiving the completion result.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* ⚡️ perf: use non-streaming for input completion requests

Autocomplete waits for the full result before displaying ghost text.
fetchSSE handles non-streaming responses via its fallback path
(response.clone().text()), avoiding SSE chunking overhead.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* ⚡️ perf: skip contextEngineering for input completion

Call getChatCompletion directly instead of fetchPresetTaskResult
to avoid triggering agentDocument.getDocuments on every autocomplete
request. Input completion only needs a simple LLM call with the
prompt chain, not the full context engineering pipeline.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* ♻️ refactor: revert to fetchPresetTaskResult for input completion

Use the standard contextEngineering pipeline. The agentDocument
overhead will be addressed separately.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Arvin Xu 2026-04-03 02:00:18 +08:00 committed by GitHub
parent 3b13a1b6d4
commit 251e12c7d1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 194 additions and 26 deletions

View file

@ -768,6 +768,9 @@
"systemAgent.historyCompress.label": "Model",
"systemAgent.historyCompress.modelDesc": "Specify the model used to compress conversation history",
"systemAgent.historyCompress.title": "Conversation History Compression Agent",
"systemAgent.inputCompletion.label": "Model",
"systemAgent.inputCompletion.modelDesc": "Model used for input auto-completion suggestions (like GitHub Copilot ghost text)",
"systemAgent.inputCompletion.title": "Input Auto-Completion Agent",
"systemAgent.queryRewrite.label": "Model",
"systemAgent.queryRewrite.modelDesc": "Specify the model used to optimize user inquiries",
"systemAgent.queryRewrite.title": "Library query rewrite Agent",

View file

@ -768,6 +768,9 @@
"systemAgent.historyCompress.label": "模型",
"systemAgent.historyCompress.modelDesc": "指定用于压缩会话历史的模型",
"systemAgent.historyCompress.title": "会话历史压缩助理",
"systemAgent.inputCompletion.label": "模型",
"systemAgent.inputCompletion.modelDesc": "指定用于输入自动补全建议的模型(类似 GitHub Copilot 幽灵文本)",
"systemAgent.inputCompletion.title": "输入自动补全助理",
"systemAgent.queryRewrite.label": "模型",
"systemAgent.queryRewrite.modelDesc": "指定用于优化用户提问的模型",
"systemAgent.queryRewrite.title": "资源库提问重写助理",

View file

@ -23,10 +23,17 @@ export const DEFAULT_QUERY_REWRITE_SYSTEM_AGENT_ITEM: QueryRewriteSystemAgent =
provider: DEFAULT_MINI_SYSTEM_AGENT_ITEM.provider,
};
export const DEFAULT_INPUT_COMPLETION_SYSTEM_AGENT_ITEM: SystemAgentItem = {
enabled: true,
model: DEFAULT_MINI_SYSTEM_AGENT_ITEM.model,
provider: DEFAULT_MINI_SYSTEM_AGENT_ITEM.provider,
};
export const DEFAULT_SYSTEM_AGENT_CONFIG: UserSystemAgentConfig = {
agentMeta: DEFAULT_SYSTEM_AGENT_ITEM,
generationTopic: DEFAULT_MINI_SYSTEM_AGENT_ITEM,
historyCompress: DEFAULT_SYSTEM_AGENT_ITEM,
inputCompletion: DEFAULT_INPUT_COMPLETION_SYSTEM_AGENT_ITEM,
queryRewrite: DEFAULT_QUERY_REWRITE_SYSTEM_AGENT_ITEM,
thread: DEFAULT_SYSTEM_AGENT_ITEM,
topic: DEFAULT_MINI_SYSTEM_AGENT_ITEM,

View file

@ -1,6 +1,7 @@
export * from './abstractChunk';
export * from './answerWithContext';
export * from './compressContext';
export * from './inputCompletion';
export * from './langDetect';
export * from './pickEmoji';
export * from './rewriteQuery';

View file

@ -0,0 +1,37 @@
import type { ChatStreamPayload, OpenAIChatMessage } from '@lobechat/types';
export const chainInputCompletion = (
beforeCursor: string,
afterCursor: string,
context?: OpenAIChatMessage[],
): Partial<ChatStreamPayload> => {
let contextBlock = '';
if (context?.length) {
contextBlock = `\n\nCurrent conversation context:
${context.map((m) => `${m.role}: ${m.content}`).join('\n')}`;
}
return {
max_tokens: 100,
messages: [
{
content: `Complete the user's partially typed message. Output ONLY the missing text to insert at the cursor. Keep it short and natural. No explanations.
Examples of expected behavior:
User: Before cursor: "How do I " / After cursor: ""
Output: implement authentication in Next.js?
User: Before cursor: "Can you explain the difference between " / After cursor: ""
Output: useEffect and useLayoutEffect in React?
User: Before cursor: "我想了解一下" / After cursor: ""
Output: 如何在项目中使用 TypeScript ${contextBlock}`,
role: 'system',
},
{
content: `Before cursor: "${beforeCursor}"\nAfter cursor: "${afterCursor}"`,
role: 'user',
},
],
};
};

View file

@ -13,6 +13,7 @@ export interface UserSystemAgentConfig {
agentMeta: SystemAgentItem;
generationTopic: SystemAgentItem;
historyCompress: SystemAgentItem;
inputCompletion: SystemAgentItem;
queryRewrite: QueryRewriteSystemAgent;
thread: SystemAgentItem;
topic: SystemAgentItem;

View file

@ -25,6 +25,7 @@ export const ChatInputProvider = memo<ChatInputProviderProps>(
mentionItems,
allowExpand = true,
slashPlacement,
getMessages,
}) => {
const editor = useEditor();
const slashMenuRef = useRef<HTMLDivElement>(null);
@ -50,6 +51,7 @@ export const ChatInputProvider = memo<ChatInputProviderProps>(
agentId={agentId}
allowExpand={allowExpand}
chatInputEditorRef={chatInputEditorRef}
getMessages={getMessages}
leftActions={leftActions}
mentionItems={mentionItems}
mobile={mobile}

View file

@ -1,7 +1,8 @@
import { isDesktop } from '@lobechat/const';
import { chainInputCompletion } from '@lobechat/prompts';
import { HotkeyEnum, KeyEnum } from '@lobechat/types';
import { isCommandPressed } from '@lobechat/utils';
import { INSERT_MENTION_COMMAND, ReactMathPlugin } from '@lobehub/editor';
import { isCommandPressed, merge } from '@lobechat/utils';
import { INSERT_MENTION_COMMAND, ReactAutoCompletePlugin, ReactMathPlugin } from '@lobehub/editor';
import { Editor, FloatMenu, useEditorState } from '@lobehub/editor/react';
import { combineKeys } from '@lobehub/ui';
import { css, cx } from 'antd-style';
@ -11,10 +12,16 @@ import { useHotkeysContext } from 'react-hotkeys-hook';
import { usePasteFile, useUploadFiles } from '@/components/DragUploadZone';
import { useIMECompositionEvent } from '@/hooks/useIMECompositionEvent';
import { chatService } from '@/services/chat';
import { useAgentStore } from '@/store/agent';
import { agentByIdSelectors } from '@/store/agent/selectors';
import { useUserStore } from '@/store/user';
import { labPreferSelectors, preferenceSelectors, settingsSelectors } from '@/store/user/selectors';
import {
labPreferSelectors,
preferenceSelectors,
settingsSelectors,
systemAgentSelectors,
} from '@/store/user/selectors';
import { useAgentId } from '../hooks/useAgentId';
import { useChatInputStore, useStoreApi } from '../store';
@ -125,31 +132,94 @@ const InputEditor = memo<{ defaultRows?: number }>(({ defaultRows = 2 }) => {
[slashActionItems],
);
const richRenderProps = useMemo(
() =>
!enableRichRender
? {
enablePasteMarkdown: false,
markdownOption: false,
plugins: CHAT_INPUT_EMBED_PLUGINS,
}
: {
plugins: createChatInputRichPlugins({
mathPlugin: Editor.withProps(ReactMathPlugin, {
renderComp: expand
? undefined
: (props) => (
<FloatMenu
{...props}
getPopupContainer={() => (slashMenuRef as any)?.current}
/>
),
}),
}),
// --- Auto-completion ---
const inputCompletionConfig = useUserStore(systemAgentSelectors.inputCompletion);
const isAutoCompleteEnabled = inputCompletionConfig.enabled;
const getMessagesRef = useRef(storeApi.getState().getMessages);
useEffect(() => {
return storeApi.subscribe((s) => {
getMessagesRef.current = s.getMessages;
});
}, [storeApi]);
const handleAutoComplete = useCallback(
async ({
abortSignal,
afterText,
input,
}: {
abortSignal: AbortSignal;
afterText: string;
editor: any;
input: string;
selectionType: string;
}): Promise<string | null> => {
if (!input.trim()) return null;
const { enabled: _, ...config } = systemAgentSelectors.inputCompletion(
useUserStore.getState(),
);
const context = getMessagesRef.current?.();
const chainParams = chainInputCompletion(input, afterText, context);
const abortController = new AbortController();
abortSignal.addEventListener('abort', () => abortController.abort());
let result = '';
try {
await chatService.fetchPresetTaskResult({
abortController,
onMessageHandle: (chunk) => {
if (chunk.type === 'text') {
result += chunk.text;
}
},
[enableRichRender, expand, slashMenuRef],
params: merge(config, chainParams),
});
} catch {
return null;
}
if (abortSignal.aborted) return null;
return result || null;
},
[],
);
const autoCompletePlugin = useMemo(
() =>
isAutoCompleteEnabled
? Editor.withProps(ReactAutoCompletePlugin, {
delay: 600,
onAutoComplete: handleAutoComplete,
})
: null,
[isAutoCompleteEnabled, handleAutoComplete],
);
const richRenderProps = useMemo(() => {
const basePlugins = !enableRichRender
? CHAT_INPUT_EMBED_PLUGINS
: createChatInputRichPlugins({
mathPlugin: Editor.withProps(ReactMathPlugin, {
renderComp: expand
? undefined
: (props) => (
<FloatMenu {...props} getPopupContainer={() => (slashMenuRef as any)?.current} />
),
}),
});
const plugins = autoCompletePlugin ? [...basePlugins, autoCompletePlugin] : basePlugins;
return !enableRichRender
? { enablePasteMarkdown: false, markdownOption: false, plugins }
: { plugins };
}, [enableRichRender, expand, slashMenuRef, autoCompletePlugin]);
return (
<Editor
autoFocus

View file

@ -27,6 +27,7 @@ const StoreUpdater = memo<StoreUpdaterProps>(
mentionItems,
allowExpand,
slashPlacement,
getMessages,
}) => {
const storeApi = useStoreApi();
const useStoreUpdater = createStoreUpdater(storeApi);
@ -40,6 +41,7 @@ const StoreUpdater = memo<StoreUpdaterProps>(
useStoreUpdater('rightActions', rightActions!);
useStoreUpdater('allowExpand', allowExpand);
useStoreUpdater('slashPlacement', slashPlacement);
useStoreUpdater('getMessages', getMessages);
useStoreUpdater('sendButtonProps', sendButtonProps);
useStoreUpdater('onSend', onSend);

View file

@ -1,3 +1,4 @@
import { type OpenAIChatMessage } from '@lobechat/types';
import { type IEditor, type SlashOptions } from '@lobehub/editor';
import { type ChatInputProps } from '@lobehub/editor/react';
import { type MenuProps } from '@lobehub/ui';
@ -31,6 +32,7 @@ export interface PublicState {
agentId?: string;
allowExpand?: boolean;
expand?: boolean;
getMessages?: () => OpenAIChatMessage[];
leftActions: ActionKeys[];
mentionItems?: SlashOptions['items'];
mobile?: boolean;

View file

@ -20,9 +20,33 @@ import { fileChatSelectors, useFileStore } from '@/store/file';
import WideScreenContainer from '../../WideScreenContainer';
import InterventionBar from '../InterventionBar';
import { dataSelectors, messageStateSelectors, useConversationStore } from '../store';
import {
dataSelectors,
messageStateSelectors,
useConversationStore,
useConversationStoreApi,
} from '../store';
import QueueTray from './QueueTray';
/** Max recent messages to feed into auto-complete context (≈10 conversation turns) */
const MAX_CONTEXT_MESSAGES = 25;
const useGetMessages = () => {
const storeApi = useConversationStoreApi();
return useCallback(
() =>
dataSelectors
.dbMessages(storeApi.getState())
.filter((m) => m.role === 'user' || m.role === 'assistant' || m.role === 'tool')
.slice(-MAX_CONTEXT_MESSAGES)
.map((m) => ({
content: typeof m.content === 'string' ? m.content : '',
role: m.role as 'user' | 'assistant' | 'system',
})),
[storeApi],
);
};
export interface ChatInputProps {
/**
* Custom style for the action bar container
@ -108,6 +132,8 @@ const ChatInput = memo<ChatInputProps>(
}) => {
const { t } = useTranslation('chat');
const getMessages = useGetMessages();
// ConversationStore state
const context = useConversationStore((s) => s.context);
const [agentId, inputMessage, sendMessage, stopGenerating] = useConversationStore((s) => [
@ -249,6 +275,7 @@ const ChatInput = memo<ChatInputProps>(
<ChatInputProvider
agentId={agentId}
allowExpand={allowExpand}
getMessages={getMessages}
leftActions={leftActions}
mentionItems={mentionItems}
rightActions={rightActions}

View file

@ -886,6 +886,10 @@ When I am ___, I need ___
'systemAgent.historyCompress.modelDesc':
'Specify the model used to compress conversation history',
'systemAgent.historyCompress.title': 'Conversation History Compression Agent',
'systemAgent.inputCompletion.label': 'Model',
'systemAgent.inputCompletion.modelDesc':
'Model used for input auto-completion suggestions (like GitHub Copilot ghost text)',
'systemAgent.inputCompletion.title': 'Input Auto-Completion Agent',
'systemAgent.queryRewrite.label': 'Model',
'systemAgent.queryRewrite.modelDesc': 'Specify the model used to optimize user inquiries',
'systemAgent.queryRewrite.title': 'Library query rewrite Agent',

View file

@ -19,6 +19,7 @@ const Page = () => {
<SystemAgentForm systemAgentKey="translation" />
<SystemAgentForm systemAgentKey="historyCompress" />
<SystemAgentForm systemAgentKey="agentMeta" />
<SystemAgentForm allowDisable systemAgentKey="inputCompletion" />
{enableKnowledgeBase && (
<SystemAgentForm
allowCustomPrompt

View file

@ -23,6 +23,7 @@ const Page = () => {
<SystemAgentForm systemAgentKey="translation" />
<SystemAgentForm systemAgentKey="historyCompress" />
<SystemAgentForm systemAgentKey="agentMeta" />
<SystemAgentForm allowDisable systemAgentKey="inputCompletion" />
{enableKnowledgeBase && (
<SystemAgentForm
allowCustomPrompt

View file

@ -63,6 +63,11 @@ exports[`settingsSelectors > currentSystemAgent > should merge DEFAULT_SYSTEM_AG
"model": "claude-sonnet-4-6",
"provider": "anthropic",
},
"inputCompletion": {
"enabled": true,
"model": "gpt-5.4-mini",
"provider": "openai",
},
"queryRewrite": {
"enabled": true,
"model": "gpt-5.4-mini",

View file

@ -14,11 +14,13 @@ const agentMeta = (s: UserStore) => currentSystemAgent(s).agentMeta;
const queryRewrite = (s: UserStore) => currentSystemAgent(s).queryRewrite;
const historyCompress = (s: UserStore) => currentSystemAgent(s).historyCompress;
const generationTopic = (s: UserStore) => currentSystemAgent(s).generationTopic;
const inputCompletion = (s: UserStore) => currentSystemAgent(s).inputCompletion;
export const systemAgentSelectors = {
agentMeta,
generationTopic,
historyCompress,
inputCompletion,
queryRewrite,
thread,
topic,