🐛 fix: type not preserved when model batch processing (#10015)

*  feat(aiModel): preserve type information when creating and updating models
🔤 fix(clerk): update translation for password to 通行密钥

*  feat(qwen): 添加 Qwen3 Max Preview 模型,支持上下文缓存和复杂任务

*  feat(qwen): 更新 qwenChatModels,添加推理和搜索能力

*  feat(aiModel): 优化批量插入和更新模型

*  feat(cerebras, google): 移除 Qwen 3 Coder 480B 模型并更新 Nano Banana 模型的上下文窗口和最大输出

*  feat(moonshot): 添加 Kimi K2 Thinking 和 Kimi K2 Thinking Turbo 模型,更新模型参数处理

*  feat(minimax, ollamacloud): 添加缓存读取和写入定价,更新 Kimi K2 Thinking 模型信息

* ♻️ refactor: 更新通行密钥相关文本及优化数据库模型代码

*  feat(moonshot): 处理模型列表以包含上下文窗口令牌和模型 ID

*  feat(moonshot): 添加支持图像输入的模型属性

*  feat: 更新模型参数,调整上下文窗口令牌,移除冗余代码
This commit is contained in:
sxjeru 2026-03-01 19:30:07 +08:00 committed by GitHub
parent 9cd63765b0
commit 902a265aed
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 89 additions and 39 deletions

View file

@@ -301,6 +301,32 @@ describe('AiModelModel', () => {
expect(models).toHaveLength(1);
expect(models[0].enabled).toBe(false);
});
it('should preserve type property when disabling all models', async () => {
// Create models with type information
await aiProviderModel.create({
id: 'gpt-4',
providerId: 'openai',
enabled: true,
type: 'chat',
});
await aiProviderModel.create({
id: 'dall-e-3',
providerId: 'openai',
enabled: true,
type: 'image',
});
// Batch disable all models
await aiProviderModel.batchToggleAiModels('openai', ['gpt-4', 'dall-e-3'], false);
// Verify type is preserved
const models = await aiProviderModel.getModelListByProviderId('openai');
expect(models).toHaveLength(2);
expect(models.find((m) => m.id === 'gpt-4')?.type).toBe('chat');
expect(models.find((m) => m.id === 'dall-e-3')?.type).toBe('image');
expect(models.every((m) => !m.enabled)).toBe(true);
});
});
describe('clearRemoteModels', () => {

View file

@@ -1,4 +1,4 @@
import { and, asc, desc, eq, inArray } from 'drizzle-orm';
import { and, asc, desc, eq, sql } from 'drizzle-orm';
import type {
AiModelSortMap,
AiProviderModelListItem,
@@ -189,41 +189,42 @@ export class AiModelModel {
return;
}
return this.db.transaction(async (trx) => {
// 1. insert models that are not in the db
const insertedRecords = await trx
.insert(aiModels)
.values(
models.map((i) => ({
enabled,
id: i,
providerId,
// if the model is not in the db, it's a builtin model
source: AiModelSourceEnum.Builtin,
updatedAt: new Date(),
userId: this.userId,
})),
)
.onConflictDoNothing({
target: [aiModels.id, aiModels.userId, aiModels.providerId],
})
.returning();
// Get default model list to preserve type information
const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
const defaultModelMap = new Map(LOBE_DEFAULT_MODEL_LIST.map((m) => [m.id, m]));
// 2. update models that are in the db
const insertedIds = new Set(insertedRecords.map((r) => r.id));
const recordsToUpdate = models.filter((r) => !insertedIds.has(r));
// Prepare all records for batch upsert
const allRecords = models.map((modelId) => {
const defaultModel = defaultModelMap.get(modelId);
const record: typeof aiModels.$inferInsert = {
enabled,
id: modelId,
providerId,
// if the model is not in the db, it's a builtin model
source: AiModelSourceEnum.Builtin,
updatedAt: new Date(),
userId: this.userId,
};
await trx
.update(aiModels)
.set({ enabled })
.where(
and(
eq(aiModels.providerId, providerId),
inArray(aiModels.id, recordsToUpdate),
eq(aiModels.userId, this.userId),
),
);
// Preserve type if available from default model list
if (defaultModel?.type) {
record.type = defaultModel.type;
}
return record;
});
// Use batch upsert to handle both insert and update in a single query
return this.db
.insert(aiModels)
.values(allRecords)
.onConflictDoUpdate({
set: {
enabled: sql`excluded.enabled`,
updatedAt: sql`excluded.updated_at`,
},
target: [aiModels.id, aiModels.userId, aiModels.providerId],
});
};
clearRemoteModels(providerId: string) {

View file

@@ -526,12 +526,12 @@ const googleChatModels: AIChatModelCard[] = [
imageOutput: true,
vision: true,
},
contextWindowTokens: 32_768 + 8192,
contextWindowTokens: 65_536 + 32_768,
description:
'Nano Banana is Googles newest, fastest, and most efficient native multimodal model, enabling conversational image generation and editing.',
displayName: 'Nano Banana',
id: 'gemini-2.5-flash-image',
maxOutput: 8192,
maxOutput: 32_768,
pricing: {
approximatePricePerImage: 0.039,
units: [

View file

@@ -86,6 +86,7 @@ const moonshotChatModels: AIChatModelCard[] = [
'kimi-k2-0905-preview offers a 256k context window, stronger agentic coding, better front-end code quality, and improved context understanding.',
displayName: 'Kimi K2 0905',
id: 'kimi-k2-0905-preview',
maxOutput: 65_536,
pricing: {
currency: 'CNY',
units: [

View file

@@ -38,6 +38,19 @@ const ollamaCloudModels: AIChatModelCard[] = [
id: 'minimax-m2.5',
type: 'chat',
},
{
abilities: {
functionCall: true,
reasoning: true,
},
contextWindowTokens: 262_144,
description:
'K2 long thinking model supports 256k contexts, supports multi-step tool calling and thinking, and is good at solving more complex problems.',
displayName: 'Kimi K2 Thinking',
enabled: true,
id: 'kimi-k2-thinking',
type: 'chat',
},
{
abilities: {
functionCall: true,

View file

@@ -1,21 +1,24 @@
import type Anthropic from '@anthropic-ai/sdk';
import type { ChatModelCard } from '@lobechat/types';
import { ModelProvider } from 'model-bank';
import OpenAI from 'openai';
import type OpenAI from 'openai';
import { CreateRouterRuntimeOptions, createRouterRuntime } from '../../core/RouterRuntime';
import {
buildDefaultAnthropicPayload,
createAnthropicCompatibleParams,
createAnthropicCompatibleRuntime,
} from '../../core/anthropicCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ChatStreamPayload } from '../../types';
import type { CreateRouterRuntimeOptions } from '../../core/RouterRuntime';
import { createRouterRuntime } from '../../core/RouterRuntime';
import type { ChatStreamPayload } from '../../types';
import { getModelPropertyWithFallback } from '../../utils/getFallbackModelProperty';
import { MODEL_LIST_CONFIGS, processModelList } from '../../utils/modelParse';
export interface MoonshotModelCard {
context_length?: number;
id: string;
supports_image_in?: boolean;
}
const DEFAULT_MOONSHOT_BASE_URL = 'https://api.moonshot.ai/v1';
@@ -164,7 +167,13 @@ const fetchMoonshotModels = async ({ client }: { client: OpenAI }): Promise<Chat
const modelsPage = (await client.models.list()) as any;
const modelList: MoonshotModelCard[] = modelsPage.data || [];
return processModelList(modelList, MODEL_LIST_CONFIGS.moonshot, 'moonshot');
const processedList = modelList.map((model) => ({
contextWindowTokens: model.context_length,
id: model.id,
vision: model.supports_image_in,
}));
return processModelList(processedList, MODEL_LIST_CONFIGS.moonshot, 'moonshot');
} catch (error) {
console.warn('Failed to fetch Moonshot models:', error);
return [];