fix(studio): key model list cache by resolved baseUrl

This commit is contained in:
Ma 2026-04-18 01:22:21 +08:00
parent fdb36bdbd4
commit 835afeddd9
2 changed files with 65 additions and 6 deletions

View file

@ -37,7 +37,7 @@ const getServiceApiKeyMock = vi.fn();
type ServicePresetMock = {
providerFamily: "openai" | "anthropic";
baseUrl: string;
modelsBaseUrl: string;
modelsBaseUrl?: string;
knownModels: string[];
};
const SERVICE_PRESETS_MOCK: Record<string, ServicePresetMock> = {
@ -45,7 +45,7 @@ const SERVICE_PRESETS_MOCK: Record<string, ServicePresetMock> = {
anthropic: { providerFamily: "anthropic", baseUrl: "https://api.anthropic.com", modelsBaseUrl: "https://api.anthropic.com", knownModels: [] as string[] },
minimax: { providerFamily: "anthropic", baseUrl: "https://api.minimaxi.com/anthropic", modelsBaseUrl: "https://api.minimaxi.com/anthropic", knownModels: [] as string[] },
bailian: { providerFamily: "anthropic", baseUrl: "https://dashscope.aliyuncs.com/apps/anthropic", modelsBaseUrl: "https://dashscope.aliyuncs.com/compatible-mode/v1", knownModels: [] as string[] },
custom: { providerFamily: "openai", baseUrl: "", modelsBaseUrl: "", knownModels: [] as string[] },
custom: { providerFamily: "openai", baseUrl: "", knownModels: [] as string[] },
};
// Test double for preset resolution: looks the service id up in the
// in-memory preset table above.
const resolveServicePresetMock = vi.fn((service: string) => {
  return SERVICE_PRESETS_MOCK[service];
});
// Derives the provider family from the resolved preset; undefined when the
// service id has no preset entry.
const resolveServiceProviderFamilyMock = vi.fn((service: string) => {
  const preset = resolveServicePresetMock(service);
  return preset?.providerFamily;
});
@ -1045,6 +1045,64 @@ describe("createStudioServer daemon lifecycle", () => {
);
});
// Regression test: the model-list cache must include the resolved baseUrl in
// its key. Previously the key was only service + apiKey tail, so a custom
// service whose baseUrl changed between requests served stale models.
it("keys cached model lists by baseUrl so custom endpoints do not leak stale results", async () => {
// Point the custom "Switcher" service at endpoint A first.
await writeFile(join(root, "inkos.json"), JSON.stringify({
...projectConfig,
llm: {
services: [
{ service: "custom", name: "Switcher", baseUrl: "https://a.example.com/v1" },
],
},
}, null, 2), "utf-8");
// Stubbed fetch: each endpoint's /models route returns a distinct model id
// so we can tell which baseUrl actually served the response.
const fetchMock = vi.fn(async (input: string | URL | Request) => {
// Normalize the RequestInfo union (string | URL | Request) to a plain URL string.
const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url;
if (url === "https://a.example.com/v1/models") {
return {
ok: true,
json: async () => ({ data: [{ id: "model-a" }] }),
text: async () => "",
};
}
if (url === "https://b.example.com/v1/models") {
return {
ok: true,
json: async () => ({ data: [{ id: "model-b" }] }),
text: async () => "",
};
}
// Any other URL is unexpected in this test — fail the fetch.
return {
ok: false,
status: 404,
text: async () => "404 page not found",
};
});
vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch);
// Import after stubbing so the server module picks up the mocked fetch.
const { createStudioServer } = await import("./server.js");
const app = createStudioServer(cloneProjectConfig() as never, root);
// First request resolves baseUrl A and should cache under a key that
// includes that baseUrl. Note the apiKey tail is identical in both requests.
const first = await app.request("http://localhost/api/v1/services/custom%3ASwitcher/models?apiKey=sk-shared-tail");
expect(first.status).toBe(200);
await expect(first.json()).resolves.toMatchObject({
models: [{ id: "model-a", name: "model-a" }],
});
// Rewrite the config to point the same service (same name, same key) at
// endpoint B — only the baseUrl differs.
await writeFile(join(root, "inkos.json"), JSON.stringify({
...projectConfig,
llm: {
services: [
{ service: "custom", name: "Switcher", baseUrl: "https://b.example.com/v1" },
],
},
}, null, 2), "utf-8");
// Second request must NOT hit the stale cache entry for endpoint A:
// with the baseUrl in the cache key it fetches fresh from endpoint B.
const second = await app.request("http://localhost/api/v1/services/custom%3ASwitcher/models?apiKey=sk-shared-tail");
expect(second.status).toBe(200);
await expect(second.json()).resolves.toMatchObject({
models: [{ id: "model-b", name: "model-b" }],
});
});
it("returns stored service secret for detail page rehydration", async () => {
loadSecretsMock.mockResolvedValue({
services: {

View file

@ -1063,8 +1063,11 @@ export function createStudioServer(initialConfig: ProjectConfig, root: string) {
// No key = no models
if (!apiKey) return c.json({ models: [] });
// Cache by service + apiKey fingerprint; valid for 10 min unless ?refresh=1
const cacheKey = `${service}::${apiKey.slice(-8)}`;
const preset = resolveServicePreset(isCustomServiceId(service) ? "custom" : service);
const resolvedBaseUrl = await resolveConfiguredServiceBaseUrl(root, service);
// Cache by service + resolved baseUrl + apiKey fingerprint; valid for 10 min unless ?refresh=1
const cacheKey = `${service}::${resolvedBaseUrl ?? ""}::${apiKey.slice(-8)}`;
if (!refresh) {
const cached = modelListCache.get(cacheKey);
if (cached && Date.now() - cached.at < 10 * 60 * 1000) {
@ -1073,7 +1076,6 @@ export function createStudioServer(initialConfig: ProjectConfig, root: string) {
}
// Fast path: services with knownModels return immediately
const preset = resolveServicePreset(isCustomServiceId(service) ? "custom" : service);
if (preset?.knownModels && preset.knownModels.length > 0) {
const models = preset.knownModels.map((id) => ({ id, name: id }));
modelListCache.set(cacheKey, { models, at: Date.now() });
@ -1081,7 +1083,6 @@ export function createStudioServer(initialConfig: ProjectConfig, root: string) {
}
// Simple /models API call + fallback to pi-ai built-in list (no slow probe)
const resolvedBaseUrl = await resolveConfiguredServiceBaseUrl(root, service);
if (!resolvedBaseUrl) return c.json({ models: [] });
const modelsBase = preset?.modelsBaseUrl ?? resolvedBaseUrl;