♻️ refactor: refactor model runtime folder structure and add more tests (#9210)

* add test

* fix tests

* fix tests

* revert tests

* refactor model runtime folder

* refactor model runtime folder and remove @/libs/model-runtime

* fix lint

* move

* fix tests
This commit is contained in:
Arvin Xu 2025-09-11 11:22:05 +08:00 committed by GitHub
parent 6ace931e52
commit 7fe17e4028
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
206 changed files with 1661 additions and 604 deletions

View file

@ -4,7 +4,7 @@
"private": true,
"exports": {
".": "./src/index.ts",
"./vertexai": "./src/vertexai/index.ts"
"./vertexai": "./src/providers/vertexai/index.ts"
},
"scripts": {
"test": "vitest",

View file

@ -1,17 +0,0 @@
import { LobeAnthropicAI } from '../anthropic';
import { LobeAzureAI } from '../azureai';
import { LobeCloudflareAI } from '../cloudflare';
import { LobeFalAI } from '../fal';
import { LobeGoogleAI } from '../google';
import { LobeOpenAI } from '../openai';
import { LobeXAI } from '../xai';
/**
 * Maps provider ids to the runtime class used as a base implementation.
 * Consumed by the router runtime factory (see createRouterRuntime), which
 * resolves a concrete runtime from this map by provider id.
 */
export const baseRuntimeMap = {
anthropic: LobeAnthropicAI,
azure: LobeAzureAI,
cloudflare: LobeCloudflareAI,
fal: LobeFalAI,
google: LobeGoogleAI,
openai: LobeOpenAI,
xai: LobeXAI,
};

View file

@ -0,0 +1,64 @@
/**
 * Canonical identifiers for every model provider supported by the runtime.
 * Each member's string value is the lowercase provider id used as the key in
 * runtime maps and configuration (e.g. ModelProvider.OpenAI === 'openai').
 * Members are kept in alphabetical order — preserve this when adding entries.
 */
export enum ModelProvider {
Ai21 = 'ai21',
Ai302 = 'ai302',
Ai360 = 'ai360',
AiHubMix = 'aihubmix',
AkashChat = 'akashchat',
Anthropic = 'anthropic',
Azure = 'azure',
AzureAI = 'azureai',
Baichuan = 'baichuan',
Bedrock = 'bedrock',
Cloudflare = 'cloudflare',
Cohere = 'cohere',
CometAPI = 'cometapi',
DeepSeek = 'deepseek',
Fal = 'fal',
FireworksAI = 'fireworksai',
GiteeAI = 'giteeai',
Github = 'github',
Google = 'google',
Groq = 'groq',
Higress = 'higress',
HuggingFace = 'huggingface',
Hunyuan = 'hunyuan',
InfiniAI = 'infiniai',
InternLM = 'internlm',
Jina = 'jina',
LMStudio = 'lmstudio',
LobeHub = 'lobehub',
Minimax = 'minimax',
Mistral = 'mistral',
ModelScope = 'modelscope',
Moonshot = 'moonshot',
Nebius = 'nebius',
NewAPI = 'newapi',
Novita = 'novita',
Nvidia = 'nvidia',
Ollama = 'ollama',
OpenAI = 'openai',
OpenRouter = 'openrouter',
PPIO = 'ppio',
Perplexity = 'perplexity',
Qiniu = 'qiniu',
Qwen = 'qwen',
SambaNova = 'sambanova',
Search1API = 'search1api',
SenseNova = 'sensenova',
SiliconCloud = 'siliconcloud',
Spark = 'spark',
Stepfun = 'stepfun',
Taichu = 'taichu',
TencentCloud = 'tencentcloud',
TogetherAI = 'togetherai',
Upstage = 'upstage',
V0 = 'v0',
VLLM = 'vllm',
VertexAI = 'vertexai',
Volcengine = 'volcengine',
Wenxin = 'wenxin',
XAI = 'xai',
Xinference = 'xinference',
ZeroOne = 'zeroone',
ZhiPu = 'zhipu',
}

View file

@ -0,0 +1,9 @@
import { ModelProvider } from '@lobechat/model-runtime';
import { describe, expect, it } from 'vitest';
describe('ModelProvider', () => {
  it('should be a valid enum object', () => {
    expect(typeof ModelProvider).toBe('object');
    expect(ModelProvider).not.toBeNull();
  });

  // The original assertions pass for ANY non-null object; pin down the actual
  // contract: members map to their lowercase provider-id string values.
  it('should map members to their lowercase provider ids', () => {
    expect(ModelProvider.OpenAI).toBe('openai');
    expect(ModelProvider.Anthropic).toBe('anthropic');
    expect(ModelProvider.Google).toBe('google');
  });

  it('should only contain string values (no numeric reverse mappings)', () => {
    expect(Object.values(ModelProvider).every((v) => typeof v === 'string')).toBe(true);
  });
});

View file

@ -1,6 +1,5 @@
import OpenAI from 'openai';
import { AIBaseModelCard } from 'model-bank';
import OpenAI from 'openai';
import {
ChatMethodOptions,
@ -13,8 +12,8 @@ import {
TextToImagePayload,
TextToSpeechOptions,
TextToSpeechPayload,
} from './types';
import { CreateImagePayload, CreateImageResponse } from './types/image';
} from '../types';
import { CreateImagePayload, CreateImageResponse } from '../types/image';
/* eslint-disable sort-keys-fix/sort-keys-fix , typescript-sort-keys/interface */
export interface LobeRuntimeAI {

View file

@ -8,10 +8,10 @@ import { beforeEach, describe, expect, it, vi } from 'vitest';
import * as langfuseCfg from '@/envs/langfuse';
import { createTraceOptions } from '@/server/modules/ModelRuntime';
import { ChatStreamPayload, LobeOpenAI, ModelProvider, ModelRuntime } from '.';
import { ChatStreamPayload, LobeOpenAI, ModelProvider, ModelRuntime } from '../index';
import { providerRuntimeMap } from '../runtimeMap';
import { CreateImagePayload } from '../types/image';
import { AgentChatOptions } from './ModelRuntime';
import { providerRuntimeMap } from './runtimeMap';
import { CreateImagePayload } from './types/image';
const specialProviders = [
{ id: 'openai', payload: { apiKey: 'user-openai-key', baseURL: 'user-endpoint' } },

View file

@ -1,11 +1,11 @@
import type { TracePayload } from '@lobechat/types';
import { ClientOptions } from 'openai';
import { LobeRuntimeAI } from './BaseAI';
import { LobeBedrockAIParams } from './bedrock';
import { LobeCloudflareParams } from './cloudflare';
import { LobeOpenAI } from './openai';
import { providerRuntimeMap } from './runtimeMap';
import type { TracePayload } from '@/types/index';
import { LobeBedrockAIParams } from '../providers/bedrock';
import { LobeCloudflareParams } from '../providers/cloudflare';
import { LobeOpenAI } from '../providers/openai';
import { providerRuntimeMap } from '../runtimeMap';
import {
ChatMethodOptions,
ChatStreamPayload,
@ -15,8 +15,9 @@ import {
PullModelParams,
TextToImagePayload,
TextToSpeechPayload,
} from './types';
import { CreateImagePayload } from './types/image';
} from '../types';
import { CreateImagePayload } from '../types/image';
import { LobeRuntimeAI } from './BaseAI';
export interface AgentChatOptions {
enableTrace?: boolean;

View file

@ -0,0 +1,17 @@
import { LobeAnthropicAI } from '../../providers/anthropic';
import { LobeAzureAI } from '../../providers/azureai';
import { LobeCloudflareAI } from '../../providers/cloudflare';
import { LobeFalAI } from '../../providers/fal';
import { LobeGoogleAI } from '../../providers/google';
import { LobeOpenAI } from '../../providers/openai';
import { LobeXAI } from '../../providers/xai';
/**
 * Maps provider ids to the runtime class used as a base implementation.
 * Consumed by the router runtime factory (see createRouterRuntime), which
 * resolves a concrete runtime from this map by provider id.
 */
export const baseRuntimeMap = {
anthropic: LobeAnthropicAI,
azure: LobeAzureAI,
cloudflare: LobeCloudflareAI,
fal: LobeFalAI,
google: LobeGoogleAI,
openai: LobeOpenAI,
xai: LobeXAI,
};

View file

@ -6,9 +6,8 @@ import { Stream } from 'openai/streaming';
import type { ChatModelCard } from '@/types/llm';
import { LobeRuntimeAI } from '../BaseAI';
import { LobeOpenAI } from '../openai';
import { CreateImagePayload, CreateImageResponse, ILobeAgentRuntimeErrorType } from '../types';
import { LobeOpenAI } from '../../providers/openai';
import { CreateImagePayload, CreateImageResponse, ILobeAgentRuntimeErrorType } from '../../types';
import {
type ChatCompletionErrorPayload,
ChatMethodOptions,
@ -18,9 +17,10 @@ import {
EmbeddingsPayload,
TextToImagePayload,
TextToSpeechPayload,
} from '../types';
import { CreateImageOptions, CustomClientOptions } from '../utils/openaiCompatibleFactory';
import { postProcessModelList } from '../utils/postProcessModelList';
} from '../../types';
import { postProcessModelList } from '../../utils/postProcessModelList';
import { LobeRuntimeAI } from '../BaseAI';
import { CreateImageOptions, CustomClientOptions } from '../openaiCompatibleFactory';
import { baseRuntimeMap } from './baseRuntimeMap';
export interface RuntimeItem {
@ -127,7 +127,7 @@ export const createRouterRuntime = ({
// 支持动态 routers 配置
const resolvedRouters = typeof routers === 'function' ? routers(_options) : routers;
if (resolvedRouters.length === 0) {
throw new Error('empty providers');
}

View file

@ -3,9 +3,9 @@ import { RuntimeImageGenParamsValue } from 'model-bank';
import OpenAI from 'openai';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { imageUrlToBase64 } from '../imageToBase64';
import { convertImageUrlToFile } from '../openaiHelpers';
import { parseDataUri } from '../uriParser';
import { imageUrlToBase64 } from '../../utils/imageToBase64';
import { convertImageUrlToFile } from '../../utils/openaiHelpers';
import { parseDataUri } from '../../utils/uriParser';
const log = createDebug('lobe-image:openai-compatible');

View file

@ -10,8 +10,8 @@ import OpenAI from 'openai';
import type { Stream } from 'openai/streaming';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import * as debugStreamModule from '../debugStream';
import * as openaiHelpers from '../openaiHelpers';
import * as debugStreamModule from '../../utils/debugStream';
import * as openaiHelpers from '../../utils/openaiHelpers';
import { createOpenAICompatibleRuntime } from './index';
const sleep = async (ms: number) =>

View file

@ -7,7 +7,6 @@ import { Stream } from 'openai/streaming';
import type { ChatModelCard } from '@/types/llm';
import { LobeRuntimeAI } from '../../BaseAI';
import {
ChatCompletionErrorPayload,
ChatCompletionTool,
@ -24,14 +23,15 @@ import {
} from '../../types';
import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../types/error';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { AgentRuntimeError } from '../createError';
import { debugResponse, debugStream } from '../debugStream';
import { desensitizeUrl } from '../desensitizeUrl';
import { getModelPropertyWithFallback } from '../getFallbackModelProperty';
import { handleOpenAIError } from '../handleOpenAIError';
import { convertOpenAIMessages, convertOpenAIResponseInputs } from '../openaiHelpers';
import { postProcessModelList } from '../postProcessModelList';
import { StreamingResponse } from '../response';
import { AgentRuntimeError } from '../../utils/createError';
import { debugResponse, debugStream } from '../../utils/debugStream';
import { desensitizeUrl } from '../../utils/desensitizeUrl';
import { getModelPropertyWithFallback } from '../../utils/getFallbackModelProperty';
import { handleOpenAIError } from '../../utils/handleOpenAIError';
import { convertOpenAIMessages, convertOpenAIResponseInputs } from '../../utils/openaiHelpers';
import { postProcessModelList } from '../../utils/postProcessModelList';
import { StreamingResponse } from '../../utils/response';
import { LobeRuntimeAI } from '../BaseAI';
import { OpenAIResponsesStream, OpenAIStream, OpenAIStreamOptions } from '../streams';
import { createOpenAICompatibleImage } from './createImage';

View file

@ -1,7 +1,7 @@
import { InvokeModelWithResponseStreamResponse } from '@aws-sdk/client-bedrock-runtime';
import { ChatStreamCallbacks } from '../../../types';
import { nanoid } from '../../uuid';
import { nanoid } from '../../../utils/uuid';
import { transformAnthropicStream } from '../anthropic';
import {
StreamContext,

View file

@ -1,7 +1,7 @@
import { InvokeModelWithResponseStreamResponse } from '@aws-sdk/client-bedrock-runtime';
import { describe, expect, it, vi } from 'vitest';
import * as uuidModule from '../../uuid';
import * as uuidModule from '../../../utils/uuid';
import { AWSBedrockLlamaStream } from './llama';
describe('AWSBedrockLlamaStream', () => {

View file

@ -1,7 +1,7 @@
import { InvokeModelWithResponseStreamResponse } from '@aws-sdk/client-bedrock-runtime';
import { ChatStreamCallbacks } from '../../../types';
import { nanoid } from '../../uuid';
import { nanoid } from '../../../utils/uuid';
import {
StreamContext,
StreamProtocolChunk,

View file

@ -1,7 +1,7 @@
import { GenerateContentResponse } from '@google/genai';
import { describe, expect, it, vi } from 'vitest';
import * as uuidModule from '../uuid';
import * as uuidModule from '../../utils/uuid';
import { GoogleGenerativeAIStream } from './google-ai';
describe('GoogleGenerativeAIStream', () => {

View file

@ -5,7 +5,7 @@ import { ModelTokensUsage } from '@/types/message';
import { GroundingSearch } from '@/types/search';
import { ChatStreamCallbacks } from '../../types';
import { nanoid } from '../uuid';
import { nanoid } from '../../utils/uuid';
import {
StreamContext,
StreamProtocolChunk,

View file

@ -0,0 +1,268 @@
import { describe, expect, it, vi } from 'vitest';
import { createModelPullStream } from './model';
describe('createModelPullStream', () => {
  // Wrap an array in an async iterable that yields its values in order.
  const createMockAsyncIterable = <T>(values: T[]) => ({
    async *[Symbol.asyncIterator]() {
      for (const value of values) {
        yield value;
      }
    },
  });

  // Drain the stream to completion, returning each chunk decoded as text.
  // Always releases the reader lock, even if reading throws. Extracted because
  // this reader/decoder/try-finally boilerplate was duplicated in five tests.
  const readAllChunks = async (stream: ReadableStream<Uint8Array>): Promise<string[]> => {
    const reader = stream.getReader();
    const decoder = new TextDecoder();
    const chunks: string[] = [];
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        chunks.push(decoder.decode(value));
      }
    } finally {
      reader.releaseLock();
    }
    return chunks;
  };

  it('should create a readable stream from async iterable', async () => {
    const mockData = [
      { status: 'downloading', completed: 100, total: 1000 },
      { status: 'downloading', completed: 500, total: 1000 },
      { status: 'complete', completed: 1000, total: 1000 },
    ];
    const stream = createModelPullStream(createMockAsyncIterable(mockData), 'test-model');

    const chunks = await readAllChunks(stream);

    expect(chunks).toHaveLength(3);
    const parsedChunks = chunks.map((chunk) => JSON.parse(chunk));
    expect(parsedChunks[0]).toEqual({
      completed: 100,
      digest: undefined,
      model: 'test-model',
      status: 'downloading',
      total: 1000,
    });
    expect(parsedChunks[2]).toEqual({
      completed: 1000,
      digest: undefined,
      model: 'test-model',
      status: 'complete',
      total: 1000,
    });
  });

  it('should skip "pulling manifest" status', async () => {
    const mockData = [
      { status: 'pulling manifest' },
      { status: 'downloading', completed: 100, total: 1000 },
      { status: 'complete', completed: 1000, total: 1000 },
    ];
    const stream = createModelPullStream(createMockAsyncIterable(mockData), 'test-model');

    const chunks = await readAllChunks(stream);

    // Should only have 2 chunks (skipping "pulling manifest")
    expect(chunks).toHaveLength(2);
    const parsedChunks = chunks.map((chunk) => JSON.parse(chunk));
    expect(parsedChunks[0].status).toBe('downloading');
    expect(parsedChunks[1].status).toBe('complete');
  });

  it('should include digest when provided', async () => {
    const mockData = [
      { status: 'downloading', completed: 100, total: 1000, digest: 'sha256:abc123' },
    ];
    const stream = createModelPullStream(createMockAsyncIterable(mockData), 'test-model');
    const reader = stream.getReader();
    const decoder = new TextDecoder();

    const { value } = await reader.read();
    reader.releaseLock();

    const parsed = JSON.parse(decoder.decode(value));
    expect(parsed.digest).toBe('sha256:abc123');
  });

  it('should handle cancel with onCancel callback', async () => {
    const mockData = [
      { status: 'downloading', completed: 100, total: 1000 },
      { status: 'downloading', completed: 500, total: 1000 },
    ];
    const onCancel = vi.fn();
    const stream = createModelPullStream(createMockAsyncIterable(mockData), 'test-model', {
      onCancel,
    });
    const reader = stream.getReader();

    // Read first chunk then cancel
    await reader.read();
    await reader.cancel('user cancelled');

    expect(onCancel).toHaveBeenCalledWith('user cancelled');
  });

  it('should handle iterator with return method', async () => {
    const returnMock = vi.fn().mockResolvedValue({ done: true });
    const mockIterable = {
      [Symbol.asyncIterator]: () => ({
        next: vi.fn().mockResolvedValue({ done: false, value: { status: 'downloading' } }),
        return: returnMock,
      }),
    };

    const stream = createModelPullStream(mockIterable as any, 'test-model');
    const reader = stream.getReader();

    await reader.cancel();

    expect(returnMock).toHaveBeenCalled();
  });

  it('should handle AbortError gracefully', async () => {
    const mockIterable = {
      async *[Symbol.asyncIterator]() {
        yield { status: 'downloading', completed: 100, total: 1000 };
        throw new DOMException('Operation aborted', 'AbortError');
      },
    };

    const stream = createModelPullStream(mockIterable, 'test-model');
    const chunks = await readAllChunks(stream);

    // Should have at least the first chunk and possibly a cancelled status
    expect(chunks.length).toBeGreaterThanOrEqual(1);

    // First chunk should be the normal data
    const firstChunk = JSON.parse(chunks[0]);
    expect(firstChunk.status).toBe('downloading');

    // If there's a second chunk, it should be the cancelled status
    if (chunks.length > 1) {
      const lastChunk = JSON.parse(chunks[chunks.length - 1]);
      expect(lastChunk.status).toBe('cancelled');
    }
  });

  it('should handle generic errors', async () => {
    const mockIterable = {
      async *[Symbol.asyncIterator]() {
        yield { status: 'downloading', completed: 100, total: 1000 };
        throw new Error('Network error');
      },
    };
    const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});

    const stream = createModelPullStream(mockIterable, 'test-model');
    const chunks = await readAllChunks(stream);

    expect(consoleSpy).toHaveBeenCalledWith(
      '[createModelPullStream] model download stream error:',
      expect.any(Error),
    );

    // Should have the normal chunk and error chunk
    expect(chunks.length).toBeGreaterThanOrEqual(1);
    const firstChunk = JSON.parse(chunks[0]);
    expect(firstChunk.status).toBe('downloading');

    // Last chunk should be error status
    if (chunks.length > 1) {
      const lastChunk = JSON.parse(chunks[chunks.length - 1]);
      expect(lastChunk.status).toBe('error');
      expect(lastChunk.error).toBe('Network error');
    }

    consoleSpy.mockRestore();
  });

  it('should handle empty async iterable', async () => {
    const stream = createModelPullStream(createMockAsyncIterable([]), 'test-model');
    const reader = stream.getReader();

    const { done } = await reader.read();
    reader.releaseLock();

    expect(done).toBe(true);
  });

  it('should handle non-Error objects in catch', async () => {
    const mockIterable = {
      async *[Symbol.asyncIterator]() {
        yield { status: 'downloading', completed: 100, total: 1000 };
        throw 'String error';
      },
    };
    const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});

    const stream = createModelPullStream(mockIterable, 'test-model');
    const chunks = await readAllChunks(stream);

    if (chunks.length > 1) {
      const lastChunk = JSON.parse(chunks[chunks.length - 1]);
      expect(lastChunk.error).toBe('String error');
    }

    consoleSpy.mockRestore();
  });
});

View file

@ -1,7 +1,7 @@
import { ChatResponse } from 'ollama/browser';
import { describe, expect, it, vi } from 'vitest';
import * as uuidModule from '../uuid';
import * as uuidModule from '../../utils/uuid';
import { OllamaStream } from './ollama';
describe('OllamaStream', () => {

View file

@ -1,7 +1,7 @@
import { ChatResponse } from 'ollama/browser';
import { ChatStreamCallbacks } from '../../types';
import { nanoid } from '../uuid';
import { nanoid } from '../../utils/uuid';
import {
StreamContext,
StreamProtocolChunk,

View file

@ -167,9 +167,7 @@ describe('OpenAIStream', () => {
const data = [
{
id: 'img-1',
choices: [
{ index: 0, delta: { role: 'assistant', content: '这是一张图片: ' } },
],
choices: [{ index: 0, delta: { role: 'assistant', content: '这是一张图片: ' } }],
},
{
id: 'img-1',
@ -2368,7 +2366,8 @@ describe('OpenAIStream', () => {
});
it('should handle finish_reason with markdown image in content', async () => {
const base64 = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQVR4nFy9a5okSY4jCFBU3SOr53HdvcZeYW/YVZnhZqpCYn+AVIuZ7PqqKyPczfQhQgIgSOH/+//9PxRVu7QzX5nvqveVP5mv+3rf+XPt985b2NIVgVgK1jr0da7zrAiegWPhPBABLi1GILhCEMkFnCuOFRFxHN/r/CbOym/om/h1X+d1H/v667rP9328r9g3VNblpoXsAwsnTtnWp0kQ40siih6NixuHlN9Rt7ehv1mbW2dkg1ef03J9zQQpQg5yc/XllveG4wa4arKtSr0NwSCdGEJVNeKlkDZMov695YaQ5NVK3fmjn4OrE9N/U04C0EqT/2HCBxrf9pJe1L2nPBjqhKEq1TEi1Q/OXiIq+IrqX2fUb+qF+2kF10k/4ScwIXidU6/T6vGkA/bSR/fZ7Ok8yOd0s+27CnP8PH3cijINdbAcAAAAASUVORK5CYII=';
const base64 =
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABAAAAAQACAIAAADwf7zUAAAgAElEQVR4nFy9a5okSY4jCFBU3SOr53HdvcZeYW/YVZnhZqpCYn+AVIuZ7PqqKyPczfQhQgIgSOH/+//9PxRVu7QzX5nvqveVP5mv+3rf+XPt985b2NIVgVgK1jr0da7zrAiegWPhPBABLi1GILhCEMkFnCuOFRFxHN/r/CbOym/om/h1X+d1H/v667rP9328r9g3VNblpoXsAwsnTtnWp0kQ40siih6NixuHlN9Rt7ehv1mbW2dkg1ef03J9zQQpQg5yc/XllveG4wa4arKtSr0NwSCdGEJVNeKlkDZMov695YaQ5NVK3fmjn4OrE9N/U04C0EqT/2HCBxrf9pJe1L2nPBjqhKEq1TEi1Q/OXiIq+IrqX2fUb+qF+2kF10k/4ScwIXidU6/T6vGkA/bSR/fZ7Ok8yOd0s+27CnP8PH3cijINdbAcAAAAASUVORK5CYII=';
const mockOpenAIStream = new ReadableStream({
start(controller) {
controller.enqueue({

View file

@ -5,7 +5,7 @@ import { ChatCitationItem, ChatMessageError } from '@/types/message';
import { ChatStreamCallbacks } from '../../../types';
import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../../types/error';
import { convertUsage } from '../../usageConverter';
import { convertUsage } from '../../../utils/usageConverter';
import {
FIRST_CHUNK_ERROR_KEY,
StreamContext,

View file

@ -4,7 +4,7 @@ import type { Stream } from 'openai/streaming';
import { ChatCitationItem, ChatMessageError } from '@/types/message';
import { AgentRuntimeErrorType } from '../../../types/error';
import { convertResponseUsage } from '../../usageConverter';
import { convertResponseUsage } from '../../../utils/usageConverter';
import {
FIRST_CHUNK_ERROR_KEY,
StreamContext,

View file

@ -3,8 +3,8 @@ import { ChatCitationItem, ModelSpeed, ModelTokensUsage } from '@/types/message'
import { parseToolCalls } from '../../helpers';
import { ChatStreamCallbacks } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { safeParseJSON } from '../safeParseJSON';
import { nanoid } from '../uuid';
import { safeParseJSON } from '../../utils/safeParseJSON';
import { nanoid } from '../../utils/uuid';
/**
* context in the stream to save temporarily data

View file

@ -6,7 +6,7 @@ import {
import type { Stream } from 'openai/streaming';
import { ChatStreamCallbacks } from '../../types';
import { convertUsage } from '../usageConverter';
import { convertUsage } from '../../utils/usageConverter';
import {
StreamContext,
StreamProtocolChunk,

View file

@ -2,7 +2,7 @@ import OpenAI from 'openai';
import type { Stream } from 'openai/streaming';
import { ChatStreamCallbacks } from '../../types';
import { convertUsage } from '../usageConverter';
import { convertUsage } from '../../utils/usageConverter';
import {
StreamProtocolChunk,
StreamProtocolToolCallChunk,

View file

@ -0,0 +1,164 @@
import { describe, expect, it } from 'vitest';
import { createReadableStream, readStreamChunk } from './utils';
describe('createReadableStream', () => {
  // Drain a stream to completion, collecting every emitted chunk. Extracted
  // because the reader/try-finally boilerplate was duplicated across tests.
  const drain = async <T>(stream: ReadableStream<T>): Promise<T[]> => {
    const reader = stream.getReader();
    const collected: T[] = [];
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        collected.push(value as T);
      }
    } finally {
      reader.releaseLock();
    }
    return collected;
  };

  it('should create a readable stream from array of chunks', async () => {
    const chunks = ['chunk1', 'chunk2', 'chunk3'];
    const result = await drain(createReadableStream(chunks));
    expect(result).toEqual(chunks);
  });

  it('should handle empty array', async () => {
    const stream = createReadableStream([]);
    const reader = stream.getReader();
    const { done } = await reader.read();
    reader.releaseLock();
    expect(done).toBe(true);
  });

  it('should handle different data types', async () => {
    const chunks = [1, 'string', { key: 'value' }, [1, 2, 3]];
    const result = await drain(createReadableStream(chunks));
    expect(result).toEqual(chunks);
  });

  it('should close the stream after enqueuing all chunks', async () => {
    const stream = createReadableStream(['test']);
    const reader = stream.getReader();

    // Read the chunk
    const { done: firstDone, value } = await reader.read();
    expect(firstDone).toBe(false);
    expect(value).toBe('test');

    // Next read should indicate stream is closed
    const { done: secondDone } = await reader.read();
    expect(secondDone).toBe(true);
    reader.releaseLock();
  });
});
describe('readStreamChunk', () => {
  // Encode a string to its UTF-8 bytes.
  const toBytes = (text: string) => new TextEncoder().encode(text);

  // Build a byte stream that emits the given chunks in order, then closes.
  const byteStream = (parts: Uint8Array[]) =>
    new ReadableStream({
      start(controller) {
        for (const part of parts) controller.enqueue(part);
        controller.close();
      },
    });

  it('should read all chunks from a stream', async () => {
    const testData = ['Hello ', 'world', '!'];
    const chunks = await readStreamChunk(byteStream(testData.map(toBytes)));
    expect(chunks).toEqual(testData);
  });

  it('should handle empty stream', async () => {
    const chunks = await readStreamChunk(byteStream([]));
    expect(chunks).toEqual([]);
  });

  it('should decode UTF-8 text correctly', async () => {
    const testText = 'Hello 世界 🌍';
    const chunks = await readStreamChunk(byteStream([toBytes(testText)]));
    expect(chunks).toEqual([testText]);
  });

  it('should handle multiple chunks with streaming decode', async () => {
    // Split a multi-byte character across chunks to test streaming decode
    const fullText = 'Hello 世界';
    const encoded = toBytes(fullText);
    const head = encoded.slice(0, 8); // Split in middle of multi-byte char
    const tail = encoded.slice(8);
    const chunks = await readStreamChunk(byteStream([head, tail]));
    expect(chunks.join('')).toBe(fullText);
  });

  it('should work with createReadableStream output', async () => {
    const textChunks = ['chunk1', 'chunk2', 'chunk3'];
    const result = await readStreamChunk(createReadableStream(textChunks.map(toBytes)));
    expect(result).toEqual(textChunks);
  });

  it('should handle single large chunk', async () => {
    const largeText = 'A'.repeat(10000);
    const chunks = await readStreamChunk(byteStream([toBytes(largeText)]));
    expect(chunks).toEqual([largeText]);
  });
});

View file

@ -1,6 +1,6 @@
import { describe, expect, it, vi } from 'vitest';
import * as uuidModule from '../uuid';
import * as uuidModule from '../../utils/uuid';
import { VertexAIStream } from './vertex-ai';
describe('VertexAIStream', () => {

View file

@ -3,7 +3,7 @@ import { GenerateContentResponse } from '@google/genai';
import { GroundingSearch } from '@/types/search';
import { ModelTokensUsage } from '../../types';
import { nanoid } from '../uuid';
import { nanoid } from '../../utils/uuid';
import { type GoogleAIStreamOptions } from './google-ai';
import {
StreamContext,

View file

@ -1,34 +1,35 @@
export { LobeAkashChatAI } from './akashchat';
export { LobeAnthropicAI } from './anthropic';
export { LobeAzureAI } from './azureai';
export { LobeAzureOpenAI } from './azureOpenai';
export * from './BaseAI';
export { LobeBedrockAI } from './bedrock';
export { LobeBflAI } from './bfl';
export { LobeCometAPIAI } from './cometapi';
export { LobeDeepSeekAI } from './deepseek';
export { LobeGoogleAI } from './google';
export { LobeGroq } from './groq';
export * from './core/BaseAI';
export { ModelRuntime } from './core/ModelRuntime';
export { createOpenAICompatibleRuntime } from './core/openaiCompatibleFactory';
export * from './helpers';
export { LobeMinimaxAI } from './minimax';
export { LobeMistralAI } from './mistral';
export { ModelRuntime } from './ModelRuntime';
export { LobeMoonshotAI } from './moonshot';
export { LobeNebiusAI } from './nebius';
export { LobeNewAPIAI } from './newapi';
export { LobeOllamaAI } from './ollama';
export { LobeOpenAI } from './openai';
export { LobeOpenRouterAI } from './openrouter';
export { LobePerplexityAI } from './perplexity';
export { LobeQwenAI } from './qwen';
export { LobeTogetherAI } from './togetherai';
export { LobeAkashChatAI } from './providers/akashchat';
export { LobeAnthropicAI } from './providers/anthropic';
export { LobeAzureAI } from './providers/azureai';
export { LobeAzureOpenAI } from './providers/azureOpenai';
export { LobeBedrockAI } from './providers/bedrock';
export { LobeBflAI } from './providers/bfl';
export { LobeCometAPIAI } from './providers/cometapi';
export { LobeDeepSeekAI } from './providers/deepseek';
export { LobeGoogleAI } from './providers/google';
export { LobeGroq } from './providers/groq';
export { LobeMinimaxAI } from './providers/minimax';
export { LobeMistralAI } from './providers/mistral';
export { LobeMoonshotAI } from './providers/moonshot';
export { LobeNebiusAI } from './providers/nebius';
export { LobeNewAPIAI } from './providers/newapi';
export { LobeOllamaAI } from './providers/ollama';
export { LobeOpenAI } from './providers/openai';
export { LobeOpenRouterAI } from './providers/openrouter';
export { LobePerplexityAI } from './providers/perplexity';
export { LobeQwenAI } from './providers/qwen';
export { LobeStepfunAI } from './providers/stepfun';
export { LobeTogetherAI } from './providers/togetherai';
export { LobeVolcengineAI } from './providers/volcengine';
export { LobeZeroOneAI } from './providers/zeroone';
export { LobeZhipuAI } from './providers/zhipu';
export * from './types';
export * from './types/error';
export { AgentRuntimeError } from './utils/createError';
export { getModelPropertyWithFallback } from './utils/getFallbackModelProperty';
export { createOpenAICompatibleRuntime } from './utils/openaiCompatibleFactory';
export { pruneReasoningPayload } from './utils/openaiHelpers';
export { parseDataUri } from './utils/uriParser';
export { LobeVolcengineAI } from './volcengine';
export { LobeZeroOneAI } from './zeroone';
export { LobeZhipuAI } from './zhipu';

View file

@ -1,7 +1,7 @@
import OpenAI from 'openai';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { LobeOpenAICompatibleRuntime } from './BaseAI';
import { LobeOpenAICompatibleRuntime } from './core/BaseAI';
import * as debugStreamModule from './utils/debugStream';
interface TesstProviderParams {

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeAi21AI } from './index';
testProvider({

View file

@ -1,5 +1,5 @@
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
export const LobeAi21AI = createOpenAICompatibleRuntime({
baseURL: 'https://api.ai21.com/studio/v1',

View file

@ -1,7 +1,7 @@
import { ChatCompletionErrorPayload, ModelProvider } from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { processMultiProviderModelList } from '../utils/modelParse';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ChatCompletionErrorPayload, ModelProvider } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { processMultiProviderModelList } from '../../utils/modelParse';
export interface Ai302ModelCard {
id: string;

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeAi360AI } from './index';
testProvider({

View file

@ -1,7 +1,7 @@
import type { ChatModelCard } from '@/types/llm';
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
export interface Ai360ModelCard {
id: string;

View file

@ -1,11 +1,11 @@
import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
import urlJoin from 'url-join';
import { createRouterRuntime } from '../RouterRuntime';
import { responsesAPIModels } from '../const/models';
import { ModelProvider } from '../types';
import { ChatStreamPayload } from '../types/chat';
import { detectModelProvider, processMultiProviderModelList } from '../utils/modelParse';
import { responsesAPIModels } from '../../const/models';
import { createRouterRuntime } from '../../core/RouterRuntime';
import { ModelProvider } from '../../types';
import { ChatStreamPayload } from '../../types/chat';
import { detectModelProvider, processMultiProviderModelList } from '../../utils/modelParse';
export interface AiHubMixModelCard {
created: number;

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeAkashChatAI } from './index';
const provider = ModelProvider.AkashChat;

View file

@ -1,6 +1,6 @@
import { ModelProvider } from '../types';
import { processMultiProviderModelList } from '../utils/modelParse';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
import { processMultiProviderModelList } from '../../utils/modelParse';
export interface AkashChatModelCard {
id: string;
@ -28,7 +28,8 @@ export const LobeAkashChatAI = createOpenAICompatibleRuntime({
const rawList: any[] = modelsPage.data || [];
// Remove `created` field from each model item
const modelList: AkashChatModelCard[] = rawList.map(({ created, ...rest }) => rest);
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const modelList: AkashChatModelCard[] = rawList.map(({ created: _, ...rest }) => rest);
return await processMultiProviderModelList(modelList, 'akashchat');
} catch (error) {

View file

@ -2,8 +2,8 @@
import { ChatCompletionTool, ChatStreamPayload } from '@lobechat/model-runtime';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import * as anthropicHelpers from '../utils/anthropicHelpers';
import * as debugStreamModule from '../utils/debugStream';
import * as anthropicHelpers from '../../utils/anthropicHelpers';
import * as debugStreamModule from '../../utils/debugStream';
import { LobeAnthropicAI } from './index';
const provider = 'anthropic';

View file

@ -1,20 +1,20 @@
import Anthropic, { ClientOptions } from '@anthropic-ai/sdk';
import { LobeRuntimeAI } from '../BaseAI';
import { LobeRuntimeAI } from '../../core/BaseAI';
import { AnthropicStream } from '../../core/streams';
import {
type ChatCompletionErrorPayload,
ChatMethodOptions,
ChatStreamPayload,
ModelProvider,
} from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { buildAnthropicMessages, buildAnthropicTools } from '../utils/anthropicHelpers';
import { AgentRuntimeError } from '../utils/createError';
import { debugStream } from '../utils/debugStream';
import { desensitizeUrl } from '../utils/desensitizeUrl';
import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
import { StreamingResponse } from '../utils/response';
import { AnthropicStream } from '../utils/streams';
} from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { buildAnthropicMessages, buildAnthropicTools } from '../../utils/anthropicHelpers';
import { AgentRuntimeError } from '../../utils/createError';
import { debugStream } from '../../utils/debugStream';
import { desensitizeUrl } from '../../utils/desensitizeUrl';
import { MODEL_LIST_CONFIGS, processModelList } from '../../utils/modelParse';
import { StreamingResponse } from '../../utils/response';
import { handleAnthropicError } from './handleAnthropicError';
export interface AnthropicModelCard {

View file

@ -2,8 +2,8 @@
import { AzureOpenAI } from 'openai';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import * as debugStreamModule from '../utils/debugStream';
import * as openaiCompatibleFactoryModule from '../utils/openaiCompatibleFactory';
import * as openaiCompatibleFactoryModule from '../../core/openaiCompatibleFactory';
import * as debugStreamModule from '../../utils/debugStream';
import { LobeAzureOpenAI } from './index';
const bizErrorType = 'ProviderBizError';
@ -442,7 +442,7 @@ describe('LobeAzureOpenAI', () => {
.spyOn(instance['client'].images, 'edit')
.mockResolvedValue({ data: [{ url }] } as any);
const helpers = await import('../utils/openaiHelpers');
const helpers = await import('../../utils/openaiHelpers');
vi.spyOn(helpers, 'convertImageUrlToFile').mockResolvedValue({} as any);
const res = await instance.createImage({
@ -462,7 +462,7 @@ describe('LobeAzureOpenAI', () => {
.spyOn(instance['client'].images, 'edit')
.mockResolvedValue({ data: [{ url }] } as any);
const helpers = await import('../utils/openaiHelpers');
const helpers = await import('../../utils/openaiHelpers');
const spy = vi.spyOn(helpers, 'convertImageUrlToFile').mockResolvedValue({} as any);
await instance.createImage({

View file

@ -2,8 +2,10 @@ import debug from 'debug';
import OpenAI, { AzureOpenAI } from 'openai';
import type { Stream } from 'openai/streaming';
import { LobeRuntimeAI } from '../BaseAI';
import { systemToUserModels } from '../const/models';
import { systemToUserModels } from '../../const/models';
import { LobeRuntimeAI } from '../../core/BaseAI';
import { transformResponseToStream } from '../../core/openaiCompatibleFactory';
import { OpenAIStream } from '../../core/streams';
import {
ChatMethodOptions,
ChatStreamPayload,
@ -11,15 +13,13 @@ import {
EmbeddingsOptions,
EmbeddingsPayload,
ModelProvider,
} from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { CreateImagePayload, CreateImageResponse } from '../types/image';
import { AgentRuntimeError } from '../utils/createError';
import { debugStream } from '../utils/debugStream';
import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
import { convertImageUrlToFile, convertOpenAIMessages } from '../utils/openaiHelpers';
import { StreamingResponse } from '../utils/response';
import { OpenAIStream } from '../utils/streams';
} from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { AgentRuntimeError } from '../../utils/createError';
import { debugStream } from '../../utils/debugStream';
import { convertImageUrlToFile, convertOpenAIMessages } from '../../utils/openaiHelpers';
import { StreamingResponse } from '../../utils/response';
const azureImageLogger = debug('lobe-image:azure');
export class LobeAzureOpenAI implements LobeRuntimeAI {

View file

@ -2,15 +2,15 @@ import createClient, { ModelClient } from '@azure-rest/ai-inference';
import { AzureKeyCredential } from '@azure/core-auth';
import OpenAI from 'openai';
import { LobeRuntimeAI } from '../BaseAI';
import { systemToUserModels } from '../const/models';
import { ChatMethodOptions, ChatStreamPayload, ModelProvider } from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { AgentRuntimeError } from '../utils/createError';
import { debugStream } from '../utils/debugStream';
import { transformResponseToStream } from '../utils/openaiCompatibleFactory';
import { StreamingResponse } from '../utils/response';
import { OpenAIStream, createSSEDataExtractor } from '../utils/streams';
import { systemToUserModels } from '../../const/models';
import { LobeRuntimeAI } from '../../core/BaseAI';
import { transformResponseToStream } from '../../core/openaiCompatibleFactory';
import { OpenAIStream, createSSEDataExtractor } from '../../core/streams';
import { ChatMethodOptions, ChatStreamPayload, ModelProvider } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { AgentRuntimeError } from '../../utils/createError';
import { debugStream } from '../../utils/debugStream';
import { StreamingResponse } from '../../utils/response';
interface AzureAIParams {
apiKey?: string;

View file

@ -2,7 +2,7 @@
import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeBaichuanAI } from './index';
testProvider({

View file

@ -1,7 +1,7 @@
import type { ChatModelCard } from '@/types/llm';
import { ChatStreamPayload, ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ChatStreamPayload, ModelProvider } from '../../types';
export interface BaichuanModelCard {
function_call: boolean;

View file

@ -3,7 +3,7 @@ import { InvokeModelWithResponseStreamCommand } from '@aws-sdk/client-bedrock-ru
import { AgentRuntimeErrorType, ModelProvider } from '@lobechat/model-runtime';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import * as debugStreamModule from '../utils/debugStream';
import * as debugStreamModule from '../../utils/debugStream';
import { LobeBedrockAI } from './index';
const provider = 'bedrock';

View file

@ -4,7 +4,12 @@ import {
InvokeModelWithResponseStreamCommand,
} from '@aws-sdk/client-bedrock-runtime';
import { LobeRuntimeAI } from '../BaseAI';
import { LobeRuntimeAI } from '../../core/BaseAI';
import {
AWSBedrockClaudeStream,
AWSBedrockLlamaStream,
createBedrockStream,
} from '../../core/streams';
import {
ChatMethodOptions,
ChatStreamPayload,
@ -12,17 +17,12 @@ import {
EmbeddingsOptions,
EmbeddingsPayload,
ModelProvider,
} from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { buildAnthropicMessages, buildAnthropicTools } from '../utils/anthropicHelpers';
import { AgentRuntimeError } from '../utils/createError';
import { debugStream } from '../utils/debugStream';
import { StreamingResponse } from '../utils/response';
import {
AWSBedrockClaudeStream,
AWSBedrockLlamaStream,
createBedrockStream,
} from '../utils/streams';
} from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { buildAnthropicMessages, buildAnthropicTools } from '../../utils/anthropicHelpers';
import { AgentRuntimeError } from '../../utils/createError';
import { debugStream } from '../../utils/debugStream';
import { StreamingResponse } from '../../utils/response';
/**
* A prompt constructor for HuggingFace LLama 2 chat models.

View file

@ -1,20 +1,20 @@
// @vitest-environment node
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { CreateImagePayload } from '../types/image';
import { CreateImagePayload } from '../../types/image';
import { createBflImage } from './createImage';
import { BflStatusResponse } from './types';
// Mock external dependencies
vi.mock('../utils/imageToBase64', () => ({
vi.mock('../../utils/imageToBase64', () => ({
imageUrlToBase64: vi.fn(),
}));
vi.mock('../utils/uriParser', () => ({
vi.mock('../../utils/uriParser', () => ({
parseDataUri: vi.fn(),
}));
vi.mock('../utils/asyncifyPolling', () => ({
vi.mock('../../utils/asyncifyPolling', () => ({
asyncifyPolling: vi.fn(),
}));
@ -42,7 +42,7 @@ describe('createBflImage', () => {
describe('Parameter mapping and defaults', () => {
it('should map standard parameters to BFL-specific parameters', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -96,7 +96,7 @@ describe('createBflImage', () => {
it('should add raw: true for ultra models', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -138,7 +138,7 @@ describe('createBflImage', () => {
it('should filter out undefined values', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -186,9 +186,9 @@ describe('createBflImage', () => {
describe('Image URL handling', () => {
it('should convert single imageUrl to image_prompt base64', async () => {
// Arrange
const { parseDataUri } = await import('../utils/uriParser');
const { imageUrlToBase64 } = await import('../utils/imageToBase64');
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { parseDataUri } = await import('../../utils/uriParser');
const { imageUrlToBase64 } = await import('../../utils/imageToBase64');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockParseDataUri = vi.mocked(parseDataUri);
const mockImageUrlToBase64 = vi.mocked(imageUrlToBase64);
@ -243,8 +243,8 @@ describe('createBflImage', () => {
it('should handle base64 imageUrl directly', async () => {
// Arrange
const { parseDataUri } = await import('../utils/uriParser');
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { parseDataUri } = await import('../../utils/uriParser');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockParseDataUri = vi.mocked(parseDataUri);
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
@ -289,9 +289,9 @@ describe('createBflImage', () => {
it('should convert multiple imageUrls for Kontext models', async () => {
// Arrange
const { parseDataUri } = await import('../utils/uriParser');
const { imageUrlToBase64 } = await import('../utils/imageToBase64');
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { parseDataUri } = await import('../../utils/uriParser');
const { imageUrlToBase64 } = await import('../../utils/imageToBase64');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockParseDataUri = vi.mocked(parseDataUri);
const mockImageUrlToBase64 = vi.mocked(imageUrlToBase64);
@ -349,9 +349,9 @@ describe('createBflImage', () => {
it('should limit imageUrls to maximum 4 images', async () => {
// Arrange
const { parseDataUri } = await import('../utils/uriParser');
const { imageUrlToBase64 } = await import('../utils/imageToBase64');
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { parseDataUri } = await import('../../utils/uriParser');
const { imageUrlToBase64 } = await import('../../utils/imageToBase64');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockParseDataUri = vi.mocked(parseDataUri);
const mockImageUrlToBase64 = vi.mocked(imageUrlToBase64);
@ -417,7 +417,7 @@ describe('createBflImage', () => {
describe('Model endpoint mapping', () => {
it('should map models to correct endpoints', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValue({
@ -480,7 +480,7 @@ describe('createBflImage', () => {
it('should use custom baseURL when provided', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -522,7 +522,7 @@ describe('createBflImage', () => {
describe('Status handling', () => {
it('should return success when status is Ready with result', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -568,7 +568,7 @@ describe('createBflImage', () => {
it('should throw error when status is Ready but no result', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -610,7 +610,7 @@ describe('createBflImage', () => {
it('should handle error statuses', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -660,7 +660,7 @@ describe('createBflImage', () => {
it('should handle TaskNotFound status', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({
@ -701,7 +701,7 @@ describe('createBflImage', () => {
it('should continue polling for Pending status', async () => {
// Arrange
const { asyncifyPolling } = await import('../utils/asyncifyPolling');
const { asyncifyPolling } = await import('../../utils/asyncifyPolling');
const mockAsyncifyPolling = vi.mocked(asyncifyPolling);
mockFetch.mockResolvedValueOnce({

View file

@ -1,12 +1,12 @@
import createDebug from 'debug';
import { RuntimeImageGenParamsValue } from 'model-bank';
import { AgentRuntimeErrorType } from '../types/error';
import { CreateImagePayload, CreateImageResponse } from '../types/image';
import { type TaskResult, asyncifyPolling } from '../utils/asyncifyPolling';
import { AgentRuntimeError } from '../utils/createError';
import { imageUrlToBase64 } from '../utils/imageToBase64';
import { parseDataUri } from '../utils/uriParser';
import { AgentRuntimeErrorType } from '../../types/error';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { type TaskResult, asyncifyPolling } from '../../utils/asyncifyPolling';
import { AgentRuntimeError } from '../../utils/createError';
import { imageUrlToBase64 } from '../../utils/imageToBase64';
import { parseDataUri } from '../../utils/uriParser';
import {
BFL_ENDPOINTS,
BflAsyncResponse,

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { CreateImagePayload } from '../types/image';
import { CreateImagePayload } from '../../types/image';
import { LobeBflAI } from './index';
// Mock the createBflImage function

View file

@ -1,10 +1,10 @@
import createDebug from 'debug';
import { ClientOptions } from 'openai';
import { LobeRuntimeAI } from '../BaseAI';
import { AgentRuntimeErrorType } from '../types/error';
import { CreateImagePayload, CreateImageResponse } from '../types/image';
import { AgentRuntimeError } from '../utils/createError';
import { LobeRuntimeAI } from '../../core/BaseAI';
import { AgentRuntimeErrorType } from '../../types/error';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { AgentRuntimeError } from '../../utils/createError';
import { createBflImage } from './createImage';
const log = createDebug('lobe-image:bfl');

View file

@ -2,7 +2,7 @@
import { ChatCompletionTool } from '@lobechat/model-runtime';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import * as debugStreamModule from '../utils/debugStream';
import * as debugStreamModule from '../../utils/debugStream';
import { LobeCloudflareAI } from './index';
const provider = 'cloudflare';

View file

@ -1,18 +1,18 @@
import { ChatModelCard } from '@/types/llm';
import { LobeRuntimeAI } from '../BaseAI';
import { ChatMethodOptions, ChatStreamPayload, ModelProvider } from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { LobeRuntimeAI } from '../../core/BaseAI';
import { createCallbacksTransformer } from '../../core/streams';
import { ChatMethodOptions, ChatStreamPayload, ModelProvider } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import {
CloudflareStreamTransformer,
DEFAULT_BASE_URL_PREFIX,
desensitizeCloudflareUrl,
fillUrl,
} from '../utils/cloudflareHelpers';
import { AgentRuntimeError } from '../utils/createError';
import { debugStream } from '../utils/debugStream';
import { StreamingResponse } from '../utils/response';
import { createCallbacksTransformer } from '../utils/streams';
} from '../../utils/cloudflareHelpers';
import { AgentRuntimeError } from '../../utils/createError';
import { debugStream } from '../../utils/debugStream';
import { StreamingResponse } from '../../utils/response';
export interface CloudflareModelCard {
description: string;

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeCohereAI } from './index';
const provider = ModelProvider.Cohere;

View file

@ -1,7 +1,7 @@
import type { ChatModelCard } from '@/types/llm';
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
export interface CohereModelCard {
context_length: number;

View file

@ -0,0 +1,12 @@
// @vitest-environment node
import { testProvider } from '../../providerTestUtils';
import { ModelProvider } from '../../types';
import { LobeCometAPIAI } from './index';
testProvider({
Runtime: LobeCometAPIAI,
provider: ModelProvider.CometAPI,
defaultBaseURL: 'https://api.cometapi.com/v1',
chatDebugEnv: 'DEBUG_COMETAPI_COMPLETION',
chatModel: 'gpt-3.5-turbo',
});

View file

@ -1,6 +1,6 @@
import { ModelProvider } from '../types';
import { processMultiProviderModelList } from '../utils/modelParse';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
import { processMultiProviderModelList } from '../../utils/modelParse';
export interface CometAPIModelCard {
id: string;

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeDeepSeekAI } from './index';
const provider = ModelProvider.DeepSeek;

View file

@ -1,6 +1,6 @@
import { ModelProvider } from '../types';
import { MODEL_LIST_CONFIGS, processModelList } from '../utils/modelParse';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
import { MODEL_LIST_CONFIGS, processModelList } from '../../utils/modelParse';
export interface DeepSeekModelCard {
id: string;
@ -12,8 +12,6 @@ export const LobeDeepSeekAI = createOpenAICompatibleRuntime({
chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
},
models: async ({ client }) => {
const { LOBE_DEFAULT_MODEL_LIST } = await import('model-bank');
const modelsPage = (await client.models.list()) as any;
const modelList: DeepSeekModelCard[] = modelsPage.data;

View file

@ -2,7 +2,7 @@
import { fal } from '@fal-ai/client';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { CreateImagePayload } from '../types';
import { CreateImagePayload } from '../../types';
import { LobeFalAI } from './index';
// Mock the fal client

View file

@ -4,10 +4,10 @@ import { pick } from 'lodash-es';
import { RuntimeImageGenParamsValue } from 'model-bank';
import { ClientOptions } from 'openai';
import { LobeRuntimeAI } from '../BaseAI';
import { AgentRuntimeErrorType } from '../types/error';
import { CreateImagePayload, CreateImageResponse } from '../types/image';
import { AgentRuntimeError } from '../utils/createError';
import { LobeRuntimeAI } from '../../core/BaseAI';
import { AgentRuntimeErrorType } from '../../types/error';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { AgentRuntimeError } from '../../utils/createError';
// Create debug logger
const log = debug('lobe-image:fal');

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeFireworksAI } from './index';
const provider = ModelProvider.FireworksAI;

View file

@ -1,7 +1,7 @@
import type { ChatModelCard } from '@/types/llm';
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
export interface FireworksAIModelCard {
context_length: number;

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeGiteeAI } from './index';
testProvider({

View file

@ -1,6 +1,6 @@
import { ModelProvider } from '../types';
import { processMultiProviderModelList } from '../utils/modelParse';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
import { processMultiProviderModelList } from '../../utils/modelParse';
export interface GiteeAIModelCard {
id: string;

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeGithubAI } from './index';
testProvider({

View file

@ -1,8 +1,8 @@
import { ModelProvider } from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { processMultiProviderModelList } from '../utils/modelParse';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { pruneReasoningPayload } from '../utils/openaiHelpers';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { processMultiProviderModelList } from '../../utils/modelParse';
import { pruneReasoningPayload } from '../../utils/openaiHelpers';
export interface GithubModelCard {
capabilities: string[];

View file

@ -2,8 +2,8 @@
import { GoogleGenAI } from '@google/genai';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { CreateImagePayload } from '../types/image';
import * as imageToBase64Module from '../utils/imageToBase64';
import { CreateImagePayload } from '../../types/image';
import * as imageToBase64Module from '../../utils/imageToBase64';
import { createGoogleImage } from './createImage';
const provider = 'google';

View file

@ -1,10 +1,10 @@
import { Content, GoogleGenAI, Part } from '@google/genai';
import { CreateImagePayload, CreateImageResponse } from '../types/image';
import { AgentRuntimeError } from '../utils/createError';
import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
import { imageUrlToBase64 } from '../utils/imageToBase64';
import { parseDataUri } from '../utils/uriParser';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { AgentRuntimeError } from '../../utils/createError';
import { parseGoogleErrorMessage } from '../../utils/googleErrorParser';
import { imageUrlToBase64 } from '../../utils/imageToBase64';
import { parseDataUri } from '../../utils/uriParser';
// Maximum number of images allowed for processing
const MAX_IMAGE_COUNT = 10;

View file

@ -6,8 +6,8 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { ChatStreamPayload } from '@/types/openai/chat';
import * as debugStreamModule from '../utils/debugStream';
import * as imageToBase64Module from '../utils/imageToBase64';
import * as debugStreamModule from '../../utils/debugStream';
import * as imageToBase64Module from '../../utils/imageToBase64';
import { LobeGoogleAI } from './index';
const provider = 'google';

View file

@ -10,24 +10,24 @@ import {
ThinkingConfig,
} from '@google/genai';
import { LobeRuntimeAI } from '../BaseAI';
import { LobeRuntimeAI } from '../../core/BaseAI';
import { GoogleGenerativeAIStream, VertexAIStream } from '../../core/streams';
import {
ChatCompletionTool,
ChatMethodOptions,
ChatStreamPayload,
OpenAIChatMessage,
UserMessageContentPart,
} from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { CreateImagePayload, CreateImageResponse } from '../types/image';
import { AgentRuntimeError } from '../utils/createError';
import { debugStream } from '../utils/debugStream';
import { parseGoogleErrorMessage } from '../utils/googleErrorParser';
import { imageUrlToBase64 } from '../utils/imageToBase64';
import { StreamingResponse } from '../utils/response';
import { safeParseJSON } from '../utils/safeParseJSON';
import { GoogleGenerativeAIStream, VertexAIStream } from '../utils/streams';
import { parseDataUri } from '../utils/uriParser';
} from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
import { CreateImagePayload, CreateImageResponse } from '../../types/image';
import { AgentRuntimeError } from '../../utils/createError';
import { debugStream } from '../../utils/debugStream';
import { parseGoogleErrorMessage } from '../../utils/googleErrorParser';
import { imageUrlToBase64 } from '../../utils/imageToBase64';
import { StreamingResponse } from '../../utils/response';
import { safeParseJSON } from '../../utils/safeParseJSON';
import { parseDataUri } from '../../utils/uriParser';
import { createGoogleImage } from './createImage';
const modelsOffSafetySettings = new Set(['gemini-2.0-flash-exp']);
@ -344,7 +344,7 @@ export class LobeGoogleAI implements LobeRuntimeAI {
};
});
const { MODEL_LIST_CONFIGS, processModelList } = await import('../utils/modelParse');
const { MODEL_LIST_CONFIGS, processModelList } = await import('../../utils/modelParse');
return processModelList(processedModels, MODEL_LIST_CONFIGS.google);
} catch (error) {

View file

@ -2,7 +2,7 @@
import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeGroq } from './index';
testProvider({

View file

@ -1,8 +1,8 @@
import type { ChatModelCard } from '@/types/llm';
import { ModelProvider } from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
export interface GroqModelCard {
context_window: number;

View file

@ -1,8 +1,9 @@
import type { ChatModelCard } from '@lobechat/types';
import uniqueId from 'lodash-es/uniqueId';
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import type { ChatModelCard } from '@/types/index';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
export interface HigressModelCard {
context_length: number;
@ -17,8 +18,8 @@ export interface HigressModelCard {
export const LobeHigressAI = createOpenAICompatibleRuntime({
constructorOptions: {
defaultHeaders: {
'HTTP-Referer': 'https://chat-preview.lobehub.com',
'X-Title': 'Lobe Chat',
'HTTP-Referer': 'https://lobehub.com',
'X-Title': 'LobeHub',
'x-Request-Id': uniqueId('lobe-chat-'),
},
},

View file

@ -3,10 +3,10 @@ import urlJoin from 'url-join';
import type { ChatModelCard } from '@/types/llm';
import { ModelProvider } from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { convertIterableToStream } from '../utils/streams';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { convertIterableToStream } from '../../core/streams';
import { ModelProvider } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
export interface HuggingFaceModelCard {
id: string;

View file

@ -2,7 +2,7 @@
import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeHunyuanAI } from './index';
testProvider({

View file

@ -1,7 +1,7 @@
import type { ChatModelCard } from '@/types/llm';
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
export interface HunyuanModelCard {
id: string;

View file

@ -1,8 +1,8 @@
import type { ChatModelCard } from '@/types/llm';
import { ChatCompletionErrorPayload, ModelProvider } from '../types';
import { AgentRuntimeErrorType } from '../types/error';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ChatCompletionErrorPayload, ModelProvider } from '../../types';
import { AgentRuntimeErrorType } from '../../types/error';
export interface InfiniAIModelCard {
id: string;

View file

@ -1,7 +1,7 @@
// @vitest-environment node
import { ModelProvider } from '@lobechat/model-runtime';
import { testProvider } from '../providerTestUtils';
import { testProvider } from '../../providerTestUtils';
import { LobeInternLMAI } from './index';
testProvider({

View file

@ -1,7 +1,7 @@
import type { ChatModelCard } from '@/types/llm';
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
import { ModelProvider } from '../../types';
export interface InternLMModelCard {
id: string;

Some files were not shown because too many files have changed in this diff Show more