💄 style: video loading circular progress indicator (#12418)

This commit is contained in:
YuTengjing 2026-02-22 15:03:12 +08:00 committed by GitHub
parent 5ab874e877
commit bdc901d1dc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 493 additions and 30 deletions

View file

@ -81,6 +81,7 @@ export interface Generation {
}
export interface GenerationBatch {
avgLatencyMs?: number | null;
config?: GenerationConfig;
createdAt: Date;
generations: Generation[];

View file

@ -193,7 +193,10 @@ export const POST = async (req: Request, { params }: { params: Promise<{ provide
FileSource.VideoGeneration,
);
const duration = Date.now() - asyncTask.createdAt.getTime();
await asyncTaskModel.update(asyncTask.id, {
duration,
status: AsyncTaskStatus.Success,
});
@ -203,7 +206,7 @@ export const POST = async (req: Request, { params }: { params: Promise<{ provide
computePriceParams: {
generateAudio: (batch?.config as RuntimeVideoGenParams)?.generateAudio,
},
latency: Date.now() - asyncTask.createdAt.getTime(),
latency: duration,
metadata: {
asyncTaskId: asyncTask.id,
generationBatchId: generation.generationBatchId!,

View file

@ -134,7 +134,13 @@ export const VideoGenerationBatchItem = memo<VideoGenerationBatchItemProps>(({ b
);
}
return <VideoLoadingItem aspectRatio={batch.config?.aspectRatio} generation={generation} />;
return (
<VideoLoadingItem
aspectRatio={batch.config?.aspectRatio}
avgLatencyMs={batch.avgLatencyMs}
generation={generation}
/>
);
};
const hasReferenceFrames = batch.config?.imageUrl || batch.config?.endImageUrl;

View file

@ -2,39 +2,89 @@
import { LoadingOutlined } from '@ant-design/icons';
import { Block, Center } from '@lobehub/ui';
import { Spin } from 'antd';
import { memo } from 'react';
import { Progress, Spin } from 'antd';
import { memo, useEffect, useState } from 'react';
import { ElapsedTime } from '@/app/[variants]/(main)/image/features/GenerationFeed/GenerationItem/ElapsedTime';
import { AsyncTaskStatus } from '@/types/asyncTask';
import type { Generation } from '@/types/generation';
/** Fallback ETA (3 minutes) used when no historical average latency is available. */
const DEFAULT_AVG_LATENCY_MS = 180_000;

/** sessionStorage key persisting a generation's start timestamp across remounts. */
const getSessionStorageKey = (generationId: string) =>
  'generation_start_time_' + generationId;
/**
 * Estimates generation progress (0–99) from elapsed time versus the model's
 * average latency. The start timestamp is persisted in sessionStorage so the
 * bar survives component remounts / page refreshes within the same tab.
 *
 * Returns `null` while the generation is not active (no bar should render).
 */
const useEstimatedProgress = (generationId: string, avgLatencyMs: number, isActive: boolean) => {
  const [progress, setProgress] = useState<number | null>(null);

  useEffect(() => {
    const storageKey = getSessionStorageKey(generationId);

    if (!isActive) {
      // Generation finished (or never started): hide the bar and drop the
      // persisted start time so sessionStorage does not accumulate stale keys.
      sessionStorage.removeItem(storageKey);
      setProgress(null);
      return;
    }

    const startTime = (() => {
      const stored = sessionStorage.getItem(storageKey);
      const parsed = stored === null ? Number.NaN : Number(stored);
      // Guard against corrupted / non-numeric stored values, which would
      // otherwise make every computed percentage NaN forever.
      if (Number.isFinite(parsed) && parsed > 0) return parsed;
      const now = Date.now();
      sessionStorage.setItem(storageKey, now.toString());
      return now;
    })();

    const update = () => {
      // Clamp to 0 in case of clock skew; cap at 99 so the bar never claims
      // completion before the task actually succeeds.
      const elapsed = Math.max(Date.now() - startTime, 0);
      const pct = Math.min(Math.round((elapsed / avgLatencyMs) * 100), 99);
      setProgress(pct);
    };

    update();
    const timer = setInterval(update, 1000);
    return () => clearInterval(timer);
  }, [isActive, avgLatencyMs, generationId]);

  return progress;
};
interface VideoLoadingItemProps {
// Aspect ratio in 'W:H' form (e.g. '16:9'); converted to CSS 'W/H', defaults to 16/9
aspectRatio?: string;
// Historical average latency (ms) for this model; null/undefined or <= 0 falls back to a default estimate
avgLatencyMs?: number | null;
// The in-flight generation whose loading state is rendered
generation: Generation;
}
const VideoLoadingItem = memo<VideoLoadingItemProps>(({ generation, aspectRatio }) => {
const isGenerating =
generation.task.status === AsyncTaskStatus.Processing ||
generation.task.status === AsyncTaskStatus.Pending;
/**
 * Placeholder card shown while a video generation is pending/processing.
 * Renders a circular estimated-progress indicator when an ETA can be derived,
 * otherwise a plain spinner; once the estimate caps at 99% the raw elapsed
 * time is shown so the user still sees movement.
 */
const VideoLoadingItem = memo<VideoLoadingItemProps>(
  ({ generation, aspectRatio, avgLatencyMs }) => {
    const status = generation.task.status;
    const isGenerating =
      status === AsyncTaskStatus.Pending || status === AsyncTaskStatus.Processing;

    // Fall back to the default estimate when no positive historical latency exists.
    const effectiveLatency =
      avgLatencyMs && avgLatencyMs > 0 ? avgLatencyMs : DEFAULT_AVG_LATENCY_MS;
    const progress = useEstimatedProgress(generation.id, effectiveLatency, isGenerating);

    const blockAspectRatio = aspectRatio?.includes(':')
      ? aspectRatio.replace(':', '/')
      : '16/9';

    const indicator =
      progress !== null ? (
        <Progress percent={progress} size={48} type="circle" />
      ) : (
        <Spin indicator={<LoadingOutlined spin />} />
      );

    return (
      <Block
        align={'center'}
        justify={'center'}
        style={{ aspectRatio: blockAspectRatio }}
        variant={'filled'}
      >
        <Center gap={8}>
          {indicator}
          {progress === 99 && (
            <ElapsedTime generationId={generation.id} isActive={isGenerating} />
          )}
        </Center>
      </Block>
    );
  },
);
VideoLoadingItem.displayName = 'VideoLoadingItem';

View file

@ -3,11 +3,13 @@ import { describe, expect, it, vi } from 'vitest';
import { GenerationBatchModel } from '@/database/models/generationBatch';
import { type GenerationBatchItem } from '@/database/schemas/generation';
import { FileService } from '@/server/services/file';
import { getVideoAvgLatency } from '@/server/services/generation/latency';
import { generationBatchRouter } from '../generationBatch';
vi.mock('@/database/models/generationBatch');
vi.mock('@/server/services/file');
vi.mock('@/server/services/generation/latency');
describe('generationBatchRouter', () => {
const mockCtx = {
@ -373,4 +375,93 @@ describe('generationBatchRouter', () => {
consoleSpy.mockRestore();
});
// Covers the video-only avgLatencyMs enrichment on getGenerationBatches:
// image/omitted types must not trigger latency lookups, video types enrich
// per-model, lookups are deduplicated, and failures degrade to null.
describe('getGenerationBatches latency enrichment', () => {
const mockBatches = [
{ id: 'batch-1', model: 'model-a', generations: [] },
{ id: 'batch-2', model: 'model-b', generations: [] },
];
it('should skip latency enrichment when type is image', async () => {
const mockQuery = vi.fn().mockResolvedValue(mockBatches);
vi.mocked(GenerationBatchModel).mockImplementation(
() => ({ queryGenerationBatchesByTopicIdWithGenerations: mockQuery }) as any,
);
vi.mocked(FileService).mockImplementation(() => ({}) as any);
const caller = generationBatchRouter.createCaller(mockCtx);
const result = await caller.getGenerationBatches({ topicId: 'topic-1', type: 'image' });
// Batches pass through untouched and no latency service call is made.
expect(result).toEqual(mockBatches);
expect(getVideoAvgLatency).not.toHaveBeenCalled();
});
it('should skip latency enrichment when type is omitted', async () => {
const mockQuery = vi.fn().mockResolvedValue(mockBatches);
vi.mocked(GenerationBatchModel).mockImplementation(
() => ({ queryGenerationBatchesByTopicIdWithGenerations: mockQuery }) as any,
);
vi.mocked(FileService).mockImplementation(() => ({}) as any);
const caller = generationBatchRouter.createCaller(mockCtx);
// `type` is optional in the input schema; omitting it behaves like non-video.
const result = await caller.getGenerationBatches({ topicId: 'topic-1' });
expect(result).toEqual(mockBatches);
expect(getVideoAvgLatency).not.toHaveBeenCalled();
});
it('should enrich batches with latency when type is video', async () => {
const mockQuery = vi.fn().mockResolvedValue(mockBatches);
vi.mocked(GenerationBatchModel).mockImplementation(
() => ({ queryGenerationBatchesByTopicIdWithGenerations: mockQuery }) as any,
);
vi.mocked(FileService).mockImplementation(() => ({}) as any);
// Each model resolves to its own latency; unknown models resolve to null.
vi.mocked(getVideoAvgLatency).mockImplementation(async (model) => {
if (model === 'model-a') return 120_000;
if (model === 'model-b') return 180_000;
return null;
});
const caller = generationBatchRouter.createCaller(mockCtx);
const result = await caller.getGenerationBatches({ topicId: 'topic-1', type: 'video' });
expect(result).toEqual([
{ ...mockBatches[0], avgLatencyMs: 120_000 },
{ ...mockBatches[1], avgLatencyMs: 180_000 },
]);
});
it('should deduplicate model latency lookups', async () => {
// Three batches sharing one model must produce a single latency query.
const sameModelBatches = [
{ id: 'batch-1', model: 'model-a', generations: [] },
{ id: 'batch-2', model: 'model-a', generations: [] },
{ id: 'batch-3', model: 'model-a', generations: [] },
];
const mockQuery = vi.fn().mockResolvedValue(sameModelBatches);
vi.mocked(GenerationBatchModel).mockImplementation(
() => ({ queryGenerationBatchesByTopicIdWithGenerations: mockQuery }) as any,
);
vi.mocked(FileService).mockImplementation(() => ({}) as any);
vi.mocked(getVideoAvgLatency).mockResolvedValue(100_000);
const caller = generationBatchRouter.createCaller(mockCtx);
await caller.getGenerationBatches({ topicId: 'topic-1', type: 'video' });
expect(getVideoAvgLatency).toHaveBeenCalledTimes(1);
});
it('should fallback to null when latency lookup fails', async () => {
const mockQuery = vi.fn().mockResolvedValue([mockBatches[0]]);
vi.mocked(GenerationBatchModel).mockImplementation(
() => ({ queryGenerationBatchesByTopicIdWithGenerations: mockQuery }) as any,
);
vi.mocked(FileService).mockImplementation(() => ({}) as any);
// A rejected lookup must not fail the whole query — it degrades to null.
vi.mocked(getVideoAvgLatency).mockRejectedValue(new Error('DB timeout'));
const caller = generationBatchRouter.createCaller(mockCtx);
const result = await caller.getGenerationBatches({ topicId: 'topic-1', type: 'video' });
expect(result).toEqual([{ ...mockBatches[0], avgLatencyMs: null }]);
});
});
});

View file

@ -4,6 +4,7 @@ import { GenerationBatchModel } from '@/database/models/generationBatch';
import { authedProcedure, router } from '@/libs/trpc/lambda';
import { serverDatabase } from '@/libs/trpc/lambda/middleware';
import { FileService } from '@/server/services/file';
import { getVideoAvgLatency } from '@/server/services/generation/latency';
const generationBatchProcedure = authedProcedure.use(serverDatabase).use(async (opts) => {
const { ctx } = opts;
@ -47,9 +48,25 @@ export const generationBatchRouter = router({
}),
getGenerationBatches: generationBatchProcedure
.input(z.object({ topicId: z.string() }))
.input(z.object({ topicId: z.string(), type: z.enum(['image', 'video']).optional() }))
.query(async ({ ctx, input }) => {
return ctx.generationBatchModel.queryGenerationBatchesByTopicIdWithGenerations(input.topicId);
const batches = await ctx.generationBatchModel.queryGenerationBatchesByTopicIdWithGenerations(
input.topicId,
);
if (input.type !== 'video') return batches;
const uniqueModels = [...new Set(batches.map((b) => b.model))];
const latencyMap = new Map<string, number | null>();
await Promise.all(
uniqueModels.map(async (model) => {
const latency = await getVideoAvgLatency(model).catch(() => null);
latencyMap.set(model, latency);
}),
);
return batches.map((b) => ({ ...b, avgLatencyMs: latencyMap.get(b.model) ?? null }));
}),
});

View file

@ -0,0 +1,193 @@
import { beforeEach, describe, expect, it, vi } from 'vitest';
vi.mock('@/database/server', () => ({
getServerDB: vi.fn(),
}));
vi.mock('@/envs/redis', () => ({
getRedisConfig: vi.fn().mockReturnValue({}),
}));
vi.mock('@/libs/redis', () => ({
isRedisEnabled: vi.fn().mockReturnValue(false),
initializeRedis: vi.fn(),
}));
// Must import after vi.mock declarations
const { getVideoAvgLatency } = await import('./latency');
const { getServerDB } = await import('@/database/server');
const { isRedisEnabled, initializeRedis } = await import('@/libs/redis');
/**
 * Builds a minimal mock of the drizzle query-builder chain used by the
 * latency query: select().from().innerJoin().innerJoin().where().orderBy()
 * ultimately resolves to `rows`. Each spy is returned individually so tests
 * can assert on how a given step was called.
 */
function createMockDB(rows: { latency: number | null }[]) {
// Wired back-to-front: orderBy is the chain tail that resolves the rows.
const orderBy = vi.fn().mockResolvedValue(rows);
const where = vi.fn().mockReturnValue({ orderBy });
const innerJoin2 = vi.fn().mockReturnValue({ where });
const innerJoin1 = vi.fn().mockReturnValue({ innerJoin: innerJoin2 });
const from = vi.fn().mockReturnValue({ innerJoin: innerJoin1 });
const select = vi.fn().mockReturnValue({ from });
return { select, from, innerJoin1, innerJoin2, where, orderBy } as const;
}
// Unit tests for getVideoAvgLatency: the trimmed-mean math over mocked DB
// rows, and the best-effort Redis caching layer (hits, misses, failures).
describe('getVideoAvgLatency', () => {
beforeEach(() => {
// Reset all spies and default to "Redis disabled" so each test opts in explicitly.
vi.clearAllMocks();
vi.mocked(isRedisEnabled).mockReturnValue(false);
});
it('should return null when no samples exist', async () => {
const db = createMockDB([]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
expect(result).toBeNull();
});
it('should return simple average when fewer than 5 samples', async () => {
const db = createMockDB([
{ latency: 100_000 },
{ latency: 120_000 },
{ latency: 140_000 },
]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
// (100000 + 120000 + 140000) / 3 = 120000
expect(result).toBe(120_000);
});
it('should return trimmed mean when 5 or more samples', async () => {
// 10 samples, sorted ascending (DB returns sorted by duration)
const db = createMockDB([
{ latency: 10_000 }, // trimmed (bottom 10%)
{ latency: 50_000 },
{ latency: 60_000 },
{ latency: 70_000 },
{ latency: 80_000 },
{ latency: 90_000 },
{ latency: 100_000 },
{ latency: 110_000 },
{ latency: 120_000 },
{ latency: 500_000 }, // trimmed (top 10%)
]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
// trimCount = floor(10 * 0.1) = 1, slice(1, 9)
// [50000, 60000, 70000, 80000, 90000, 100000, 110000, 120000]
// sum = 680000, avg = 85000
expect(result).toBe(85_000);
});
it('should return exact value for single sample', async () => {
const db = createMockDB([{ latency: 95_000 }]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
expect(result).toBe(95_000);
});
it('should return trimmed mean for exactly 5 samples', async () => {
const db = createMockDB([
{ latency: 10_000 },
{ latency: 20_000 },
{ latency: 30_000 },
{ latency: 40_000 },
{ latency: 50_000 },
]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
// trimCount = floor(5 * 0.1) = 0, no trimming
// all 5 samples averaged: (10000+20000+30000+40000+50000)/5 = 30000
expect(result).toBe(30_000);
});
describe('Redis caching', () => {
it('should return cached value from Redis', async () => {
const mockRedis = { get: vi.fn().mockResolvedValue('120000'), set: vi.fn() };
vi.mocked(isRedisEnabled).mockReturnValue(true);
vi.mocked(initializeRedis).mockResolvedValue(mockRedis as any);
const result = await getVideoAvgLatency('test-model');
// Cache hit: value is parsed from the cached string, DB is never touched.
expect(result).toBe(120_000);
expect(mockRedis.get).toHaveBeenCalledWith('video:avg_latency:test-model');
});
it('should return null when cached value is "null"', async () => {
// The sentinel string 'null' caches a "no data" result.
const mockRedis = { get: vi.fn().mockResolvedValue('null'), set: vi.fn() };
vi.mocked(isRedisEnabled).mockReturnValue(true);
vi.mocked(initializeRedis).mockResolvedValue(mockRedis as any);
const result = await getVideoAvgLatency('test-model');
expect(result).toBeNull();
});
it('should query DB and write cache on cache miss', async () => {
const mockRedis = { get: vi.fn().mockResolvedValue(null), set: vi.fn() };
vi.mocked(isRedisEnabled).mockReturnValue(true);
vi.mocked(initializeRedis).mockResolvedValue(mockRedis as any);
const db = createMockDB([{ latency: 100_000 }, { latency: 200_000 }]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
expect(result).toBe(150_000);
// Computed value is written back with the 5-minute TTL.
expect(mockRedis.set).toHaveBeenCalledWith('video:avg_latency:test-model', '150000', {
ex: 300,
});
});
it('should cache null result when no DB data', async () => {
const mockRedis = { get: vi.fn().mockResolvedValue(null), set: vi.fn() };
vi.mocked(isRedisEnabled).mockReturnValue(true);
vi.mocked(initializeRedis).mockResolvedValue(mockRedis as any);
const db = createMockDB([]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
expect(result).toBeNull();
// Even "no data" is cached (as the string 'null') to avoid hammering the DB.
expect(mockRedis.set).toHaveBeenCalledWith('video:avg_latency:test-model', 'null', {
ex: 300,
});
});
it('should fall through to DB when Redis is unavailable', async () => {
vi.mocked(isRedisEnabled).mockReturnValue(true);
vi.mocked(initializeRedis).mockRejectedValue(new Error('Connection refused'));
const db = createMockDB([{ latency: 80_000 }]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
expect(result).toBe(80_000);
});
it('should fall through when Redis get throws', async () => {
const mockRedis = {
get: vi.fn().mockRejectedValue(new Error('Redis error')),
set: vi.fn(),
};
vi.mocked(isRedisEnabled).mockReturnValue(true);
vi.mocked(initializeRedis).mockResolvedValue(mockRedis as any);
const db = createMockDB([{ latency: 90_000 }]);
vi.mocked(getServerDB).mockResolvedValue(db as any);
const result = await getVideoAvgLatency('test-model');
expect(result).toBe(90_000);
});
});
});

View file

@ -0,0 +1,99 @@
import { AsyncTaskStatus, AsyncTaskType } from '@lobechat/types';
import { and, eq, gte, isNotNull, sql } from 'drizzle-orm';
import { asyncTasks, generationBatches, generations } from '@/database/schemas';
import { getServerDB } from '@/database/server';
import { getRedisConfig } from '@/envs/redis';
import { initializeRedis, isRedisEnabled, type RedisClient } from '@/libs/redis';
const CACHE_KEY_PREFIX = 'video:avg_latency';
const CACHE_TTL_SECONDS = 300; // cache entries live for 5 minutes
/** Trim ratio: drop the top and bottom 10% of samples before averaging. */
const TRIM_RATIO = 0.1;

/** Returns a Redis client when Redis is configured and enabled, otherwise null. */
async function getRedis(): Promise<RedisClient | null> {
  const config = getRedisConfig();
  return isRedisEnabled(config) ? initializeRedis(config) : null;
}

/** Builds the per-model cache key, e.g. `video:avg_latency:<model>`. */
function getCacheKey(model: string): string {
  return [CACHE_KEY_PREFIX, model].join(':');
}
/**
 * Computes the trimmed-mean latency (ms) of successful video generations for
 * `model` over the last 3 days, or null when no samples exist.
 *
 * Rows come back sorted ascending by duration, so trimming is a simple slice.
 */
async function queryTrimmedAvgLatency(model: string): Promise<number | null> {
const db = await getServerDB();
// Raw SQL keeps the time-window cutoff computed on the DB side (Postgres interval).
const threeDaysAgo = sql`NOW() - INTERVAL '3 days'`;
const rows = await db
.select({ latency: asyncTasks.duration })
.from(asyncTasks)
.innerJoin(generations, eq(generations.asyncTaskId, asyncTasks.id))
.innerJoin(generationBatches, eq(generations.generationBatchId, generationBatches.id))
.where(
and(
eq(asyncTasks.type, AsyncTaskType.VideoGeneration),
eq(asyncTasks.status, AsyncTaskStatus.Success),
eq(generationBatches.model, model),
gte(asyncTasks.createdAt, threeDaysAgo),
isNotNull(asyncTasks.duration),
),
)
.orderBy(asyncTasks.duration);
if (rows.length === 0) return null;
// Non-null assertion is safe: the isNotNull(duration) filter above guarantees it.
const latencies = rows.map((r) => r.latency!);
// Not enough samples to trim meaningfully, just average all
if (latencies.length < 5) {
const sum = latencies.reduce((acc, v) => acc + v, 0);
return Math.round(sum / latencies.length);
}
// Drop the lowest/highest TRIM_RATIO share of the sorted samples to damp
// outliers (e.g. one stuck or instantly-failed task), then average the rest.
const trimCount = Math.floor(latencies.length * TRIM_RATIO);
const trimmed = latencies.slice(trimCount, latencies.length - trimCount);
const sum = trimmed.reduce((acc, v) => acc + v, 0);
return Math.round(sum / trimmed.length);
}
/**
 * Average latency (ms) of recent successful video generations for `model`,
 * with a best-effort Redis cache in front of the DB query. Returns null when
 * no samples exist. Cache failures never fail the lookup — they degrade to a
 * direct DB query.
 */
export async function getVideoAvgLatency(model: string): Promise<number | null> {
  // Redis being unreachable is not an error condition here.
  const redis: RedisClient | null = await getRedis().catch(() => null);
  const cacheKey = getCacheKey(model);

  if (redis) {
    try {
      const hit = await redis.get(cacheKey);
      if (hit !== null && hit !== undefined) {
        // The string 'null' is cached deliberately to remember "no data".
        return hit === 'null' ? null : Number(hit);
      }
    } catch {
      // Cache read failed — treat as a miss and fall through to the DB.
    }
  }

  const computed = await queryTrimmedAvgLatency(model);

  if (redis) {
    try {
      await redis.set(cacheKey, String(computed ?? 'null'), { ex: CACHE_TTL_SECONDS });
    } catch {
      // Cache write failed — ignore; the computed result is still returned.
    }
  }

  return computed;
}

View file

@ -10,8 +10,11 @@ class GenerationBatchService {
/**
* Get generation batches for a specific topic
*/
async getGenerationBatches(topicId: string): Promise<GenerationBatchWithAsyncTaskId[]> {
return lambdaClient.generationBatch.getGenerationBatches.query({ topicId });
async getGenerationBatches(
topicId: string,
type?: 'image' | 'video',
): Promise<GenerationBatchWithAsyncTaskId[]> {
return lambdaClient.generationBatch.getGenerationBatches.query({ topicId, type });
}
/**

View file

@ -167,7 +167,7 @@ export class GenerationBatchActionImpl {
return useClientDataSWR<GenerationBatch[]>(
topicId ? [SWR_USE_FETCH_GENERATION_BATCHES, topicId] : null,
async ([, topicId]: [string, string]) => {
return generationBatchService.getGenerationBatches(topicId);
return generationBatchService.getGenerationBatches(topicId, 'image');
},
{
onSuccess: (data) => {

View file

@ -250,7 +250,7 @@ export const createGenerationBatchSlice: StateCreator<
useClientDataSWR<GenerationBatch[]>(
topicId ? [SWR_USE_FETCH_GENERATION_BATCHES, topicId] : null,
async ([, topicId]: [string, string]) => {
return generationBatchService.getGenerationBatches(topicId);
return generationBatchService.getGenerationBatches(topicId, 'video');
},
{
onSuccess: (data) => {