feat: Add SiliconCloud model provider (#3092)

* feat: Add SiliconCloud as a model provider

* model icon

* Update index.ts

* Update siliconcloud.ts

* Update .env.example

* Add docs

* Update siliconcloud.ts

* Update siliconcloud.ts

* Update siliconcloud.ts

* Update siliconcloud.ts
This commit is contained in:
sxjeru 2024-08-14 10:36:04 +08:00 committed by GitHub
parent a5ac9901f0
commit 0781dc5233
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 279 additions and 2 deletions

View file

@ -112,6 +112,10 @@ OPENAI_API_KEY=sk-xxxxxxxxx
# QWEN_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### SiliconCloud AI ####
# SILICONCLOUD_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
############ Market Service ############
########################################

View file

@ -0,0 +1,46 @@
---
title: Using SiliconCloud API Key in LobeChat
description: Learn how to configure and use SiliconCloud's large language models in LobeChat, get your API key, and start chatting.
tags:
- LobeChat
- SiliconCloud
- API Key
- Web UI
---
# Using SiliconCloud in LobeChat
[SiliconCloud](https://siliconflow.cn/zh-cn/siliconcloud) is a cost-effective large model service provider, offering various services such as text generation and image generation.
This document will guide you on how to use SiliconCloud in LobeChat:
<Steps>
### Step 1: Get your SiliconCloud API Key
- First, you need to register and log in to [SiliconCloud](https://cloud.siliconflow.cn/auth/login)
<Callout type={'info'}>Currently, new users can get 14 yuan free credit upon registration</Callout>
- Go to the `API Key` menu and click `Create New API Key`
- Click to copy the API key and store it securely
### Step 2: Configure SiliconCloud in LobeChat
- Visit the `App Settings` interface of LobeChat
- Under `Language Model`, find the `SiliconCloud` settings
- Enable SiliconCloud and enter the obtained API key
- Choose a SiliconCloud model for your assistant and start chatting
<Callout type={'warning'}>
  During use, you may incur charges from the API service provider; please refer to SiliconCloud's pricing policy.
</Callout>
</Steps>
Now you can use the models provided by SiliconCloud for conversation in LobeChat.

View file

@ -0,0 +1,46 @@
---
title: 在 LobeChat 中使用 SiliconCloud API Key
description: 学习如何在 LobeChat 中配置和使用 SiliconCloud 提供的大语言模型,获取 API 密钥并开始对话。
tags:
- LobeChat
- SiliconCloud
- API密钥
- Web UI
---
# 在 LobeChat 中使用 SiliconCloud
[SiliconCloud](https://siliconflow.cn/zh-cn/siliconcloud) 是高性价比的大模型服务提供商,提供文本生成与图片生成等多种服务。
本文档将指导你如何在 LobeChat 中使用 SiliconCloud:
<Steps>
### 步骤一:获取 SiliconCloud API 密钥
- 首先,你需要注册并登录 [SiliconCloud](https://cloud.siliconflow.cn/auth/login)
<Callout type={'info'}>当前新用户注册可获赠 14 元免费额度</Callout>
- 进入 `API密钥` 菜单,并点击 `创建新API密钥`
- 点击复制 API 密钥并妥善保存
### 步骤二:在 LobeChat 中配置 SiliconCloud
- 访问 LobeChat 的 `应用设置` 界面
- 在 `语言模型` 下找到 `SiliconCloud` 的设置项
- 打开 SiliconCloud 并填入获取的 API 密钥
- 为你的助手选择一个 SiliconCloud 模型即可开始对话
<Callout type={'warning'}>
在使用过程中你可能需要向 API 服务提供商付费,请参考 SiliconCloud 的相关费用政策。
</Callout>
</Steps>
至此你已经可以在 LobeChat 中使用 SiliconCloud 提供的模型进行对话了。

View file

@ -14,6 +14,7 @@ import {
Novita,
OpenRouter,
Perplexity,
SiliconCloud,
Stepfun,
Together,
Tongyi,
@ -40,6 +41,7 @@ import {
OpenRouterProviderCard,
PerplexityProviderCard,
QwenProviderCard,
SiliconCloudProviderCard,
StepfunProviderCard,
TaichuProviderCard,
TogetherAIProviderCard,
@ -198,6 +200,11 @@ export const useProviderList = (): ProviderItem[] => {
docUrl: urlJoin(BASE_DOC_URL, 'ai360'),
title: <Ai360.Combine size={ 20 } type={ 'color' } />,
},
{
...SiliconCloudProviderCard,
docUrl: urlJoin(BASE_DOC_URL, 'siliconcloud'),
title: <SiliconCloud.Combine size={20} type={'color'} />,
},
],
[azureProvider, ollamaProvider, ollamaProvider, bedrockProvider],
);

View file

@ -198,6 +198,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
const apiKey = apiKeyManager.pick(payload?.apiKey || AI360_API_KEY);
return { apiKey };
}
case ModelProvider.SiliconCloud: {
const { SILICONCLOUD_API_KEY } = getLLMConfig();
const apiKey = apiKeyManager.pick(payload?.apiKey || SILICONCLOUD_API_KEY);
return { apiKey };
}
}

View file

@ -51,7 +51,7 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model: originModel, size = 12
// currently supported models, maybe not in its own provider
if (model.includes('gpt-3')) return <OpenAI.Avatar size={size} type={'gpt3'} />;
if (model.includes('gpt-4')) return <OpenAI.Avatar size={size} type={'gpt4'} />;
if (model.startsWith('glm') || model.includes('chatglm')) return <ChatGLM.Avatar size={size} />;
if (model.includes('glm-') || model.includes('chatglm')) return <ChatGLM.Avatar size={size} />;
if (model.startsWith('codegeex')) return <CodeGeeX.Avatar size={size} />;
if (model.includes('deepseek')) return <DeepSeek.Avatar size={size} />;
if (model.includes('claude')) return <Claude.Avatar size={size} />;

View file

@ -17,6 +17,7 @@ import {
OpenAI,
OpenRouter,
Perplexity,
SiliconCloud,
Stepfun,
Together,
Tongyi,
@ -134,6 +135,10 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
return <Ai360 size={20} />;
}
case ModelProvider.SiliconCloud: {
return <SiliconCloud size={20} />;
}
default: {
return null;
}

View file

@ -87,6 +87,9 @@ export const getLLMConfig = () => {
ENABLED_AI360: z.boolean(),
AI360_API_KEY: z.string().optional(),
ENABLED_SILICONCLOUD: z.boolean(),
SILICONCLOUD_API_KEY: z.string().optional(),
},
runtimeEnv: {
API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@ -171,6 +174,9 @@ export const getLLMConfig = () => {
ENABLED_AI360: !!process.env.AI360_API_KEY,
AI360_API_KEY: process.env.AI360_API_KEY,
ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
},
});
};

View file

@ -17,6 +17,7 @@ import OpenAIProvider from './openai';
import OpenRouterProvider from './openrouter';
import PerplexityProvider from './perplexity';
import QwenProvider from './qwen';
import SiliconCloudProvider from './siliconcloud';
import StepfunProvider from './stepfun';
import TaichuProvider from './taichu';
import TogetherAIProvider from './togetherai';
@ -45,6 +46,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
BaichuanProvider.chatModels,
TaichuProvider.chatModels,
Ai360Provider.chatModels,
SiliconCloudProvider.chatModels,
].flat();
export const DEFAULT_MODEL_PROVIDER_LIST = [
@ -70,6 +72,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
BaichuanProvider,
TaichuProvider,
Ai360Provider,
SiliconCloudProvider,
];
export const filterEnabledModels = (provider: ModelProviderCard) => {
@ -98,6 +101,7 @@ export { default as OpenAIProviderCard } from './openai';
export { default as OpenRouterProviderCard } from './openrouter';
export { default as PerplexityProviderCard } from './perplexity';
export { default as QwenProviderCard } from './qwen';
export { default as SiliconCloudProviderCard } from './siliconcloud';
export { default as StepfunProviderCard } from './stepfun';
export { default as TaichuProviderCard } from './taichu';
export { default as TogetherAIProviderCard } from './togetherai';

View file

@ -0,0 +1,127 @@
import { ModelProviderCard } from '@/types/llm';
// ref https://siliconflow.cn/zh-cn/models
const SiliconCloud: ModelProviderCard = {
chatModels: [
{
enabled: true,
id: 'Qwen/Qwen2-72B-Instruct',
tokens: 32_768,
},
{
enabled: true,
id: 'Qwen/Qwen2-Math-72B-Instruct',
tokens: 32_768,
},
{
enabled: true,
id: 'Qwen/Qwen2-57B-A14B-Instruct',
tokens: 32_768,
},
{
id: 'Qwen/Qwen2-7B-Instruct',
tokens: 32_768,
},
{
id: 'Qwen/Qwen2-1.5B-Instruct',
tokens: 32_768,
},
{
id: 'Qwen/Qwen1.5-110B-Chat',
tokens: 32_768,
},
{
id: 'Qwen/Qwen1.5-32B-Chat',
tokens: 32_768,
},
{
id: 'Qwen/Qwen1.5-14B-Chat',
tokens: 32_768,
},
{
id: 'Qwen/Qwen1.5-7B-Chat',
tokens: 32_768,
},
{
id: 'deepseek-ai/DeepSeek-Coder-V2-Instruct',
tokens: 32_768,
},
{
enabled: true,
id: 'deepseek-ai/DeepSeek-V2-Chat',
tokens: 32_768,
},
{
id: 'deepseek-ai/deepseek-llm-67b-chat',
tokens: 32_768,
},
{
id: 'THUDM/glm-4-9b-chat',
tokens: 32_768,
},
{
id: 'THUDM/chatglm3-6b',
tokens: 32_768,
},
{
enabled: true,
id: '01-ai/Yi-1.5-34B-Chat-16K',
tokens: 16_384,
},
{
id: '01-ai/Yi-1.5-9B-Chat-16K',
tokens: 16_384,
},
{
id: '01-ai/Yi-1.5-6B-Chat',
tokens: 4096,
},
{
id: 'internlm/internlm2_5-7b-chat',
tokens: 32_768,
},
{
id: 'google/gemma-2-9b-it',
tokens: 8192,
},
{
id: 'google/gemma-2-27b-it',
tokens: 8192,
},
{
id: 'internlm/internlm2_5-20b-chat',
tokens: 32_768,
},
{
id: 'meta-llama/Meta-Llama-3.1-8B-Instruct',
tokens: 32_768,
},
{
enabled: true,
id: 'meta-llama/Meta-Llama-3.1-70B-Instruct',
tokens: 32_768,
},
{
id: 'meta-llama/Meta-Llama-3.1-405B-Instruct',
tokens: 32_768,
},
{
id: 'meta-llama/Meta-Llama-3-70B-Instruct',
tokens: 8192,
},
{
id: 'mistralai/Mistral-7B-Instruct-v0.2',
tokens: 32_768,
},
{
id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
tokens: 32_768,
},
],
checkModel: 'Qwen/Qwen2-1.5B-Instruct',
id: 'siliconcloud',
modelList: { showModelFetcher: true },
name: 'SiliconCloud',
};
export default SiliconCloud;

View file

@ -15,6 +15,7 @@ import {
OpenRouterProviderCard,
PerplexityProviderCard,
QwenProviderCard,
SiliconCloudProviderCard,
StepfunProviderCard,
TaichuProviderCard,
TogetherAIProviderCard,
@ -94,6 +95,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
enabled: false,
enabledModels: filterEnabledModels(QwenProviderCard),
},
siliconcloud: {
enabled: false,
enabledModels: filterEnabledModels(SiliconCloudProviderCard),
},
stepfun: {
enabled: false,
enabledModels: filterEnabledModels(StepfunProviderCard),

View file

@ -20,6 +20,7 @@ import { LobeOpenAI } from './openai';
import { LobeOpenRouterAI } from './openrouter';
import { LobePerplexityAI } from './perplexity';
import { LobeQwenAI } from './qwen';
import { LobeSiliconCloudAI } from './siliconcloud';
import { LobeStepfunAI } from './stepfun';
import { LobeTaichuAI } from './taichu';
import { LobeTogetherAI } from './togetherai';
@ -122,6 +123,7 @@ class AgentRuntime {
openrouter: Partial<ClientOptions>;
perplexity: Partial<ClientOptions>;
qwen: Partial<ClientOptions>;
siliconcloud: Partial<ClientOptions>;
stepfun: Partial<ClientOptions>;
taichu: Partial<ClientOptions>;
togetherai: Partial<ClientOptions>;
@ -247,6 +249,11 @@ class AgentRuntime {
runtimeModel = new LobeAi360AI(params.ai360 ?? {});
break
}
case ModelProvider.SiliconCloud: {
runtimeModel = new LobeSiliconCloudAI(params.siliconcloud ?? {});
break
}
}
return new AgentRuntime(runtimeModel);

View file

@ -0,0 +1,10 @@
import { ModelProvider } from '../types';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
baseURL: 'https://api.siliconflow.cn/v1',
debug: {
chatCompletion: () => process.env.DEBUG_SILICONCLOUD_CHAT_COMPLETION === '1',
},
provider: ModelProvider.SiliconCloud,
});

View file

@ -39,6 +39,7 @@ export enum ModelProvider {
OpenRouter = 'openrouter',
Perplexity = 'perplexity',
Qwen = 'qwen',
SiliconCloud = 'siliconcloud',
Stepfun = 'stepfun',
Taichu = 'taichu',
TogetherAI = 'togetherai',

View file

@ -39,6 +39,7 @@ export const getServerGlobalConfig = () => {
ENABLED_BAICHUAN,
ENABLED_TAICHU,
ENABLED_AI360,
ENABLED_SILICONCLOUD,
ENABLED_AZURE_OPENAI,
AZURE_MODEL_LIST,
@ -112,7 +113,7 @@ export const getServerGlobalConfig = () => {
},
perplexity: { enabled: ENABLED_PERPLEXITY },
qwen: { enabled: ENABLED_QWEN },
siliconcloud: { enabled: ENABLED_SILICONCLOUD },
stepfun: { enabled: ENABLED_STEPFUN },
taichu: { enabled: ENABLED_TAICHU },

View file

@ -35,6 +35,7 @@ export interface UserKeyVaults {
password?: string;
perplexity?: OpenAICompatibleKeyVault;
qwen?: OpenAICompatibleKeyVault;
siliconcloud?: OpenAICompatibleKeyVault;
stepfun?: OpenAICompatibleKeyVault;
taichu?: OpenAICompatibleKeyVault;
togetherai?: OpenAICompatibleKeyVault;