diff --git a/.eslintrc.js b/.eslintrc.js index cc14828d0c..4aa82d9c6e 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -30,6 +30,7 @@ config.overrides = [ files: ['*.mdx'], rules: { '@typescript-eslint/no-unused-vars': 1, + 'micromark-extension-mdx-jsx': 0, 'no-undef': 0, 'react/jsx-no-undef': 0, 'react/no-unescaped-entities': 0, diff --git a/.i18nrc.js b/.i18nrc.js index 0e77a6bbe3..e67ef5bde4 100644 --- a/.i18nrc.js +++ b/.i18nrc.js @@ -33,18 +33,13 @@ module.exports = defineConfig({ }, markdown: { reference: - '你需要保持 mdx 的组件格式,输出文本不需要在最外层包裹任何代码块语法。\n' + + 'You need to maintain the component format of the mdx file; the output text does not need to be wrapped in any code block syntax on the outermost layer.\n' + fs.readFileSync(path.join(__dirname, 'docs/glossary.md'), 'utf-8'), - entry: ['./README.zh-CN.md', './contributing/**/*.zh-CN.md', './docs/**/*.zh-CN.mdx'], - entryLocale: 'zh-CN', - outputLocales: ['en-US'], + entry: ['./README.md', './docs/**/*.md', './docs/**/*.mdx'], + entryLocale: 'en-US', + outputLocales: ['zh-CN'], includeMatter: true, - exclude: [ - './src/**/*', - './contributing/_Sidebar.md', - './contributing/_Footer.md', - './contributing/Home.md', - ], + exclude: ['./README.zh-CN.md', './docs/**/*.zh-CN.md', './docs/**/*.zh-CN.mdx'], outputExtensions: (locale, { filePath }) => { if (filePath.includes('.mdx')) { if (locale === 'en-US') return '.mdx'; diff --git a/.remarkrc.mdx.js b/.remarkrc.mdx.js index 22af1dc8a1..8da09727c3 100644 --- a/.remarkrc.mdx.js +++ b/.remarkrc.mdx.js @@ -2,5 +2,5 @@ const config = require('@lobehub/lint').remarklint; module.exports = { ...config, - plugins: ['remark-mdx', ...config.plugins], + plugins: ['remark-mdx', ...config.plugins, ['remark-lint-file-extension', false]], }; diff --git a/README.md b/README.md index 3dd17094d4..4fc208e9d1 100644 --- a/README.md +++ b/README.md @@ -422,11 +422,13 @@ Regardless of which database you choose, LobeChat can provide you with an excell ### [Support Multi-User 
Management][docs-feat-auth] -LobeChat supports multi-user management and provides flexible user authentication solutions: +LobeChat supports multi-user management and provides two main user authentication and management solutions to meet different needs: -- **Better Auth**: LobeChat integrates `Better Auth`, a modern and flexible authentication library that supports multiple authentication methods, including OAuth, email login, credential login, magic link, and more. With `Better Auth`, you can easily implement user registration, login, session management, social login, multi-factor authentication (MFA), and other functions to ensure the security and privacy of user data. +- **next-auth**: LobeChat integrates `next-auth`, a flexible and powerful identity verification library that supports multiple authentication methods, including OAuth, email login, credential login, etc. With `next-auth`, you can easily implement user registration, login, session management, social login, and other functions to ensure the security and privacy of user data. -- **next-auth**: LobeChat also supports `next-auth`, a widely-used identity verification library with extensive OAuth provider support and flexible session management options. +- [**Clerk**](https://go.clerk.com/exgqLG0): For users who need more advanced user management features, LobeChat also supports `Clerk`, a modern user management platform. `Clerk` provides richer functions, such as multi-factor authentication (MFA), user profile management, login activity monitoring, etc. With `Clerk`, you can get higher security and flexibility, and easily cope with complex user management needs. + +Regardless of which user management solution you choose, LobeChat can provide you with an excellent user experience and powerful functional support.
diff --git a/docs/.cdn.cache.json b/docs/.cdn.cache.json index 92b1655b88..c8b60b2aff 100644 --- a/docs/.cdn.cache.json +++ b/docs/.cdn.cache.json @@ -1,26 +1,429 @@ { + "https://file.rene.wang/clipboard-1768907980491-9cc0669fc3a38.png": "/blog/assets8be3a46c8f9c5d3b61bc541f44b7f245.webp", + "https://file.rene.wang/clipboard-1768908081787-ed9eb1cb78bdb.png": "/blog/assetsab009b79dd794f02aec24b7607f342e8.webp", + "https://file.rene.wang/clipboard-1768908121691-b3517bf882633.png": "/blog/assetsd3cae44cba0d3f57df6440b46246e5e7.webp", + "https://file.rene.wang/clipboard-1768908209289-9d3ecff50142f.png": "/blog/assets75a5cf08b3e432d2477899d30acc9d47.webp", + "https://file.rene.wang/clipboard-1768908230723-3fce0ae5baf9b.png": "/blog/assets8e9b164fa30c795850ce8fa8ef7e7c24.webp", + "https://file.rene.wang/clipboard-1768908420554-e3b90ce1a2e5.png": "/blog/assets98cddf4b80b8bac0c250a5236062d198.webp", + "https://file.rene.wang/clipboard-1768908630618-30748e3c30adf.png": "/blog/assetsdd913561927c64d32bd390cee6846f9a.webp", + "https://file.rene.wang/clipboard-1768908653789-cc68b35708f2b.png": "/blog/assets3c160860feef0bd7c653eeb46f683445.webp", + "https://file.rene.wang/clipboard-1768908678216-fb89263572506.png": "/blog/assets974acc551878f2f395518a3fbb9bd924.webp", + "https://file.rene.wang/clipboard-1768908724824-2812deaf9e521.png": "/blog/assets62f82cba03d5dcad5465ec6c626aeb05.webp", + "https://file.rene.wang/clipboard-1768908820019-98c5a12b19fd6.png": "/blog/assets99a30932374a5f6193de7c842a34850f.webp", + "https://file.rene.wang/clipboard-1768908871983-94def40e520bb.png": "/blog/assets8b75f09941172c3a8620617cddfb7a4b.webp", + "https://file.rene.wang/clipboard-1768908943850-00bc4e05bc6bb.png": "/blog/assets7caf7e0d83b8a4f3d177283bb0bc55d1.webp", + "https://file.rene.wang/clipboard-1768960626260-35c3384ade91c.png": "/blog/assetsbcd98b0913d2dfc30d5a2b5523115d33.webp", + "https://file.rene.wang/clipboard-1768961800651-151a9b076745c.png": 
"/blog/assets6ebefe8183f31de4de5bac1a921fb153.webp", + "https://file.rene.wang/clipboard-1768961895831-d380de3507b63.png": "/blog/assets27b2bf8596f8e65d545322c66a0d81a2.webp", + "https://file.rene.wang/clipboard-1768961929559-f1d7d74c54ca1.png": "/blog/assets07820089deb72e5636024ae7e3d1855f.webp", + "https://file.rene.wang/clipboard-1768962259734-dc28b56340cb1.png": "/blog/assetseb9b70814679291052dfa4618a44a856.webp", + "https://file.rene.wang/clipboard-1768962364687-81d03308f7b3f.png": "/blog/assetse14ddb728d66905c164664b8b5e044d3.webp", + "https://file.rene.wang/clipboard-1768962379267-f580e519a03f2.png": "/blog/assetsd6eba2b1881977c9533ba86c1cd3dfce.webp", + "https://file.rene.wang/clipboard-1768963219814-ba3d14d1facf3.png": "/blog/assets2964497066067ca0588a7767eb4c1709.webp", + "https://file.rene.wang/clipboard-1768963283361-5b62ccd3a1862.png": "/blog/assetsbcd98b0913d2dfc30d5a2b5523115d33.webp", + "https://file.rene.wang/clipboard-1768976299511-a09372a7e6fd9.png": "/blog/assets60bf3667e56862024d047444d9b4c2fb.webp", + "https://file.rene.wang/clipboard-1768976422506-24d64ffd3fd26.png": "/blog/assets7bf0102f1cae47bf24aeb01eaa2796d9.webp", + "https://file.rene.wang/clipboard-1768976706650-e810aaaa86f44.png": "/blog/assets7caf7e0d83b8a4f3d177283bb0bc55d1.webp", + "https://file.rene.wang/clipboard-1769000274218-d02c4c8024709.png": "/blog/assets3cdf933016e6f53bca12b8cedb17061f.webp", + "https://file.rene.wang/clipboard-1769000328858-48f0503640245.png": "/blog/assets04d6fae3d9aa3c33697028f1cc9f4706.webp", + "https://file.rene.wang/clipboard-1769001225111-af0244fff25f3.png": "/blog/assetsb811f2aae8e8346aef16793d6bd10f88.webp", + "https://file.rene.wang/clipboard-1769049968016-4a62699ad0b36.png": "/blog/assetsa88afc117d283790187c366f29d03284.webp", + "https://file.rene.wang/clipboard-1769050621749-3709f9939d7d4.png": "/blog/assetsc89931ace11a936d87d0b87a6bca1069.webp", + "https://file.rene.wang/clipboard-1769050642741-4b72fd17934cf.png": 
"/blog/assetsdc6362a2c6db476fc1b7b9d6bf443af2.webp", + "https://file.rene.wang/clipboard-1769050808722-bd6e724a5b54b.png": "/blog/assets55bea1dc3e938580591d75d72908835a.webp", + "https://file.rene.wang/clipboard-1769050853107-750be5f83cbe3.png": "/blog/assetse6139c4d5b1b26b05f41a579d98fc6f3.webp", + "https://file.rene.wang/clipboard-1769052898732-b7bb78ae1f1f8.png": "/blog/assetsafa74c85aafea8a057e6047b0823e280.webp", + "https://file.rene.wang/clipboard-1769056077960-cac34bc157a65.png": "/blog/assetsa8e173bec038d1d21d413f6fa0ace342.webp", + "https://file.rene.wang/clipboard-1769155711708-710967bee57bc.png": "/blog/assets7f3b38c1d76cceb91edb29d6b1eb60db.webp", + "https://file.rene.wang/clipboard-1769155737647-1b4fc6558f029.png": "/blog/assets3a7f0b29839603336e39e923b423409b.webp", + "https://file.rene.wang/clipboard-1769155791342-7f43b72cc6b42.png": "/blog/assets35e6aa692b0c16009c61964279514166.webp", + "https://file.rene.wang/clipboard-1769155818070-7eb403550b6c7.png": "/blog/assetsce5d6dc93676f974be2e162e8ace03f0.webp", + "https://file.rene.wang/clipboard-1769155880302-272fbd2c5290b.png": "/blog/assetsdf48eed9de76b7e37c269b294285f09d.webp", + "https://file.rene.wang/clipboard-1769155935435-93dab92dd0f44.png": "/blog/assets902eb746fe2042fc2ea831c71002be72.webp", + "https://file.rene.wang/clipboard-1769155973881-ff1ee142d5b8f.png": "/blog/assets5cc27b8cae995074da20d4ffe06a1460.webp", + "https://file.rene.wang/clipboard-1769156005535-c2e79e11f4b56.png": "/blog/assets2a36d86a4eed6e7938dd6e9c684701ed.webp", + "https://file.rene.wang/clipboard-1769156036607-2b4fe37c4b56c.png": "/blog/assetsc0efdb82443556ae3acefe00099b3f23.webp", + "https://file.rene.wang/clipboard-1769156050787-ecf4f48474ae2.png": "/blog/assetse743f0a47127390dde766a0a790476db.webp", + "https://file.rene.wang/lobehub/467951f5-ad65-498d-aea9-fca8f35a4314.png": "/blog/assets907ea775d228958baca38e2dbb65939a.webp", + "https://file.rene.wang/lobehub/58d91528-373a-4a42-b520-cf6cb1f8ce1e.png": 
"/blog/assets7dccdd4df55aede71001da649639437f.webp", + "https://file.rene.wang/lobehub/ee700103-3c08-41dc-9ddf-c7705bb7bc6a.png": "/blog/assets196d679bc7071abbf71f2a8566f05aa3.webp", + "https://file.rene.wang/lobehub/image-2.png": "/blog/assets58737403bd41f2f0ea70bdea609e9169.webp", + "https://file.rene.wang/lobehub/image-3.png": "/blog/assets9e2139a302264b278eb3f4296640fe8a.webp", + "https://file.rene.wang/lobehub/image-4.png": "/blog/assets095af3a0a0f850fc206fc3bbc19a4095.webp", + "https://github.com/lobehub/lobe-chat/assets/13883964/48a0b702-05bd-4ce4-a007-a8ad00a36e5a": "/blog/assets13883964/48a0b702-05bd-4ce4-a007-a8ad00a36e5a.webp", + "https://github.com/lobehub/lobe-chat/assets/13883964/4f9d83bd-b3fc-4abc-bcf4-ccbad65c219d": "/blog/assets13883964/4f9d83bd-b3fc-4abc-bcf4-ccbad65c219d.webp", + "https://github.com/lobehub/lobe-chat/assets/13883964/c9d66fa0-158c-4bd3-a1fa-969e638259d2": "/blog/assets13883964/c9d66fa0-158c-4bd3-a1fa-969e638259d2.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/0c2c399f-2ed3-44b5-97c8-53e007e8c095": "/blog/assets17870709/0c2c399f-2ed3-44b5-97c8-53e007e8c095.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/1148639c-2687-4a9c-9950-8ca8672f34b6": "/blog/assets17870709/1148639c-2687-4a9c-9950-8ca8672f34b6.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/15e09e71-5899-4805-9c5e-1f7c57be04ae": "/blog/assets17870709/15e09e71-5899-4805-9c5e-1f7c57be04ae.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/1d840e27-fa74-4e71-b777-330bf41d6dff": "/blog/assets17870709/1d840e27-fa74-4e71-b777-330bf41d6dff.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/21b94782-875b-4dee-a572-3c5843f3e1e3": "/blog/assets17870709/21b94782-875b-4dee-a572-3c5843f3e1e3.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/385f663f-cae2-4383-9bb0-52c45e5d7d7a": "/blog/assets17870709/385f663f-cae2-4383-9bb0-52c45e5d7d7a.webp", + 
"https://github.com/lobehub/lobe-chat/assets/17870709/3f31bc33-509f-4ad2-ba81-280c2a6ec5fa": "/blog/assets17870709/3f31bc33-509f-4ad2-ba81-280c2a6ec5fa.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/4e56e080-9b8c-42e1-87e1-11123dbb9067": "/blog/assets17870709/4e56e080-9b8c-42e1-87e1-11123dbb9067.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/4fae3e6f-e680-4471-93c4-987c19d7170a": "/blog/assets17870709/4fae3e6f-e680-4471-93c4-987c19d7170a.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/51f8f8f6-5d8a-4cf0-a2e5-d96c69fe05b8": "/blog/assets17870709/51f8f8f6-5d8a-4cf0-a2e5-d96c69fe05b8.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/5efa34c2-6523-43e6-9ade-70ab5d802e13": "/blog/assets17870709/5efa34c2-6523-43e6-9ade-70ab5d802e13.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/620b956b-dcb2-442a-8bb1-9aa22681dfa4": "/blog/assets17870709/620b956b-dcb2-442a-8bb1-9aa22681dfa4.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/63d9f6d4-5b78-4c65-8cd1-ff8b7f143406": "/blog/assets17870709/63d9f6d4-5b78-4c65-8cd1-ff8b7f143406.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/65d2dd2a-fdcf-4f3f-a6af-4ed5164a510d": "/blog/assets17870709/65d2dd2a-fdcf-4f3f-a6af-4ed5164a510d.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/6cdc5c0e-0508-44ed-a283-03f6b538ed8a": "/blog/assets17870709/6cdc5c0e-0508-44ed-a283-03f6b538ed8a.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/79faa59a-dfc0-4365-a679-5fc12c12bc70": "/blog/assets17870709/79faa59a-dfc0-4365-a679-5fc12c12bc70.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/80e22593-dc0f-482c-99bf-69acdb62d952": "/blog/assets17870709/80e22593-dc0f-482c-99bf-69acdb62d952.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/82cf4f5c-be5c-4126-a475-3a03468a9c39": "/blog/assets17870709/82cf4f5c-be5c-4126-a475-3a03468a9c39.webp", + 
"https://github.com/lobehub/lobe-chat/assets/17870709/a3f9f63a-48f8-4567-b960-7f3636c0d4ed": "/blog/assets17870709/a3f9f63a-48f8-4567-b960-7f3636c0d4ed.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/a77b0fb2-87d7-4527-a804-2f7ad3634aa5": "/blog/assets17870709/a77b0fb2-87d7-4527-a804-2f7ad3634aa5.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/ab94a7b5-6bc4-41e0-97bc-724ee8e315db": "/blog/assets17870709/ab94a7b5-6bc4-41e0-97bc-724ee8e315db.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/ac10d9dd-a977-43fb-8397-b2bbdee6a1a1": "/blog/assets17870709/ac10d9dd-a977-43fb-8397-b2bbdee6a1a1.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/b4a01219-e7b1-48a0-888c-f0271b18e3a6": "/blog/assets17870709/b4a01219-e7b1-48a0-888c-f0271b18e3a6.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/ba8e688a-e0c1-4567-9013-94205f83fc60": "/blog/assets17870709/ba8e688a-e0c1-4567-9013-94205f83fc60.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/c9e5eafc-ca22-496b-a88d-cc0ae53bf720": "/blog/assets17870709/c9e5eafc-ca22-496b-a88d-cc0ae53bf720.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/ddb44517-8696-4492-acd9-25b590f6069c": "/blog/assets17870709/ddb44517-8696-4492-acd9-25b590f6069c.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/e1b5f84f-015e-437c-98cc-a3431fa3b077": "/blog/assets17870709/e1b5f84f-015e-437c-98cc-a3431fa3b077.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/e6a429cb-96e1-4e85-9aa3-1334ffcad8c0": "/blog/assets17870709/e6a429cb-96e1-4e85-9aa3-1334ffcad8c0.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/eb7273f8-f0ed-4b9b-884e-96d29c406cb7": "/blog/assets17870709/eb7273f8-f0ed-4b9b-884e-96d29c406cb7.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/f579b39b-e771-402c-a1d1-620e57a10c75": "/blog/assets17870709/f579b39b-e771-402c-a1d1-620e57a10c75.webp", + 
"https://github.com/lobehub/lobe-chat/assets/17870709/ff7ebacf-27f0-42d7-810b-00314499a084": "/blog/assets17870709/ff7ebacf-27f0-42d7-810b-00314499a084.webp", + "https://github.com/lobehub/lobe-chat/assets/17870709/ff9c3eb8-412b-4275-80be-177ae7b7acbc": "/blog/assets17870709/ff9c3eb8-412b-4275-80be-177ae7b7acbc.webp", + "https://github.com/lobehub/lobe-chat/assets/1845053/fe34fdfe-c2e4-4d6a-84d7-4ebc61b2516a": "/blog/assets1845053/fe34fdfe-c2e4-4d6a-84d7-4ebc61b2516a.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/0249ea56-ab17-4aa9-a56c-9ebd556c2645": "/blog/assets28616219/0249ea56-ab17-4aa9-a56c-9ebd556c2645.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/034a328c-8465-4499-8f93-fdcdb03343cd": "/blog/assets28616219/034a328c-8465-4499-8f93-fdcdb03343cd.mp4", + "https://github.com/lobehub/lobe-chat/assets/28616219/1c689738-809b-4199-b305-ba5770d39da7": "/blog/assets28616219/1c689738-809b-4199-b305-ba5770d39da7.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/1c82d707-cb6f-4924-b246-a5235a919864": "/blog/assets28616219/1c82d707-cb6f-4924-b246-a5235a919864.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/1ed8b13d-046e-47c8-bd61-116ffdf5d01b": "/blog/assets28616219/1ed8b13d-046e-47c8-bd61-116ffdf5d01b.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/2bfa13df-6e20-4768-97c0-4dad06c85a2f": "/blog/assets28616219/2bfa13df-6e20-4768-97c0-4dad06c85a2f.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/2ceb210c-eca0-4439-ba27-8734d4ebb3ee": "/blog/assets28616219/2ceb210c-eca0-4439-ba27-8734d4ebb3ee.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/31b999e5-2a74-45fc-935b-f036e72a684d": "/blog/assets28616219/31b999e5-2a74-45fc-935b-f036e72a684d.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/31e5f625-8dc4-4a5f-a5fd-d28d0457782d": "/blog/assets28616219/31e5f625-8dc4-4a5f-a5fd-d28d0457782d.mp4", + 
"https://github.com/lobehub/lobe-chat/assets/28616219/3b607482-4d99-455a-bc10-3090dd4fe3c5": "/blog/assets28616219/3b607482-4d99-455a-bc10-3090dd4fe3c5.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/41f7f677-0153-4a96-b849-5ac9b7ebefee": "/blog/assets28616219/41f7f677-0153-4a96-b849-5ac9b7ebefee.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/4e81decc-776c-43b8-9a54-dfb43e9f601a": "/blog/assets28616219/4e81decc-776c-43b8-9a54-dfb43e9f601a.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/54b3696b-5b13-4761-8c1b-1e664867b2dd": "/blog/assets28616219/54b3696b-5b13-4761-8c1b-1e664867b2dd.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/5fdc9479-007f-46ab-9d6e-a9603e949116": "/blog/assets28616219/5fdc9479-007f-46ab-9d6e-a9603e949116.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/69414c79-642e-4323-9641-bfa43a74fcc8": "/blog/assets28616219/69414c79-642e-4323-9641-bfa43a74fcc8.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/6c3968a8-fbbb-4268-a587-edaced2d96af": "/blog/assets28616219/6c3968a8-fbbb-4268-a587-edaced2d96af.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/7049a811-a08b-45d3-8491-970f579c2ebd": "/blog/assets28616219/7049a811-a08b-45d3-8491-970f579c2ebd.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/763b18f9-2b5f-44bb-a479-9b56d46f7397": "/blog/assets28616219/763b18f9-2b5f-44bb-a479-9b56d46f7397.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/7b0ea46c-5157-40a8-888f-f47664a4884f": "/blog/assets28616219/7b0ea46c-5157-40a8-888f-f47664a4884f.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/7f9a9a9f-fd91-4f59-aac9-3f26c6d49a1e": "/blog/assets28616219/7f9a9a9f-fd91-4f59-aac9-3f26c6d49a1e.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/89883703-7a1a-4a11-b944-5d804544e57c": "/blog/assets28616219/89883703-7a1a-4a11-b944-5d804544e57c.webp", + 
"https://github.com/lobehub/lobe-chat/assets/28616219/95828c11-0ae5-4dfa-84ed-854124e927a6": "/blog/assets28616219/95828c11-0ae5-4dfa-84ed-854124e927a6.mp4", + "https://github.com/lobehub/lobe-chat/assets/28616219/9c0d184c-3169-40fa-9115-011cfffb9ca7": "/blog/assets28616219/9c0d184c-3169-40fa-9115-011cfffb9ca7.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/9cb5150d-6e1e-4c59-9a18-4e418dce1a5d": "/blog/assets28616219/9cb5150d-6e1e-4c59-9a18-4e418dce1a5d.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/a7fd85d5-fd32-4756-814e-ff7ab7567fe1": "/blog/assets28616219/a7fd85d5-fd32-4756-814e-ff7ab7567fe1.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/ab008be7-26b2-4b78-8bd9-24301bf34d23": "/blog/assets28616219/ab008be7-26b2-4b78-8bd9-24301bf34d23.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/b04723eb-64ad-4028-a901-dc4e4ee2d0c1": "/blog/assets28616219/b04723eb-64ad-4028-a901-dc4e4ee2d0c1.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/b3a78112-adc8-4837-b4e3-48f67058f16e": "/blog/assets28616219/b3a78112-adc8-4837-b4e3-48f67058f16e.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/b6b8226b-183f-4249-8255-663a5e9f5af4": "/blog/assets28616219/b6b8226b-183f-4249-8255-663a5e9f5af4.mp4", + "https://github.com/lobehub/lobe-chat/assets/28616219/bb9cd00f-b20c-4d7b-9c60-b921d350e319": "/blog/assets28616219/bb9cd00f-b20c-4d7b-9c60-b921d350e319.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/bdeb678e-6502-4667-86b1-504221ee7ded": "/blog/assets28616219/bdeb678e-6502-4667-86b1-504221ee7ded.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/be0c95c0-6693-44ee-a490-7e8dfaa8b34d": "/blog/assets28616219/be0c95c0-6693-44ee-a490-7e8dfaa8b34d.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/c1f945d1-f3e2-4100-b6bb-24d4cb13c438": "/blog/assets28616219/c1f945d1-f3e2-4100-b6bb-24d4cb13c438.webp", + 
"https://github.com/lobehub/lobe-chat/assets/28616219/c32b56db-c6a1-4876-9bc3-acbd37ec0c0c": "/blog/assets28616219/c32b56db-c6a1-4876-9bc3-acbd37ec0c0c.mp4", + "https://github.com/lobehub/lobe-chat/assets/28616219/c9c58141-5ec6-43f1-8d97-0a84a04dcdba": "/blog/assets28616219/c9c58141-5ec6-43f1-8d97-0a84a04dcdba.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/c9f74ec4-ce63-4ce9-b9e2-34bda6fda10b": "/blog/assets28616219/c9f74ec4-ce63-4ce9-b9e2-34bda6fda10b.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/cd74152d-0ae8-44fd-b815-3307c56a3c18": "/blog/assets28616219/cd74152d-0ae8-44fd-b815-3307c56a3c18.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/d4a710cd-6404-4196-90d0-cd08ca385074": "/blog/assets28616219/d4a710cd-6404-4196-90d0-cd08ca385074.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/d6f5a918-7b50-4d6e-83a6-3894ab930ddf": "/blog/assets28616219/d6f5a918-7b50-4d6e-83a6-3894ab930ddf.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/da84edc3-46f7-4e2b-a0cd-dc33a98bf5cb": "/blog/assets28616219/da84edc3-46f7-4e2b-a0cd-dc33a98bf5cb.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/dfcc2cb3-2958-4498-a8a4-51bec584fe7d": "/blog/assets28616219/dfcc2cb3-2958-4498-a8a4-51bec584fe7d.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/e0608cca-f62f-414a-bc55-28a61ba21f14": "/blog/assets28616219/e0608cca-f62f-414a-bc55-28a61ba21f14.webp", "https://github.com/lobehub/lobe-chat/assets/28616219/f29475a3-f346-4196-a435-41a6373ab9e2": "/blog/assets/28616219/f29475a3-f346-4196-a435-41a6373ab9e2.mp4", + "https://github.com/lobehub/lobe-chat/assets/28616219/f3885537-6d43-422f-b1b8-e70732401025": "/blog/assets28616219/f3885537-6d43-422f-b1b8-e70732401025.webp", + "https://github.com/lobehub/lobe-chat/assets/28616219/f50f47fb-5e8e-4930-bf4e-8cf6f5b8afb9": "/blog/assets28616219/f50f47fb-5e8e-4930-bf4e-8cf6f5b8afb9.webp", + 
"https://github.com/lobehub/lobe-chat/assets/28616219/fab4abb2-584b-49de-9340-813382951635": "/blog/assets28616219/fab4abb2-584b-49de-9340-813382951635.webp", + "https://github.com/lobehub/lobe-chat/assets/30863298/0beda150-d0b6-43cf-a9f1-fce928b83a96": "/blog/assets30863298/0beda150-d0b6-43cf-a9f1-fce928b83a96.webp", + "https://github.com/lobehub/lobe-chat/assets/30863298/3e0082df-9b6f-46f3-b67f-bdc79e1eb2cc": "/blog/assets30863298/3e0082df-9b6f-46f3-b67f-bdc79e1eb2cc.webp", + "https://github.com/lobehub/lobe-chat/assets/30863298/62fbd09f-a69a-4460-949b-0f6285fa65b9": "/blog/assets30863298/62fbd09f-a69a-4460-949b-0f6285fa65b9.webp", + "https://github.com/lobehub/lobe-chat/assets/30863298/880749a6-5ba4-4e20-a968-b583a54de7fa": "/blog/assets30863298/880749a6-5ba4-4e20-a968-b583a54de7fa.webp", + "https://github.com/lobehub/lobe-chat/assets/30863298/9891347e-a338-4aa9-8714-f16c8dbcfcec": "/blog/assets30863298/9891347e-a338-4aa9-8714-f16c8dbcfcec.webp", + "https://github.com/lobehub/lobe-chat/assets/30863298/df4cea85-616a-46f5-b2de-42725d9b82a6": "/blog/assets30863298/df4cea85-616a-46f5-b2de-42725d9b82a6.webp", + "https://github.com/lobehub/lobe-chat/assets/30863298/f068190f-0027-4d3b-8667-d632e43d5a86": "/blog/assets30863298/f068190f-0027-4d3b-8667-d632e43d5a86.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/0275a552-f189-42b5-bf40-f9891c428b3d": "/blog/assets34400653/0275a552-f189-42b5-bf40-f9891c428b3d.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/094d701f-ce80-464a-bbbc-0a5ecc8d08e3": "/blog/assets34400653/094d701f-ce80-464a-bbbc-0a5ecc8d08e3.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/0cc6c9b8-4688-472b-a80f-f84c5ebbc719": "/blog/assets34400653/0cc6c9b8-4688-472b-a80f-f84c5ebbc719.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/11442ce4-a615-49c4-937a-ca2ae93dd27c": "/blog/assets34400653/11442ce4-a615-49c4-937a-ca2ae93dd27c.webp", + 
"https://github.com/lobehub/lobe-chat/assets/34400653/164b34b5-671e-418d-b34a-3b70f1156d06": "/blog/assets34400653/164b34b5-671e-418d-b34a-3b70f1156d06.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/198217a6-84fa-441c-bcbe-8cded1106d6c": "/blog/assets34400653/198217a6-84fa-441c-bcbe-8cded1106d6c.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/22ce5a72-bc46-41f3-b402-bda6dee90184": "/blog/assets34400653/22ce5a72-bc46-41f3-b402-bda6dee90184.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/28d89add-cb18-4b86-9807-f2a5ed65ceba": "/blog/assets34400653/28d89add-cb18-4b86-9807-f2a5ed65ceba.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/28e025dd-367b-4add-85b6-499f4aacda61": "/blog/assets34400653/28e025dd-367b-4add-85b6-499f4aacda61.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/2afffe79-1d37-423c-9363-f09605d5e640": "/blog/assets34400653/2afffe79-1d37-423c-9363-f09605d5e640.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/33d8ce3b-0083-48aa-9a66-3825e726c4de": "/blog/assets34400653/33d8ce3b-0083-48aa-9a66-3825e726c4de.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/34d92da4-783f-4f16-8c4a-9d8e9a03c8da": "/blog/assets34400653/34d92da4-783f-4f16-8c4a-9d8e9a03c8da.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/3c64b747-f6f1-4ed2-84bc-bfa8e5d90966": "/blog/assets34400653/3c64b747-f6f1-4ed2-84bc-bfa8e5d90966.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/40520a43-ac03-4954-8a4d-282fbb946066": "/blog/assets34400653/40520a43-ac03-4954-8a4d-282fbb946066.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/4485fbc3-c309-4c4e-83ee-cb82392307a1": "/blog/assets34400653/4485fbc3-c309-4c4e-83ee-cb82392307a1.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/470e5669-650b-46cf-8024-a1476c166059": "/blog/assets34400653/470e5669-650b-46cf-8024-a1476c166059.webp", + 
"https://github.com/lobehub/lobe-chat/assets/34400653/497e3b20-57ca-4963-b6f4-897c9710c16e": "/blog/assets34400653/497e3b20-57ca-4963-b6f4-897c9710c16e.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/4e057b43-1e3e-4e96-a948-7cdbff303dcb": "/blog/assets34400653/4e057b43-1e3e-4e96-a948-7cdbff303dcb.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/4e0e87d1-4970-45c5-a9ef-287098f6a198": "/blog/assets34400653/4e0e87d1-4970-45c5-a9ef-287098f6a198.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/4f8d0102-7ca7-4f23-b96f-3fc5cf2cd66e": "/blog/assets34400653/4f8d0102-7ca7-4f23-b96f-3fc5cf2cd66e.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/50b73232-01fc-4ef0-939a-3e06354d1b5a": "/blog/assets34400653/50b73232-01fc-4ef0-939a-3e06354d1b5a.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/5707b392-1ee6-4db6-95cb-9d6c902747d2": "/blog/assets34400653/5707b392-1ee6-4db6-95cb-9d6c902747d2.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/5c3898ab-23d7-44c2-bbd9-b255e25e400c": "/blog/assets34400653/5c3898ab-23d7-44c2-bbd9-b255e25e400c.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/663335d0-fb37-4882-9c7f-ebbd53275644": "/blog/assets34400653/663335d0-fb37-4882-9c7f-ebbd53275644.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/6942287e-fbb1-4a10-a1ce-caaa6663da1e": "/blog/assets34400653/6942287e-fbb1-4a10-a1ce-caaa6663da1e.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/6d6f2bc5-1407-471d-95a8-fb03193edbdb": "/blog/assets34400653/6d6f2bc5-1407-471d-95a8-fb03193edbdb.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/703f170b-c03b-4c71-b57d-c2357596bdfb": "/blog/assets34400653/703f170b-c03b-4c71-b57d-c2357596bdfb.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/72f165f4-d529-4f01-a3ac-163c66e5ea73": "/blog/assets34400653/72f165f4-d529-4f01-a3ac-163c66e5ea73.webp", + 
"https://github.com/lobehub/lobe-chat/assets/34400653/7468594b-3355-4cb9-85bc-c9dace137653": "/blog/assets34400653/7468594b-3355-4cb9-85bc-c9dace137653.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/74768b36-28ca-4ec3-a42d-b32abe2c7057": "/blog/assets34400653/74768b36-28ca-4ec3-a42d-b32abe2c7057.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/750b5cd1-f16a-4330-b899-c27b28b1e837": "/blog/assets34400653/750b5cd1-f16a-4330-b899-c27b28b1e837.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/77b5feee-3f46-486d-9a36-31ff60efa5e9": "/blog/assets34400653/77b5feee-3f46-486d-9a36-31ff60efa5e9.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/7a012a11-87bd-4366-a567-0ebf6d12ae10": "/blog/assets34400653/7a012a11-87bd-4366-a567-0ebf6d12ae10.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/88948a3a-6681-4a8d-9734-a464e09e4957": "/blog/assets34400653/88948a3a-6681-4a8d-9734-a464e09e4957.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/8bf73498-4649-4c4d-a95b-b68447599781": "/blog/assets34400653/8bf73498-4649-4c4d-a95b-b68447599781.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/94836b32-7fc5-45ca-8556-7a23f53b15f9": "/blog/assets34400653/94836b32-7fc5-45ca-8556-7a23f53b15f9.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/95717e2b-1a55-4fca-a96b-b1c186ed4563": "/blog/assets34400653/95717e2b-1a55-4fca-a96b-b1c186ed4563.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/9a8dc1d4-152b-415f-a7cd-8f0c8fbb9913": "/blog/assets34400653/9a8dc1d4-152b-415f-a7cd-8f0c8fbb9913.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/a024af40-e1d9-4df0-b998-0e6e87cebe5b": "/blog/assets34400653/a024af40-e1d9-4df0-b998-0e6e87cebe5b.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/ac2ed716-d270-43f6-856b-3ff81265f4e6": "/blog/assets34400653/ac2ed716-d270-43f6-856b-3ff81265f4e6.webp", + 
"https://github.com/lobehub/lobe-chat/assets/34400653/b4d12904-9d5d-46de-bd66-901eeb9c8e52": "/blog/assets34400653/b4d12904-9d5d-46de-bd66-901eeb9c8e52.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/b695f26a-5bcd-477c-af08-bf03adb717c2": "/blog/assets34400653/b695f26a-5bcd-477c-af08-bf03adb717c2.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/b839e04e-0cef-46a3-bb84-0484a3f51c69": "/blog/assets34400653/b839e04e-0cef-46a3-bb84-0484a3f51c69.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/b83da559-73d1-4734-87d5-5e22955a9da2": "/blog/assets34400653/b83da559-73d1-4734-87d5-5e22955a9da2.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/ba3595e3-d9cb-4d0d-b414-8306b16df186": "/blog/assets34400653/ba3595e3-d9cb-4d0d-b414-8306b16df186.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/c1d1d816-6339-41a6-9bc9-e2c3b2762291": "/blog/assets34400653/c1d1d816-6339-41a6-9bc9-e2c3b2762291.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/c2e6a58b-95eb-4f40-8add-83f4316a719b": "/blog/assets34400653/c2e6a58b-95eb-4f40-8add-83f4316a719b.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/cbc23ca9-1188-4b85-8ef0-e75ac7d74b92": "/blog/assets34400653/cbc23ca9-1188-4b85-8ef0-e75ac7d74b92.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/cec2e032-54e1-49b1-a212-4d9736927156": "/blog/assets34400653/cec2e032-54e1-49b1-a212-4d9736927156.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/e6058456-8f9d-40c1-9ae5-1e9d5eeb9476": "/blog/assets34400653/e6058456-8f9d-40c1-9ae5-1e9d5eeb9476.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/eb57ca57-4f45-4409-91ce-9fa9c7c626d6": "/blog/assets34400653/eb57ca57-4f45-4409-91ce-9fa9c7c626d6.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/eee046cb-189b-4635-ac94-19d50b17a18a": "/blog/assets34400653/eee046cb-189b-4635-ac94-19d50b17a18a.webp", + 
"https://github.com/lobehub/lobe-chat/assets/34400653/ef9ed1b8-6828-4dd6-b86b-bb0b4fa40619": "/blog/assets34400653/ef9ed1b8-6828-4dd6-b86b-bb0b4fa40619.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/f4a23c2a-503e-4731-bc4d-922bce0b6039": "/blog/assets34400653/f4a23c2a-503e-4731-bc4d-922bce0b6039.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/f539d104-6d64-4cc7-8781-3b36b00d32d0": "/blog/assets34400653/f539d104-6d64-4cc7-8781-3b36b00d32d0.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/f6e46f1c-0ac9-42ae-8e83-ddb0cc6c5bf8": "/blog/assets34400653/f6e46f1c-0ac9-42ae-8e83-ddb0cc6c5bf8.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/f7d59c7a-abd0-4ebd-8c72-ca10c47a0f1a": "/blog/assets34400653/f7d59c7a-abd0-4ebd-8c72-ca10c47a0f1a.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/f892fe64-c734-4944-91ff-9916a41bd1c9": "/blog/assets34400653/f892fe64-c734-4944-91ff-9916a41bd1c9.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/f9a5a394-c8f8-4567-9d51-cf84811418ca": "/blog/assets34400653/f9a5a394-c8f8-4567-9d51-cf84811418ca.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/fa725e49-4c17-4055-82bc-98a31e73fa54": "/blog/assets34400653/fa725e49-4c17-4055-82bc-98a31e73fa54.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/fb0f7574-c2f5-40d6-8613-3749e85ce881": "/blog/assets34400653/fb0f7574-c2f5-40d6-8613-3749e85ce881.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/fcdcda9b-8668-4eac-b5cb-04803a888e92": "/blog/assets34400653/fcdcda9b-8668-4eac-b5cb-04803a888e92.webp", + "https://github.com/lobehub/lobe-chat/assets/34400653/fd06c0aa-4bd3-4f4e-bf2b-38374dfe775d": "/blog/assets34400653/fd06c0aa-4bd3-4f4e-bf2b-38374dfe775d.webp", + "https://github.com/lobehub/lobe-chat/assets/64475363/23131ca1-9e84-4a89-a840-ef79c4bc0251": "/blog/assets64475363/23131ca1-9e84-4a89-a840-ef79c4bc0251.webp", + 
"https://github.com/lobehub/lobe-chat/assets/64475363/2f919f99-2aaa-4fa7-9938-169d3ed09db7": "/blog/assets64475363/2f919f99-2aaa-4fa7-9938-169d3ed09db7.webp", + "https://github.com/lobehub/lobe-chat/assets/64475363/358bca8d-3d82-4e76-9a5e-90d16a39efde": "/blog/assets64475363/358bca8d-3d82-4e76-9a5e-90d16a39efde.webp", + "https://github.com/lobehub/lobe-chat/assets/64475363/6d69bdca-7d18-4cbc-b3e0-220d8815cd29": "/blog/assets64475363/6d69bdca-7d18-4cbc-b3e0-220d8815cd29.webp", + "https://github.com/lobehub/lobe-chat/assets/64475363/995780cb-9096-4a36-ab17-d422703ab970": "/blog/assets64475363/995780cb-9096-4a36-ab17-d422703ab970.webp", + "https://github.com/lobehub/lobe-chat/assets/64475363/c6108133-a918-48b0-ab1a-e3fa607572a4": "/blog/assets64475363/c6108133-a918-48b0-ab1a-e3fa607572a4.webp", + "https://github.com/lobehub/lobe-chat/assets/64475363/d7ef5ad1-b1a3-435e-b1bc-4436d2b6fecd": "/blog/assets64475363/d7ef5ad1-b1a3-435e-b1bc-4436d2b6fecd.webp", + "https://github.com/lobehub/lobe-chat/assets/67304509/4244634e-5f68-48d5-aac0-e5f4b06d1c4b": "/blog/assets67304509/4244634e-5f68-48d5-aac0-e5f4b06d1c4b.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/12451b47-8dcd-40a9-b18d-2806b07efecc": "/blog/assets8692892/12451b47-8dcd-40a9-b18d-2806b07efecc.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/14ecaa12-74a1-4e2f-b171-9d9ac09d3d63": "/blog/assets8692892/14ecaa12-74a1-4e2f-b171-9d9ac09d3d63.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/1699bf46-0c8d-4238-9eb5-34282bfe529a": "/blog/assets8692892/1699bf46-0c8d-4238-9eb5-34282bfe529a.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/20a257b5-d086-46f3-b5c2-f76394b11f55": "/blog/assets8692892/20a257b5-d086-46f3-b5c2-f76394b11f55.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/3564110d-bef9-47f3-b775-e5f28b4275b2": "/blog/assets8692892/3564110d-bef9-47f3-b775-e5f28b4275b2.webp", + 
"https://github.com/lobehub/lobe-chat/assets/8692892/375b3d73-6796-465c-9063-f2762093f763": "/blog/assets8692892/375b3d73-6796-465c-9063-f2762093f763.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/407b4eed-7f21-4aa6-b68f-9bae2faf09d0": "/blog/assets8692892/407b4eed-7f21-4aa6-b68f-9bae2faf09d0.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/40bb6b4c-18e0-4ae5-abae-ae0cf202cf08": "/blog/assets8692892/40bb6b4c-18e0-4ae5-abae-ae0cf202cf08.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/4ff3c3a0-9ca0-45ff-8f3a-219f4445098b": "/blog/assets8692892/4ff3c3a0-9ca0-45ff-8f3a-219f4445098b.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/5a0e6c58-9e6f-4ffb-8af2-32e48cfb45b0": "/blog/assets8692892/5a0e6c58-9e6f-4ffb-8af2-32e48cfb45b0.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/79c55d44-8dcb-429c-a072-d3eb014bbceb": "/blog/assets8692892/79c55d44-8dcb-429c-a072-d3eb014bbceb.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/9d5cb651-ad10-47c7-8c8b-2256163c5521": "/blog/assets8692892/9d5cb651-ad10-47c7-8c8b-2256163c5521.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/b05473ad-04a6-4ebc-9810-116c778d4448": "/blog/assets8692892/b05473ad-04a6-4ebc-9810-116c778d4448.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/bee24764-aa42-47d9-ad43-bcb8e7b35bc3": "/blog/assets8692892/bee24764-aa42-47d9-ad43-bcb8e7b35bc3.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/d12a2661-7b98-484f-8f3d-07e84d42ae08": "/blog/assets8692892/d12a2661-7b98-484f-8f3d-07e84d42ae08.webp", + "https://github.com/lobehub/lobe-chat/assets/8692892/d84235b1-45f6-447d-bdd9-58ab9527dc9b": "/blog/assets8692892/d84235b1-45f6-447d-bdd9-58ab9527dc9b.webp", + "https://github.com/user-attachments/assets/00c02637-873e-4e7e-9dc3-a95085b16dd7": "/blog/assets05d786345e99f92d11baae5667a04a62.webp", "https://github.com/user-attachments/assets/03433283-08a5-481a-8f6c-069b2fc6bace": 
"/blog/assets/8d4c2cc0ce8654fa8ac06cc036a7f941.webp", + "https://github.com/user-attachments/assets/08ced88b-4968-46e8-b1da-0c04ddf5b743": "/blog/assets5fd5fb937b9b05d50ce8659cea3210a4.webp", + "https://github.com/user-attachments/assets/09be499c-3b04-4dd6-a161-6e8ebe788354": "/blog/assets65003d69fa745e7cac376a79ea4bb742.webp", + "https://github.com/user-attachments/assets/09c994cf-78f8-46ea-9fef-a06022c0f6d7": "/blog/assets6b6c251a2d4a77784c08fb07fc51abf9.webp", + "https://github.com/user-attachments/assets/0af85438-ac99-4c95-b888-a17e88ede043": "/blog/assetsf1e1ca1adaac36881ec6c3b2ce1a099e.webp", + "https://github.com/user-attachments/assets/0c73c453-6ee3-4f90-bc5d-119c52c38fef": "/blog/assets2a74d926ae05faf2ee9f8da858bec3f6.webp", + "https://github.com/user-attachments/assets/0e2fdc5d-9623-4a74-a7f6-dcb802d52297": "/blog/assets61324ea13398c8920f798b97ac19d58f.webp", "https://github.com/user-attachments/assets/0e3a7174-6b66-4432-a319-dff60b033c24": "/blog/assets/39d7890f8cbe21e77db8d3c94f7f22e4.webp", + "https://github.com/user-attachments/assets/0f79c266-cce5-4936-aabd-4c8f19196d91": "/blog/assets6b67dabe7b9226cdff1bace5a3b8ab18.webp", + "https://github.com/user-attachments/assets/1028aa1a-6c19-4191-b28a-2020e5637155": "/blog/assetsd9e6fe2197270f0f774accd6abcf4019.webp", + "https://github.com/user-attachments/assets/1077bee5-b379-4063-b7bd-23b98ec146e2": "/blog/assetsa74aedc4fbbfb2caf6b51f286922b576.webp", + "https://github.com/user-attachments/assets/12863a0e-a1ee-406d-8dee-011b20701fd6": "/blog/assets513c9045fef49b4d46549f485e69f505.webp", + "https://github.com/user-attachments/assets/12c1957d-f050-4235-95da-d55ddedfa6c9": "/blog/assets74f5de8ee68e57b472d1fc56a2df40d5.webp", + "https://github.com/user-attachments/assets/143ff392-97b5-427a-97a7-f2f577915728": "/blog/assetsc52da5833158f3b3143e40bf2a534ac7.webp", + "https://github.com/user-attachments/assets/14696698-03f7-4856-b36c-9a53997eb12c": "/blog/assets710bf5a4704e520976b19dd2466fa56a.webp", + 
"https://github.com/user-attachments/assets/15af6d94-af4f-4aa9-bbab-7a46e9f9e837": "/blog/assetsffb84575674e2bc5dfdd07af8f41e794.webp", + "https://github.com/user-attachments/assets/15d92756-92f0-45da-8f95-bfe725d13003": "/blog/assets653a83fe7d837e0d225c1de12e60cf92.webp", + "https://github.com/user-attachments/assets/162bc64e-0d34-4a4e-815a-028247b73143": "/blog/assets308f9fd45d0e8a140c1c18e6c92a1a57.webp", + "https://github.com/user-attachments/assets/16cd9aef-c87b-48a4-95c0-b666082e7515": "/blog/assets0ceb7e446f9a850df283093563ba7803.webp", + "https://github.com/user-attachments/assets/199b862a-5de4-4a54-83b2-f4dbf69be902": "/blog/assetsb9d1f02ab6c26f8a2c7873a949b4dd3c.webp", + "https://github.com/user-attachments/assets/1a7e9600-cd0f-4c82-9d32-4e61bbb351cc": "/blog/assets5997a6461e20103f5bc9d6b78b872833.webp", + "https://github.com/user-attachments/assets/1bf1a5f0-32ad-418c-a8d1-6c54740f50b9": "/blog/assets4d0d191b487c114abf084eb7f2dc381c.webp", + "https://github.com/user-attachments/assets/1c6a3e42-8e24-4148-b2c3-0bfe60a8cf77": "/blog/assets8096422e62e10dcd58efe75c616f9e88.webp", + "https://github.com/user-attachments/assets/1d77cca4-7363-4a46-9ad5-10604e111d7c": "/blog/assets1049abec5850cebf8ce12cd50199b9c5.webp", + "https://github.com/user-attachments/assets/1e33aff2-6186-4e1f-80a8-4a2c855d8cc1": "/blog/assets6f2a84bee4245ca507e98e96247d5c5e.webp", + "https://github.com/user-attachments/assets/1fb5df18-5261-483e-a445-96f52f80dd20": "/blog/assets69146738e31a47ac6425070208ebd906.webp", "https://github.com/user-attachments/assets/2048b4c2-4a56-4029-acf9-71e35ff08652": "/blog/assets/d9cbfcbef130183bc490d515d8a38aa4.webp", + "https://github.com/user-attachments/assets/21c52e2a-b2f8-4de8-a5d4-cf3444608db7": "/blog/assets50607dece1bbffe80fdcbe76324ff9b6.webp", + "https://github.com/user-attachments/assets/22e1a039-5e6e-4c40-8266-19821677618a": "/blog/assets89b45345c84f8b7c3bf4d554169689ac.webp", + 
"https://github.com/user-attachments/assets/237864d6-cc5d-4fe4-8a2b-c278016855c5": "/blog/assetsf3e7c2e961d1d2886fe231a4ac59e2f1.webp", "https://github.com/user-attachments/assets/2787824c-a13c-466c-ba6f-820bddfe099f": "/blog/assets/8d6c17a6ea5e784edf4449fb18ca3f76.webp", + "https://github.com/user-attachments/assets/28590f7f-bfee-4215-b50b-8feddbf72366": "/blog/assets89a8dadc85902334ce8d2d5b78abf709.webp", "https://github.com/user-attachments/assets/29508dda-2382-430f-bc81-fb23f02149f8": "/blog/assets/29b13dc042e3b839ad8865354afe2fac.webp", "https://github.com/user-attachments/assets/2a4116a7-15ad-43e5-b801-cc62d8da2012": "/blog/assets/37d85fdfccff9ed56e9c6827faee01c7.webp", + "https://github.com/user-attachments/assets/2bb4c09d-75bb-4c46-bb2f-faf538308305": "/blog/assetsf0ebf396dbe9559eb3478f48f648a6e2.webp", + "https://github.com/user-attachments/assets/2dd3cde5-fa0d-4f52-b82b-28d9e89379a0": "/blog/assets66b0dfa56c1f5b3063b5ba740dd3ef8d.webp", + "https://github.com/user-attachments/assets/2f7c5c45-ec6a-4393-8fa9-19a4c5f52f7a": "/blog/assets89168f61edcb2ee92d2ad7064da218b2.webp", + "https://github.com/user-attachments/assets/3050839a-cb16-485d-8bae-1bc2f9ade632": "/blog/assetsf117203c39294f45930785d85773c83e.webp", + "https://github.com/user-attachments/assets/30c33426-412d-4dec-b096-317fe5880e79": "/blog/assets66829206b15b6c36fa3344835659c041.webp", + "https://github.com/user-attachments/assets/328e9755-8da9-4849-8569-e099924822fe": "/blog/assetsf78c85b0a0183a3ae3f2e916d59c0a67.webp", + "https://github.com/user-attachments/assets/35164b25-c964-42ce-9cb0-32f6ebe1d07c": "/blog/assetsb6af626eeb0e1e638d80dc9ff7a6eba9.webp", + "https://github.com/user-attachments/assets/37251adf-949b-4aec-bc49-bf4647e119da": "/blog/assetscd53b161a6d02424d03f8c5dcadc3dd5.webp", + "https://github.com/user-attachments/assets/378df8df-8ec4-436e-8451-fbc52705faee": "/blog/assetsba0243e75b0421b6dd7dadad02e4b0d6.webp", 
"https://github.com/user-attachments/assets/385eaca6-daea-484a-9bea-ba7270b4753d": "/blog/assets/d6129350de510a62fe87b2d2f0fb9477.webp", + "https://github.com/user-attachments/assets/3ad2655e-dd20-4534-bf6d-080b3677df86": "/blog/assets48b5c19e20fb870c7bdd34bd3aefbb21.webp", + "https://github.com/user-attachments/assets/3c1a492d-a3d4-4570-9e74-785c2942ca41": "/blog/assets9880145be3e52b8f9dcd8343cd34a6ca.webp", "https://github.com/user-attachments/assets/3d80e0f5-d32a-4412-85b2-e709731460a0": "/blog/assets2d409f43b58953ad5396c6beab8a0719.webp", + "https://github.com/user-attachments/assets/3da4c8c4-88c6-40a9-8005-6a0a44aa3b1f": "/blog/assetse717764a3618df4e56212e447a6c20cd.webp", + "https://github.com/user-attachments/assets/3ed3226c-3d4c-49ef-b2c0-8953dac8a92e": "/blog/assets7dbd100dac8e2614ef6b297885b3c9e2.webp", + "https://github.com/user-attachments/assets/411e2002-61f0-4010-9841-18e88ca895ec": "/blog/assets7c3eab218c0823fa353b1cd23afe21c3.webp", + "https://github.com/user-attachments/assets/420379cd-d8a4-4ab3-9a46-75dcc3d56920": "/blog/assets0ca3e3989fb3884658765ee0ef2587a0.webp", + "https://github.com/user-attachments/assets/4257e123-9018-4562-ac66-0f39278906f5": "/blog/assetsadbc0db573a0f581b22c30ecf243f721.webp", + "https://github.com/user-attachments/assets/433fdce4-0af5-417f-b80d-163c2d4f02f6": "/blog/assets4aaf8d5d092608b649230e0e6fc92df6.webp", + "https://github.com/user-attachments/assets/452d0b48-5ff7-4f42-a46e-68a62b87632b": "/blog/assets78232916d13ddc942ab3d0b62b639509.webp", + "https://github.com/user-attachments/assets/467bb431-ca0d-4bb4-ac17-e5e2b764a770": "/blog/assetsff480f9009cf873852a43c252ac36828.webp", "https://github.com/user-attachments/assets/484f28f4-017c-4ed7-948b-4a8d51f0b63a": "/blog/assets/5bbb4b421d6df63780b3c7a05f5a102d.webp", + "https://github.com/user-attachments/assets/4c792f62-5203-4f13-8f23-df228f70d67f": "/blog/assets94f55c97a24a08c7a5923c23ee2d7eef.webp", + 
"https://github.com/user-attachments/assets/4cbbbcce-36be-48ff-bb0b-31607a0bba5c": "/blog/assetsb33085e7553d2b7194005b102184553e.webp", + "https://github.com/user-attachments/assets/4d671a7c-5d94-4c4b-b4fd-71a5a0e9d227": "/blog/assetsc74cf5c8daee1515c37a85bce087f0d6.webp", + "https://github.com/user-attachments/assets/4e04928d-0171-48d1-afff-e22fc2faaf4e": "/blog/assetsb26b68a4875a6510ddc202dd4b40d010.webp", + "https://github.com/user-attachments/assets/530c7c96-bac3-456d-a429-f60e7d2ade66": "/blog/assets6541bab7e0047f9c5dbad98dc272d64d.webp", + "https://github.com/user-attachments/assets/5321f987-2c64-4211-8549-bd30ca9b59b9": "/blog/assetsaf57d31364a41634b10c243ed9b1f8f8.webp", "https://github.com/user-attachments/assets/533f7a5e-8a93-4a57-a62f-8233897d72b5": "/blog/assets/9498087e85f27e692716a63cb3b58d79.webp", + "https://github.com/user-attachments/assets/539349dd-2c16-4f42-b525-cca74e113541": "/blog/assetsd8927338578c426b833e5cb57e0b57ec.webp", + "https://github.com/user-attachments/assets/55028fe5-44db-49e2-93c5-5dabbd664f10": "/blog/assets8f3458d794828c38220e88b66e994e2a.webp", + "https://github.com/user-attachments/assets/55230f32-b8dd-47db-a2ba-b3fe7e533dc8": "/blog/assetsc1d4290f05fe474dba74577409cad6e9.webp", + "https://github.com/user-attachments/assets/5abcf21d-5a6c-4fc8-8de6-bc47d4d2fa98": "/blog/assetsbd39adddc9a1cdb85ce4a0e37fa595c1.webp", + "https://github.com/user-attachments/assets/5b816379-c07b-40ea-bde4-df16e2e4e523": "/blog/assets2d41542b390020209bbd5814009abcdf.webp", + "https://github.com/user-attachments/assets/5d672e8b-566f-4f82-bdce-947168726bc0": "/blog/assetsd89998edb6b2dc8311d8d86664d5cd4d.webp", + "https://github.com/user-attachments/assets/5ea37821-4ea8-437c-a15e-3b182d10f19e": "/blog/assets273112caa08528852992b81dd8f3b75d.webp", + "https://github.com/user-attachments/assets/5f344314-ecbc-41e6-9120-520a2d5352ff": "/blog/assets01dbe69cc092163f7ac782afb7d314c5.webp", + 
"https://github.com/user-attachments/assets/5fe4c373-ebd0-42a9-bdca-0ab7e0a2e747": "/blog/assets18168d5fe64ea34905a7e52fd82d0e9d.webp", "https://github.com/user-attachments/assets/6069332b-8e15-4d3c-8a77-479e8bc09c23": "/blog/assets/603fefbb944bc6761ebdab5956fc0084.webp", + "https://github.com/user-attachments/assets/6234428d-5633-4b2f-be22-1a1772a69a55": "/blog/assets67a17a6c66592ad85dbcf190e73b182d.webp", + "https://github.com/user-attachments/assets/629adf4e-e9e1-40dc-b9e5-d7b908878170": "/blog/assets257e1a3a0d99ee043a4c6cb90e160a2b.webp", "https://github.com/user-attachments/assets/635f1c74-6327-48a8-a8d9-68d7376c7749": "/blog/assets/f6d047a345e47a52592cff916c9a64ce.webp", + "https://github.com/user-attachments/assets/638dcd7c-2bff-4adb-bade-da2aaef872bf": "/blog/assets95e6fe7c19ebfb9ead1c5a267aaf2a4e.webp", "https://github.com/user-attachments/assets/639ed70b-abc5-476f-9eb0-10c739e5a115": "/blog/assets/b2845057b23bccfec3bfea90e43ac381.webp", + "https://github.com/user-attachments/assets/63e5ced7-1d23-44e1-b933-cc3b5df47eab": "/blog/assets5f1a6cb003752055b9ed131c1715154c.webp", + "https://github.com/user-attachments/assets/659b5ac1-82f1-43bd-9d4b-a98491e05794": "/blog/assets856bd407c8a1510f616a4bdb1e02a883.webp", + "https://github.com/user-attachments/assets/669c68bf-3f85-4a6f-bb08-d0d7fb7f7417": "/blog/assets02dce7325584974cdba327fe2f996b9e.webp", + "https://github.com/user-attachments/assets/692e7c67-f173-45da-86ef-5c69e17988e4": "/blog/assets6b01801b405c366fa4ebe683a77f289d.webp", "https://github.com/user-attachments/assets/6935e155-4a1d-4ab7-a61a-2b813d65bb7b": "/blog/assets/6ee2609d79281b6b915e317461013f31.webp", + "https://github.com/user-attachments/assets/6d068fe0-8100-4b43-b0c3-7934f54e688f": "/blog/assets87c281587b15f05b6b4e1afcd5bb47e8.webp", + "https://github.com/user-attachments/assets/6dbf4560-3f62-4b33-9f41-96e12b5087b1": "/blog/assets03f3f52817a626339071e6329b445cb3.webp", + 
"https://github.com/user-attachments/assets/6e383b75-09e3-42d1-8a6c-5fb7cf558f00": "/blog/assets15ecc1bbe365f3e02702631e28c7b764.webp", + "https://github.com/user-attachments/assets/6f9f400a-72e0-49de-94cb-5069fddf1163": "/blog/assets9db16311eb6772ea74eb63dd2d397bc0.webp", + "https://github.com/user-attachments/assets/702c191f-8250-4462-aed7-accb18b18dea": "/blog/assetsd56d1af67bb2be60b0c580be0a6c7110.webp", + "https://github.com/user-attachments/assets/71035610-0706-434e-9488-ab5819b55330": "/blog/assets18bb134dbc5792d6a624199cca8bf7d3.webp", + "https://github.com/user-attachments/assets/7239d611-1989-414b-a51c-444e47096d75": "/blog/assets8669131e67e5276fe0744754ba4b1645.webp", + "https://github.com/user-attachments/assets/7257eb0e-4e2c-4db2-981d-354598e2c60f": "/blog/assets2ad69e4e124f49710fcedf8e9827f2f3.webp", + "https://github.com/user-attachments/assets/72da7af1-e180-4759-84a5-a6f6ca28392e": "/blog/assets688e6e10904ad46cf7f44bba6359f90c.webp", + "https://github.com/user-attachments/assets/72f02ce5-9991-425b-9864-9113ee1ed6bf": "/blog/assetsfa2c650be15522ac2fd71a3e434a1b2e.webp", + "https://github.com/user-attachments/assets/7350f211-61ce-488e-b0e2-f0fcac25caeb": "/blog/assetsf9ed064fe764cbeff2f46910e7099a91.webp", + "https://github.com/user-attachments/assets/76ad163e-ee19-4f95-a712-85bea764d3ec": "/blog/assets5205b6dd0f80b8ba02c297fcdfc1aecb.webp", + "https://github.com/user-attachments/assets/796c94af-9bad-4e3c-b1c7-dbb17c215c56": "/blog/assetsbd8c97ef67055e3ff93c56e46c33fa8d.webp", + "https://github.com/user-attachments/assets/798ddb18-50c7-462a-a083-0c6841351d26": "/blog/assets11a8089b511aaa61e8982dea0a3665c5.webp", + "https://github.com/user-attachments/assets/7cb3019b-78c1-48e0-a64c-a6a4836affd9": "/blog/assets3ca963d92475f34b0789cfa50071bc52.webp", + "https://github.com/user-attachments/assets/808f8849-5738-4a60-8ccf-01e300b0dc88": "/blog/assets0f893c504377ba45a9f5cdbb5ccb1612.webp", + 
"https://github.com/user-attachments/assets/81d0349a-44fe-4dfc-bbc4-8e9a1e09567d": "/blog/assets29de82efbe7657a8b9ba7daf0904585d.webp", + "https://github.com/user-attachments/assets/82a7ebe0-69ad-43b6-8767-1316b443fa03": "/blog/assets5374759bfe39ca7fc864e72ddfce98d0.webp", "https://github.com/user-attachments/assets/82bfc467-e0c6-4d99-9b1f-18e4aea24285": "/blog/assets/eb477e62217f4d1b644eff975c7ac168.webp", + "https://github.com/user-attachments/assets/840442b1-bf56-4a5f-9700-b3608b16a8a5": "/blog/assetsc6ff27b7134f280727e1fd7ff83ed2fa.webp", + "https://github.com/user-attachments/assets/84a5c971-1262-4639-b79f-c8b138530803": "/blog/assetsb09a1f1dc99b86343ae196fcfdcc3fe1.webp", + "https://github.com/user-attachments/assets/8570db14-dac6-4279-ab71-04a072c15490": "/blog/assetsc376d2e9e97f9ea9d788589f0a9e23d6.webp", + "https://github.com/user-attachments/assets/868df2eb-0c44-4419-a76a-e173094e1e17": "/blog/assetsf3ccd42bf36b1c75f06f925ffe049f0c.webp", + "https://github.com/user-attachments/assets/872756dc-305e-4e63-9fb7-60550280fc12": "/blog/assets56e5331e7fae3754820790c824cdc480.webp", + "https://github.com/user-attachments/assets/8787716c-833e-44ab-b506-922ddb6121de": "/blog/assets217222e643d99ab3ba01fe92906f3314.webp", + "https://github.com/user-attachments/assets/88e14294-20a6-47c6-981e-fb65453b57cd": "/blog/assets6ef9f3f3627633bb5282fe9df1d31a4a.webp", + "https://github.com/user-attachments/assets/8910186f-4609-4798-a588-2780dcf8db60": "/blog/assets4175fc55c2093d635f15a3287e89e977.webp", + "https://github.com/user-attachments/assets/899a4393-db41-45a6-97ec-9813e1f9879d": "/blog/assets88248c034ef28ca9b909219d2e7ef32a.webp", + "https://github.com/user-attachments/assets/8a0225e0-16ed-40ce-9cd5-553dda561679": "/blog/assets74fbd94a0dc865d2178954662dc964ae.webp", + "https://github.com/user-attachments/assets/8ce79bd6-f1a3-48bb-b3d0-5271c84801c2": "/blog/assets5f8cc99da9c3c1eaca284411833c99e3.webp", + 
"https://github.com/user-attachments/assets/8d90ae64-cf8e-4d90-8a31-c18ab484740b": "/blog/assets04ab03ac7920031925f7ee27846b3f7d.webp", + "https://github.com/user-attachments/assets/8ec7656e-1e3d-41e0-95a0-f6883135c2fc": "/blog/assets71b5cfd165bc907f437bf807048a3e67.webp", + "https://github.com/user-attachments/assets/91fe32a8-e5f0-47ff-b8ae-d036c8a7bff1": "/blog/assets1837dd567f75fcc083553a1078c0f088.webp", + "https://github.com/user-attachments/assets/9336d6c5-2a83-4aa9-854e-75e245b665cb": "/blog/assetsc16177645281b332883403e7f193f6e3.webp", + "https://github.com/user-attachments/assets/97899819-278f-42fd-804a-144d521d4b4f": "/blog/assets7006b60baaf62aa0d95cd40456e24afe.webp", + "https://github.com/user-attachments/assets/9a78bbb9-7c96-4f32-9b66-e57f92660410": "/blog/assets0e9c7125960a2d00b8c3c3d15d88f0a7.webp", + "https://github.com/user-attachments/assets/9b70b292-6c52-4715-b844-ff5df78d16b9": "/blog/assetsbfe7d519c29884b6699e89866e1db7e2.webp", + "https://github.com/user-attachments/assets/9baacac6-5af4-460b-862d-682b76c18459": "/blog/assets195200c7bc42360675e78a6bfa9fe320.webp", + "https://github.com/user-attachments/assets/9cb27b68-f2ac-4ff9-8f97-d96314b1af03": "/blog/assetsd3fefc9a525701b9d0f25116cea2ff00.webp", + "https://github.com/user-attachments/assets/9f989104-bb8e-4acd-9721-6b1db1017d2b": "/blog/assets5d3551635c580d8781e31256e1fb0f2e.webp", + "https://github.com/user-attachments/assets/a00f06cc-da7c-41e8-a4d5-d4b675a22673": "/blog/assetse0d53ba2bfb6ba5bf33f2b8a547f4e41.webp", + "https://github.com/user-attachments/assets/a1af5778-f47a-4fdc-baf5-ca2a1e66f48e": "/blog/assets97ac48dab1a35e45e034fefe0a1a1006.webp", + "https://github.com/user-attachments/assets/a1ba8ec0-e259-4da4-8980-0cf82ca5f52b": "/blog/assetsbd69842ebb37848ecd50c242aad835b0.webp", + "https://github.com/user-attachments/assets/a42ba52b-491e-4993-8e2f-217aa1776e0f": "/blog/assets0f847842a5dedf7bef1f534278aec584.webp", + 
"https://github.com/user-attachments/assets/a53deb11-2c14-441a-8a5c-a0f3a74e2a63": "/blog/assets65c86d6e63ddd5dd9896a6a67c054c0d.webp", + "https://github.com/user-attachments/assets/a9de7780-d0cb-47d5-ad9c-fcbbec14b940": "/blog/assets79e8fff075490d2a4535590a02333316.webp", + "https://github.com/user-attachments/assets/aa91ca54-65fc-4e33-8c76-999f0a5d2bee": "/blog/assetsf625540e8340bafe69ccbb89ad75707a.webp", + "https://github.com/user-attachments/assets/aaa3e2c5-7f16-4cfb-86b6-2814a1aafe3a": "/blog/assets93da89c4892a80e2e5a6caa49d80af5f.webp", + "https://github.com/user-attachments/assets/ab87120c-15ff-4bc7-bb28-4b0b43cfe91a": "/blog/assetsec0f694c9f6140620217bde441440170.webp", + "https://github.com/user-attachments/assets/ae03eab5-a319-4d2a-a5f6-1683ab7739ee": "/blog/assetsa25c48c9faa225bf6f72658e5bd58d64.webp", + "https://github.com/user-attachments/assets/aea782b1-27bd-4d9c-b521-c172c2095fe6": "/blog/assets52c8de6425a785409464561c09f8c98d.webp", + "https://github.com/user-attachments/assets/aead3c6c-891e-47c3-9f34-bdc33875e0c2": "/blog/assetsb6959f725c38f86053e4b07c9188d825.webp", "https://github.com/user-attachments/assets/aee846d5-b5ee-46cb-9dd0-d952ea708b67": "/blog/assets/8a8d361b4c0cce6da350cc0de65c0ad6.webp", + "https://github.com/user-attachments/assets/b2b36128-6a43-4a1f-9c08-99fe73fb565f": "/blog/assets85af5a2a51b851fe125055d374cc8263.webp", + "https://github.com/user-attachments/assets/b3ab6e35-4fbc-468d-af10-e3e0c687350f": "/blog/assets4cd6d49afb0ab1354156961d396195a1.webp", + "https://github.com/user-attachments/assets/b49ed0c1-d6bf-4f46-b9df-5f7c730afaa3": "/blog/assets74000cc1bc59ee4a15e8f0304afbf866.webp", + "https://github.com/user-attachments/assets/b4e89dd4-877b-43fe-aa42-4680de17ba8e": "/blog/assets1b9283f9cc5fc5073ff9cffc24880e96.webp", + "https://github.com/user-attachments/assets/b6e6a3eb-13c6-46f0-9c7c-69a20deae30f": "/blog/assets768ec6fd300785186b202437985857c4.webp", + 
"https://github.com/user-attachments/assets/b824b741-f2d8-42c8-8cb9-1266862affa7": "/blog/assets89d0dcbf5ffccd21086845cea3a514cc.webp", + "https://github.com/user-attachments/assets/b9da065e-f964-44f2-8260-59e182be2729": "/blog/assets80a8b9627374fc345f4bf8e3adf11074.webp", + "https://github.com/user-attachments/assets/bd399cef-283c-4706-bdc8-de9de662de41": "/blog/assets4224bf4978bea84e82b3b3aec77656f0.webp", "https://github.com/user-attachments/assets/bd6d0c82-8f14-4167-ad09-2a841f1e34e4": "/blog/assets/d7e57f8e69f97b76b3c2414f3441b6e4.webp", + "https://github.com/user-attachments/assets/be06e348-8d4c-440c-b59f-b71120f21335": "/blog/assetsd9f99f2adff9051313ca44205b022d8c.webp", + "https://github.com/user-attachments/assets/be7dcd49-0165-4f7b-bf90-0739cc9dd212": "/blog/assetsf069368b9162f58247318dde850c0807.webp", + "https://github.com/user-attachments/assets/bfda556a-d3fc-409f-8647-e718788f2fb8": "/blog/assets2cfe64ead120815f7ba7100bc3dcfd48.webp", + "https://github.com/user-attachments/assets/c44b6894-70cb-4876-b792-2e76e75ac542": "/blog/assets94499977be2f01c795b9876e4fe60709.webp", + "https://github.com/user-attachments/assets/c4fe4430-7860-4339-b014-4d8d264a12c0": "/blog/assets87010372bdf39890a7478a7a8cd4a9f0.webp", + "https://github.com/user-attachments/assets/c6319e83-c4e7-48cf-9625-2edfc4aa77b3": "/blog/assetsfae60ba54155478a1c363f0065ce76a6.webp", "https://github.com/user-attachments/assets/c68e88e4-cf2e-4122-82bc-89ba193b1eb4": "/blog/assets/1f6c4f1c5e6211735ca4924c7807aca1.webp", + "https://github.com/user-attachments/assets/c75eb19e-e0f5-4135-91e4-55be8be8a996": "/blog/assets0f97d1dfccd5ba07172aff71ff9acd7b.webp", + "https://github.com/user-attachments/assets/c77fcf70-9039-49ff-86e4-f8eaa267bbf6": "/blog/assets5a2f360c19fcf9a037b2d1609479b713.webp", + "https://github.com/user-attachments/assets/cb4ba5fe-c223-4b9f-a662-de93e4a536d1": "/blog/assets45d90e73abffd7ae7d85808f81827bb9.webp", + 
"https://github.com/user-attachments/assets/cc1f6146-8063-4a4d-947a-7fd6b9133c0c": "/blog/assets28749075f0c4d62c1642694a4ed9ec08.webp", + "https://github.com/user-attachments/assets/cf3bfd44-9c13-4026-95cd-67f54f40ce6c": "/blog/assetsc557d9ee77afeb958d198abf5ca79761.webp", + "https://github.com/user-attachments/assets/d0a5e152-160a-4862-8393-546f4e2e5387": "/blog/assets06d4e543cbaca9a2762923a23b2cae67.webp", + "https://github.com/user-attachments/assets/d3626294-74ba-4944-9a63-052e6cf719ab": "/blog/assets0f244d5fe648127774636a54ae9ffafc.webp", + "https://github.com/user-attachments/assets/d524c20d-306a-45bc-971b-96920b87fab4": "/blog/assetsbeefe4dbe3e6f141e09c62064c6dc397.webp", + "https://github.com/user-attachments/assets/d643af6d-ca0f-4abd-9dd2-977dacecb25d": "/blog/assets34424062ad6ab98df7f56c9e61341be5.webp", + "https://github.com/user-attachments/assets/d693be02-e08c-43ae-8bde-1294f180aaf6": "/blog/assets4169b5d9f7534f9f89c8426445e9a080.webp", + "https://github.com/user-attachments/assets/d6ace96f-0398-4847-83e1-75c3004a0e8b": "/blog/assetsf7007eebef93bc1d8a29aaf9080ab404.webp", + "https://github.com/user-attachments/assets/d7666e2a-0202-4b45-8338-9806ddffa44e": "/blog/assets8f95f09ce51ad5917107d84db1e980ab.webp", + "https://github.com/user-attachments/assets/d7d65e32-679d-4e50-a933-28cf5dde1330": "/blog/assetsc51018f1581b769727ad1bb3bb641567.webp", + "https://github.com/user-attachments/assets/d902b5df-edb1-48d6-b659-daf948a97aed": "/blog/assets1e640c898e897bfb4ce4b66d5377010b.webp", + "https://github.com/user-attachments/assets/d961f2af-47b0-4806-8288-b1e8f7ee8a47": "/blog/assets9c1839eb146b89e9e2d262ca95d24323.webp", + "https://github.com/user-attachments/assets/db59a5e7-32ed-49d7-a791-8f8ee6618c01": "/blog/assetsf601ee6fa15bed25e17d6b6879691f0f.webp", + "https://github.com/user-attachments/assets/dba58ea6-7df8-4971-b6d4-b24d5f486ba7": "/blog/assetsbbe90aa719d182d3d2f327e4182732c5.webp", + 
"https://github.com/user-attachments/assets/dd6bc4a4-3c20-4162-87fd-5cac57e5d7e7": "/blog/assetseebf66254337ce88357629c34e78c08d.webp", "https://github.com/user-attachments/assets/dde2c9c5-cdda-4a65-8f32-b6f4da907df2": "/blog/assets/d47654360d626f80144cdedb979a3526.webp", + "https://github.com/user-attachments/assets/dec6665a-b3ec-4c50-a57f-7c7eb3160e7b": "/blog/assets8d4fbb776e2209a1ec58c6b3516351a1.webp", + "https://github.com/user-attachments/assets/dfc45807-2ed6-43eb-af4c-47df66dfff7d": "/blog/assetscad58c557fda04b9379000cbbaa4c493.webp", + "https://github.com/user-attachments/assets/e269bd27-d323-43ba-811b-c0f5e4137903": "/blog/assetse12925fba0dda232168e695e6a5e4384.webp", + "https://github.com/user-attachments/assets/e3f44bc8-2fa5-441d-8934-943481472450": "/blog/assets3c54d6f2d55fae843fbbfdc0bd7ffec7.webp", + "https://github.com/user-attachments/assets/e43dacf6-313e-499c-8888-f1065c53e424": "/blog/assets89b0698da3476c6df24ba1f0a07e438e.webp", + "https://github.com/user-attachments/assets/e617def1-ce50-4acc-974b-12f5ed592a0e": "/blog/assets3386e7adc46d19be5cc6dae46533d9bd.webp", "https://github.com/user-attachments/assets/e70c2db6-05c9-43ea-b111-6f6f99e0ae88": "/blog/assets/944c671604833cd2457445b211ebba33.webp", + "https://github.com/user-attachments/assets/e887fa04-c553-45f1-917f-5c123ac9c68b": "/blog/assets73ba166f1e6d54e8c860b91f61c23355.webp", + "https://github.com/user-attachments/assets/e89d2a56-4bf0-4bff-ac39-0d44789fa858": "/blog/assets9f6d4113be26efbcab41d83ed39dcb14.webp", + "https://github.com/user-attachments/assets/eaa2a1fb-41ad-473d-ac10-a39c05886425": "/blog/assetsf5a62c963127764ebdf1cd226fac3dac.webp", "https://github.com/user-attachments/assets/eaed3762-136f-4297-b161-ca92a27c4982": "/blog/assets/50b38eac1769ae6f13aef72f3d725eec.webp", + "https://github.com/user-attachments/assets/eb027093-5ceb-4a9d-8850-b791fbf69a71": "/blog/assetsd0c4369f894abb5ad6e514059b8f378e.webp", 
"https://github.com/user-attachments/assets/eb3f3d8a-79ce-40aa-a206-2c846206c0c0": "/blog/assets/f10a4b98782e36797c38071eed785c6f.webp", - "https://github.com/user-attachments/assets/fa8fab19-ace2-4f85-8428-a3a0e28845bb": "/blog/assets/2d678631c55369ba7d753c3ffcb73782.webp" + "https://github.com/user-attachments/assets/eb41f77f-ccdd-4a48-a8a2-7badac868c03": "/blog/assets0a81d34f707bd87cee3852f26a3d14f0.webp", + "https://github.com/user-attachments/assets/ebdbc01a-a6b5-4bbc-b7ff-240d6015fbfc": "/blog/assets13656829368732a95940edeff9ddfca6.webp", + "https://github.com/user-attachments/assets/ed6965c8-6884-4adf-a457-573a96755f55": "/blog/assets2f83a9f03f13e73b7393641078627cf1.webp", + "https://github.com/user-attachments/assets/f0b2e72d-9eee-46a8-b094-4834b78764df": "/blog/assets8d6bb40d21d74cfa0312bdec347a11d0.webp", + "https://github.com/user-attachments/assets/f3068287-8ade-4eca-9841-ea67d8ff1226": "/blog/assetsa343af49a2d7da73a3fa51f2086afdd4.webp", + "https://github.com/user-attachments/assets/f3177ce2-281c-4ed4-a061-239547b466c6": "/blog/assets86924c724c66931cf61417dbdcc04ee8.webp", + "https://github.com/user-attachments/assets/f4dbbadb-7461-4370-a836-09c487fdd206": "/blog/assets94397c91265c37b9f313dc439b90125f.webp", + "https://github.com/user-attachments/assets/f54c912d-3ee9-4f85-b8bf-619790e51b49": "/blog/assets620c308554394e72034d27ea743f8bff.webp", + "https://github.com/user-attachments/assets/f67180c2-47ba-4b04-9f12-d274c7821085": "/blog/assetscbda3a61a2d158eeb6046e1d1bf9972f.webp", + "https://github.com/user-attachments/assets/f878355f-710b-452e-8606-0c75c47f29d2": "/blog/assets3e2af0090f02059c687b6add6b73a90b.webp", + "https://github.com/user-attachments/assets/f9ccce84-4fd4-48ca-9450-40660112d0d7": "/blog/assetsd94f3e0cf32639bea46dbf92e0862f89.webp", + "https://github.com/user-attachments/assets/f9f7ed26-e506-4c52-a118-e0bb5e0918db": "/blog/assetse5dff9a2e16a134d85e891e4eb98fe55.webp", + 
"https://github.com/user-attachments/assets/fa8fab19-ace2-4f85-8428-a3a0e28845bb": "/blog/assets/2d678631c55369ba7d753c3ffcb73782.webp", + "https://github.com/user-attachments/assets/facdc83c-e789-4649-8060-7f7a10a1b1dd": "/blog/assets05b20e40c03ced0ec8707fed2e8e0f25.webp", + "https://github.com/user-attachments/assets/fcdfb9c5-819a-488f-b28d-0857fe861219": "/blog/assets8477415ecec1f37e38ab38ff1217d0a7.webp" } diff --git a/docs/changelog/2023-09-09-plugin-system.mdx b/docs/changelog/2023-09-09-plugin-system.mdx index 9ef28b4b00..dfe217de57 100644 --- a/docs/changelog/2023-09-09-plugin-system.mdx +++ b/docs/changelog/2023-09-09-plugin-system.mdx @@ -1,11 +1,11 @@ --- -title: LobeChat Plugin Ecosystem - Functionality Extensions and Development Resources +title: LobeHub Plugin Ecosystem - Functionality Extensions and Development Resources description: >- - Discover how the LobeChat plugin ecosystem enhances the utility and - flexibility of the LobeChat assistant, along with the development resources - and plugin development guidelines provided. + Discover how the LobeHub plugin ecosystem enhances the utility and flexibility + of the LobeHub assistant, along with the development resources and plugin + development guidelines provided. tags: - - LobeChat + - LobeHub - Plugins - Real-time Information - Voice Options @@ -13,11 +13,11 @@ tags: # Supported Plugin System -The LobeChat plugin ecosystem is a significant extension of its core functionalities, greatly enhancing the utility and flexibility of the LobeChat assistant. +The LobeHub plugin ecosystem is a significant extension of its core functionalities, greatly enhancing the utility and flexibility of the LobeHub assistant. -
-We provide a [Docker image][docker-release-link] for deploying the LobeChat service on your private device. +We provide a [Docker image][docker-release-link] for deploying the LobeHub service on your private device. ### Install Docker Container Environment diff --git a/docs/self-hosting/platform/docker-compose.zh-CN.mdx b/docs/self-hosting/platform/docker-compose.zh-CN.mdx index b6aa7b9be7..4eda35cdcd 100644 --- a/docs/self-hosting/platform/docker-compose.zh-CN.mdx +++ b/docs/self-hosting/platform/docker-compose.zh-CN.mdx @@ -1,9 +1,9 @@ --- -title: 通过 Docker Compose 部署 LobeChat -description: 学习如何使用 Docker Compose 部署 LobeChat 服务,包括安装 Docker 容器环境和自动更新脚本设置。 +title: 通过 Docker Compose 部署 LobeHub +description: 学习如何使用 Docker Compose 部署 LobeHub 服务,包括安装 Docker 容器环境和自动更新脚本设置。 tags: - Docker Compose - - LobeChat + - LobeHub - Docker 容器 - 自动更新脚本 - 部署指引 @@ -19,7 +19,7 @@ tags: [![][docker-pulls-shield]][docker-pulls-link] -我们提供了 [Docker 镜像](https://hub.docker.com/r/lobehub/lobe-chat) ,供你在自己的私有设备上部署 LobeChat 服务。 +我们提供了 [Docker 镜像](https://hub.docker.com/r/lobehub/lobe-chat) ,供你在自己的私有设备上部署 LobeHub 服务。 ### 安装 Docker 容器环境 diff --git a/docs/self-hosting/platform/docker.mdx b/docs/self-hosting/platform/docker.mdx index 8793dc3453..de6956bfe4 100644 --- a/docs/self-hosting/platform/docker.mdx +++ b/docs/self-hosting/platform/docker.mdx @@ -1,11 +1,11 @@ --- -title: Deploy LobeChat with Docker +title: Deploy LobeHub with Docker description: >- - Learn how to deploy the LobeChat service using Docker, including installation + Learn how to deploy the LobeHub service using Docker, including installation steps, command deployment, proxy configuration, and automatic update scripts. 
tags: - Docker Deployment - - LobeChat Service + - LobeHub Service - Docker Command - Proxy Configuration - Automatic Update Script @@ -21,7 +21,7 @@ tags: [![][docker-pulls-shield]][docker-pulls-link] -We provide a [Docker image][docker-release-link] for you to deploy the LobeChat service on your private device. +We provide a [Docker image][docker-release-link] for you to deploy the LobeHub service on your private device. ### Install Docker Container Environment @@ -44,7 +44,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat ### Docker Command Deployment - Use the following command to start the LobeChat service with one click: + Use the following command to start the LobeHub service with one click: ```fish $ docker run -d -p 3210:3210 \ @@ -59,7 +59,7 @@ We provide a [Docker image][docker-release-link] for you to deploy the LobeChat - Replace `sk-xxxx` in the above command with your OpenAI API Key. - - For the complete list of environment variables supported by LobeChat, please refer to the [Environment Variables](/docs/self-hosting/environment-variables) section. + - For the complete list of environment variables supported by LobeHub, please refer to the [Environment Variables](/docs/self-hosting/environment-variables) section. 
Since the official Docker image build takes about half an hour, if you see the "update available" diff --git a/docs/self-hosting/platform/docker.zh-CN.mdx b/docs/self-hosting/platform/docker.zh-CN.mdx index d23c1365b1..5b2b0d7af6 100644 --- a/docs/self-hosting/platform/docker.zh-CN.mdx +++ b/docs/self-hosting/platform/docker.zh-CN.mdx @@ -1,9 +1,9 @@ --- -title: 通过 Docker 部署 LobeChat -description: 学习如何使用 Docker 部署 LobeChat 服务,包括安装 Docker 容器环境和使用指令一键启动服务。详细说明如何配置环境变量和使用代理地址。 +title: 通过 Docker 部署 LobeHub +description: 学习如何使用 Docker 部署 LobeHub 服务,包括安装 Docker 容器环境和使用指令一键启动服务。详细说明如何配置环境变量和使用代理地址。 tags: - Docker - - LobeChat + - LobeHub - 部署指引 - 环境变量 - 代理地址 @@ -20,7 +20,7 @@ tags: [![][docker-pulls-shield]][docker-pulls-link] -我们提供了 [Docker 镜像][docker-release-link],供你在自己的私有设备上部署 LobeChat 服务。 +我们提供了 [Docker 镜像][docker-release-link],供你在自己的私有设备上部署 LobeHub 服务。 ## 部署指南 @@ -45,7 +45,7 @@ tags: ### Docker 指令部署 - 使用以下命令即可使用一键启动 LobeChat 服务: + 使用以下命令即可使用一键启动 LobeHub 服务: ```fish $ docker run -d -p 3210:3210 \ @@ -60,7 +60,7 @@ tags: - 使用你的 OpenAI API Key 替换上述命令中的 `sk-xxxx` ,获取 API Key 的方式详见最后一节。 - LobeChat 支持的完整环境变量列表请参考 [📘 环境变量](/zh/docs/self-hosting/environment-variables) + LobeHub 支持的完整环境变量列表请参考 [📘 环境变量](/zh/docs/self-hosting/environment-variables) 部分 @@ -151,7 +151,7 @@ tags: ## 获取 OpenAI API Key -API Key 是使用 LobeChat 进行大语言模型会话的必要信息,本节以 OpenAI 模型服务商为例,简要介绍获取 API Key 的方式。 +API Key 是使用 LobeHub 进行大语言模型会话的必要信息,本节以 OpenAI 模型服务商为例,简要介绍获取 API Key 的方式。 ### `A` 通过 OpenAI 官方渠道 @@ -172,7 +172,7 @@ API Key 是使用 LobeChat 进行大语言模型会话的必要信息,本节 {'获取 -将此 API Key 填写到 LobeChat 的 API Key 配置中,即可开始使用。 +将此 API Key 填写到 LobeHub 的 API Key 配置中,即可开始使用。 账户注册后,一般有 5 美元的免费额度,但有效期只有三个月。如果你希望长期使用你的 API diff --git a/docs/self-hosting/platform/netlify.mdx b/docs/self-hosting/platform/netlify.mdx index 9833d81e49..4ec97ca97e 100644 --- a/docs/self-hosting/platform/netlify.mdx +++ b/docs/self-hosting/platform/netlify.mdx @@ -1,26 +1,27 @@ --- -title: Deploy LobeChat with Netlify - Step-by-Step Guide +title: Deploy 
LobeHub with Netlify - Step-by-Step Guide description: >- - Learn how to deploy LobeChat on Netlify with detailed instructions on forking - the repository, importing to Netlify workspace, configuring site name and - environment variables, and monitoring deployment progress. + Learn how to deploy LobeHub on Netlify with detailed instructions on forking + the repository, preparing your OpenAI API Key, importing to Netlify workspace, + configuring site name and environment variables, and monitoring deployment + progress. tags: - - Deploy LobeChat + - Deploy LobeHub - Netlify Deployment - Environment Variables - Custom Domain Setup --- -# Deploy LobeChat with Netlify +# Deploy LobeHub with Netlify -If you want to deploy LobeChat on Netlify, you can follow these steps: +If you want to deploy LobeHub on Netlify, you can follow these steps: -## Deploy LobeChat with Netlify +## Deploy LobeHub with Netlify - ### Fork the LobeChat Repository + ### Fork the LobeHub Repository - Click the Fork button to fork the LobeChat repository to your GitHub account. + Click the Fork button to fork the LobeHub repository to your GitHub account. ### Import to Netlify Workspace @@ -31,58 +32,58 @@ If you want to deploy LobeChat on Netlify, you can follow these steps: Click "Import from git" - {'Click + {'Click Then click "Deploy with Github" and authorize Netlify to access your GitHub account. - {'Authorize + {'Authorize - Next, select the LobeChat project: + Next, select the LobeHub project: - {'Select + {'Select ### Configure Site Name and Environment Variables In this step, you need to configure your site, including the site name, build command, and publish directory. Fill in your site name in the "Site Name" field. If there are no special requirements, you do not need to modify the remaining configurations as we have already set the default configurations. 
- {'Configure + {'Configure Click the "Add environment variables" button to add site environment variables if needed: - {'Add + {'Add - For a complete list of environment variables supported by LobeChat, please refer to the [📘 + For a complete list of environment variables supported by LobeHub, please refer to the [📘 Environment Variables](/docs/self-hosting/environment-variables) Finally click "Deploy lobe-chat" to enter the deployment phase - {'Environment + {'Environment ### Wait for Deployment to Complete After clicking deploy, you will enter the site details page, where you can click the "Deploying your site" in blue or the "Building" in yellow to view the deployment progress. - {'Netlify + {'Netlify - Upon entering the deployment details, you will see the following interface, indicating that your LobeChat is currently being deployed. Simply wait for the deployment to complete. + Upon entering the deployment details, you will see the following interface, indicating that your LobeHub is currently being deployed. Simply wait for the deployment to complete. - {'LobeChat + {'LobeHub During the deployment and build process: - {'Deployment + {'Deployment ### Deployment Successful, Start Using - If your Deploy Log in the interface looks like the following, it means your LobeChat has been successfully deployed. + If your Deploy Log in the interface looks like the following, it means your LobeHub has been successfully deployed. - {'Deployment + {'Deployment - At this point, you can click on "Open production deploy" to access your LobeChat site. + At this point, you can click on "Open production deploy" to access your LobeHub site. 
- {'Access + {'Access ## Set up Custom Domain (Optional) diff --git a/docs/self-hosting/platform/netlify.zh-CN.mdx b/docs/self-hosting/platform/netlify.zh-CN.mdx index b8f6c9985f..2d66efe927 100644 --- a/docs/self-hosting/platform/netlify.zh-CN.mdx +++ b/docs/self-hosting/platform/netlify.zh-CN.mdx @@ -1,24 +1,25 @@ --- -title: 在 Netlify 上部署 LobeChat +title: 在 Netlify 上部署 LobeHub description: >- - 学习如何在 Netlify 上部署 LobeChat,包括 Fork 仓库、导入到 Netlify 工作台、配置站点名称与环境变量等步骤。 + 学习如何在 Netlify 上部署 LobeHub,包括 Fork 仓库、准备 OpenAI API Key、导入到 Netlify + 工作台、配置站点名称与环境变量等步骤。 tags: - Netlify - - LobeChat + - LobeHub - 部署教程 - 环境配置 --- # 使用 Netlify 部署 -如果想在 Netlify 上部署 LobeChat,可以按照以下步骤进行操作: +如果想在 Netlify 上部署 LobeHub,可以按照以下步骤进行操作: -## Netlify 部署 LobeChat +## Netlify 部署 LobeHub - ### Fork LobeChat 仓库 + ### Fork LobeHub 仓库 - 点击 Fork 按钮,将 LobeChat 仓库 Fork 到你的 GitHub 账号下。 + 点击 Fork 按钮,将 LobeHub 仓库 Fork 到你的 GitHub 账号下。 ### 在 Netlify 工作台导入 @@ -26,58 +27,58 @@ tags: 点击 「Import from git」 - {'在 + {'在 然后点击 「Deploy with Github」,并授权 Netlify 访问你的 GitHub 账号 - {'授权 + {'授权 - 然后选择 LobeChat 项目: + 然后选择 LobeHub 项目: - {'选择 + {'选择 ### 配置站点名称与环境变量 在这一步,你需要配置你的站点,包括站点名称、构建命令、发布目录等。在「Site Name」字段填写上你的站点名称。其余配置如果没有特殊要求,无需修改,我们已经设定好了默认配置。 - {'配置 + {'配置 如需要,点击 「Add environment variables」按钮添加站点环境变量: - {'添加 + {'添加 - LobeChat 支持的完整环境变量列表请参考 [📘 环境变量](/zh/docs/self-hosting/environment-variables) + LobeHub 支持的完整环境变量列表请参考 [📘 环境变量](/zh/docs/self-hosting/environment-variables) 部分 最后点击「Deploy lobe-chat」进入部署阶段。 - {'环境变量添加完成'} + {'环境变量添加完成'} ### 等待部署完成 点击部署后,会进入站点详情页面,你可以点击青色字样的「Deploying your site」或者 「Building」 黄色标签查看部署进度。 - {'Netlify + {'Netlify - 进入部署详情,你会看到下述界面,这意味着你的 LobeChat 正在部署中,只需等待部署完成即可。 + 进入部署详情,你会看到下述界面,这意味着你的 LobeHub 正在部署中,只需等待部署完成即可。 - {'LobeChat + {'LobeHub 部署构建过程中: - {'部署构建中'} + {'部署构建中'} ### 部署成功,开始使用 - 如果你的界面中的 Deploy Log 如下所示,意味着你的 LobeChat 部署成功了。 + 如果你的界面中的 Deploy Log 如下所示,意味着你的 LobeHub 部署成功了。 - {'部署成功'} + {'部署成功'} - 此时,你可以点击「Open production deploy」,即可访问你的 LobeChat 站点 + 此时,你可以点击「Open production 
deploy」,即可访问你的 LobeHub 站点 - {'访问你的 + {'访问你的 ## 绑定自定义域名(可选) diff --git a/docs/self-hosting/platform/railway.mdx b/docs/self-hosting/platform/railway.mdx index bc022487c0..73acff77d6 100644 --- a/docs/self-hosting/platform/railway.mdx +++ b/docs/self-hosting/platform/railway.mdx @@ -1,18 +1,18 @@ --- -title: Deploy LobeChat with Railway +title: Deploy LobeHub with Railway description: >- - Learn how to deploy LobeChat on Railway and follow the step-by-step process. - Deploy with a click and start using it. Optionally, bind a custom domain for - your deployment. + Learn how to deploy LobeHub on Railway and follow the step-by-step process. + Get your OpenAI API Key, deploy with a click, and start using it. Optionally, + bind a custom domain for your deployment. tags: - - Deploy LobeChat + - Deploy LobeHub - Railway Deployment - Custom Domain Binding --- -# Deploy LobeChat with Railway +# Deploy LobeHub with Railway -If you want to deploy LobeChat on Railway, you can follow the steps below: +If you want to deploy LobeHub on Railway, you can follow the steps below: ## Railway Deployment Process diff --git a/docs/self-hosting/platform/railway.zh-CN.mdx b/docs/self-hosting/platform/railway.zh-CN.mdx index 77e54aa046..0bfdc1eb09 100644 --- a/docs/self-hosting/platform/railway.zh-CN.mdx +++ b/docs/self-hosting/platform/railway.zh-CN.mdx @@ -1,16 +1,18 @@ --- -title: 在 Railway 上部署 LobeChat -description: 学习如何在 Railway 上部署 LobeChat 应用,包括点击按钮进行部署、绑定自定义域名等步骤。 +title: 在 Railway 上部署 LobeHub +description: 学习如何在 Railway 上部署 LobeHub 应用,包括准备 OpenAI API Key、点击按钮进行部署、绑定自定义域名等步骤。 tags: - Railway - 部署 - - LobeChat + - LobeHub + - OpenAI + - API Key - 自定义域名 --- # 使用 Railway 部署 -如果想在 Railway 上部署 LobeChat,可以按照以下步骤进行操作: +如果想在 Railway 上部署 LobeHub,可以按照以下步骤进行操作: ## Railway 部署流程 diff --git a/docs/self-hosting/platform/repocloud.mdx b/docs/self-hosting/platform/repocloud.mdx index e0a7a00fae..1d321d96d3 100644 --- a/docs/self-hosting/platform/repocloud.mdx +++ 
b/docs/self-hosting/platform/repocloud.mdx @@ -1,18 +1,18 @@ --- -title: Deploy LobeChat on RepoCloud +title: Deploy LobeHub on RepoCloud description: >- - Learn how to deploy LobeChat on RepoCloud with ease. Follow these steps to - deploy the application and start using it. Optional: Bind a custom domain for - a personalized touch. + Learn how to deploy LobeHub on RepoCloud with ease. Follow these steps to + prepare your OpenAI API Key, deploy the application, and start using it. + Optional: Bind a custom domain for a personalized touch. tags: - - Deploy LobeChat + - Deploy LobeHub - RepoCloud Deployment - Custom Domain Binding --- -# Deploy LobeChat with RepoCloud +# Deploy LobeHub with RepoCloud -If you want to deploy LobeChat on RepoCloud, you can follow the steps below: +If you want to deploy LobeHub on RepoCloud, you can follow the steps below: ## RepoCloud Deployment Process diff --git a/docs/self-hosting/platform/repocloud.zh-CN.mdx b/docs/self-hosting/platform/repocloud.zh-CN.mdx index 9e3bb81ad3..2129eb894b 100644 --- a/docs/self-hosting/platform/repocloud.zh-CN.mdx +++ b/docs/self-hosting/platform/repocloud.zh-CN.mdx @@ -1,16 +1,16 @@ --- -title: 在 RepoCloud 上部署 LobeChat -description: 学习如何在 RepoCloud 上部署 LobeChat 应用,包括点击部署按钮、绑定自定义域名等操作。 +title: 在 RepoCloud 上部署 LobeHub +description: 学习如何在RepoCloud上部署LobeHub应用,包括准备OpenAI API Key、点击部署按钮、绑定自定义域名等操作。 tags: - RepoCloud - - LobeChat + - LobeHub - 部署流程 - 自定义域名 --- # 使用 RepoCloud 部署 -如果想在 RepoCloud 上部署 LobeChat,可以按照以下步骤进行操作: +如果想在 RepoCloud 上部署 LobeHub,可以按照以下步骤进行操作: ## RepoCloud 部署流程 diff --git a/docs/self-hosting/platform/sealos.mdx b/docs/self-hosting/platform/sealos.mdx index 562b8ada20..f79af5a234 100644 --- a/docs/self-hosting/platform/sealos.mdx +++ b/docs/self-hosting/platform/sealos.mdx @@ -1,17 +1,17 @@ --- -title: Deploy LobeChat on Sealos +title: Deploy LobeHub on Sealos description: >- - Learn how to deploy LobeChat on Sealos with ease. 
Follow the provided steps to - set up LobeChat and start using it efficiently. + Learn how to deploy LobeHub on Sealos with ease. Follow the provided steps to + set up LobeHub and start using it efficiently. tags: - - Deploy LobeChat + - Deploy LobeHub - Sealos Deployment - Custom Domain Binding --- -# Deploy LobeChat with Sealos +# Deploy LobeHub with Sealos -If you want to deploy LobeChat on Sealos, you can follow the steps below: +If you want to deploy LobeHub on Sealos, you can follow the steps below: ## Sealos Deployment Process diff --git a/docs/self-hosting/platform/sealos.zh-CN.mdx b/docs/self-hosting/platform/sealos.zh-CN.mdx index 42d4fc1572..ee02eafa1d 100644 --- a/docs/self-hosting/platform/sealos.zh-CN.mdx +++ b/docs/self-hosting/platform/sealos.zh-CN.mdx @@ -1,16 +1,17 @@ --- -title: 在 Sealos 上部署 LobeChat -description: 学习如何在 Sealos 上部署 LobeChat,包括点击部署按钮、绑定自定义域名等操作。 +title: 在 Sealos 上部署 LobeHub +description: 学习如何在 Sealos 上部署 LobeHub,包括准备 OpenAI API Key、点击部署按钮、绑定自定义域名等操作。 tags: - Sealos - - LobeChat + - LobeHub + - OpenAI API Key - 部署流程 - 自定义域名 --- # 使用 Sealos 部署 -如果想在 Sealos 上部署 LobeChat,可以按照以下步骤进行操作: +如果想在 Sealos 上部署 LobeHub,可以按照以下步骤进行操作: ## Sealos 部署流程 diff --git a/docs/self-hosting/platform/tencentcloud-lighthouse.mdx b/docs/self-hosting/platform/tencentcloud-lighthouse.mdx index 238b3e2151..caf9a1505b 100644 --- a/docs/self-hosting/platform/tencentcloud-lighthouse.mdx +++ b/docs/self-hosting/platform/tencentcloud-lighthouse.mdx @@ -1,17 +1,19 @@ --- -title: Deploy LobeChat on TencentCloud Lighthouse +title: Deploy LobeHub on TencentCloud Lighthouse description: >- - Learn how to deploy the LobeChat application on TencentCloud Lighthouse, - including clicking the deploy button and other operations. + Learn how to deploy the LobeHub application on TencentCloud Lighthouse, + including preparing the large model API Key, clicking the deploy button, and + other operations. 
tags: - TencentCloud Lighthouse - TencentCloud - - LobeChat + - LobeHub + - API Key --- -# Deploy LobeChat with TencentCloud Lighthouse +# Deploy LobeHub with TencentCloud Lighthouse -If you want to deploy LobeChat on TencentCloud Lighthouse, you can follow the steps below: +If you want to deploy LobeHub on TencentCloud Lighthouse, you can follow the steps below: ## Tencent Cloud Deployment Process @@ -24,4 +26,4 @@ If you want to deploy LobeChat on TencentCloud Lighthouse, you can follow the st [deploy-button-image]: https://cloudcache.tencent-cloud.com/qcloud/ui/static/static_source_business/d65fb782-4fb0-4348-ad85-f2943d6bee8f.svg -[deploy-link]: https://buy.tencentcloud.com/lighthouse?blueprintType=APP_OS&blueprintOfficialId=lhbp-6u0ti132®ionId=9&zone=ap-singapore-3&bundleId=bundle_starter_nmc_lin_med2_01&loginSet=AUTO&rule=true&from=lobechat +[deploy-link]: https://buy.tencentcloud.com/lighthouse?blueprintType=APP_OS&blueprintOfficialId=lhbp-6u0ti132®ionId=9&zone=ap-singapore-3&bundleId=bundle_starter_nmc_lin_med2_01&loginSet=AUTO&rule=true&from=LobeHub diff --git a/docs/self-hosting/platform/tencentcloud-lighthouse.zh-CN.mdx b/docs/self-hosting/platform/tencentcloud-lighthouse.zh-CN.mdx index ffb76467f2..f0a2aec9fc 100644 --- a/docs/self-hosting/platform/tencentcloud-lighthouse.zh-CN.mdx +++ b/docs/self-hosting/platform/tencentcloud-lighthouse.zh-CN.mdx @@ -1,16 +1,16 @@ --- -title: 在 腾讯轻量云 上部署 LobeChat -description: 学习如何快速在腾讯轻量云上部署 LobeChat 应用,包括点击部署按钮等操作。 +title: 在 腾讯轻量云 上部署 LobeHub +description: 学习如何快速在腾讯轻量云上部署LobeHub应用,包括准备大模型 API Key、点击部署按钮等操作。 tags: - 腾讯云 - 腾讯轻量云 - - LobeChat + - LobeHub - 部署流程 --- # 使用 腾讯轻量云 部署 -如果想在 腾讯云 上部署 LobeChat,可以按照以下步骤进行操作: +如果想在 腾讯云 上部署 LobeHub,可以按照以下步骤进行操作: ## 腾讯轻量云 部署流程 @@ -23,4 +23,4 @@ tags: [deploy-button-image]: https://cloudcache.tencent-cloud.com/qcloud/ui/static/static_source_business/d65fb782-4fb0-4348-ad85-f2943d6bee8f.svg -[deploy-link]: 
https://buy.cloud.tencent.com/lighthouse?blueprintType=APP_OS&blueprintOfficialId=lhbp-6u0ti132®ionId=8&zone=ap-beijing-3&bundleId=bundle_starter_mc_med2_01&loginSet=AUTO&rule=true&from=lobechat +[deploy-link]: https://buy.cloud.tencent.com/lighthouse?blueprintType=APP_OS&blueprintOfficialId=lhbp-6u0ti132®ionId=8&zone=ap-beijing-3&bundleId=bundle_starter_mc_med2_01&loginSet=AUTO&rule=true&from=LobeHub diff --git a/docs/self-hosting/platform/vercel.mdx b/docs/self-hosting/platform/vercel.mdx index c59af76897..8210352496 100644 --- a/docs/self-hosting/platform/vercel.mdx +++ b/docs/self-hosting/platform/vercel.mdx @@ -1,17 +1,18 @@ --- -title: Deploy LobeChat with Vercel +title: Deploy LobeHub with Vercel description: >- - Learn how to deploy LobeChat on Vercel with ease. Follow the provided steps to - deploy the project and start using it efficiently. + Learn how to deploy LobeHub on Vercel with ease. Follow the provided steps to + prepare your OpenAI API Key, deploy the project, and start using it + efficiently. 
tags: - - Deploy LobeChat + - Deploy LobeHub - Vercel Deployment - Custom Domain Binding --- -# Deploy LobeChat with Vercel +# Deploy LobeHub with Vercel -If you want to deploy LobeChat on Vercel, you can follow the steps below: +If you want to deploy LobeHub on Vercel, you can follow the steps below: ## Vercel Deployment Process diff --git a/docs/self-hosting/platform/vercel.zh-CN.mdx b/docs/self-hosting/platform/vercel.zh-CN.mdx index 952ac1b7e9..6842d70cd9 100644 --- a/docs/self-hosting/platform/vercel.zh-CN.mdx +++ b/docs/self-hosting/platform/vercel.zh-CN.mdx @@ -1,17 +1,18 @@ --- -title: 在 Vercel 上部署 LobeChat -description: 学习如何在 Vercel 上一键部署 LobeChat,点击按钮进行部署,绑定自定义域名,自动同步更新等。 +title: 在 Vercel 上部署 LobeHub +description: 学习如何在 Vercel 上一键部署 LobeHub,准备 OpenAI API Key,点击按钮进行部署,绑定自定义域名,自动同步更新等。 tags: - Vercel - 部署指引 - - LobeChat + - LobeHub + - OpenAI API Key - 自定义域名 - 自动同步更新 --- # Vercel 部署指引 -如果想在 Vercel 上部署 LobeChat,可以按照以下步骤进行操作: +如果想在 Vercel 上部署 LobeHub,可以按照以下步骤进行操作: ## Vercel 部署流程 @@ -34,7 +35,7 @@ tags: 如果你根据上述中的一键部署步骤部署了自己的项目,你可能会发现总是被提示 “有可用更新”。这是因为 Vercel 默认为你创建新项目而非 fork 本项目,这将导致无法准确检测更新。 - 我们建议按照 [📘 LobeChat 自部署保持更新](/zh/docs/self-hosting/advanced/upstream-sync) + 我们建议按照 [📘 LobeHub 自部署保持更新](/zh/docs/self-hosting/advanced/upstream-sync) 步骤重新部署。 diff --git a/docs/self-hosting/platform/zeabur.mdx b/docs/self-hosting/platform/zeabur.mdx index f18bfe67ff..6d1bce4ceb 100644 --- a/docs/self-hosting/platform/zeabur.mdx +++ b/docs/self-hosting/platform/zeabur.mdx @@ -1,17 +1,17 @@ --- -title: Deploy LobeChat on Zeabur +title: Deploy LobeHub on Zeabur description: >- - Learn how to deploy LobeChat on Zeabur with ease. Follow the provided steps to + Learn how to deploy LobeHub on Zeabur with ease. Follow the provided steps to set up your chat application seamlessly. 
tags: - - Deploy LobeChat + - Deploy LobeHub - Zeabur Deployment - Custom Domain Binding --- -# Deploy LobeChat with Zeabur +# Deploy LobeHub with Zeabur -If you want to deploy LobeChat on Zeabur, you can follow the steps below: +If you want to deploy LobeHub on Zeabur, you can follow the steps below: ## Zeabur Deployment Process @@ -27,16 +27,16 @@ If you want to deploy LobeChat on Zeabur, you can follow the steps below: You can use the subdomain provided by Zeabur, or choose to bind a custom domain. Currently, the domains provided by Zeabur have not been contaminated, and most regions can connect directly. -# Deploy LobeChat with Zeabur as serverless function +# Deploy LobeHub with Zeabur as serverless function > Note: There are still issues with [middlewares and rewrites of next.js on Zeabur](https://github.com/lobehub/lobe-chat/pull/2775?notification_referrer_id=NT_kwDOAdi2DrQxMDkyODQ4MDc2NTozMDk3OTU5OA#issuecomment-2146713899), use at your own risk! -Since Zeabur does NOT officially support FREE users deploy containerized service, you may wish to deploy LobeChat as a serverless function service. To deploy LobeChat as a serverless function service on Zeabur, you can follow the steps below: +Since Zeabur does NOT officially support FREE users deploy containerized service, you may wish to deploy LobeHub as a serverless function service. To deploy LobeHub as a serverless function service on Zeabur, you can follow the steps below: ## Zeabur Deployment Process - ### Fork LobeChat + ### Fork LobeHub ### Add Zeabur pack config file @@ -57,7 +57,7 @@ Since Zeabur does NOT officially support FREE users deploy containerized service Create a project, then create a service under this project. - ### Link your fork of LobeChat to the just created Zeabur service. + ### Link your fork of LobeHub to the just created Zeabur service. When adding service, choose github. 
This may triger a oAuth depend on varies factors like how you login to Zeabur and if you have already authorized Zeabur to access all your repos diff --git a/docs/self-hosting/platform/zeabur.zh-CN.mdx b/docs/self-hosting/platform/zeabur.zh-CN.mdx index 810a019674..f70b5ad6c8 100644 --- a/docs/self-hosting/platform/zeabur.zh-CN.mdx +++ b/docs/self-hosting/platform/zeabur.zh-CN.mdx @@ -1,16 +1,17 @@ --- -title: 在 Zeabur 上部署 LobeChat -description: 点击按钮进行部署。在部署完成后,即可开始使用 LobeChat 并选择是否绑定自定义域名。 +title: 在 Zeabur 上部署 LobeHub +description: 按照指南准备 OpenAI API Key 并点击按钮进行部署。在部署完成后,即可开始使用 LobeHub 并选择是否绑定自定义域名。 tags: - Zeabur - - LobeChat + - LobeHub + - OpenAI API Key - 部署流程 - 自定义域名 --- # 使用 Zeabur 部署 -如果想在 Zeabur 上部署 LobeChat,可以按照以下步骤进行操作: +如果想在 Zeabur 上部署 LobeHub,可以按照以下步骤进行操作: ## Zeabur 部署流程 @@ -26,16 +27,16 @@ tags: 你可以使用 Zeabur 提供的子域名,也可以选择绑定自定义域名。目前 Zeabur 提供的域名还未被污染,大多数地区都可以直连。 -# 使用 Zeabur 将 LobeChat 部署为无服务器函数 +# 使用 Zeabur 将 LobeHub 部署为无服务器函数 > **注意:** 仍然存在关于 [Zeabur 上 next.js 的中间件和重写问题](https://github.com/lobehub/lobe-chat/pull/2775?notification_referrer_id=NT_kwDOAdi2DrQxMDkyODQ4MDc2NTozMDk3OTU5OA#issuecomment-2146713899),请自担风险! 
-由于 Zeabur 并未官方支持免费用户部署容器化服务,您可能希望将 LobeChat 部署为无服务器函数服务。要在 Zeabur 上将 LobeChat 部署为无服务器函数服务,您可以按照以下步骤操作: +由于 Zeabur 并未官方支持免费用户部署容器化服务,您可能希望将 LobeHub 部署为无服务器函数服务。要在 Zeabur 上将 LobeHub 部署为无服务器函数服务,您可以按照以下步骤操作: ## Zeabur 部署流程 - ### Fork LobeChat + ### Fork LobeHub ### 添加 Zeabur 打包配置文件 @@ -56,7 +57,7 @@ tags: 创建一个项目,并再这个项目下新建一个服务。 - ### 将您的 LobeChat 分支链接到刚创建的 Zeabur 服务。 + ### 将您的 LobeHub 分支链接到刚创建的 Zeabur 服务。 在添加服务时,选择 github。这可能会触发一个 oAuth,取决于诸如您如何登录到 Zeabur 以及您是否已经授权 Zeabur 访问所有您的存储库等各种因素。 diff --git a/docs/self-hosting/server-database.mdx b/docs/self-hosting/server-database.mdx index 01cb380016..8150729367 100644 --- a/docs/self-hosting/server-database.mdx +++ b/docs/self-hosting/server-database.mdx @@ -1,8 +1,8 @@ --- -title: Deploying Server-Side Database for LobeChat -description: Learn how to deploy LobeChat's server-side database using Postgres. +title: Deploying Server-Side Database for LobeHub +description: Learn how to deploy LobeHub's server-side database using Postgres. tags: - - LobeChat + - LobeHub - Server-Side Database - Postgres - Deployment Guide @@ -10,7 +10,7 @@ tags: # Deploying Server-Side Database -LobeChat defaults to using a client-side database (IndexedDB) but also supports deploying a server-side database. LobeChat uses Postgres as the backend storage database. +LobeHub defaults to using a client-side database (IndexedDB) but also supports deploying a server-side database. LobeHub uses Postgres as the backend storage database. PostgreSQL is a powerful open-source relational database management system with high scalability @@ -19,7 +19,7 @@ LobeChat defaults to using a client-side database (IndexedDB) but also supports management. -This guide will introduce the process and principles of deploying the server-side database version of LobeChat on any platform from a framework perspective, so you can understand both the what and the why, and then deploy according to your specific needs. 
+This guide will introduce the process and principles of deploying the server-side database version of LobeHub on any platform from a framework perspective, so you can understand both the what and the why, and then deploy according to your specific needs. If you are already familiar with the complete principles, you can quickly get started by checking the deployment guides for each platform: @@ -27,7 +27,7 @@ If you are already familiar with the complete principles, you can quickly get st --- -For the server-side database version of LobeChat, a normal deployment process typically involves configuring three modules: +For the server-side database version of LobeHub, a normal deployment process typically involves configuring three modules: 1. Database configuration; 2. Authentication service configuration; @@ -44,9 +44,27 @@ Before deployment, make sure you have a Postgres database instance ready. You ca There is a slight difference in the way they are configured in terms of environment variables. -Since we support file-based conversations/knowledge base conversations, we need to install the `pgvector` plugin for Postgres. This plugin provides vector search capabilities and is a key component for LobeChat to implement RAG. +Since we support file-based conversations/knowledge base conversations, we need to install the `pgvector` plugin for Postgres. This plugin provides vector search capabilities and is a key component for LobeHub to implement RAG. + ### `NEXT_PUBLIC_SERVICE_MODE` + + LobeHub supports both client-side and server-side databases, so we provide an environment variable for switching modes, which is `NEXT_PUBLIC_SERVICE_MODE`, with a default value of `client`. + + For server-side database deployment scenarios, you need to set `NEXT_PUBLIC_SERVICE_MODE` to `server`. + + + In the official `lobe-chat-database` Docker image, this environment variable is already set to + `server` by default. 
Therefore, if you deploy using the Docker image, you do not need to configure + this environment variable again. + + + + Since environment variables starting with `NEXT_PUBLIC` take effect in the front-end code, they cannot be modified through container runtime injection. (Refer to the `next.js` documentation [Configuring: Environment Variables | Next.js (nextjs.org)](https://nextjs.org/docs/pages/building-your-application/configuring/environment-variables)). This is why we chose to create a separate DB version image. + + If you need to modify variables with the `NEXT_PUBLIC` prefix in a Docker deployment, you must build the image yourself and inject your own `NEXT_PUBLIC` prefixed environment variables during the build. + + ### `DATABASE_URL` The core of configuring the database is to add the `DATABASE_URL` environment variable and fill in the Postgres database connection URL you have prepared. The typical format of the database connection URL is `postgres://username:password@host:port/database`. @@ -77,7 +95,7 @@ Since we support file-based conversations/knowledge base conversations, we need You can generate a random 32-character string as the value of `KEY_VAULTS_SECRET` using `openssl - rand -base64 32`. + rand -base64 32`. @@ -87,7 +105,7 @@ In the server-side database mode, we need an authentication service to distingui ### Clerk -[Clerk](https://clerk.com?utm_source=lobehub\&utm_medium=docs) is an authentication SaaS service that provides out-of-the-box authentication capabilities with high productization, low integration costs, and a great user experience. For those who offer SaaS products, Clerk is a good choice. Our official [LobeChat Cloud](https://lobechat.com) uses Clerk as the authentication service. +[Clerk](https://clerk.com?utm_source=lobehub\&utm_medium=docs) is an authentication SaaS service that provides out-of-the-box authentication capabilities with high productization, low integration costs, and a great user experience. 
For those who offer SaaS products, Clerk is a good choice. Our official [LobeHub Cloud](https://LobeHub.com) uses Clerk as the authentication service. The integration of Clerk is relatively simple, requiring only the configuration of these environment variables: @@ -120,7 +138,7 @@ For information on configuring NextAuth, you can refer to the [Authentication](/ ## Configuring S3 Storage Service -LobeChat has supported multimodal AI conversations since [a long time ago](https://x.com/lobehub/status/1724289575672291782), involving the function of uploading images to large models. In the client-side database solution, image files are stored as binary data directly in the browser's IndexedDB database. However, this solution is not feasible in the server-side database. Storing file-like data directly in Postgres will greatly waste valuable database storage space and slow down computational performance. +LobeHub has supported multimodal AI conversations since [a long time ago](https://x.com/lobehub/status/1724289575672291782), involving the function of uploading images to large models. In the client-side database solution, image files are stored as binary data directly in the browser's IndexedDB database. However, this solution is not feasible in the server-side database. Storing file-like data directly in Postgres will greatly waste valuable database storage space and slow down computational performance. The best practice in this area is to use a file storage service (S3) to store image files, which is also the storage solution relied upon for subsequent file uploads/knowledge base functions. @@ -134,6 +152,6 @@ For detailed configuration guidelines on S3, please refer to [S3 Object Storage] ## Getting Started with Deployment -The above is a detailed explanation of configuring LobeChat with a server-side database. 
You can configure it according to your actual situation and then choose a deployment platform that suits you to start deployment: +The above is a detailed explanation of configuring LobeHub with a server-side database. You can configure it according to your actual situation and then choose a deployment platform that suits you to start deployment: diff --git a/docs/self-hosting/server-database.zh-CN.mdx b/docs/self-hosting/server-database.zh-CN.mdx index 4f40d16ecb..3c11d54e32 100644 --- a/docs/self-hosting/server-database.zh-CN.mdx +++ b/docs/self-hosting/server-database.zh-CN.mdx @@ -1,6 +1,6 @@ --- title: 使用服务端数据库部署 - 配置数据库、身份验证服务和 S3 存储服务 -description: 本文将介绍服务端数据库版 LobeChat 的部署思路,解释如何配置数据库、身份验证服务和 S3 存储服务。 +description: 本文将介绍服务端数据库版 LobeHub 的部署思路,解释如何配置数据库、身份验证服务和 S3 存储服务。 tags: - 服务端数据库 - Postgres @@ -12,14 +12,14 @@ tags: # 使用服务端数据库部署 -LobeChat 默认使用客户端数据库(IndexedDB),同时也支持使用服务端数据库(下简称 DB 版)。LobeChat 采用了 Postgres 作为后端存储数据库。 +LobeHub 默认使用客户端数据库(IndexedDB),同时也支持使用服务端数据库(下简称 DB 版)。LobeHub 采用了 Postgres 作为后端存储数据库。 PostgreSQL 是一种强大的开源关系型数据库管理系统,具备高度扩展性和标准 SQL 支持。它提供了丰富的数据类型、并发处理、数据完整性、安全性及可编程性,适用于复杂应用和大规模数据管理。 -本文将从框架角度介绍在任何一个平台中部署 DB 版 LobeChat 的流程和原理,让你知其然也知其所以然,最后可以根据自己的实际情况进行部署。 +本文将从框架角度介绍在任何一个平台中部署 DB 版 LobeHub 的流程和原理,让你知其然也知其所以然,最后可以根据自己的实际情况进行部署。 如你已经熟悉完整原理,可以查看各个平台的部署指南快速开始: @@ -27,7 +27,7 @@ LobeChat 默认使用客户端数据库(IndexedDB),同时也支持使用 --- -对于 LobeChat 的 DB 版,正常的部署流程都需要包含三个模块的配置: +对于 LobeHub 的 DB 版,正常的部署流程都需要包含三个模块的配置: 1. 数据库配置; 2. 
身份验证服务配置; @@ -42,9 +42,26 @@ LobeChat 默认使用客户端数据库(IndexedDB),同时也支持使用 两者的配置方式在环境变量的取值上会略有一点区别,其他方面是一样的。 -同时,由于我们支持了文件对话 / 知识库对话的能力,因此我们需要为 Postgres 安装 `pgvector` 插件,该插件提供了向量搜索的能力,是 LobeChat 实现 RAG 的重要构件之一。 +同时,由于我们支持了文件对话 / 知识库对话的能力,因此我们需要为 Postgres 安装 `pgvector` 插件,该插件提供了向量搜索的能力,是 LobeHub 实现 RAG 的重要构件之一。 + ### `NEXT_PUBLIC_SERVICE_MODE` + + LobeHub 同时支持了客户端数据库和服务端数据库,因此我们提供了一个环境变量用于切换模式,这个变量为 `NEXT_PUBLIC_SERVICE_MODE`,该值默认为 `client`。 + + 针对服务端数据库部署场景,你需要将 `NEXT_PUBLIC_SERVICE_MODE` 设置为 `server`。 + + + 在官方的 `lobe-chat-database` Docker 镜像中,已经默认将该环境变量设为 `server`,因此如果你使用 + Docker 镜像部署,则无需再配置该环境变量。 + + + + 由于 `NEXT_PUBLIC` 开头的环境变量是在前端代码中生效的,而因此无法通过容器运行时注入进行修改。 (`next.js`的参考文档 [Configuring: Environment Variables | Next.js (nextjs.org)](https://nextjs.org/docs/pages/building-your-application/configuring/environment-variables) ) 这也是为什么我们选择再打一个 DB 版镜像的原因。 + + 如果你需要在 Docker 部署中修改 `NEXT_PUBLIC` 前缀的变量,你必须自行构建镜像,在 build 时就把自己的 `NEXT_PUBLIC` 开头的环境变量打进去。 + + ### `DATABASE_URL` 配置数据库,核心是添加 `DATABASE_URL` 环境变量,将你准备好的 Postgres 数据库连接 URL 填入其中。数据库连接 URL 的通常格式为 `postgres://username:password@host:port/database`。 @@ -83,7 +100,7 @@ LobeChat 默认使用客户端数据库(IndexedDB),同时也支持使用 ### Clerk -[Clerk](https://clerk.com?utm_source=lobehub\&utm_medium=docs) 是一个身份验证 SaaS 服务,提供了开箱即用的身份验证能力,产品化程度很高,集成成本较低,体验很好。对于提供 SaaS 化产品的诉求来说,Clerk 是一个不错的选择。我们官方提供的 [LobeChat Cloud](https://lobechat.com),就是使用了 Clerk 作为身份验证服务。 +[Clerk](https://clerk.com?utm_source=lobehub\&utm_medium=docs) 是一个身份验证 SaaS 服务,提供了开箱即用的身份验证能力,产品化程度很高,集成成本较低,体验很好。对于提供 SaaS 化产品的诉求来说,Clerk 是一个不错的选择。我们官方提供的 [LobeHub Cloud](https://LobeHub.com),就是使用了 Clerk 作为身份验证服务。 Clerk 的集成也相对简单,只需要配置 `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY` 、 `CLERK_SECRET_KEY` 和 `CLERK_WEBHOOK_SECRET` 环境变量即可,这三个环境变量可以在 Clerk 控制台中获取。 @@ -111,7 +128,7 @@ NextAuth 是一个开源的身份验证库,支持多种身份验证提供商 ## 配置 S3 存储服务 -LobeChat 在 [很早以前](https://x.com/lobehub/status/1724289575672291782) 就支持了多模态的 AI 会话,其中涉及到图片上传给大模型的功能。在客户端数据库方案中,图片文件直接以二进制数据存储在浏览器 IndexedDB 数据库,但在服务端数据库中这个方案并不可行。因为在 Postgres 
中直接存储文件类二进制数据会大大浪费宝贵的数据库存储空间,并拖慢计算性能。 +LobeHub 在 [很早以前](https://x.com/lobehub/status/1724289575672291782) 就支持了多模态的 AI 会话,其中涉及到图片上传给大模型的功能。在客户端数据库方案中,图片文件直接以二进制数据存储在浏览器 IndexedDB 数据库,但在服务端数据库中这个方案并不可行。因为在 Postgres 中直接存储文件类二进制数据会大大浪费宝贵的数据库存储空间,并拖慢计算性能。 这块最佳实践是使用文件存储服务(S3)来存储图片文件,同时 S3 也是文件上传 / 知识库功能所依赖的大容量静态文件存储方案。 @@ -124,6 +141,6 @@ LobeChat 在 [很早以前](https://x.com/lobehub/status/1724289575672291782) ## 开始部署 -以上就是关于服务端数据库版 LobeChat 的配置详解,你可以根据自己的实际情况进行配置,然后选择适合自己的部署平台开始部署: +以上就是关于服务端数据库版 LobeHub 的配置详解,你可以根据自己的实际情况进行配置,然后选择适合自己的部署平台开始部署: diff --git a/docs/self-hosting/server-database/docker-compose.mdx b/docs/self-hosting/server-database/docker-compose.mdx index 7bd7ee3cf5..0fc58f926c 100644 --- a/docs/self-hosting/server-database/docker-compose.mdx +++ b/docs/self-hosting/server-database/docker-compose.mdx @@ -1,16 +1,16 @@ --- -title: Deploying LobeChat with Docker Compose +title: Deploying LobeHub with Docker Compose description: >- - Learn how to deploy the LobeChat service using Docker Compose, including + Learn how to deploy the LobeHub service using Docker Compose, including configuration tutorials for various services. tags: - Docker Compose - - LobeChat + - LobeHub - Docker Container - Deployment Guide --- -# Deploying LobeChat Server Database Version with Docker Compose +# Deploying LobeHub Server Database Version with Docker Compose
[![][docker-release-shield]][docker-release-link] @@ -70,13 +70,13 @@ The script supports the following deployment modes; please choose the appropriat After the script finishes running, you need to check the configuration generation report, which includes the accounts and initial login passwords for the Casdoor administrator and user. - Please log in to LobeChat using the user account; the administrator account is only for managing + Please log in to LobeHub using the user account; the administrator account is only for managing Casdoor. ```log The results of the secure key generation are as follows: - LobeChat: + LobeHub: - URL: http://localhost:3210 - Username: user - Password: c66f8c @@ -118,7 +118,7 @@ The script supports the following deployment modes; please choose the appropriat ### Access Application - Visit your LobeChat service at [http://localhost:3210](http://localhost:3210). The account credentials for the application can be found in the report from step `2`. + Visit your LobeHub service at [http://localhost:3210](http://localhost:3210). The account credentials for the application can be found in the report from step `2`. ### Port Mode @@ -136,13 +136,13 @@ The script supports the following deployment modes; please choose the appropriat After the script finishes running, please check the configuration generation report for the Casdoor administrator account, user account, and their initial login passwords. - Please log in to LobeChat using the user account; the administrator account is only for managing + Please log in to LobeHub using the user account; the administrator account is only for managing Casdoor. ```log The results of the secure key generation are as follows: - LobeChat: + LobeHub: - URL: http://your_server_ip:3210 - Username: user - Password: 837e26 @@ -183,7 +183,7 @@ The script supports the following deployment modes; please choose the appropriat ### Access Application - You can access your LobeChat service at `http://your_server_ip:3210`. 
The account credentials for the application can be found in the report from step `2`. + You can access your LobeHub service at `http://your_server_ip:3210`. The account credentials for the application can be found in the report from step `2`. If your service can accessed via the public network, @@ -231,7 +231,7 @@ The script supports the following deployment modes; please choose the appropriat In domain mode, you need to complete the following configurations based on script prompts: - - Domain setup for the LobeChat service: `lobe.example.com` + - Domain setup for the LobeHub service: `lobe.example.com` - Domain setup for the Minio service: `minio.example.com` - Domain setup for the Casdoor service: `auth.example.com` - Choose the access protocol: `http` or `https` @@ -252,13 +252,13 @@ The script supports the following deployment modes; please choose the appropriat After the script finishes running, you need to check the configuration generation report, which includes the initial login password for the Casdoor administrator. - Please log in to LobeChat using the user account; the administrator account is only for managing + Please log in to LobeHub using the user account; the administrator account is only for managing Casdoor. ```log The results of the secure key generation are as follows: - LobeChat: + LobeHub: - URL: https://lobe.example.com - Username: user - Password: 837e26 @@ -299,7 +299,7 @@ The script supports the following deployment modes; please choose the appropriat ### Access Application - You can access your LobeChat service via `https://lobe.example.com`. The account credentials for the application can be found in the report from step `3`. + You can access your LobeHub service via `https://lobe.example.com`. The account credentials for the application can be found in the report from step `3`. 
If your service can accessed via the public network, @@ -311,7 +311,7 @@ The script supports the following deployment modes; please choose the appropriat ## Custom Deployment -This section mainly introduces the configurations that need to be modified to customize the deployment of the LobeChat service in different network environments. Before starting, you can download the [Docker Compose configuration file](https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml) and the [environment variable configuration file](https://raw.githubusercontent.com/lobehub/lobe-chat/refs/heads/main/docker-compose/local/.env.example). +This section mainly introduces the configurations that need to be modified to customize the deployment of the LobeHub service in different network environments. Before starting, you can download the [Docker Compose configuration file](https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml) and the [environment variable configuration file](https://raw.githubusercontent.com/lobehub/lobe-chat/refs/heads/main/docker-compose/local/.env.example). ```sh curl -O https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml @@ -326,14 +326,14 @@ mv .env.example .env ### Prerequisites -Generally, to fully run the LobeChat database version, you will need at least the following four services: +Generally, to fully run the LobeHub database version, you will need at least the following four services: -- The LobeChat database version itself +- The LobeHub database version itself - PostgreSQL database with PGVector plugin - Object storage service that supports S3 protocol -- An SSO authentication service supported by LobeChat +- An SSO authentication service supported by LobeHub -These services can be combined through self-hosting or online cloud services to meet various deployment needs. 
In this article, we provide a Docker Compose configuration entirely based on open-source self-hosted services, which can be used directly to start the LobeChat database version or modified to suit your requirements. +These services can be combined through self-hosting or online cloud services to meet various deployment needs. In this article, we provide a Docker Compose configuration entirely based on open-source self-hosted services, which can be used directly to start the LobeHub database version or modified to suit your requirements. We use [MinIO](https://github.com/minio/minio) as the local S3 object storage service and [Casdoor](https://github.com/casdoor/casdoor) as the local authentication service by default. @@ -348,15 +348,15 @@ Now, we will introduce the necessary configurations for running these services: 1. Casdoor -- LobeChat requires communication with Casdoor, so you need to configure Casdoor's Issuer. +- LobeHub requires communication with Casdoor, so you need to configure Casdoor's Issuer. ```env AUTH_CASDOOR_ISSUER=https://auth.example.com ``` -This configuration will affect LobeChat's login authentication service, and you need to ensure that the URL of the Casdoor service is correct. You can find common manifestations and solutions for errors in this configuration in the [FAQ](#faq). +This configuration will affect LobeHub's login authentication service, and you need to ensure that the URL of the Casdoor service is correct. You can find common manifestations and solutions for errors in this configuration in the [FAQ](#faq). -- Additionally, you need to allow the callback URL in Casdoor to point to the LobeChat address: +- Additionally, you need to allow the callback URL in Casdoor to point to the LobeHub address: Please add a line in the `Authentication -> Application` -> `` -> `Redirect URI` in Casdoor's web panel: @@ -372,7 +372,7 @@ origin=https://auth.example.com 2. 
MinIO -- LobeChat needs to provide a public access URL for object files for the LLM service provider, hence you need to configure MinIO's Endpoint. +- LobeHub needs to provide a public access URL for object files for the LLM service provider, hence you need to configure MinIO's Endpoint. ```env S3_PUBLIC_DOMAIN=https://minio.example.com @@ -423,13 +423,13 @@ Solutions: lobe-chat | [auth][error] TypeError: fetch failed ``` -Cause: LobeChat cannot access the authentication service. +Cause: LobeHub cannot access the authentication service. Solutions: -- Check whether your authentication service is running properly and whether LobeChat's network can reach the authentication service. +- Check whether your authentication service is running properly and whether LobeHub's network can reach the authentication service. -- A straightforward troubleshooting method is to use the `curl` command in the LobeChat container terminal to access your authentication service at `https://auth.example.com/.well-known/openid-configuration`. If JSON format data is returned, it indicates your authentication service is functioning correctly. +- A straightforward troubleshooting method is to use the `curl` command in the LobeHub container terminal to access your authentication service at `https://auth.example.com/.well-known/openid-configuration`. If JSON format data is returned, it indicates your authentication service is functioning correctly. #### OAuth Token Exchange Failures with Reverse Proxy @@ -445,7 +445,7 @@ docker compose up -d ````markdown ## Extended Configuration -To enhance your LobeChat service, you can perform the following extended configurations according to your needs. +To enhance your LobeHub service, you can perform the following extended configurations according to your needs. ### Use MinIO to Store Casdoor Avatars @@ -495,7 +495,7 @@ Allow users to change their avatars in Casdoor. 3. In Casdoor's `Authentication -> Providers`, associate the MinIO S3 service. 
Below is an example configuration: - ![casdoor](https://github.com/user-attachments/assets/71035610-0706-434e-9488-ab5819b55330) + ![casdoor](/blog/assets18bb134dbc5792d6a624199cca8bf7d3.webp) Here, the client ID and client secret correspond to the `Access Key` and `Secret Key` from the previous step; replace `192.168.31.251` with `your_server_ip`. @@ -520,7 +520,7 @@ In the following, it is assumed that in addition to the above services, you are The domain and corresponding service port descriptions are as follows: -- `lobe.example.com`: This is your LobeChat service domain, which needs to reverse proxy to the LobeChat service port, default is `3210`. +- `lobe.example.com`: This is your LobeHub service domain, which needs to reverse proxy to the LobeHub service port, default is `3210`. - `auth.example.com`: This is your Logto UI domain, which needs to reverse proxy to the Logto WebUI service port, default is `8000`. - `minio.example.com`: This is your MinIO API domain, which needs to reverse proxy to the MinIO API service port, default is `9000`. - `minio-ui.example.com`: Optional, this is your MinIO UI domain, which needs to reverse proxy to the MinIO WebUI service port, default is `9001`. @@ -633,7 +633,7 @@ After logging in, perform the following actions: 1. In `User Management -> Organizations`, add a new organization with the name and display name `Lobe Users`. Keep the rest as default. 2. In `Authentication -> Apps`, add a new application. -- Name and display name should be `LobeChat`. +- Name and display name should be `LobeHub`. - Organization should be `Lobe Users`. - Add a line in Redirect URLs as `https://lobe.example.com/api/auth/callback/casdoor`. - Disable all login methods except password. @@ -651,7 +651,7 @@ After logging in, perform the following actions: This article uses MinIO as an example to explain the configuration process. If you are using another S3 service provider, please refer to their documentation for configuration. 
- Please remember to configure the corresponding S3 service provider's CORS settings to ensure that LobeChat can access the S3 service correctly. + Please remember to configure the corresponding S3 service provider's CORS settings to ensure that LobeHub can access the S3 service correctly. In this document, you need to allow cross-origin requests from `https://lobe.example.com`. This can either be configured in MinIO WebUI under `Configuration - API - Cors Allow Origin`, or in the Docker Compose configuration under `minio - environment - MINIO_API_CORS_ALLOW_ORIGIN`. @@ -667,20 +667,20 @@ You first need to access the WebUI for configuration: 2. In the left panel under User / Access Keys, click `Create New Access Key`, no additional modifications needed, and fill the generated `Access Key` and `Secret Key` into your `.env` file under `S3_ACCESS_KEY_ID` and `S3_SECRET_ACCESS_KEY`. - Create MinIO Access Key +Create MinIO Access Key -3. Restart the LobeChat service: +3. Restart the LobeHub service: ```sh docker compose up -d ``` -At this point, you have successfully deployed the LobeChat database version, and you can access your LobeChat service at `https://lobe.example.com`. +At this point, you have successfully deployed the LobeHub database version, and you can access your LobeHub service at `https://lobe.example.com`. #### Configuring Internal Server Communication with `INTERNAL_APP_URL` - If you are deploying LobeChat behind a CDN (like Cloudflare) or reverse proxy, you may want to configure internal server-to-server communication to bypass the CDN/proxy layer for better performance. + If you are deploying LobeHub behind a CDN (like Cloudflare) or reverse proxy, you may want to configure internal server-to-server communication to bypass the CDN/proxy layer for better performance. 
You can configure the `INTERNAL_APP_URL` environment variable: @@ -733,7 +733,7 @@ CASDOOR_PORT=8000 MINIO_PORT=9000 # Postgres related, which are the necessary environment variables for DB -LOBE_DB_NAME=lobechat +LOBE_DB_NAME=LobeHub POSTGRES_PASSWORD=uWNZugjBqixf8dxC # Casdoor secret @@ -762,7 +762,7 @@ services: - '${MINIO_PORT}:${MINIO_PORT}' # MinIO API - '9001:9001' # MinIO Console - '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor - - '${LOBE_PORT}:3210' # LobeChat + - '${LOBE_PORT}:3210' # LobeHub command: tail -f /dev/null networks: - lobe-network diff --git a/docs/self-hosting/server-database/docker-compose.zh-CN.mdx b/docs/self-hosting/server-database/docker-compose.zh-CN.mdx index e187c034c5..10b269462b 100644 --- a/docs/self-hosting/server-database/docker-compose.zh-CN.mdx +++ b/docs/self-hosting/server-database/docker-compose.zh-CN.mdx @@ -1,14 +1,14 @@ --- -title: 通过 Docker Compose 部署 LobeChat -description: 学习如何使用 Docker Compose 部署 LobeChat 服务,包括各种服务的配置教程。 +title: 通过 Docker Compose 部署 LobeHub +description: 学习如何使用 Docker Compose 部署 LobeHub 服务,包括各种服务的配置教程。 tags: - Docker Compose - - LobeChat + - LobeHub - Docker 容器 - 部署指引 --- -# 使用 Docker Compose 部署 LobeChat 服务端数据库版本 +# 使用 Docker Compose 部署 LobeHub 服务端数据库版本
[![][docker-release-shield]][docker-release-link] @@ -66,11 +66,11 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN 你需要在脚本运行结束后查看配置生成报告,包括 Casdoor 管理员的帐号、用户账号和它们的初始登录密码。 - 请使用用户账号登录 LobeChat,管理员账号仅用于管理 Casdoor。 + 请使用用户账号登录 LobeHub,管理员账号仅用于管理 Casdoor。 ```log 安全密钥生成结果如下: - LobeChat: + LobeHub: - URL: http://localhost:3210 - Username: user - Password: c66f8c @@ -112,7 +112,7 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN ### 访问应用 - 通过 [http://localhost:3210](http://localhost:3210) 访问你的 LobeChat 服务。应用的账号密码在步骤`2`的报告中。 + 通过 [http://localhost:3210](http://localhost:3210) 访问你的 LobeHub 服务。应用的账号密码在步骤`2`的报告中。 ### 端口模式 @@ -129,11 +129,11 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN 你需要在脚本运行结束后查看配置生成报告,包括 Casdoor 管理员的帐号、用户账号和它们的初始登录密码。 - 请使用用户账号登录 LobeChat,管理员账号仅用于管理 Casdoor。 + 请使用用户账号登录 LobeHub,管理员账号仅用于管理 Casdoor。 ```log 安全密钥生成结果如下: - LobeChat: + LobeHub: - URL: http://your_server_ip:3210 - Username: user - Password: 837e26 @@ -174,7 +174,7 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN ### 访问应用 - 你可以通过 `http://your_server_ip:3210` 访问你的 LobeChat 服务。应用的账号密码在步骤`2`的报告中。 + 你可以通过 `http://your_server_ip:3210` 访问你的 LobeHub 服务。应用的账号密码在步骤`2`的报告中。 请注意,如果你的服务能够被公网访问,我们强烈建议你参考 [文档](https://lobehub.com/docs/self-hosting/advanced/auth/next-auth/casdoor) 关闭注册功能。 @@ -218,7 +218,7 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN 在域名模式中,你需要根据脚本提示完成: - - LobeChat 服务的域名设置:`lobe.example.com` + - LobeHub 服务的域名设置:`lobe.example.com` - Minio 服务的域名设置:`minio.example.com` - Casdoor 服务的域名设置:`auth.example.com` - 选择访问协议:`http` 或 `https` @@ -238,11 +238,11 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN 你需要在脚本运行结束后查看配置生成报告,包括 Casdoor 管理员的初始登录密码。 - 请使用用户账号登录 LobeChat,管理员账号仅用于管理 Casdoor。 + 请使用用户账号登录 LobeHub,管理员账号仅用于管理 Casdoor。 ```log 安全密钥生成结果如下: - LobeChat: + LobeHub: - URL: https://lobe.example.com - Username: user - Password: 837e26 @@ -283,7 +283,7 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN ### 访问应用 - 你可以通过 `https://lobe.example.com` 访问你的 
LobeChat 服务。应用的账号密码在步骤`3`的报告中。 + 你可以通过 `https://lobe.example.com` 访问你的 LobeHub 服务。应用的账号密码在步骤`3`的报告中。 请注意,如果你的服务能够被公网访问,我们强烈建议你参考 [文档](https://lobehub.com/docs/self-hosting/advanced/auth/next-auth/casdoor) 关闭注册功能。 @@ -292,7 +292,7 @@ bash <(curl -fsSL https://lobe.li/setup.sh) -l zh_CN ## 自定义部署 -该章节主要为你介绍在不同的网络环境下自定义部署 LobeChat 服务必须要修改的配置。在开始前,你可以先下载 [Docker Compose 配置文件](https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml) 以及 [环境变量配置文件](https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/.env.zh-CN.example)。 +该章节主要为你介绍在不同的网络环境下自定义部署 LobeHub 服务必须要修改的配置。在开始前,你可以先下载 [Docker Compose 配置文件](https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml) 以及 [环境变量配置文件](https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/.env.zh-CN.example)。 ```sh curl -O https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml @@ -307,14 +307,14 @@ mv .env.zh-CN.example .env ### 预备知识 -一般来讲,想要完整的运行 LobeChat 数据库版本,你需要至少拥有如下四个服务 +一般来讲,想要完整的运行 LobeHub 数据库版本,你需要至少拥有如下四个服务 -- LobeChat 数据库版本自身 +- LobeHub 数据库版本自身 - 带有 PGVector 插件的 PostgreSQL 数据库 - 支持 S3 协议的对象存储服务 -- 受 LobeChat 支持的 SSO 登录鉴权服务 +- 受 LobeHub 支持的 SSO 登录鉴权服务 -这些服务可以通过自建或者在线云服务组合搭配,以满足不同层次的部署需求。本文中,我们提供了完全基于开源自建服务的 Docker Compose 配置,你可以直接使用这份配置文件来启动 LobeChat 数据库版本,也可以对之进行修改以适应你的需求。 +这些服务可以通过自建或者在线云服务组合搭配,以满足不同层次的部署需求。本文中,我们提供了完全基于开源自建服务的 Docker Compose 配置,你可以直接使用这份配置文件来启动 LobeHub 数据库版本,也可以对之进行修改以适应你的需求。 我们默认使用 [MinIO](https://github.com/minio/minio) 作为本地 S3 对象存储服务,使用 [Casdoor](https://github.com/casdoor/casdoor) 作为本地鉴权服务。 @@ -328,15 +328,15 @@ mv .env.zh-CN.example .env 1. 
Casdoor -- LobeChat 需要与 Casdoor 通讯,因此你需要配置 Casdoor 的 Issuer 。 +- LobeHub 需要与 Casdoor 通讯,因此你需要配置 Casdoor 的 Issuer 。 ```env AUTH_CASDOOR_ISSUER=https://auth.example.com ``` -该配置会影响 LobeChat 的登录鉴权服务,你需要确保 Casdoor 服务的地址正确。你可以在 [常见问题](#常见问题) 中找到该配置错误的常见现象及解决方案。 +该配置会影响 LobeHub 的登录鉴权服务,你需要确保 Casdoor 服务的地址正确。你可以在 [常见问题](#常见问题) 中找到该配置错误的常见现象及解决方案。 -- 同时,你也需要在 Casdoor 中允许回调地址为 LobeChat 的地址: +- 同时,你也需要在 Casdoor 中允许回调地址为 LobeHub 的地址: 请在 Casdoor 的 Web 面板的 `身份认证 -> 应用` -> `<应用ID,默认为 app-built-in>` -> `重定向URL` 中添加一行: @@ -352,7 +352,7 @@ origin=https://auth.example.com 2. MinIO -- LobeChat 需要为 LLM 服务提供商提供文件对象的公网访问地址,因此你需要配置 MinIO 的 Endpoint 。 +- LobeHub 需要为 LLM 服务提供商提供文件对象的公网访问地址,因此你需要配置 MinIO 的 Endpoint 。 ```env S3_PUBLIC_DOMAIN=https://minio.example.com @@ -403,13 +403,13 @@ lobe-chat | [auth][error] r3: "response" is not a conform Authorization Ser lobe-chat | [auth][error] TypeError: fetch failed ``` -成因:LobeChat 无法访问鉴权服务。 +成因:LobeHub 无法访问鉴权服务。 解决方案: -- 请检查你的鉴权服务是否正常运行,以及 LobeChat 所在的网络是否能够访问到鉴权服务。 +- 请检查你的鉴权服务是否正常运行,以及 LobeHub 所在的网络是否能够访问到鉴权服务。 -- 一个直接的排查方式,你可以在 LobeChat 容器的终端中,使用 `curl` 命令访问你的鉴权服务 `https://auth.example.com/.well-known/openid-configuration`,如果返回了 JSON 格式的数据,则说明你的鉴权服务正常运行。 +- 一个直接的排查方式,你可以在 LobeHub 容器的终端中,使用 `curl` 命令访问你的鉴权服务 `https://auth.example.com/.well-known/openid-configuration`,如果返回了 JSON 格式的数据,则说明你的鉴权服务正常运行。 #### 反向代理下 OAuth 令牌交换失败 @@ -424,7 +424,7 @@ docker compose up -d ## 拓展配置 -为了完善你的 LobeChat 服务,你可以根据你的需求进行以下拓展配置。 +为了完善你的 LobeHub 服务,你可以根据你的需求进行以下拓展配置。 ### 使用 MinIO 存储 Casdoor 头像 @@ -473,7 +473,7 @@ docker compose up -d 3. 
在 Casdoor 的 `身份认证 -> 提供商` 中关联 MinIO S3 服务,以下是一个示例配置: - ![casdoor](https://github.com/user-attachments/assets/71035610-0706-434e-9488-ab5819b55330) + ![casdoor](/blog/assets18bb134dbc5792d6a624199cca8bf7d3.webp) 其中,客户端 ID、客户端密钥为上一步创建的访问密钥中的 `Access Key` 和 `Secret Key`,`192.168.31.251` 应当被替换为 `your_server_ip`。 @@ -496,7 +496,7 @@ docker compose up -d 域名和配套服务端口说明如下: -- `lobe.example.com`:为你的 LobeChat 服务端域名,需要反向代理到 LobeChat 服务端口,默认为 `3210` +- `lobe.example.com`:为你的 LobeHub 服务端域名,需要反向代理到 LobeHub 服务端口,默认为 `3210` - `auth.example.com`:为你的 Logto UI 域名,需要反向代理到 Logto WebUI 服务端口,默认为 `8000` - `minio.example.com`:为你的 MinIO API 域名,需要反向代理到 MinIO API 服务端口,默认为 `9000` - `minio-ui.example.com`:可选,为你的 MinIO UI 域名,需要反向代理到 MinIO WebUI 服务端口,默认为 `9001` @@ -609,7 +609,7 @@ docker compose up -d # 重新启动 1. 在 `用户管理 -> 组织` 中,添加一个新的组织。名称与显示名称为 `Lobe Users`。其余保持默认即可。 2. 在 `身份认证 -> 应用` 中,添加一个新的应用。 -- 名称与显示名称为 `LobeChat`。 +- 名称与显示名称为 `LobeHub`。 - 组织为 `Lobe Users`。 - 重定向 URLS 中添加一行 为 `https://lobe.example.com/api/auth/callback/casdoor`。 - 关闭除密码外的登录方式 。 @@ -626,7 +626,7 @@ docker compose up -d # 重新启动 本文以 MinIO 为例,解释配置过程,如果你使用的是其他 S3 服务商,请参照其文档进行配置。 - 请记得注意配置对应 S3 服务商的 CORS 跨域配置,以确保 LobeChat 能够正常访问 S3 服务。 + 请记得注意配置对应 S3 服务商的 CORS 跨域配置,以确保 LobeHub 能够正常访问 S3 服务。 在本文中,你需要允许 `https://lobe.example.com` 的跨域请求。这既可以在 MinIO WebUI 的 `Configuration - API - Cors Allow Origin` 中配置,也可以在 Docker Compose 中的 `minio - environment - MINIO_API_CORS_ALLOW_ORIGIN` 中配置。 @@ -642,20 +642,20 @@ docker compose up -d # 重新启动 2. 在左侧面板 User / Access Keys 处,点击 `Create New Access Key`,无需额外修改,将生成的 `Access Key` 和 `Secret Key` 填入你的 `.env` 文件中的 `S3_ACCESS_KEY_ID` 和 `S3_SECRET_ACCESS_KEY` 中 - 创建 MinIO 访问密钥 +创建 MinIO 访问密钥 -3. 重启 LobeChat 服务: +3. 
重启 LobeHub 服务: ```sh docker compose up -d ``` -至此,你已经成功部署了 LobeChat 数据库版本,你可以通过 `https://lobe.example.com` 访问你的 LobeChat 服务。 +至此,你已经成功部署了 LobeHub 数据库版本,你可以通过 `https://lobe.example.com` 访问你的 LobeHub 服务。 #### 使用 `INTERNAL_APP_URL` 配置内部服务器通信 - 如果你在 CDN(如 Cloudflare)或反向代理后部署 LobeChat,你可以配置内部服务器到服务器通信以绕过 CDN / 代理层,以获得更好的性能。 + 如果你在 CDN(如 Cloudflare)或反向代理后部署 LobeHub,你可以配置内部服务器到服务器通信以绕过 CDN / 代理层,以获得更好的性能。 你可以配置 `INTERNAL_APP_URL` 环境变量: @@ -708,7 +708,7 @@ CASDOOR_PORT=8000 MINIO_PORT=9000 # Postgres related, which are the necessary environment variables for DB -LOBE_DB_NAME=lobechat +LOBE_DB_NAME=LobeHub POSTGRES_PASSWORD=uWNZugjBqixf8dxC # Casdoor secret @@ -737,7 +737,7 @@ services: - '${MINIO_PORT}:${MINIO_PORT}' # MinIO API - '9001:9001' # MinIO Console - '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor - - '${LOBE_PORT}:3210' # LobeChat + - '${LOBE_PORT}:3210' # LobeHub command: tail -f /dev/null networks: - lobe-network diff --git a/docs/self-hosting/server-database/docker.mdx b/docs/self-hosting/server-database/docker.mdx index 43e80ca4e7..ad7c1bdf98 100644 --- a/docs/self-hosting/server-database/docker.mdx +++ b/docs/self-hosting/server-database/docker.mdx @@ -1,10 +1,10 @@ --- -title: Deploying LobeChat Database with Docker +title: Deploying LobeHub Database with Docker description: >- - Learn how to deploy the LobeChat server database version using Docker on Linux + Learn how to deploy the LobeHub server database version using Docker on Linux and local machines. tags: - - LobeChat + - LobeHub - Docker - Database Deployment - Postgres @@ -22,16 +22,21 @@ tags: This article assumes that you are familiar with the basic principles and processes of deploying - the LobeChat server database version, so it only includes content related to core environment - variable configuration. If you are not familiar with the deployment principles of the LobeChat + the LobeHub server database version, so it only includes content related to core environment + variable configuration. 
If you are not familiar with the deployment principles of the LobeHub server database version, please refer to [Deploying Server Database](/docs/self-hosting/server-database) first. + + Due to the inability to expose `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY` using Docker environment variables, you cannot use Clerk as an authentication service when deploying LobeHub using Docker / Docker Compose. + + If you do need Clerk as an authentication service, you might consider deploying using Vercel or building your own image. + ## Deploying on a Linux Server -Here is the process for deploying the LobeChat server database version on a Linux server: +Here is the process for deploying the LobeHub server database version on a Linux server: ### Create a Postgres Database Instance @@ -48,7 +53,7 @@ Here is the process for deploying the LobeChat server database version on a Linu The pgvector plugin provides vector search capabilities for Postgres, which is an important - component for LobeChat to implement RAG. + component for LobeHub to implement RAG. @@ -79,8 +84,8 @@ Here is the process for deploying the LobeChat server database version on a Linu S3_ACCESS_KEY_ID=xxxxxxxxxx S3_SECRET_ACCESS_KEY=xxxxxxxxxx S3_ENDPOINT=https://xxxxxxxxxx.r2.cloudflarestorage.com - S3_BUCKET=lobechat - S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com + S3_BUCKET=LobeHub + S3_PUBLIC_DOMAIN=https://s3-for-LobeHub.your-domain.com ``` @@ -120,7 +125,7 @@ Here is the process for deploying the LobeChat server database version on a Linu ## Using Locally (Mac / Windows) -The data version of LobeChat also supports direct use on a local Mac/Windows machine. +The data version of LobeHub also supports direct use on a local Mac/Windows machine. Here, we assume that you have a pg instance available on port 5432 locally on your Mac/Windows, with the account `postgres` and password `mysecretpassword`, accessible at `localhost:5432`. 
@@ -136,8 +141,8 @@ $ docker run -it -d --name lobe-chat-database -p 3210:3210 \ -e S3_ACCESS_KEY_ID=xxxxxxxxxx \ -e S3_SECRET_ACCESS_KEY=xxxxxxxxxx \ -e S3_ENDPOINT=https://xxxxxxxxxx.r2.cloudflarestorage.com \ - -e S3_BUCKET=lobechat \ - -e S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com \ + -e S3_BUCKET=lobehub \ + -e S3_PUBLIC_DOMAIN=https://s3-for-lobehub.your-domain.com \ lobehub/lobe-chat-database ``` diff --git a/docs/self-hosting/server-database/docker.zh-CN.mdx b/docs/self-hosting/server-database/docker.zh-CN.mdx index d59597b4d4..acf4b8b18f 100644 --- a/docs/self-hosting/server-database/docker.zh-CN.mdx +++ b/docs/self-hosting/server-database/docker.zh-CN.mdx @@ -1,9 +1,9 @@ --- -title: 使用 Docker 部署 LobeChat 数据库 -description: 详细步骤教你如何在 Docker 中部署 LobeChat 服务端数据库。 +title: 使用 Docker 部署 LobeHub 数据库 +description: 详细步骤教你如何在 Docker 中部署 LobeHub 服务端数据库。 tags: - Docker - - LobeChat + - LobeHub - 数据库部署 - Postgres --- @@ -19,17 +19,23 @@ tags:
- 本文已经假定你了解了 LobeChat 服务端数据库版本(下简称 DB - 版)的部署基本原理和流程,因此只包含核心环境变量配置的内容。如果你还不了解 LobeChat DB + 本文已经假定你了解了 LobeHub 服务端数据库版本(下简称 DB + 版)的部署基本原理和流程,因此只包含核心环境变量配置的内容。如果你还不了解 LobeHub DB 版的部署原理,请先查阅 [使用服务端数据库部署](/zh/docs/self-hosting/server-database) 。 此外,针对国内的腾讯云储存桶用户,可查询[配置腾讯云 COS 存储服务](/zh/docs/self-hosting/advanced/s3/tencent-cloud)。 + + 由于无法使用 Docker 环境变量暴露 `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY`,使用 Docker / Docker Compose + 部署 LobeHub 时,你不能使用 Clerk 作为登录鉴权服务。 + + 如果你确实需要 Clerk 作为登录鉴权服务,你可以考虑使用 Vercel 部署或者自行构建镜像。 + ## 在 Linux 服务器上部署 -以下是在 Linux 服务器上部署 LobeChat DB 版的流程: +以下是在 Linux 服务器上部署 LobeHub DB 版的流程: ### 创建 Postgres 数据库实例 @@ -45,7 +51,7 @@ tags: 上述指令会创建一个名为 `my-postgres`,并且网络为 `pg` 的 PG 实例,其中 `pgvector/pgvector:pg16` 是一个 Postgres 16 的镜像,且默认安装了 pgvector 插件。 - pgvector 插件为 Postgres 提供了向量搜索的能力,是 LobeChat 实现 RAG 的重要构件之一。 + pgvector 插件为 Postgres 提供了向量搜索的能力,是 LobeHub 实现 RAG 的重要构件之一。 @@ -77,9 +83,9 @@ tags: S3_SECRET_ACCESS_KEY=xxxxxxxxxx # 用于 S3 API 访问的域名 S3_ENDPOINT=https://xxxxxxxxxx.r2.cloudflarestorage.com - S3_BUCKET=lobechat + S3_BUCKET=LobeHub # 用于外网访问 S3 的公共域名,需配置 CORS - S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com + S3_PUBLIC_DOMAIN=https://s3-for-LobeHub.your-domain.com # S3_REGION=ap-chengdu # 如果需要指定地域 ``` @@ -120,7 +126,7 @@ tags: ## 在本地(Mac / Windows) 上使用 -LobeChat 的 DB 版也支持直接在本地的 Mac/Windows 本地使用。 +LobeHub 的 DB 版也支持直接在本地的 Mac/Windows 本地使用。 在此我们已假设你的本地有一个 5432 端口可用,账号为 `postgres` ,密码是 `mysecretpassword` 的 pg 实例,它在 `localhost:5432` 可用。 @@ -136,8 +142,8 @@ $ docker run -it -d --name lobe-chat-database -p 3210:3210 \ -e S3_ACCESS_KEY_ID=xxxxxxxxxx \ -e S3_SECRET_ACCESS_KEY=xxxxxxxxxx \ -e S3_ENDPOINT=https://xxxxxxxxxx.r2.cloudflarestorage.com \ - -e S3_BUCKET=lobechat \ - -e S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com \ + -e S3_BUCKET=LobeHub \ + -e S3_PUBLIC_DOMAIN=https://s3-for-LobeHub.your-domain.com \ lobehub/lobe-chat-database ``` diff --git a/docs/self-hosting/server-database/dokploy.mdx 
b/docs/self-hosting/server-database/dokploy.mdx index 15d534713d..9cb8dd5441 100644 --- a/docs/self-hosting/server-database/dokploy.mdx +++ b/docs/self-hosting/server-database/dokploy.mdx @@ -1,18 +1,18 @@ --- -title: Deploy LobeChat with database on Dokploy +title: Deploy LobeHub with database on Dokploy description: >- - Learn how to deploy LobeChat with database on Dokploy with ease, including: + Learn how to deploy LobeHub with database on Dokploy with ease, including: database, authentication and S3 storage service. tags: - - Deploy LobeChat - - Dokploy Deployment - - Better Auth - - S3 Storage + - Deploy LobeHub + - Dokploy Deployment + - Better Auth + - S3 Storage --- # Deploying Server Database Version on Dokploy. -This article will detail how to deploy the server database version of LobeChat. +This article will detail how to deploy the server database version of LobeHub. ## 1. Preparation Work @@ -24,11 +24,11 @@ curl -sSL https://dokploy.com/install.sh | sh 1. Connect your GitHub to Dokploy in the Settings / Git section according to the prompt. -![](https://github.com/user-attachments/assets/c75eb19e-e0f5-4135-91e4-55be8be8a996) +![](/blog/assets0f97d1dfccd5ba07172aff71ff9acd7b.webp) 2. Enter the Projects interface to create a Project. -![](https://github.com/user-attachments/assets/4e04928d-0171-48d1-afff-e22fc2faaf4e) +![](/blog/assetsb26b68a4875a6510ddc202dd4b40d010.webp) ### Configure S3 Storage Service @@ -69,9 +69,9 @@ You also need to configure the `JWKS_KEY` environment variable for signing and v Enter the previously created Project, click on Create Service, and select Database. In the Database interface, choose PostgreSQL, then set the database name, user, and password. In the Docker image field, enter `pgvector/pgvector:pg17`, and finally click Create to create the database. 
-![](https://github.com/user-attachments/assets/97899819-278f-42fd-804a-144d521d4b4f) +![](/blog/assets7006b60baaf62aa0d95cd40456e24afe.webp) -Enter the created database and set an unused port in External Credentials to allow external access; otherwise, LobeChat will not be able to connect to the database. You can view the Postgres database connection URL in External Host, as shown below: +Enter the created database and set an unused port in External Credentials to allow external access; otherwise, LobeHub will not be able to connect to the database. You can view the Postgres database connection URL in External Host, as shown below: ```shell postgresql://postgres:wAbLxfXSwkxxxxxx@45.577.281.48:5432/postgres @@ -79,21 +79,21 @@ postgresql://postgres:wAbLxfXSwkxxxxxx@45.577.281.48:5432/postgres Finally, click Deploy to deploy the database. -![](https://github.com/user-attachments/assets/b4e89dd4-877b-43fe-aa42-4680de17ba8e) +![](/blog/assets1b9283f9cc5fc5073ff9cffc24880e96.webp) -## Deploy LobeChat on Dokploy. +## Deploy LobeHub on Dokploy. -Click "Create Service", select "Application", and create the LobeChat application. +Click "Create Service", select "Application", and create the LobeHub application. -![](https://github.com/user-attachments/assets/4cbbbcce-36be-48ff-bb0b-31607a0bba5c) +![](/blog/assetsb33085e7553d2b7194005b102184553e.webp) -Enter the created LobeChat application, select the forked lobe-chat project and branch, and click Save to save. +Enter the created LobeHub application, select the forked lobe-chat project and branch, and click Save to save. -![](https://github.com/user-attachments/assets/2bb4c09d-75bb-4c46-bb2f-faf538308305) +![](/blog/assetsf0ebf396dbe9559eb3478f48f648a6e2.webp) Switch to the Environment section, fill in the environment variables, and click Save. 
-![](https://github.com/user-attachments/assets/0f79c266-cce5-4936-aabd-4c8f19196d91) +![](/blog/assets6b67dabe7b9226cdff1bace5a3b8ab18.webp) ```shell # Environment variables required for building @@ -125,14 +125,14 @@ S3_ENABLE_PATH_STYLE= After adding the environment variables and saving, click Deploy to initiate the deployment. You can check the deployment progress and log information under Deployments. -![](https://github.com/user-attachments/assets/411e2002-61f0-4010-9841-18e88ca895ec) +![](/blog/assets7c3eab218c0823fa353b1cd23afe21c3.webp) -After a successful deployment, bind your own domain to your LobeChat application and request a certificate on the Domains page. +After a successful deployment, bind your own domain to your LobeHub application and request a certificate on the Domains page. -![](https://github.com/user-attachments/assets/dd6bc4a4-3c20-4162-87fd-5cac57e5d7e7) +![](/blog/assetseebf66254337ce88357629c34e78c08d.webp) -## Check if LobeChat is working properly. +## Check if LobeHub is working properly. -Go to your LobeChat website, and if you click on the login button in the upper left corner and the login pop-up appears normally, it means you have configured it successfully. Enjoy it to the fullest! +Go to your LobeHub website, and if you click on the login button in the upper left corner and the login pop-up appears normally, it means you have configured it successfully. Enjoy it to the fullest! 
-![](https://github.com/user-attachments/assets/798ddb18-50c7-462a-a083-0c6841351d26) +![](/blog/assets11a8089b511aaa61e8982dea0a3665c5.webp) diff --git a/docs/self-hosting/server-database/dokploy.zh-CN.mdx b/docs/self-hosting/server-database/dokploy.zh-CN.mdx index 11464ab967..3c33ec2067 100644 --- a/docs/self-hosting/server-database/dokploy.zh-CN.mdx +++ b/docs/self-hosting/server-database/dokploy.zh-CN.mdx @@ -1,6 +1,6 @@ --- -title: 在 Dokploy 上部署 LobeChat 的服务端数据库版本 -description: 本文详细介绍如何在 Dokploy 中部署服务端数据库版 LobeChat,包括数据库配置、身份验证服务配置的设置步骤。 +title: 在 Dokploy 上部署 LobeHub 的服务端数据库版本 +description: 本文详细介绍如何在 Dokploy 中部署服务端数据库版 LobeHub,包括数据库配置、身份验证服务配置的设置步骤。 tags: - 服务端数据库 - Postgres @@ -13,7 +13,7 @@ tags: # 在 Dokploy 上部署服务端数据库版 -本文将详细介绍如何在 Dokploy 中部署服务端数据库版 LobeChat。 +本文将详细介绍如何在 Dokploy 中部署服务端数据库版 LobeHub。 ## 一、准备工作 @@ -25,11 +25,11 @@ curl -sSL https://dokploy.com/install.sh | sh 1. 在 Dokploy 的 Settings / Git 处根据提示将 Github 绑定到 Dokploy -![](https://github.com/user-attachments/assets/c75eb19e-e0f5-4135-91e4-55be8be8a996) +![](/blog/assets0f97d1dfccd5ba07172aff71ff9acd7b.webp) 2. 
进入 Projects 界面创建一个 Project -![](https://github.com/user-attachments/assets/4e04928d-0171-48d1-afff-e22fc2faaf4e) +![](/blog/assetsb26b68a4875a6510ddc202dd4b40d010.webp) ### 配置 S3 存储服务 @@ -70,9 +70,9 @@ S3_ENABLE_PATH_STYLE= 进入前面创建的 Project,点击 Create Service 选择 Database,在 Database 界面选择 PostgreSQL ,然后设置数据库名、用户、密码,在 Docker image 中填入 `pgvector/pgvector:pg17` 最后点击 Create 创建数据库。 -![](https://github.com/user-attachments/assets/97899819-278f-42fd-804a-144d521d4b4f) +![](/blog/assets7006b60baaf62aa0d95cd40456e24afe.webp) -进入创建的数据库,在 External Credentials 设置一个未被占用的端口,使其能能通过外部访问,否则 LobeChat 将无法连接到该数据库。你可以在 External Host 查看 Postgres 数据库连接 URL ,如下: +进入创建的数据库,在 External Credentials 设置一个未被占用的端口,使其能通过外部访问,否则 LobeHub 将无法连接到该数据库。你可以在 External Host 查看 Postgres 数据库连接 URL ,如下: ```shell postgresql://postgres:wAbLxfXSwkxxxxxx@45.577.281.48:5432/postgres @@ -80,21 +80,21 @@ postgresql://postgres:wAbLxfXSwkxxxxxx@45.577.281.48:5432/postgres 最后点击 Deploy 部署数据库 -![](https://github.com/user-attachments/assets/b4e89dd4-877b-43fe-aa42-4680de17ba8e) +![](/blog/assets1b9283f9cc5fc5073ff9cffc24880e96.webp) -## 在 Dokploy 上部署 LobeChat +## 在 Dokploy 上部署 LobeHub -点击 Create Service 选择 Application,创建 LobeChat 应用 +点击 Create Service 选择 Application,创建 LobeHub 应用 -![](https://github.com/user-attachments/assets/4cbbbcce-36be-48ff-bb0b-31607a0bba5c) +![](/blog/assetsb33085e7553d2b7194005b102184553e.webp) -进入创建的 LobeChat 应用,选择你 fork 的 lobe-chat 项目及分支,点击 Save 保存 +进入创建的 LobeHub 应用,选择你 fork 的 lobe-chat 项目及分支,点击 Save 保存 -![](https://github.com/user-attachments/assets/2bb4c09d-75bb-4c46-bb2f-faf538308305) +![](/blog/assetsf0ebf396dbe9559eb3478f48f648a6e2.webp) 切换到 Environment ,在其中填入环境变量,点击保存。 -![](https://github.com/user-attachments/assets/0f79c266-cce5-4936-aabd-4c8f19196d91) +![](/blog/assets6b67dabe7b9226cdff1bace5a3b8ab18.webp) ```shell # 构建所必需的环境变量 @@ -126,14 +126,14 @@ S3_ENABLE_PATH_STYLE= 添加完环境变量并保存后,点击 Deploy 进行部署,你可以在 Deployments 处查看部署进程及日志信息 
-![](https://github.com/user-attachments/assets/411e2002-61f0-4010-9841-18e88ca895ec) +![](/blog/assets7c3eab218c0823fa353b1cd23afe21c3.webp) -部署成功后在 Domains 页面,为你的 LobeChat 应用绑定自己的域名并申请证书。 +部署成功后在 Domains 页面,为你的 LobeHub 应用绑定自己的域名并申请证书。 -![](https://github.com/user-attachments/assets/dd6bc4a4-3c20-4162-87fd-5cac57e5d7e7) +![](/blog/assetseebf66254337ce88357629c34e78c08d.webp) -## 验证 LobeChat 是否正常工作 +## 验证 LobeHub 是否正常工作 -进入你的 LobeChat 网址,如果你点击左上角登录,可以正常显示登录弹窗,那么说明你已经配置成功了,尽情享用吧~ +进入你的 LobeHub 网址,如果你点击左上角登录,可以正常显示登录弹窗,那么说明你已经配置成功了,尽情享用吧~ -![](https://github.com/user-attachments/assets/798ddb18-50c7-462a-a083-0c6841351d26) +![](/blog/assets11a8089b511aaa61e8982dea0a3665c5.webp) diff --git a/docs/self-hosting/server-database/netlify.mdx b/docs/self-hosting/server-database/netlify.mdx index bcc0e0fd82..0213750662 100644 --- a/docs/self-hosting/server-database/netlify.mdx +++ b/docs/self-hosting/server-database/netlify.mdx @@ -1,13 +1,13 @@ --- -title: Deploy LobeChat with Database on Netlify +title: Deploy LobeHub with Database on Netlify description: >- - Learn how to deploy LobeChat on Netlify with ease, including: database, + Learn how to deploy LobeHub on Netlify with ease, including: database, authentication and S3 storage service. 
tags: - - Deploy LobeChat + - Deploy LobeHub - Netlify Deployment --- -# Deploy LobeChat with Database on Netlify +# Deploy LobeHub with Database on Netlify TODO diff --git a/docs/self-hosting/server-database/netlify.zh-CN.mdx b/docs/self-hosting/server-database/netlify.zh-CN.mdx index c71f604fa2..61dc3b92ce 100644 --- a/docs/self-hosting/server-database/netlify.zh-CN.mdx +++ b/docs/self-hosting/server-database/netlify.zh-CN.mdx @@ -1,16 +1,16 @@ --- -title: 在 Netlify 上部署 LobeChat 服务端数据库版 +title: 在 Netlify 上部署 LobeHub 服务端数据库版 description: >- - 学习如何在 Netlify 上部署 LobeChat,包括 Fork 仓库、准备 OpenAI API Key、导入到 Netlify + 学习如何在 Netlify 上部署 LobeHub,包括 Fork 仓库、准备 OpenAI API Key、导入到 Netlify 工作台、配置站点名称与环境变量等步骤。 tags: - Netlify - - LobeChat + - LobeHub - 部署教程 - OpenAI API Key - 环境配置 --- -# 使用 Netlify 部署 LobeChat 数据库版 +# 使用 Netlify 部署 LobeHub 数据库版 TODO diff --git a/docs/self-hosting/server-database/railway.mdx b/docs/self-hosting/server-database/railway.mdx index d59f1243dd..cd6c5bb95a 100644 --- a/docs/self-hosting/server-database/railway.mdx +++ b/docs/self-hosting/server-database/railway.mdx @@ -1,13 +1,13 @@ --- -title: Deploy LobeChat with Database on Railway +title: Deploy LobeHub with Database on Railway description: >- - Learn how to deploy LobeChat on Railway with ease, including: database, + Learn how to deploy LobeHub on Railway with ease, including: database, authentication and S3 storage service. 
tags: - - Deploy LobeChat + - Deploy LobeHub - Railway Deployment --- -# Deploy LobeChat with Database on Railway +# Deploy LobeHub with Database on Railway TODO diff --git a/docs/self-hosting/server-database/railway.zh-CN.mdx b/docs/self-hosting/server-database/railway.zh-CN.mdx index c51def9a1b..6347cf6e64 100644 --- a/docs/self-hosting/server-database/railway.zh-CN.mdx +++ b/docs/self-hosting/server-database/railway.zh-CN.mdx @@ -1,15 +1,15 @@ --- -title: 在 Railway 上部署 LobeChat 数据库版 -description: 学习如何在 Railway 上部署 LobeChat 应用,包括准备 OpenAI API Key、点击按钮进行部署、绑定自定义域名等步骤。 +title: 在 Railway 上部署 LobeHub 数据库版 +description: 学习如何在 Railway 上部署 LobeHub 应用,包括准备 OpenAI API Key、点击按钮进行部署、绑定自定义域名等步骤。 tags: - Railway - 部署 - - LobeChat + - LobeHub - OpenAI - API Key - 自定义域名 --- -# 使用 Railway 部署 LobeChat 数据库版 +# 使用 Railway 部署 LobeHub 数据库版 TODO diff --git a/docs/self-hosting/server-database/repocloud.mdx b/docs/self-hosting/server-database/repocloud.mdx index ee95f9601f..7d7b745b53 100644 --- a/docs/self-hosting/server-database/repocloud.mdx +++ b/docs/self-hosting/server-database/repocloud.mdx @@ -1,18 +1,18 @@ --- -title: Deploy LobeChat with Database on RepoCloud +title: Deploy LobeHub with Database on RepoCloud description: >- - Learn how to deploy LobeChat on RepoCloud with ease, including database, + Learn how to deploy LobeHub on RepoCloud with ease, including database, authentication and S3 storage service. 
tags: - - Deploy LobeChat + - Deploy LobeHub - RepoCloud Deployment - OpenAI API Key - Custom Domain Binding --- -# Deploying LobeChat Database Edition with RepoCloud +# Deploying LobeHub Database Edition with RepoCloud -If you want to deploy LobeChat Database Edition on RepoCloud, you can follow the steps below: +If you want to deploy LobeHub Database Edition on RepoCloud, you can follow the steps below: ## RepoCloud Deployment Process diff --git a/docs/self-hosting/server-database/repocloud.zh-CN.mdx b/docs/self-hosting/server-database/repocloud.zh-CN.mdx index dd9bfbc730..e2a23c5efc 100644 --- a/docs/self-hosting/server-database/repocloud.zh-CN.mdx +++ b/docs/self-hosting/server-database/repocloud.zh-CN.mdx @@ -1,17 +1,17 @@ --- -title: 在 RepoCloud 上部署 LobeChat 数据库版 -description: 学习如何在 RepoCloud 上部署 LobeChat 应用,包括准备 OpenAI API Key、点击部署按钮、绑定自定义域名等操作。 +title: 在 RepoCloud 上部署 LobeHub 数据库版 +description: 学习如何在 RepoCloud 上部署 LobeHub 应用,包括准备 OpenAI API Key、点击部署按钮、绑定自定义域名等操作。 tags: - RepoCloud - - LobeChat + - LobeHub - 部署流程 - OpenAI API Key - 自定义域名 --- -# 在 RepoCloud 上部署 LobeChat 数据库版 +# 在 RepoCloud 上部署 LobeHub 数据库版 -如果您想在 RepoCloud 上部署 LobeChat 数据库版,可以按照以下步骤进行操作: +如果您想在 RepoCloud 上部署 LobeHub 数据库版,可以按照以下步骤进行操作: ## RepoCloud 部署流程 diff --git a/docs/self-hosting/server-database/sealos.mdx b/docs/self-hosting/server-database/sealos.mdx index 3b58162377..5bf6e8a9ce 100644 --- a/docs/self-hosting/server-database/sealos.mdx +++ b/docs/self-hosting/server-database/sealos.mdx @@ -1,10 +1,10 @@ --- title: Deploy Lobe Chat Database Version on Sealos description: >- - Learn how to deploy LobeChat on Sealos with ease. Follow the provided steps to - set up LobeChat and start using it efficiently. + Learn how to deploy LobeHub on Sealos with ease. Follow the provided steps to + set up LobeHub and start using it efficiently. 
tags: - - Deploy LobeChat + - Deploy LobeHub - Sealos Deployment - OpenAI API Key - Custom Domain Binding @@ -14,8 +14,8 @@ tags: This article assumes that you are familiar with the basic principles and processes of deploying - the LobeChat server database version, so it only includes content related to core environment - variable configuration. If you are not familiar with the deployment principles of the LobeChat + the LobeHub server database version, so it only includes content related to core environment + variable configuration. If you are not familiar with the deployment principles of the LobeHub server database version, please refer to [Deploying Server Database](/docs/self-hosting/server-database) first. diff --git a/docs/self-hosting/server-database/sealos.zh-CN.mdx b/docs/self-hosting/server-database/sealos.zh-CN.mdx index 5ee18d91ef..95901922dc 100644 --- a/docs/self-hosting/server-database/sealos.zh-CN.mdx +++ b/docs/self-hosting/server-database/sealos.zh-CN.mdx @@ -1,15 +1,15 @@ --- -title: 在 Sealos 上部署 LobeChat 数据库版 -description: 学习如何在 Sealos 上部署 LobeChat,包括准备 OpenAI API Key、点击部署按钮、绑定自定义域名等操作。 +title: 在 Sealos 上部署 LobeHub 数据库版 +description: 学习如何在 Sealos 上部署 LobeHub,包括准备 OpenAI API Key、点击部署按钮、绑定自定义域名等操作。 tags: - Sealos - - LobeChat + - LobeHub - OpenAI API Key - 部署流程 - 自定义域名 --- -# 使用 Sealos 部署 LobeChat 数据库版 +# 使用 Sealos 部署 LobeHub 数据库版 本文假设你已经熟悉 Lobe Chat @@ -22,7 +22,7 @@ tags: - Logto 提供身份校验(需额外部署) - 带有 Vector 插件的 PostgreSQL 来做数据存储和向量化 - 一个对象存储 Bucket -- LobeChat Database 的实例 +- LobeHub Database 的实例 这里是在 Sealos 上部署 Lobe Chat 服务器数据库版的流程: diff --git a/docs/self-hosting/server-database/vercel.mdx b/docs/self-hosting/server-database/vercel.mdx index 971a04832c..ac6f6d30db 100644 --- a/docs/self-hosting/server-database/vercel.mdx +++ b/docs/self-hosting/server-database/vercel.mdx @@ -1,10 +1,10 @@ --- -title: Deploy LobeChat with database on Vercel +title: Deploy LobeHub with database on Vercel description: >- - Learn how to deploy LobeChat with database 
on Vercel with ease, including: + Learn how to deploy LobeHub with database on Vercel with ease, including: database, authentication and S3 storage service. tags: - - Deploy LobeChat + - Deploy LobeHub - Vercel Deployment - Better Auth - S3 Storage @@ -12,7 +12,7 @@ tags: # Deploying Server Database Version on Vercel -This article will detail how to deploy the server database version of LobeChat on Vercel, including: 1) database configuration; 2) identity authentication service configuration; 3) steps for setting up the S3 storage service. +This article will detail how to deploy the server database version of LobeHub on Vercel, including: 1) database configuration; 2) identity authentication service configuration; 3) steps for setting up the S3 storage service. Before proceeding, please make sure of the following: @@ -53,7 +53,7 @@ This article will detail how to deploy the server database version of LobeChat o An example of filling in Vercel is as follows: - {'Add + {'Add @@ -69,7 +69,7 @@ This article will detail how to deploy the server database version of LobeChat o An example of filling in Vercel is as follows: - {'Add + {'Add @@ -89,7 +89,7 @@ This article will detail how to deploy the server database version of LobeChat o ### Add the `APP_URL` Environment Variable - Finally, you need to add the `APP_URL` environment variable, which specifies the URL address of the LobeChat application. + Finally, you need to add the `APP_URL` environment variable, which specifies the URL address of the LobeHub application. ## 2. Configure Authentication Service @@ -114,6 +114,55 @@ The server-side database needs to be paired with a user authentication service t For advanced features like SSO providers, magic link login, and email verification, see [Authentication Service](/docs/self-hosting/advanced/auth). 
+ + ### Add Public and Private Key Environment Variables in Vercel + + In Vercel's deployment environment variables, add the `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY` and `CLERK_SECRET_KEY` environment variables. You can click on "API Keys" in the menu, then copy the corresponding values and paste them into Vercel's environment variables. + + {'Find + + The environment variables required for this step are as follows: + + ```shell + NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=pk_live_xxxxxxxxxxx + CLERK_SECRET_KEY=sk_live_xxxxxxxxxxxxxxxxxxxxxx + ``` + + Add the above variables to Vercel: + + {'Add + + ### Create and Configure Webhook in Clerk + + Since we let Clerk fully handle user authentication and management, we need Clerk to notify our application and store data in the database when there are changes in the user's lifecycle (create, update, delete). We achieve this requirement through the Webhook provided by Clerk. + + We need to add an endpoint in Clerk's Webhooks to inform Clerk to send notifications to this endpoint when a user's information changes. + + {'Add + + Fill in the endpoint with the URL of your Vercel project, such as `https://your-project.vercel.app/api/webhooks/clerk`. Then, subscribe to events by checking the three user events (`user.created`, `user.deleted`, `user.updated`), and click create. + + + The `https://` in the URL is essential to maintain the integrity of the URL. + + + {'Configure + + ### Add Webhook Secret to Vercel Environment Variables + + After creation, you can find the secret of this Webhook in the bottom right corner: + + {'View + + The environment variable corresponding to this secret is `CLERK_WEBHOOK_SECRET`: + + ```shell + CLERK_WEBHOOK_SECRET=whsec_xxxxxxxxxxxxxxxxxxxxxx + ``` + + Add it to Vercel's environment variables: + + {'Add By completing these steps, you have successfully configured the authentication service. Next, we will configure the S3 storage service. 
@@ -135,27 +184,27 @@ In the server-side database, we need to configure the S3 storage service to stor The interface of Cloudflare R2 is shown below: - {'Cloudflare + {'Cloudflare When creating a storage bucket, specify its name and then click create. - {'Create + {'Create ### Obtain Environment Variables for the Bucket In the settings of the R2 storage bucket, you can view the bucket configuration information: - {'View + {'View The corresponding environment variables are: ```shell # Storage bucket name - S3_BUCKET=lobechat + S3_BUCKET=LobeHub # Storage bucket request endpoint (note that the path in this link includes the bucket name, which must be removed, or use the link provided on the S3 API token application page) S3_ENDPOINT=https://0b33a03b5c993fd2f453379dc36558e5.r2.cloudflarestorage.com # Public access domain for the storage bucket - S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com + S3_PUBLIC_DOMAIN=https://s3-for-LobeHub.your-domain.com ``` @@ -164,21 +213,21 @@ In the server-side database, we need to configure the S3 storage service to stor ### Obtain S3 Key Environment Variables - You need to obtain the access key for S3 so that the LobeChat server has permission to access the S3 storage service. In R2, you can configure the access key in the account details: + You need to obtain the access key for S3 so that the LobeHub server has permission to access the S3 storage service. In R2, you can configure the access key in the account details: - {'View + {'View Click the button in the upper right corner to create an API token and enter the create API Token page. - {'Create + {'Create Since our server-side database needs to read and write to the S3 storage service, the permission needs to be set to `Object Read and Write`, then click create. - {'Configure + {'Configure After creation, you can see the corresponding S3 API token. 
- {'Copy + {'Copy The corresponding environment variables are: @@ -201,7 +250,7 @@ In the server-side database, we need to configure the S3 storage service to stor S3_SECRET_ACCESS_KEY=55af75d8eb6b99f189f6a35f855336ea62cd9c4751a5cf4337c53c1d3f497ac2 # Bucket name - S3_BUCKET=lobechat + S3_BUCKET=LobeHub # Bucket request endpoint S3_ENDPOINT=https://0b33a03b5c993fd2f453379dc36558e5.r2.cloudflarestorage.com # Public domain for bucket access @@ -213,7 +262,7 @@ In the server-side database, we need to configure the S3 storage service to stor Then, insert the above environment variables into Vercel's environment variables: - {'Adding + {'Adding ### Configuring Cross-Origin Resource Sharing (CORS) @@ -221,11 +270,11 @@ In the server-side database, we need to configure the S3 storage service to stor In R2, you can find the CORS configuration in the bucket settings: - {'Cloudflare + {'Cloudflare Add a CORS rule to allow requests from your domain (in this case, `https://your-project.vercel.app`): - {'Configuring + {'Configuring Example configuration: @@ -244,22 +293,22 @@ In the server-side database, we need to configure the S3 storage service to stor ## Four, Deployment and Verification -After completing the steps above, the configuration of the server-side database should be done. Next, we can deploy LobeChat to Vercel and then visit your Vercel link to verify if the server-side database is working correctly. +After completing the steps above, the configuration of the server-side database should be done. Next, we can deploy LobeHub to Vercel and then visit your Vercel link to verify if the server-side database is working correctly. ### Redeploy the latest commit After configuring the environment variables, you need to redeploy the latest commit and wait for the deployment to complete. 
- {'Redeploy + {'Redeploy ### Check if the features are working properly If you click on the login button in the top left corner and the login popup appears normally, then you have successfully configured it. Enjoy using it\~ - {'User + {'User - {'Login + {'Login ## Appendix @@ -285,11 +334,11 @@ S3_ACCESS_KEY_ID=9998d6757e276cf9f1edbd325b7083a6 S3_SECRET_ACCESS_KEY=55af75d8eb6b99f189f6a35f855336ea62cd9c4751a5cf4337c53c1d3f497ac2 # Bucket name -S3_BUCKET=lobechat +S3_BUCKET=LobeHub # Bucket request endpoint S3_ENDPOINT=https://0b33a03b5c993fd2f453379dc36558e5.r2.cloudflarestorage.com # Public access domain for the bucket -S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com +S3_PUBLIC_DOMAIN=https://s3-for-LobeHub.your-domain.com # Bucket region, such as us-west-1, generally not needed to add, but some service providers may require configuration # S3_REGION=us-west-1 ``` diff --git a/docs/self-hosting/server-database/vercel.zh-CN.mdx b/docs/self-hosting/server-database/vercel.zh-CN.mdx index b5d74a3c66..455172fdaf 100644 --- a/docs/self-hosting/server-database/vercel.zh-CN.mdx +++ b/docs/self-hosting/server-database/vercel.zh-CN.mdx @@ -1,6 +1,6 @@ --- -title: 在 Vercel 上部署 LobeChat 的服务端数据库版本 -description: 本文详细介绍如何在 Vercel 中部署服务端数据库版 LobeChat,包括数据库配置、身份验证服务配置和 S3 存储服务的设置步骤。 +title: 在 Vercel 上部署 LobeHub 的服务端数据库版本 +description: 本文详细介绍如何在 Vercel 中部署服务端数据库版 LobeHub,包括数据库配置、身份验证服务配置和 S3 存储服务的设置步骤。 tags: - 服务端数据库 - Postgres @@ -14,7 +14,7 @@ tags: # 在 Vercel 上部署服务端数据库版 -本文将详细介绍如何在 Vercel 中部署服务端数据库版 LobeChat,包括: 1)数据库配置;2)身份验证服务配置;3) S3 存储服务的设置步骤。 +本文将详细介绍如何在 Vercel 中部署服务端数据库版 LobeHub,包括: 1)数据库配置;2)身份验证服务配置;3) S3 存储服务的设置步骤。 进行后续操作前,请务必确认以下事项: @@ -54,7 +54,7 @@ tags: 在 Vercel 中填写的示例如下: - {'添加 + {'添加 @@ -70,7 +70,7 @@ tags: 在 Vercel 中填写的示例如下: - {'添加 + {'添加 @@ -89,7 +89,7 @@ tags: ### 添加 `APP_URL` 环境变量 - 该部分最后需要添加 `APP_URL` 环境变量,用于指定 LobeChat 应用的 URL 地址。 + 该部分最后需要添加 `APP_URL` 环境变量,用于指定 LobeHub 应用的 URL 地址。 ## 二、 配置身份验证服务 @@ -114,6 +114,53 @@ tags: 如需 SSO 
登录、魔法链接登录、邮箱验证等高级功能,请参阅 [身份验证服务](/zh/docs/self-hosting/advanced/auth)。 + + ### 在 Vercel 中添加公、私钥环境变量 + + 在 Vercel 的部署环境变量中,添加 `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY` 和 `CLERK_SECRET_KEY` 环境变量。你可以在菜单中点击「API Keys」,然后复制对应的值填入 Vercel 的环境变量中。 + + {'在 + + 此步骤所需的环境变量如下: + + ```shell + NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=pk_live_xxxxxxxxxxx + CLERK_SECRET_KEY=sk_live_xxxxxxxxxxxxxxxxxxxxxx + ``` + + 添加上述变量到 Vercel 中: + + {'在 + + ### 在 Clerk 中创建并配置 Webhook + + 由于我们让 Clerk 完全接管用户鉴权与管理,因此我们需要在 Clerk 用户生命周期变更时(创建、更新、删除)中通知我们的应用并存储落库。我们通过 Clerk 提供的 Webhook 来实现这一诉求。 + + 我们需要在 Clerk 的 Webhooks 中添加一个端点(Endpoint),告诉 Clerk 当用户发生变更时,向这个端点发送通知。 + + {'Clerk + + 在 endpoint 中填写你的 Vercel 项目的 URL,如 `https://your-project.vercel.app/api/webhooks/clerk`。然后在订阅事件(Subscribe to events)中,勾选 user 的三个事件(`user.created` 、`user.deleted`、`user.updated`),然后点击创建。 + + URL 的`https://`不可缺失,须保持 URL 的完整性 + + {'添加 + + ### 将 Webhook 秘钥添加到 Vercel 环境变量 + + 创建完毕后,可以在右下角找到该 Webhook 的秘钥: + + {'查看 + + 这个秘钥所对应的环境变量名为 `CLERK_WEBHOOK_SECRET`: + + ```shell + CLERK_WEBHOOK_SECRET=whsec_xxxxxxxxxxxxxxxxxxxxxx + ``` + + 将其添加到 Vercel 的环境变量中: + + {'在 这样,你已经成功配置了身份验证服务。接下来我们将配置 S3 存储服务。 @@ -134,48 +181,48 @@ tags: 下图是 Cloudflare R2 的界面: - {'Cloudflare + {'Cloudflare 创建存储桶时将指定其名称,然后点击创建。 - {'R2 + {'R2 ### 获取存储桶相关环境变量 在 R2 存储桶的设置中,可以看到桶配置的信息: - {'查看存储桶的相关信息'} + {'查看存储桶的相关信息'} 其对应的环境变量为: ```shell # 存储桶的名称 - S3_BUCKET=lobechat + S3_BUCKET=LobeHub # 存储桶的请求端点(注意此处链接的路径带存储桶名称,必须删除该路径,或使用申请 S3 API token 页面所提供的链接) S3_ENDPOINT=https://0b33a03b5c993fd2f453379dc36558e5.r2.cloudflarestorage.com # 存储桶对外的访问域名 - S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com + S3_PUBLIC_DOMAIN=https://s3-for-LobeHub.your-domain.com ``` `S3_ENDPOINT`必须删除其路径,否则会无法访问所上传文件 ### 获取 S3 密钥环境变量 - 你需要获取 S3 的访问密钥,以便 LobeChat 的服务端有权限访问 S3 存储服务。在 R2 中,你可以在账户详情中配置访问密钥: + 你需要获取 S3 的访问密钥,以便 LobeHub 的服务端有权限访问 S3 存储服务。在 R2 中,你可以在账户详情中配置访问密钥: - {'查看存储桶的访问秘钥'} + {'查看存储桶的访问秘钥'} 点击右上角按钮创建 API token,进入创建 API Token 页面 - {'创建对应 + {'创建对应 鉴于我们的服务端数据库需要读写 S3 
存储服务,因此权限需要选择`对象读与写`,然后点击创建。 - {'配置 + {'配置 创建完成后,就可以看到对应的 S3 API token - {'复制 + {'复制 其对应的环境变量为: @@ -196,7 +243,7 @@ tags: S3_SECRET_ACCESS_KEY=55af75d8eb6b99f189f6a35f855336ea62cd9c4751a5cf4337c53c1d3f497ac2 # 存储桶的名称 - S3_BUCKET=lobechat + S3_BUCKET=LobeHub # 存储桶的请求端点 S3_ENDPOINT=https://0b33a03b5c993fd2f453379dc36558e5.r2.cloudflarestorage.com # 存储桶对外的访问域名 @@ -208,7 +255,7 @@ tags: 然后将上述环境变量填入 Vercel 的环境变量中: - {'在 + {'在 ### 配置跨域 @@ -216,11 +263,11 @@ tags: 在 R2 中,你可以在存储桶的设置中找到跨域配置: - {'Cloudflare + {'Cloudflare 添加跨域规则,允许你的域名(在上文是 `https://your-project.vercel.app`)来源的请求: - {'配置允许你的站点域名'} + {'配置允许你的站点域名'} 示例配置如下: @@ -239,22 +286,22 @@ tags: ## 四、部署并验证 -通过上述步骤之后,我们应该就完成了服务端数据库的配置。接下来我们可以将 LobeChat 部署到 Vercel 上,然后访问你的 Vercel 链接,验证服务端数据库是否正常工作。 +通过上述步骤之后,我们应该就完成了服务端数据库的配置。接下来我们可以将 LobeHub 部署到 Vercel 上,然后访问你的 Vercel 链接,验证服务端数据库是否正常工作。 ### 重新部署最新的 commit 配置好环境变量后,你需要重新部署最新的 commit,并等待部署完成。 - {'重新部署最新的 + {'重新部署最新的 ### 检查功能是否正常 如果你点击左上角登录,可以正常显示登录弹窗,那么说明你已经配置成功了,尽情享用吧~ - {'用户登录弹窗'} + {'用户登录弹窗'} - {'登录成功状态'} + {'登录成功状态'} ## 附录 @@ -280,11 +327,11 @@ S3_ACCESS_KEY_ID=9998d6757e276cf9f1edbd325b7083a6 S3_SECRET_ACCESS_KEY=55af75d8eb6b99f189f6a35f855336ea62cd9c4751a5cf4337c53c1d3f497ac2 # 存储桶的名称 -S3_BUCKET=lobechat +S3_BUCKET=LobeHub # 存储桶的请求端点 S3_ENDPOINT=https://0b33a03b5c993fd2f453379dc36558e5.r2.cloudflarestorage.com # 存储桶对外的访问域名 -S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com +S3_PUBLIC_DOMAIN=https://s3-for-LobeHub.your-domain.com # 桶的区域,如 us-west-1,一般来说不需要添加,但某些服务商则需要配置 # S3_REGION=us-west-1 ``` diff --git a/docs/self-hosting/server-database/zeabur.mdx b/docs/self-hosting/server-database/zeabur.mdx index 5f9dc5503c..99e52fb37f 100644 --- a/docs/self-hosting/server-database/zeabur.mdx +++ b/docs/self-hosting/server-database/zeabur.mdx @@ -1,21 +1,21 @@ --- -title: Deploying LobeChat Database on Zeabur +title: Deploying LobeHub Database on Zeabur description: >- - Learn how to deploy LobeChat on Zeabur with ease. 
Follow the provided steps to + Learn how to deploy LobeHub on Zeabur with ease. Follow the provided steps to set up your chat application seamlessly. tags: - - Deploy LobeChat + - Deploy LobeHub - Zeabur Deployment - OpenAI API Key - Custom Domain Binding --- -# Deploying LobeChat Database on Zeabur +# Deploying LobeHub Database on Zeabur This article assumes that you are familiar with the basic principles and processes of deploying - the LobeChat server database version, so it only includes content related to core environment - variable configuration. If you are not familiar with the deployment principles of the LobeChat + the LobeHub server database version, so it only includes content related to core environment + variable configuration. If you are not familiar with the deployment principles of the LobeHub server database version, please refer to [Deploying Server Database](/docs/self-hosting/server-database) first. @@ -25,16 +25,16 @@ The template on Zeabur includes 4 services: - Logto for authrization. - PostgreSQL with Vector plugin for data storage and indexing. - MinIO for image storage. -- LobeChat database version. +- LobeHub database version. ## Deploying on Zeabur -Here is the process for deploying the LobeChat server database version on Zeabur: +Here is the process for deploying the LobeHub server database version on Zeabur: ### Go to the template page on Zeabur - Go to the [LobeChat Database template page](https://zeabur.com/templates/RRSPSD) on Zeabur and click on the "Deploy" button. + Go to the [LobeHub Database template page](https://zeabur.com/templates/RRSPSD) on Zeabur and click on the "Deploy" button. ### Fill in the required environment variables @@ -44,7 +44,7 @@ Here is the process for deploying the LobeChat server database version on Zeabur - OpenAI API key: Your OpenAI API key to get responses from OpenAI. - - LobeChat Domain: A free subdomain with `.zeabur.app` suffix. + - LobeHub Domain: A free subdomain with `.zeabur.app` suffix. 
- MinIO Public Domain: A free subdomain with `.zeabur.app` suffix for yout MinIO web port to enable public access for the uploaded files. @@ -54,7 +54,7 @@ Here is the process for deploying the LobeChat server database version on Zeabur ### Select a region and deploy - After you fill all the required environment variables, select a region where you want to deploy your LobeChat Database and click on the "Deploy" button. + After you fill all the required environment variables, select a region where you want to deploy your LobeHub Database and click on the "Deploy" button. You will see another modal pop-up where you can see the deployment progress. @@ -64,13 +64,13 @@ Here is the process for deploying the LobeChat server database version on Zeabur Access your Logto console with the console domain you just binded, and then create a `Next.js 14(App router)` application to get the client ID and client secret, and fill in the cors and callback URLs. You can check [this document](../advanced/auth.mdx) for a more detailed guide. - Fill in those variables into your LobeChat service on Zeabur, here is a more detailed guide for [editing environment variables on Zeabur](https://zeabur.com/docs/deploy/variables). + Fill in those variables into your LobeHub service on Zeabur, here is a more detailed guide for [editing environment variables on Zeabur](https://zeabur.com/docs/deploy/variables). For detailed configuration of Logto, refer to [this document](/docs/self-hosting/advanced/auth/next-auth/logto). - ### Access your LobeChat Instance + ### Access your LobeHub Instance - Press on the `LobeChat-Database` and you can see the public domain you just created, click on it to access your LobeChat Database. + Press on the `LobeHub-Database` and you can see the public domain you just created, click on it to access your LobeHub Database. 
You can also bind a custom domain for your services if you want, here is a guide on how to [bind a custom domain on Zeabur](https://zeabur.com/docs/deploy/domain-binding). diff --git a/docs/self-hosting/server-database/zeabur.zh-CN.mdx b/docs/self-hosting/server-database/zeabur.zh-CN.mdx index a1d7fab2ae..16561609de 100644 --- a/docs/self-hosting/server-database/zeabur.zh-CN.mdx +++ b/docs/self-hosting/server-database/zeabur.zh-CN.mdx @@ -1,19 +1,19 @@ --- -title: 在 Zeabur 上部署 LobeChat -description: 按照指南准备 OpenAI API Key 并点击按钮进行部署。在部署完成后,即可开始使用 LobeChat 并选择是否绑定自定义域名。 +title: 在 Zeabur 上部署 LobeHub +description: 按照指南准备 OpenAI API Key 并点击按钮进行部署。在部署完成后,即可开始使用 LobeHub 并选择是否绑定自定义域名。 tags: - Zeabur - - LobeChat + - LobeHub - OpenAI API Key - 部署流程 - 自定义域名 --- -# 使用 Zeabur 部署 LobeChat 数据库版 +# 使用 Zeabur 部署 LobeHub 数据库版 - 本文假设你已经熟悉 LobeChat - 服务器数据库版的部署基本原理和流程,因此只包含与核心环境变量配置相关的内容。如果你对 LobeChat + 本文假设你已经熟悉 LobeHub + 服务器数据库版的部署基本原理和流程,因此只包含与核心环境变量配置相关的内容。如果你对 LobeHub 服务器数据库版的部署原理不熟悉,请先参考[部署服务器数据库](/zh/docs/self-hosting/server-database)。 @@ -22,16 +22,16 @@ tags: - Logto 提供身份校验 - 带有 Vector 插件的 PostgreSQL 来做数据存储和向量化 - MinIO 作为对象存储 -- LobeChat Database 的实例 +- LobeHub Database 的实例 ## 在 Zeabur 上部署 -这里是在 Zeabur 上部署 LobeChat 服务器数据库版的流程: +这里是在 Zeabur 上部署 LobeHub 服务器数据库版的流程: ### 前往 Zeabur 上的模板页面 - 前往 [Zeabur 上的 LobeChat 数据库模板页面](https://zeabur.com/templates/RRSPSD) 并点击 "Deploy" 按钮。 + 前往 [Zeabur 上的 LobeHub 数据库模板页面](https://zeabur.com/templates/RRSPSD) 并点击 "Deploy" 按钮。 ### 填写必要的环境变量 @@ -40,14 +40,14 @@ tags: 以下是你需要填写的环境变量: - OpenAI API key: 你的 OpenAI API key 用于获取模型的访问权限。 - - LobeChat Domain: 一个免费的 `.zeabur.app` 后缀的域名。 + - LobeHub Domain: 一个免费的 `.zeabur.app` 后缀的域名。 - MinIO Public Domain: 一个免费的 `.zeabur.app` 后缀的域名为了暴露 MinIO 服务以公开访问资源。 - Logto Console Domain: 一个免费的 `.zeabur.app` 后缀的域名来访问 Logto 的控制台。 - Logto API Domain: 一个免费的 `.zeabur.app` 后缀的域名来访问 Logto 的 API。 ### 选择一个区域并部署 - 在你填写完所有必要的环境变量后,选择一个你想要部署 LobeChat 数据库的区域并点击 “部署” 按钮。 + 在你填写完所有必要的环境变量后,选择一个你想要部署 LobeHub 数据库的区域并点击 “部署” 按钮。 
你会看到另一个模态弹窗,你可以在这里看到部署的进度。 @@ -55,13 +55,13 @@ tags: 当部署完成后,你会被自动导航到你在 Zeabur 控制台上刚刚创建的项目。你需要再进一步配置你的 Logto 服务。 - 使用你刚绑定的域名来访问你的 Logto 控制台,创建一个新项目以获得对应的客户端 ID 与密钥,将它们填入你的 LobeChat 服务的变量中。关于如何填入变量,可以参照 [Zeabur 的官方文档](https://zeabur.com/docs/deploy/variables)。 + 使用你刚绑定的域名来访问你的 Logto 控制台,创建一个新项目以获得对应的客户端 ID 与密钥,将它们填入你的 LobeHub 服务的变量中。关于如何填入变量,可以参照 [Zeabur 的官方文档](https://zeabur.com/docs/deploy/variables)。 Logto 的详细配置可以参考[这篇文档](/zh/docs/self-hosting/advanced/auth/next-auth/logto)。 - ### 访问你的 LobeChat + ### 访问你的 LobeHub - 按下 `LobeChat-Database` 你会看到你刚刚创建的公共域名,点击它以访问你的 LobeChat 数据库。 + 按下 `LobeHub-Database` 你会看到你刚刚创建的公共域名,点击它以访问你的 LobeHub 数据库。 你可以选择绑定一个自定义域名,这里有一个关于如何在 Zeabur 上[绑定自定义域名](https://zeabur.com/docs/deploy/domain-binding)的指南。 diff --git a/docs/self-hosting/start.mdx b/docs/self-hosting/start.mdx index 4f5aa21e8a..7d2305f731 100644 --- a/docs/self-hosting/start.mdx +++ b/docs/self-hosting/start.mdx @@ -1,8 +1,8 @@ --- -title: Build Your Own LobeChat - Choose Your Deployment Platform +title: Build Your Own LobeHub - Choose Your Deployment Platform description: >- Explore multiple deployment platforms like Vercel, Docker, Docker Compose, and - more to deploy LobeChat. Choose the platform that best suits your needs. + more to deploy LobeHub. Choose the platform that best suits your needs. tags: - Lobe Chat - Deployment Platform @@ -14,13 +14,13 @@ tags: # Build Your Own Lobe Chat -LobeChat supports various deployment platforms, including Vercel, Docker, and Docker Compose. You can choose a deployment platform that suits you to build your own Lobe Chat. +LobeHub supports various deployment platforms, including Vercel, Docker, and Docker Compose. You can choose a deployment platform that suits you to build your own Lobe Chat. ## Quick Deployment -For users who are new to LobeChat, we recommend using the client-side database mode for quick deployment. 
The advantage of this mode is that deployment can be quickly completed with just one command/button, making it easy for you to quickly get started and experience LobeChat. +For users who are new to LobeHub, we recommend using the client-side database mode for quick deployment. The advantage of this mode is that deployment can be quickly completed with just one command/button, making it easy for you to quickly get started and experience LobeHub. -You can follow the guide below for quick deployment of LobeChat: +You can follow the guide below for quick deployment of LobeHub: @@ -32,7 +32,7 @@ You can follow the guide below for quick deployment of LobeChat: ## Advanced Mode: Server-Side Database -For users who are already familiar with LobeChat or need cross-device synchronization, you can deploy a version with a server-side database to access a more complete and powerful LobeChat. +For users who are already familiar with LobeHub or need cross-device synchronization, you can deploy a version with a server-side database to access a more complete and powerful LobeHub. 
diff --git a/docs/self-hosting/start.zh-CN.mdx b/docs/self-hosting/start.zh-CN.mdx index bad58495d9..beb6ea97b5 100644 --- a/docs/self-hosting/start.zh-CN.mdx +++ b/docs/self-hosting/start.zh-CN.mdx @@ -1,5 +1,5 @@ --- -title: 构建属于自己的 LobeChat - 自选部署平台 +title: 构建属于自己的 LobeHub - 自选部署平台 description: >- 选择适合自己的部署平台,构建个性化的 Lobe Chat。支持 Docker、Docker Compose、Netlify、Railway、Repocloud、Sealos、Vercel 和 Zeabur 部署。 @@ -18,13 +18,13 @@ tags: # 构建属于自己的 Lobe Chat -LobeChat 支持多种部署平台,包括 Vercel、Docker、 Docker Compose 、阿里云计算巢 和腾讯轻量云 等,你可以选择适合自己的部署平台进行部署,构建属于自己的 Lobe Chat。 +LobeHub 支持多种部署平台,包括 Vercel、Docker、 Docker Compose 、阿里云计算巢 和腾讯轻量云 等,你可以选择适合自己的部署平台进行部署,构建属于自己的 Lobe Chat。 ## 快速部署 -对于第一次了解 LobeChat 的用户,我们推荐使用客户端数据库的模式快速部署,该模式的优势是一行指令 / 一个按钮即可快捷完成部署,便于你快速上手与体验 LobeChat。 +对于第一次了解 LobeHub 的用户,我们推荐使用客户端数据库的模式快速部署,该模式的优势是一行指令 / 一个按钮即可快捷完成部署,便于你快速上手与体验 LobeHub。 -你可以通过以下指南快速部署 LobeChat: +你可以通过以下指南快速部署 LobeHub: @@ -34,7 +34,7 @@ LobeChat 支持多种部署平台,包括 Vercel、Docker、 Docker Compose 、 ## 进阶模式:服务端数据库 -针对已经了解 LobeChat 的用户,或需要多端同步的用户,可以自行部署带有服务端数据库的版本,进而获得更完整、功能更强大的 LobeChat。 +针对已经了解 LobeHub 的用户,或需要多端同步的用户,可以自行部署带有服务端数据库的版本,进而获得更完整、功能更强大的 LobeHub。 diff --git a/docs/usage/agent/agent-team.mdx b/docs/usage/agent/agent-team.mdx new file mode 100644 index 0000000000..e94b21d17a --- /dev/null +++ b/docs/usage/agent/agent-team.mdx @@ -0,0 +1,66 @@ +--- +title: Agent Teams +description: >- + Simple centralized configuration for prompts, model selection, knowledge + bases, plugins, and more. +tags: + - LobeHub + - LobeHub + - AI Assistant + - Assistant Organization + - Group Settings + - Assistant Search + - Assistant Pinning +--- + +# Agent Teams + +Sometimes, one assistant's perspective just isn't enough. Complex problems require multifaceted thinking, creative projects thrive on diverse expertise, and learning discussions benefit from multiple viewpoints. 
Agent Group Chat brings together multiple specialized assistants to collaborate just like in a real group chat — a translation assistant, a coding assistant, and a product manager assistant sitting around the table, each contributing their strengths to solve your problem. You're not just getting an answer — you're engaging in a conversation. Here, different perspectives collide, expertise complements one another, and the insights generated through AI collaboration go far beyond what a single assistant can offer. + +Agent Group Chat is a collaborative space for multiple specialized assistants. You pose a question or task, and each assistant offers insights from their area of expertise. They can discuss, supplement, and even debate with one another. + +## Limitations of a Single Assistant + +- Can only analyze problems from one perspective +- Expertise is limited to its predefined role +- Lacks the richness of diverse viewpoints + +## Advantages of Agent Group Chat + +- Multiple assistants contribute their strengths and collaborate +- Diverse professional backgrounds lead to comprehensive solutions +- Discussions among assistants spark deeper insights +- A built-in moderator ensures orderly and focused conversations + +## About the Moderator + +Every Agent Group Chat includes a built-in moderator responsible for: + +- Understanding your needs and assigning discussion tasks +- Coordinating the speaking order of assistants +- Summarizing the discussion and extracting key conclusions +- Keeping the conversation organized and on-topic + +## Creating an Agent Group Chat + +Click "Create Group" in the left sidebar to get started. + +![clipboard-1768907980491-9cc0669fc3a38.png](/blog/assets8be3a46c8f9c5d3b61bc541f44b7f245.webp) + +When creating a group chat, you can use existing templates or assemble your own team of AI assistants. You can also choose whether to include a moderator and select the model for the moderator. 
+ +![clipboard-1768908081787-ed9eb1cb78bdb.png](/blog/assetsab009b79dd794f02aec24b7607f342e8.webp) + +## Configuring an Agent Group Chat + +In the group chat session, use the left sidebar to select an assistant. You can easily switch their model or remove them from the group. + +![clipboard-1768908121691-b3517bf882633.png](/blog/assetsd3cae44cba0d3f57df6440b46246e5e7.webp) + +Also in the left sidebar, click the "Add Member" button to bring additional assistants into the group chat. + +![clipboard-1768908209289-9d3ecff50142f.png](/blog/assets75a5cf08b3e432d2477899d30acc9d47.webp) + +Go to "Group Profile" in the left sidebar to edit the group prompt, add plugins, or change the moderator model. You can also use the Agent Builder on the right panel for intelligent group creation. Agent Builder is LobeHub’s built-in assistant — simply chat with it, describe your needs, and it will automatically generate a complete group chat configuration, including group settings, system prompts, and plugin setup. 
+ +![clipboard-1768908230723-3fce0ae5baf9b.png](/blog/assets8e9b164fa30c795850ce8fa8ef7e7c24.webp) diff --git a/docs/usage/agent/agent-team.zh-CN.mdx b/docs/usage/agent/agent-team.zh-CN.mdx new file mode 100644 index 0000000000..98cd667359 --- /dev/null +++ b/docs/usage/agent/agent-team.zh-CN.mdx @@ -0,0 +1,64 @@ +--- +title: Agent 团队 +description: 简单的集中配置,例如提示词,选择模型,知识库,插件等。 +tags: + - LobeHub + - LobeHub + - AI 助手 + - 助手组织 + - 分组设置 + - 助手搜索 + - 助手固定 +--- + +# Agent 团队 + +有时候,一个助理的视角是不够的。复杂问题需要多角度思考,创意项目需要不同专业背景的碰撞,学习讨论需要多方辩论。Agent 群聊让多个专业助理聚在一起,像真实群聊一样协作 —— 翻译助理、编程助理、产品经理助理围坐一起,各自发挥专长,共同解决你的问题。你不再是获得一个答案,而是参与一场对话。不同观点在这里碰撞,专业知识在这里互补,AI 助理协同工作时产生的洞察,远超任何单一助理所能提供的。 + +Agent 群聊是多个专业助理的协作空间。你提出一个问题或任务,不同助理从各自的专业角度给出见解,它们之间可以互相讨论、补充、甚至辩论。 + +## 单个助理的局限 + +- 只能从一个角度分析问题 +- 专业领域受限于预设角色 +- 缺少多元视角的碰撞 + +## Agent 群聊的优势 + +- 多个助理各取所长,协同工作 +- 不同专业背景带来全面的解决方案 +- 助理之间的讨论激发更深入的洞察 +- 内置主持人确保讨论有序进行 + +## 关于主持人 + +每个 Agent 群聊都有一个内置的主持人。它负责: + +- 理解你的需求,分配讨论任务 +- 协调各个助理的发言顺序 +- 总结讨论结果,提炼关键结论 +- 确保对话围绕主题有序展开 + +## 创建 Agent 群聊 + +在左侧边栏选择「创建群组」即可进入创建。 + +![clipboard-1768907980491-9cc0669fc3a38.png](/blog/assets8be3a46c8f9c5d3b61bc541f44b7f245.webp) + +创建群聊时,你可以使用现有模版,也可以选择自己的 AI 助理组建群聊。同时,你可以选择是否使用主持人,并为主持人选择模型。 + +![clipboard-1768908081787-ed9eb1cb78bdb.png](/blog/assetsab009b79dd794f02aec24b7607f342e8.webp) + +## 配置 Agent 群聊 + +在群聊会话左侧边栏,选中助理,可以便捷更换助理的模型和移除助理。 + +![clipboard-1768908121691-b3517bf882633.png](/blog/assetsd3cae44cba0d3f57df6440b46246e5e7.webp) + +同样在左侧边栏,点击添加成员按钮可以添加需要的助理到群聊中。 + +![clipboard-1768908209289-9d3ecff50142f.png](/blog/assets75a5cf08b3e432d2477899d30acc9d47.webp) + +在左侧边栏进入「群聊档案」,你可以编写群聊提示词,为群聊添加插件,更换主持人模型。你也可以使用右侧面板的 Agent Builder 进行智能创建。Agent Builder 是 LobeHub 的内置助理,只需与 Agent Builder 对话,描述你的需求,它就能理解并自动生成完整的群聊配置 —— 包括群聊设定、系统提示词、插件配置。 + +![clipboard-1768908230723-3fce0ae5baf9b.png](/blog/assets8e9b164fa30c795850ce8fa8ef7e7c24.webp) diff --git a/docs/usage/agent/gtd.mdx b/docs/usage/agent/gtd.mdx new file mode 100644 index 0000000000..05e84f9a1b --- /dev/null 
+++ b/docs/usage/agent/gtd.mdx @@ -0,0 +1,35 @@ +--- +title: GTD Tools +description: >- + Learn how to use the GTD Tools plugin to manage tasks in conversation, + including enabling it, creating tasks, and completing them. +tags: + - GTD Tools + - Task Management + - Create Tasks + - Complete Tasks +--- + +# GTD Tools + +GTD Tools is a built-in plugin in LobeHub that deeply integrates the classic GTD (Getting Things Done) time management methodology into your conversational experience. Once enabled, your assistant transforms into a professional task management expert, helping you offload mental clutter and focus on what truly matters—your creativity and deep thinking. With GTD Tools, you can manage your schedule directly through natural language in conversations. Whether it's a sudden burst of inspiration, household chores, or serious work plans, your assistant can accurately record and track your progress. + +## Enabling GTD Tools + +GTD Tools is a built-in plugin in LobeHub and must be enabled for your assistant before use. + +### Enable via Assistant Profile + +Go to the assistant profile page, click on "+ Integrate Plugin," and check the "GTD Tools" plugin to activate it. + +### Enable in Conversation + +In the conversation window, click the plugin icon below the chat box and check the "GTD Tools" plugin to enable it. + +### Creating Tasks + +You can simply send your plans in the conversation, and the assistant will automatically recognize and confirm the task. + +### Completing Tasks + +You can update tasks through conversational commands, and the assistant will handle the updates automatically. 
diff --git a/docs/usage/agent/gtd.zh-CN.mdx b/docs/usage/agent/gtd.zh-CN.mdx new file mode 100644 index 0000000000..e04eefe3b2 --- /dev/null +++ b/docs/usage/agent/gtd.zh-CN.mdx @@ -0,0 +1,33 @@ +--- +title: GTD 工具 +description: 了解如何使用 GTD 工具在会话中管理任务,包括启用插件、创建与完成任务等。 +tags: + - GTD 工具 + - 任务管理 + - 创建任务 + - 完成任务 +--- + +# GTD 工具 + +GTD Tools 是 LobeHub 内置的插件,将经典的 GTD(Getting Things Done)时间管理方法论深度集成到你的对话体验中。开启该插件后,你的助理将化身为专业的任务管理专家,帮助你将大脑从琐碎的杂事中解放出来,专注于当下的创作与思考。通过 GTD Tools,你可以直接在会话中以自然语言管理日程。无论是突如其来的灵感、待办的家务,还是严肃的工作计划,助理都能为你精准记录并追踪进度。 + +## 启用 GTD Tools + +GTD Tools 是 LobeHub 的内置插件,需要为助理启用后才能使用。 + +### 在助理档案中启用 + +进入助理档案页面,点击「+ 集成插件」,勾选「GTD Tools」插件即可开启。 + +### 在会话中启用 + +进入会话页面,点击对话框下方插件图标,勾选「GTD Tools」插件即可。 + +### 创建任务 + +你可以在会话中直接发送你的计划,助理会自动识别并确认记录。 + +### 完成任务 + +你可以在会话中通过对话指令让助理自动更新。 diff --git a/docs/usage/agent/notebook.mdx b/docs/usage/agent/notebook.mdx new file mode 100644 index 0000000000..a16958ec42 --- /dev/null +++ b/docs/usage/agent/notebook.mdx @@ -0,0 +1,65 @@ +--- +title: Notebook +description: >- + Learn how to use the Notebook to save and manage documents within your + conversations, including notes, reports, and research materials. +tags: + - Notebook + - Documents + - Notes + - Topics +--- + +# Notebook + +LobeHub offers a powerful Notebook feature that allows you to save and manage documents directly within your conversations. No more worrying about losing important information—your assistant can help you take notes, save reports, and organize research materials. Everything stays within the current topic and is always accessible. The Notebook breaks the limitations of fleeting conversations by turning valuable insights into structured, manageable documents. From meeting minutes to study notes, research reports to to-do lists, the Notebook helps you build knowledge in a more systematic and lasting way. + +The Notebook serves as a topic-level document storage space. When valuable content arises during a conversation, your assistant can save it to the Notebook, creating a structured document library. 
These documents are linked to the current topic, making it easy to reference and revisit them in future discussions. + +## What You Can Do with the Notebook + +### Save Notes and Reminders + +Your assistant can quickly jot down ideas, to-dos, and flashes of inspiration. Just say "Take a note for me," and the content will be saved to your Notebook. + +### Organize Research Materials + +When your assistant helps you search for information or analyze data, you can save the valuable findings. The next time you continue your research, everything will be right where you left it. + +### Generate Reports and Articles + +Your assistant can help you draft structured reports, analytical documents, or long-form articles, and save them directly to the Notebook. You can view, edit, and expand on them anytime. + +### Manage Document Versions + +You can ask your assistant to update existing documents by adding new content or modifying what's already there. Documents in the Notebook evolve over time, always staying up to date. + +### Enable the Notebook Plugin + +Notebook is a built-in plugin. You’ll need to enable it for your assistant to use document management features. + +### Enable in Assistant Profile + +- Go to the assistant profile page, click "+ Integrate Skills," and check "Notebook" to enable it. + +### Enable in a Conversation + +- In a conversation, click the skill icon below the chat box and check "Notebook" to activate it. + +### Using the Notebook + +You can ask your assistant to read document content: + +- "Show me the notes we saved earlier" +- "What did that report say?" + +To delete a document when it's no longer needed, you can either instruct your assistant to remove it or open the Notebook panel and delete it manually. + +### View and Manage Documents + +All saved documents appear in the Notebook panel on the right side of the topic. Click the document icon in the top-right corner to open the panel. 
You can: + +- Browse the document list to view titles and summaries +- Click a document to view its full content +- Edit documents directly within the panel +- Click "Edit in Drafts" to sync the document with the "Drafts" section in the conversation diff --git a/docs/usage/agent/notebook.zh-CN.mdx b/docs/usage/agent/notebook.zh-CN.mdx new file mode 100644 index 0000000000..6c03ca3677 --- /dev/null +++ b/docs/usage/agent/notebook.zh-CN.mdx @@ -0,0 +1,56 @@ +--- +title: 笔记本 +description: 了解如何使用定时任务,包括创建、编辑、删除等。 +tags: + - 定时任务 + - 创建 + - 编辑 + - 删除 +--- + +# 笔记本 + +LobeHub 支持笔记本功能(Notebook),让你在对话中随时保存和管理文档。不再担心重要内容被遗忘,助理可以帮你记录笔记、保存报告、整理研究资料 —— 所有内容都留存在当前话题中,随时查阅。Notebook 突破了对话内容转瞬即逝的限制,将有价值的信息沉淀为可管理的文档。从会议纪要到学习笔记,从研究报告到待办清单,Notebook 让知识积累更系统、更持久。 + +Notebook 是话题级别的文档存储空间。当对话中产生了值得保留的内容,助理可以将其保存到 Notebook 中,形成结构化的文档库。这些文档与当前话题关联,方便你在后续对话中查阅和引用。 + +## Notebook 能做什么 + +### 保存笔记和备忘 + +助理可以帮你快速记录想法、待办事项、灵感片段。你只需说 "帮我记一下",内容就会保存到 Notebook 中。 + +### 整理研究资料 + +当你让助理搜索信息或分析资料时,可以将有价值的结果保存下来。下次继续研究时,这些资料随时可用。 + +### 生成报告和文章 + +助理可以帮你撰写结构化的报告、分析文档、长篇文章,并直接保存到 Notebook。你能随时查看、编辑、补充内容。 + +### 管理文档版本 + +你可以让助理更新已有文档,追加新内容或修改现有内容。文档在 Notebook 中持续演进,保持最新状态。 + +### 启用 Notebook 插件 + +Notebook 是内置插件,需要为助理启用后才能使用文档管理功能。 + +### 在助理档案中启用 + +- 进入助理档案页面,点击「+ 集成技能」,勾选「Notebook」即可开启。 + +### 在会话中启用 + +- 进入会话页面,点击对话框下方技能图标,勾选「Notebook」即可。 + +### 使用 Notebook + +你可以让助理读取文档内容: + +- "看一下之前保存的笔记" +- "那份报告写了什么" 删除文档当文档不再需要时,可以给助理发送指令删除或展开 Notebook 面板手动删除。查看和管理文档所有保存的文档都会显示在话题右侧 Notebook 面板中。右上角点击文稿图标即可呼出面板。你可以: +- 浏览文档列表,查看标题和摘要 +- 点击文档查看完整内容 +- 直接在面板中编辑文档 +- 点击「在文稿中编辑」,将会话内文稿同步到「文稿」板块 diff --git a/docs/usage/agent/sandbox.mdx b/docs/usage/agent/sandbox.mdx new file mode 100644 index 0000000000..36d0b20fb9 --- /dev/null +++ b/docs/usage/agent/sandbox.mdx @@ -0,0 +1,79 @@ +--- +title: Cloud Sandbox +description: >- + Learn how to use scheduled tasks, including creating, editing, and deleting + them. 
+tags: + - Scheduled Tasks + - Create + - Edit + - Delete +--- + +# Cloud Sandbox + +LobeHub supports the Cloud Sandbox feature, enabling AI assistants to execute code and process files in a securely isolated cloud environment. Instead of merely providing code snippets, the assistant can directly run code, generate documents, and create charts — delivering downloadable results that you can iterate on in real time. Cloud Sandbox breaks the boundaries of traditional conversations, extending AI output from suggestions to actual execution. From data analysis to document generation, from code debugging to file conversion, Cloud Sandbox transforms AI into a true execution assistant. + +## Understanding Cloud Sandbox + +Cloud Sandbox is a securely isolated cloud-based execution environment. When you need more than just code snippets and want actual execution results, the assistant will run the code in the Cloud Sandbox and return the output. + +## What Can Cloud Sandbox Do? + +### Execute Code + +The assistant can run Python, JavaScript, and TypeScript code within the sandbox and return the results. You’ll see real execution output, not just code text. + +### Generate Files + +The assistant can create various types of files — PDF documents, Excel spreadsheets, Word documents, images, charts, and more — and provide download links. You can download and use them directly. + +### Process Data + +The assistant can read, analyze, and transform data files. Upload CSV, JSON, or other data formats, and the assistant can help clean, summarize, and visualize the data. + +### Run Commands + +The assistant can execute shell commands to install dependencies, manipulate files, and perform complex operations. + +### Enabling Cloud Sandbox + +Cloud Sandbox is a built-in plugin that must be enabled for the assistant to use its features. 
You can enable the Cloud Sandbox plugin from the "Assistant Profile" page under the plugin section, or directly within a conversation by checking the plugin option in the chat interface. + +### Using Cloud Sandbox + +#### Ask the Assistant to Execute Code + +Simply describe the task you want to accomplish, and the assistant will write and run the code in the Cloud Sandbox: + +- “Write a Python script to calculate the average and standard deviation of this dataset.” +- “Implement a quicksort algorithm in JavaScript and run a test.” +- “Run this code for me and show the output.” + +#### Ask the Assistant to Generate Documents + +Describe the content you need, and the assistant will generate the document and provide a download link: + +- “Generate a PDF report with the analysis of this data.” +- “Convert this content into a Word document.” +- “Create an Excel spreadsheet to organize this information.” + +#### Ask the Assistant to Process Data + +Provide data or files, and the assistant will process and return the results: + +- “Analyze this sales data and create a trend chart.” +- “Convert this JSON file to CSV format.” +- “Clean this dataset by removing duplicates.” + +### Understanding the Cloud Sandbox Environment + +The Cloud Sandbox runs in a securely isolated cloud server, completely separate from your local machine. All operations performed by the assistant in the sandbox will not affect your local file system. + +### Session-Based File Storage + +Files in the Cloud Sandbox are temporary and tied to the current conversation session. If the session ends or remains inactive for a long time, the sandbox files may be cleared. If you need to keep the files, be sure to download them using the export links provided by the assistant. + +### Automatic Export of Results + +When the assistant generates files in the Cloud Sandbox, they are automatically exported with download links. 
No extra steps are needed — you’ll receive documents, charts, data files, and other outputs directly from the code execution. diff --git a/docs/usage/agent/sandbox.zh-CN.mdx b/docs/usage/agent/sandbox.zh-CN.mdx new file mode 100644 index 0000000000..878bd4806f --- /dev/null +++ b/docs/usage/agent/sandbox.zh-CN.mdx @@ -0,0 +1,73 @@ +--- +title: 云沙箱 +description: 了解如何使用定时任务,包括创建、编辑、删除等。 +tags: + - 定时任务 + - 创建 + - 编辑 + - 删除 +--- + +# 云沙箱 + +LobeHub 支持 Cloud Sandbox ,让 AI 助理能在安全隔离的云端环境中执行代码、处理文件。不只是给出代码片段,助理可以直接运行代码、生成文档、创建图表 —— 你能立即获得可下载的成果,实时迭代调整。云沙箱突破了传统对话的限制,将 AI 的输出从建议扩展到执行。从数据分析到文档生成,从代码调试到文件转换,云沙箱让 AI 真正成为你的执行助手。 + +## 理解 Cloud Sandbox + +Cloud Sandbox 是一个安全隔离的云端执行环境。当你需要的不只是代码片段,而是实际运行结果时,助理会在 Cloud Sandbox 中执行代码并返回输出。 + +## Cloud Sandbox 能做什么 + +### 执行代码 + +助理可以运行 Python、JavaScript、TypeScript 代码,在沙盒中执行并返回结果。你能看到真实的运行输出,而不只是代码文本。 + +### 生成文件 + +助理可以创建各种文件 ——PDF 文档、Excel 表格、Word 文档、图片、图表等,并提供下载链接。你能直接下载使用。 + +### 数据处理 + +助理可以读取、分析、转换数据文件。上传 CSV、JSON 等数据,助理能帮你清洗、统计、可视化。 + +### 运行命令 + +助理可以执行 Shell 命令,安装依赖包、处理文件、执行复杂操作。 + +### 启用 Cloud Sandbox + +Cloud Sandbox 是内置插件,需要为助理启用后才能使用云沙箱功能。你可以在「助理档案」页面添加插件处启用 Cloud Sandbox 插件,也可以进入会话,在对话框勾选插件处启用 Cloud Sandbox 插件。 + +### 使用 Cloud Sandbox + +### 让助理执行代码 + +直接描述你想完成的任务,助理会编写代码并在云沙箱中运行: + +- 「帮我写个 Python 脚本,计算这组数据的平均值和标准差」 +- 「用 JavaScript 实现一个快速排序算法,运行测试一下」 +- 「帮我跑一下这段代码,看看输出是什么」 + +### 让助理生成文档 + +描述你需要的文档内容,助理会生成并提供下载: + +- 「帮我生成一份 PDF 报告,包含这些数据的分析结果」 +- 「把这段内容做成 Word 文档」 +- 「创建一个 Excel 表格,整理这些信息」 + +### 让助理处理数据 + +提供数据或文件,助理会处理并返回结果: + +- 「分析这份销售数据,画个趋势图」 +- 「把这个 JSON 转换成 CSV 格式」 +- 「帮我清洗这份数据,去除重复项」 + +### 了解云沙箱环境 + +隔离的云端环境云沙箱运行在安全隔离的云端服务器上,与你的本地电脑完全分离。助理在云沙箱中的所有操作都不会影响你的本地文件系统。 + +### 会话级文件存储 + +云沙箱中的文件是临时的,与当前对话会话绑定。会话结束或长时间不活动后,沙箱中的文件可能会被清理。如果你需要保留文件,请及时下载助理提供的导出链接。自动导出成果当助理在云沙箱中生成文件时,会自动导出并提供下载链接。你无需额外操作,即可获得代码运行产生的文档、图表、数据文件等成果。 diff --git a/docs/usage/agent/scheduled-task.mdx b/docs/usage/agent/scheduled-task.mdx new file mode 100644 index 0000000000..08f67de5ee --- /dev/null +++ 
b/docs/usage/agent/scheduled-task.mdx @@ -0,0 +1,27 @@ +--- +title: Scheduled Tasks +description: >- + Learn how to use scheduled tasks, including how to create, edit, and delete + them. +tags: + - LobeHub + - CronJob + - Scheduled Tasks + - Create + - Edit + - Delete +--- + +# Scheduled Tasks + +Scheduled tasks are jobs that run periodically in the cloud. + +In simple terms, you can configure an Agent to execute tasks based on your prompt at regular intervals—for example, checking social media content and sending notifications on a schedule. + +## Creating a Task + +![Create Task](/blog/assetsafa74c85aafea8a057e6047b0823e280.webp) + +To create a task, simply enter a prompt, name, and schedule. + +## Deleting a Task diff --git a/docs/usage/agent/scheduled-task.zh-CN.mdx b/docs/usage/agent/scheduled-task.zh-CN.mdx new file mode 100644 index 0000000000..9b1fdfc4d1 --- /dev/null +++ b/docs/usage/agent/scheduled-task.zh-CN.mdx @@ -0,0 +1,25 @@ +--- +title: 定时任务 +description: 了解如何使用定时任务,包括创建、编辑、删除等。 +tags: + - LobeHub + - CronJob + - 定时任务 + - 创建 + - 编辑 + - 删除 +--- + +# 定时任务 + +定时任务是定期在云端执行的任务。 + +简单来说,你可以让 Agent 定期根据你的 Prompt 去执行任务,例如定期检查社交媒体内容并发送通知。 + +## 创建任务 + +![创建任务](/blog/assetsafa74c85aafea8a057e6047b0823e280.webp) + +输入提示词、名称和日期即可创建任务。 + +## 删除任务 diff --git a/docs/usage/agent/share.mdx b/docs/usage/agent/share.mdx new file mode 100644 index 0000000000..c5bdbf2963 --- /dev/null +++ b/docs/usage/agent/share.mdx @@ -0,0 +1,92 @@ +--- +title: Share Conversations +description: >- + Learn how to share conversation records using LobeHub's sharing features, + including screenshot sharing and ShareGPT links. Easily share your dialogues + with others. +tags: + - LobeHub + - Share Conversation Records + - Screenshot Sharing + - Conversation Sharing +--- + +# Share Conversation Records + +You can share your current conversation with others by clicking the Share button in the top-right corner of the chat window. 
LobeHub supports two sharing methods: Screenshot Sharing and Shareable Link Generation. + +LobeHub allows you to export and share your conversations in various formats, including screenshots, plain text, PDF, or JSON, making it easy to save and share your dialogues. + +## Screenshot Sharing + +Export your conversation as an image. Available display modes: + +- Wide Screen Mode: Optimized for desktop viewing +- Narrow Screen Mode: Optimized for mobile viewing + +Optional content settings: + +- Include Assistant Role Settings: Display system prompts and configuration for the assistant +- Include Footer: Show information at the bottom of the page +- Image Format Options: JPG / PNG / SVG / WEBP +- Sharing Options: + - Copy Image: Copy to clipboard for direct pasting into other apps + - Download Screenshot: Save the image file locally + +{'Screenshot + +## Text Sharing + +Export your conversation as plain text. Optional content settings: + +- Include Assistant Role Settings: Include system prompts and assistant configuration +- Include Message Roles: Show the sender of each message +- Include User Info: Include user-related information +- Include Plugin Info: Include details of plugin calls +- Sharing Options: + - Copy Text: Copy to clipboard + - Download File: Save as a text file + +![clipboard-1769056077960-cac34bc157a65.png](/blog/assetsa8e173bec038d1d21d413f6fa0ace342.webp) + +## PDF Sharing + +Export your conversation as a PDF document. Optional content settings: + +- Include Assistant Role Settings: Include system prompts and assistant configuration +- Include Message Roles: Show the sender of each message +- Include User Info: Include user-related information +- Include Plugin Info: Include details of plugin calls + +Generation Method: After selecting your options, click the Generate button to create and download the PDF file. + +{'PDF + +## JSON Sharing + +Export your conversation in JSON format, ideal for developers or integration with other systems. 
+ +Available export modes: + +- Default: LobeHub's standard JSON format +- OpenAI Compatible: JSON format compatible with the OpenAI API + +Optional content settings: + +- Include Role Settings: Include assistant role configuration + +Sharing Options: + +- Copy: Copy JSON content to clipboard +- Download File: Save as a JSON file + +## Use Cases + +- Screenshot Sharing: Great for sharing conversations on social media or messaging apps—visually intuitive and easy to read. +- Text Sharing: Ideal for editing or quoting conversations; small file size. +- PDF Sharing: Suitable for formal use cases like work reports, study notes, or archival. +- JSON Sharing: Best for developers—can be imported into other systems or used for data analysis. + +## Shareable Link + +Coming soon... diff --git a/docs/usage/agent/share.zh-CN.mdx b/docs/usage/agent/share.zh-CN.mdx new file mode 100644 index 0000000000..2d229847c8 --- /dev/null +++ b/docs/usage/agent/share.zh-CN.mdx @@ -0,0 +1,60 @@ +--- +title: 分享会话 +description: 了解如何通过 LobeHub 的分享功能分享会话记录,包括截图分享和 ShareGPT 分享方式。通过分享功能,轻松与他人分享您的对话。 +tags: + - LobeHub + - 分享会话记录 + - 截图分享 + - 对话分享 +--- + +# 分享会话记录 + +通过会话窗口右上角的`分享`按钮,您可以将当前会话记录分享给其他人。LobeHub 支持两种分享方式:`截图分享`和 `生成分享链接`。 + +LobeHub 支持将会话内容分享给他人。你可以将对话导出为截图、文本、PDF 或 JSON 格式,方便保存、分享。 + +## 截图分享 + +将会话导出为图片格式。可选显示模式: + +- 宽屏模式:适合电脑屏幕查看 +- 窄屏模式:适合手机屏幕查看 + +可选内容选项: + +- 是否包含助手角色设定:显示助手的系统提示词等设定信息 +- 是否包含页脚:显示页面底部信息 +- 可选图片格式:JPG/PNG/SVG/WEBP +- 可选分享方式:复制图片(复制到剪贴板,可以直接粘贴到其他应用)、下载截图(保存图片文件到本地) + +{'截图分享'} + +## 文本分享 + +将会话导出为纯文本格式。可选内容选项: + +- 是否包含助手角色设定:包含助手的系统提示词等设定 +- 是否包含消息角色:显示每条消息的发送者 +- 是否包含用户信息:包含用户相关信息 +- 是否包含插件信息:包含插件调用的相关信息 +- 可选分享方式:复制文本(复制到剪贴板)、下载文件(保存为文本文件) + +![clipboard-1769056077960-cac34bc157a65.png](/blog/assetsa8e173bec038d1d21d413f6fa0ace342.webp) + +## PDF 分享 + +将会话导出为 PDF 文档。可选内容选项: + +- 是否包含助手角色设定:包含助手的系统提示词等设定 +- 是否包含消息角色:显示每条消息的发送者 +- 是否包含用户信息:包含用户相关信息 +- 是否包含插件信息:包含插件调用的相关信息。生成方式:选择完选项后,点击生成按钮,直接生成 PDF 文件并下载。 + +{'PDF + +## JSON 分享 + +将会话导出为 JSON
格式,适合开发者或需要在其他系统中使用。可选导出模式:默认(LobeHub 的标准 JSON 格式)、OpenAI 兼容(兼容 OpenAI API 格式的 JSON)。可选内容选项:是否包含角色设定(包含助手的角色设定信息)。可选分享方式:复制(复制 JSON 内容到剪贴板)、下载文件(保存为 JSON 文件)。使用场景:截图分享适合在社交媒体、聊天工具中分享对话内容,直观易读;文本分享适合需要编辑或引用对话内容的场景,文件体积小;PDF 分享适合正式场合,如工作报告、学习笔记、存档记录;JSON 分享适合开发者,可以导入到其他系统或进行数据分析。 + +## 分享链接 diff --git a/docs/usage/agent/topic.mdx b/docs/usage/agent/topic.mdx new file mode 100644 index 0000000000..f11ab63902 --- /dev/null +++ b/docs/usage/agent/topic.mdx @@ -0,0 +1,59 @@ +--- +title: Topics +description: >- + Learn how to interact with large language models, including model selection, + file/image uploads, temperature settings, conversation history, and more. +tags: + - LobeHub + - Large Language Model + - LLM + - Model Selection + - File Upload + - Temperature Setting + - History Settings + - Voice Input + - Plugin Settings + - Token Usage + - New Topic + - Send Button +--- + +# Topics + +In LobeHub, each conversation with the assistant is organized into a topic. You can search, rename, favorite, or delete topics to better manage and retrieve your past conversations. + +## Search Topics + +On the conversation page, you can search for previous topics you've discussed with the assistant. Enter the conversation view, click "More" in the topic list, and use the search function to quickly locate the desired conversation. + +![clipboard-1769000274218-d02c4c8024709.png](/blog/assets3cdf933016e6f53bca12b8cedb17061f.webp) + +## Rename Topics + +You can give topics more meaningful names to make them easier to identify. + +- Manual Rename: Locate the topic you want to rename, click "Rename", enter a new name, and save it. +- Smart Rename: Click "Smart Rename" on the topic you want to rename, and the system will automatically generate a suitable name based on the conversation content. + +## Duplicate Topics + +You can create a duplicate of any topic to preserve the original conversation while branching off into a new direction.
Find the topic you want to copy and click "Duplicate". The system will create an identical copy of the topic. This is useful for exploring different discussion paths based on the same starting point. + +## Favorite Topics + +For important or frequently used topics, you can mark them as favorites. Click the favorite icon on the topic you want to save. Favorited topics will appear in the "Favorites" section of the topic list for quick access. + +![clipboard-1769000328858-48f0503640245.png](/blog/assets04d6fae3d9aa3c33697028f1cc9f4706.webp) + +## Delete Topics + +You can delete topics you no longer need. Locate the topic, click "Delete", and confirm the action. The topic will be removed from your list. + +## Topic List + +Topics are organized by time to help you easily find conversations from different periods. + +- Favorites: Displays all your favorited topics. +- Time-Based List: Other topics are automatically grouped by their creation time. Expand a time group to view all topics from that period. + +If you prefer not to use time-based grouping, you can switch to "Ungrouped" view. 
diff --git a/docs/usage/agent/topic.zh-CN.mdx b/docs/usage/agent/topic.zh-CN.mdx new file mode 100644 index 0000000000..70284981aa --- /dev/null +++ b/docs/usage/agent/topic.zh-CN.mdx @@ -0,0 +1,57 @@ +--- +title: 话题 +description: 了解如何使用大型语言模型进行基本交互,包括模型选择、文件/图片上传、温度设置、历史记录设置等。 +tags: + - LobeHub + - 大型语言模型 + - LLM + - 模型选择 + - 文件上传 + - 温度设置 + - 历史记录设置 + - 语音输入 + - 插件设置 + - Token 用量 + - 新建话题 + - 发送按钮 +--- + +# 话题 + +在 LobeHub 中,每次与助手的对话会形成一个话题。你可以搜索、重命名、收藏、删除话题,方便管理和查找历史对话。 + +## 搜索话题 + +在会话页面,你可以搜索与助手聊过的话题。进入会话,点击话题列表的「更多」,通过搜索话题快速找到需要的历史对话。 + +![clipboard-1769000274218-d02c4c8024709.png](/blog/assets3cdf933016e6f53bca12b8cedb17061f.webp) + +## 重命名话题 + +你可以为话题设置更有意义的名称,方便识别。 + +- 手动重命名:找到要重命名的话题后点击「重命名」,输入新的话题名称即可保存。 +- 智能重命名:找到要重命名的话题后点击「智能重命名」,系统会根据对话内容自动生成合适的名称。 + +## 创建话题副本 + +你可以创建某个话题的副本,保留原对话的同时创建新的分支。找到要复制的话题后点击「创建副本」,系统会创建一个完全相同的话题副本。适合在原对话基础上尝试不同的讨论方向。 + +## 收藏话题 + +对于重要或常用的话题,可以点击收藏按钮进行收藏。找到要收藏的话题后点击收藏按钮,话题会被标记为收藏。收藏的话题会显示在话题列表的「收藏」列表里,方便快速访问。 + +![clipboard-1769000328858-48f0503640245.png](/blog/assets04d6fae3d9aa3c33697028f1cc9f4706.webp) + +## 删除话题 + +不需要的话题可以删除。找到要删除的话题后点击「删除」,确认删除后话题将被移除。 + +## 话题列表 + +话题列表按时间组织,方便查找不同时期的对话。 + +- 收藏列表:显示所有收藏的话题。 +- 时间列表:其他话题默认根据创建时间自动分组,展开对应时间的列表,可以查看该时间段的所有话题。 + +如果不想使用时间分组,可以选择「不分组」。 diff --git a/docs/usage/agent/translate.mdx b/docs/usage/agent/translate.mdx new file mode 100644 index 0000000000..48ba039363 --- /dev/null +++ b/docs/usage/agent/translate.mdx @@ -0,0 +1,32 @@ +--- +title: Conversation Translation +description: >- + LobeHub allows users to instantly translate conversation content into a + selected language, displaying the results in real time. Learn how to configure + your translation model for an optimized experience.
+tags: + - LobeHub + - Conversation Translation + - Real-Time Translation + - Translation Model Configuration +--- + +# Translate Conversation History + +{'Translate + +## Translate Content Within Conversations + +LobeHub enables users to translate conversation content into a target language with a single click. Once a language is selected, LobeHub will use the pre-configured AI model to perform the translation and display the results in real time within the chat window. + +{'Display + +## Configure Translation Model + +You can specify which model you'd like to use as your translation assistant in the settings. + +{'Configure + +- Open the `Settings` panel +- Navigate to the `System Assistant` section and find the `Translation Settings` option +- Assign a model to serve as your `Translation Assistant` diff --git a/docs/usage/agent/translate.zh-CN.mdx b/docs/usage/agent/translate.zh-CN.mdx new file mode 100644 index 0000000000..f093774ff3 --- /dev/null +++ b/docs/usage/agent/translate.zh-CN.mdx @@ -0,0 +1,29 @@ +--- +title: 会话翻译 +description: LobeHub 支持用户一键将对话内容翻译成指定语言,实时显示翻译结果。了解如何设置翻译模型以优化翻译体验。 +tags: + - LobeHub + - 会话翻译 + - 实时翻译 + - 翻译模型设置 +--- + +# 翻译会话记录 + +{'翻译会话'} + +## 翻译对话中的内容 + +LobeHub 支持用户一键将对话内容翻译成指定语言。选择目标语言后,LobeHub 将调用预先设置的 AI 模型进行翻译,并将翻译结果实时显示在聊天窗口中。 + +{'显示会话翻译结果'} + +## 翻译模型设置 + +你可以在设置中指定您希望使用的模型作为翻译助手。 + +{'设置翻译模型'} + +- 打开`设置`面板 +- 在`系统助手`中找到`翻译设置`选项 +- 为你的`翻译助手`指定一个模型 diff --git a/docs/usage/agent/tts-stt.mdx b/docs/usage/agent/tts-stt.mdx new file mode 100644 index 0000000000..d355842d64 --- /dev/null +++ b/docs/usage/agent/tts-stt.mdx @@ -0,0 +1,38 @@ +--- +title: Text-to-Speech & Speech-to-Text +description: >- + Learn how to use the text-to-speech (TTS) and speech-to-text (STT) features in + LobeHub, including how to configure your preferred voice model. 
+tags: + - LobeHub + - Text-to-Speech + - TTS + - STT + - Voice Model +--- + +# Guide to Text-to-Speech & Speech-to-Text + +LobeHub supports text and voice conversion features, allowing users to input content via speech and have AI responses read aloud using voice synthesis. + +## Text-to-Speech (TTS) + +To have AI read text aloud, simply highlight any content in the chat window and select `Text-to-Speech`. The AI will use a TTS model to convert the selected text into speech. + +{'TTS'} + +## Speech-to-Text (STT) + +To input text using your voice, click the voice input option in the message box. LobeHub will convert your speech into text and insert it into the input field. Once you're done, you can send it directly to the AI. + +{'STT'} + +## Configuring Voice Conversion Settings + +You can customize the voice conversion experience by selecting your preferred models in the settings. + +{'TTS + +- Open the `Settings` panel +- Navigate to the `Text-to-Speech` section +- Choose your desired voice service and AI model diff --git a/docs/usage/agent/tts-stt.zh-CN.mdx b/docs/usage/agent/tts-stt.zh-CN.mdx new file mode 100644 index 0000000000..f666065848 --- /dev/null +++ b/docs/usage/agent/tts-stt.zh-CN.mdx @@ -0,0 +1,36 @@ +--- +title: 文字语音转换 +description: 了解如何在 LobeHub 中使用文字语音转换功能,包括文字转语音(TTS)和语音转文字(STT),以及设置您喜欢的语音模型。 +tags: + - LobeHub + - 文字语音转换 + - TTS + - STT + - 语音模型 +--- + +# 文字语音转换使用指南 + +LobeHub 支持文字语音转换功能,允许用户通过语音输入内容,以及将 AI 输出的内容通过语音播报。 + +## 文字转语音(TTS) + +在对话窗口中选中任意内容,选择`文字转语音`,AI 将通过 TTS 模型对文本内容进行语音播报。 + +{'TTS'} + +## 语音转文字(STT) + +在输入窗口中选择语音输入功能,LobeHub 将您的语音转换为文字并输入到文本框中,完成输入后可以直接发送给 AI。 + +{'STT'} + +## 文字语音转换设置 + +你可以在设置中为文字语音转换功能指定您希望使用的模型。 + +{'TTS + +- 打开`设置`面板 +- 找到`文字转语音`设置 +- 选择您所需的语音服务和 AI 模型 diff --git a/docs/usage/agents/agent-organization.mdx b/docs/usage/agents/agent-organization.mdx deleted file mode 100644 index 5525ed1bff..0000000000 --- a/docs/usage/agents/agent-organization.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Efficiently 
Organize Your AI Assistants with LobeChat -description: >- - Learn how to use LobeChat's grouping, search, and pinning functions to efficiently organize and locate your AI assistants. - -tags: - - LobeChat - - AI assistants - - assistant organization - - grouping - - search function - - pinning function ---- - -# Assistant Organization Guide - -{'Assistant - -LobeChat provides a rich variety of AI assistant resources. Users can easily add various assistants through the assistant market, offering a wide range of application scenarios for AI applications. - -When you have added a large number of assistants, finding a specific assistant in the list may become challenging. LobeChat provides `search`, `grouping`, and `pinning` functions to help you better organize assistants and improve efficiency in locating them. - -## Assistant Grouping - -Firstly, LobeChat's AI assistants support organization through grouping. You can categorize assistants of the same type together and easily search for the required assistants by collapsing and expanding groups. - -### Assistant Settings - -{'Assistant - -- In the menu of an individual assistant, selecting the `Move to Group` option can quickly categorize the assistant into the specified group. -- If you don't find the group you want, you can choose `Add Group` to quickly create a new group. - -### Group Settings - -{'Group - -- In the group menu, you can quickly create a new assistant under that group. -- Clicking the `Group Management` button allows you to `rename`, `delete`, `sort`, and perform other operations on all groups. - -## Assistant Search - -{'Assistant - -- At the top of the assistant list, you can use the assistant search function to easily locate the assistant you need using keywords. - -## Assistant Pinning - -{'Assistant - -- In the assistant menu, you can use the `Pin` function to pin the assistant to the top. 
-- After pinning an assistant, a pinned area will appear at the top of the assistant list, displaying all pinned assistants. -- For pinned assistants, you can choose `Unpin` to remove them from the pinned area. diff --git a/docs/usage/agents/agent-organization.zh-CN.mdx b/docs/usage/agents/agent-organization.zh-CN.mdx deleted file mode 100644 index 5bff869c49..0000000000 --- a/docs/usage/agents/agent-organization.zh-CN.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: LobeChat 助手组织指南 - 提升助手管理效率 -description: 了解如何通过分组、搜索和固定功能更好地组织 LobeChat 的 AI 助手,提升助手管理效率和定位效率。 -tags: - - LobeChat - - AI 助手 - - 助手组织 - - 分组设置 - - 助手搜索 - - 助手固定 ---- - -# 助手组织指南 - -{'助手组织'} - -LobeChat 提供了丰富的 AI 助手资源,用户可以通过助手市场方便地添加各类助手,为 AI 应用提供了广泛的应用场景。 - -当你添加了大量助手后,在列表中寻找特定助手可能会变得比较困难。LobeChat 提供了`搜索`、`分组`和`固定`功能,帮助您更好地组织助手,提升定位效率。 - -## 助手分组 - -首先 LobeChat 的 AI 助手支持以分组的方式进行组织。你可以将同类型的助手归类到一起,并通过折叠和展开分组的方式方便地查询所需助手。 - -### 助手设置 - -{'助手分组'} - -- 在单个助手的菜单中,选择`移动到分组`选项可以快速将该助手归类到指定分组。 -- 如果没有你想要的分组,可以选择`添加分组`,快速创建一个新的分组。 - -### 分组设置 - -{'分组菜单'} - -- 在分组菜单中,可以快速在该分组下新建助手 -- 点击`分组管理`按钮可以对所有分组进行`重命名`、`删除`、`排序`等操作。 - -## 助手搜索 - -{'助手搜索'} - -- 在助手列表的顶部,您可以通过助手搜索功能,方便地使用关键词定位到您所需的助手。 - -## 助手固定 - -{'助手固定'} - -- 在助手菜单中,你可以使用`固定`功能将该助手固定在顶部。 -- 固定助手后,助手列表的上方将出现一个固定区域,显示所有已固定的助手列表。 -- 对于已固定的助手,你可以选择`解除固定`,将其移出固定区域。 diff --git a/docs/usage/agents/concepts.mdx b/docs/usage/agents/concepts.mdx deleted file mode 100644 index e87fc4f03e..0000000000 --- a/docs/usage/agents/concepts.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Improving User Interaction Efficiency with Agents in LobeChat -description: >- - Discover how LobeChat's innovative approach with Agents enhances user experience by providing dedicated functional modules for efficient task handling and quick access to historical conversations. 
- -tags: - - LobeChat - - Agents - - User Interaction Efficiency - - Task Handling - - Historical Conversations ---- - -# Topics and Assistants - -## ChatGPT and "Topics" - -In the official ChatGPT application, there is only the concept of "topics." As shown in the image, the user's historical conversation topics are listed in the sidebar. - -{'ChatGPT - -However, in our usage, we have found that this model has many issues. For example, the information indexing of historical conversations is too scattered. Additionally, when dealing with repetitive tasks, it is difficult to have a stable entry point. For instance, if I want ChatGPT to help me translate a document, in this model, I would need to constantly create new topics and then set up the translation prompt I had previously created. When there are high-frequency tasks, this will result in a very inefficient interaction format. - -## Topics and "Agent" - -Therefore, in LobeChat, we have introduced the concept of **Agents**. An agent is a complete functional module, each with its own responsibilities and tasks. Assistants can help you handle various tasks and provide professional advice and guidance. - -{'Topics - -At the same time, we have integrated topics into each agent. The benefit of this approach is that each agent has an independent topic list. You can choose the corresponding agent based on the current task and quickly switch between historical conversation records. This method is more in line with users' habits in common chat software, improving interaction efficiency. 
diff --git a/docs/usage/agents/concepts.zh-CN.mdx b/docs/usage/agents/concepts.zh-CN.mdx deleted file mode 100644 index 566b701459..0000000000 --- a/docs/usage/agents/concepts.zh-CN.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: 在 LobeChat 中进行话题与助手的革新 -description: 了解 LobeChat 中的话题与助手概念,如何提高用户交互效率并解决历史对话信息索引分散的问题。 -tags: - - LobeChat - - 话题与助手 - - 交互效率 - - 历史对话记录 - - 信息索引 ---- - -# 话题与助手 - -## ChatGPT 与「话题」 - -在 ChatGPT 官方应用中,只存在话题的概念,如图所示,在侧边栏中是用户的历史对话话题列表。 - -{'ChatGPT - -但在我们的使用过程中其实会发现这种模式存在很多问题,比如历史对话的信息索引过于分散问题,同时当处理一些重复任务时很难有一个稳定的入口,比如我希望有一个稳定的入口可以让 ChatGPT 帮助我翻译文档,在这个模式下,我需要不断新建新的话题同时再设置我之前创建好的翻译 Prompt 设定,当有高频任务存在时,这将是一个效率很低的交互形式。 - -## 「话题」与「助手」 - -因此在 LobeChat 中,我们引入了 **助手** 的概念。助手是一个完整的功能模块,每个助手都有自己的职责和任务。助手可以帮助你处理各种任务,并提供专业的建议和指导。 - -{'「话题」与「助手」'} - -与此同时,我们将话题索引到每个助手内部。这样做的好处是,每个助手都有一个独立的话题列表,你可以根据当前任务选择对应的助手,并快速切换历史对话记录。这种方式更符合用户对常见聊天软件的使用习惯,提高了交互的效率。 diff --git a/docs/usage/agents/custom-agent.mdx b/docs/usage/agents/custom-agent.mdx deleted file mode 100644 index 1272867118..0000000000 --- a/docs/usage/agents/custom-agent.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Custom LobeChat Assistant Guide - Adding and Iterating Assistants -description: >- - Learn how to add assistants to your favorites list in LobeChat through the role market or by creating custom assistants. Explore detailed steps for creating custom assistants and quick setup tips. - -tags: - - LobeChat - - Adding Assistants - - Custom Assistant - - Role Market - - Creating Assistants - - Assistant Configuration ---- - -# Custom Assistant Guide - -As the basic functional unit of LobeChat, adding and iterating assistants is very important. Now you can add assistants to your favorites list in two ways. - -## `A` Add through the role market - -If you are a beginner in Prompt writing, you might want to browse the assistant market of LobeChat first. 
Here, you can find commonly used assistants submitted by others and easily add them to your list with just one click, which is very convenient. - -{'Add - -## `B` Create a custom assistant - -When you need to handle specific tasks, you need to consider creating a custom assistant to help you solve the problem. You can add and configure the assistant in detail in the following ways. - - - {'Create - - {'Create - - - - **Quick Setup Tip**: You can conveniently modify the Prompt through the quick edit button in the - sidebar. - - - - {'Create - - {'Create - - -If you want to understand Prompt writing tips and common model parameter settings, you can continue to view: - - - - - - diff --git a/docs/usage/agents/custom-agent.zh-CN.mdx b/docs/usage/agents/custom-agent.zh-CN.mdx deleted file mode 100644 index 9f11f29198..0000000000 --- a/docs/usage/agents/custom-agent.zh-CN.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: LobeChat 自定义助手指南 - 添加和配置助手的最佳方法 -description: 了解如何通过角色市场或新建自定义助手将助手添加到你的常用列表中。快捷设置技巧和常见的模型参数设置也包括在内。 -tags: - - 自定义助手 - - LobeChat - - 添加助手 - - 配置助手 - - 角色市场 - - 快捷设置 - - 模型参数设置 ---- - -# 自定义助手指南 - -作为 LobeChat 的基础职能单位,助手的添加和迭代是非常重要的。现在你可以通过两种方式将助手添加到你的常用列表中 - -## `A` 通过角色市场添加 - -如果你是一个 Prompt 编写的新手,不妨先浏览一下 LobeChat 的助手市场。在这里,你可以找到其他人提交的常用助手,并且只需一键添加到你的列表中,非常方便。 - -{'通过角色市场添加'} - -## `B` 通过新建自定义助手 - -当你需要处理一些特定的任务时,你就需要考虑创建一个自定义助手来帮助你解决问题。可以通过以下方式添加并进行助手的详细配置 - - - {'通过新建自定义助手 - - {'通过新建自定义助手 - - -**快捷设置技巧**: 可以通过侧边栏的快捷编辑按钮进行 Prompt 的便捷修改 - - - {'通过新建自定义助手 - - {'通过新建自定义助手 - - -如果你希望理解 Prompt 编写技巧和常见的模型参数设置,可以继续查看: - - - - - - diff --git a/docs/usage/agents/model.mdx b/docs/usage/agents/model.mdx deleted file mode 100644 index 61afd6cd68..0000000000 --- a/docs/usage/agents/model.mdx +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: LobeChat Model Config Guide -description: >- - Explore the capabilities of ChatGPT models from gpt-3.5-turbo to gpt-4-32k, understanding their speed, context limits, and cost. 
Learn about model parameters like temperature and top-p for better output. - -tags: - - ChatGPT Models - - Model Parameters - - Neural Networks - - Language Understanding - - Generation Capabilities ---- - -# Model Guide - -## ChatGPT - -- **gpt-3.5-turbo**: Currently the fastest generating ChatGPT model, it is faster but may sacrifice some text quality, with a context length of 4k. -- **gpt-4**: ChatGPT 4.0 has improved language understanding and generation capabilities compared to 3.5. It can better understand context and generate more accurate and natural responses. This is thanks to improvements in the GPT-4 model, including better language modeling and deeper semantic understanding, but it may be slower than other models, with a context length of 8k. -- **gpt-4-32k**: Similar to gpt-4, the context limit is increased to 32k tokens, with a higher cost. - -## Concept of Model Parameters - -LLM seems magical, but it is essentially a probability problem. The neural network generates a bunch of candidate words from the pre-trained model based on the input text and selects the high-probability ones as output. Most of the related parameters are associated with sampling (i.e., how to select the output from the candidate words). - -### `temperature` - -This parameter controls the randomness of the model's output. The higher the value, the greater the randomness. Generally, when the same prompt is input multiple times, the model's output varies each time. - -- Set to 0: Generates a fixed output for each prompt -- Lower values: More concentrated and deterministic output -- Higher values: More random output (more creative) - - - Generally, the longer and clearer the prompt, the better the quality and confidence of the model's - output. In such cases, the temperature value can be adjusted appropriately. Conversely, if the - prompt is short and ambiguous, setting a relatively high temperature value will result in unstable - model output. - - -
- -### `top_p` - -`top_p` is also a sampling parameter, but it differs from temperature in its sampling method. Before outputting, the model generates a bunch of tokens, and these tokens are ranked based on their quality. In the top-p sampling mode, the candidate word list is dynamic, and tokens are selected from the tokens based on a percentage. Top\_p introduces randomness in token selection, allowing other high-scoring tokens to have a chance of being selected, rather than always choosing the highest-scoring one. - - - `top_p` is similar to randomness, and it is generally not recommended to change it together with - the randomness of temperature. - - -
- -### `presence_penalty` - -The presence penalty parameter can be seen as a punishment for repetitive content in the generated text. When this parameter is set high, the generation model will try to avoid producing repeated words, phrases, or sentences. Conversely, if the presence penalty parameter is set low, the generated text may contain more repetitive content. By adjusting the value of the presence penalty parameter, control over the originality and diversity of the generated text can be achieved. The importance of this parameter is mainly reflected in the following aspects: - -- Enhancing the originality and diversity of the generated text: In certain applications, such as creative writing or generating news headlines, it is necessary for the generated text to have high originality and diversity. By increasing the value of the presence penalty parameter, the amount of repeated content in the generated text can be effectively reduced, thereby enhancing its originality and diversity. -- Preventing the generation of loops and meaningless content: In some cases, the generation model may produce repetitive or meaningless text that usually fails to convey useful information. By appropriately increasing the value of the presence penalty parameter, the probability of generating such meaningless content can be reduced, thereby improving the readability and practicality of the generated text. - - - It is worth noting that the presence penalty parameter, in conjunction with other parameters such - as temperature and top-p, collectively influences the quality of the generated text. Compared to - other parameters, the presence penalty parameter primarily focuses on the originality and - repetitiveness of the text, while the temperature and top-p parameters more significantly affect - the randomness and determinism of the generated text. By adjusting these parameters reasonably, - comprehensive control over the quality of the generated text can be achieved. 
- - -### `frequency_penalty` - -It is a mechanism that penalizes frequently occurring new vocabulary in the text to reduce the likelihood of the model repeating the same word. The larger the value, the more likely it is to reduce repeated words. - -- `-2.0` When the morning news started broadcasting, I found that my TV now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now now **(The highest frequency word is "now", accounting for 44.79%)** -- `-1.0` He always watches the news in the early morning, in front of the TV watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch watch **(The highest frequency word is "watch", accounting for 57.69%)** -- `0.0` When the morning sun poured into the small diner, a tired postman appeared at the door, carrying a bag of letters in his hands. The owner warmly prepared a breakfast for him, and he started sorting the mail while enjoying his breakfast. **(The highest frequency word is "of", accounting for 8.45%)** -- `1.0` A girl in deep sleep was woken up by a warm ray of sunshine, she saw the first ray of morning light, surrounded by birdsong and flowers, everything was full of vitality. (The highest frequency word is "of", accounting for 5.45%) -- `2.0` Every morning, he would sit on the balcony to have breakfast. Under the soft setting sun, everything looked very peaceful. However, one day, when he was about to pick up his breakfast, an optimistic little bird flew by, bringing him a good mood for the day. (The highest frequency word is "of", accounting for 4.94%) - -
- -### `reasoning_effort` - -The `reasoning_effort` parameter controls the strength of the reasoning process. This setting affects the depth of reasoning the model performs when generating a response. The available values are **`low`**, **`medium`**, and **`high`**, with the following meanings: - -- **low**: Lower reasoning effort, resulting in faster response times. Suitable for scenarios where quick responses are needed, but it may sacrifice some reasoning accuracy. -- **medium** (default): Balances reasoning accuracy and response speed, suitable for most scenarios. -- **high**: Higher reasoning effort, producing more detailed and complex responses, but slower response times and greater token consumption. - -By adjusting the `reasoning_effort` parameter, you can find an appropriate balance between response speed and reasoning depth based on your needs. For example, in conversational scenarios, if fast responses are a priority, you can choose low reasoning effort; if more complex analysis or reasoning is needed, you can opt for high reasoning effort. - - - This parameter is only applicable to reasoning models, such as OpenAI's `o1`, `o1-mini`, - `o3-mini`, etc. 
- diff --git a/docs/usage/agents/model.zh-CN.mdx b/docs/usage/agents/model.zh-CN.mdx deleted file mode 100644 index 8ec1dc1afc..0000000000 --- a/docs/usage/agents/model.zh-CN.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: ChatGPT 模型指南:参数概念与应用 -description: >- - 了解 ChatGPT 模型的不同版本及参数概念,包括 temperature、top_p、presence_penalty 和 frequency_penalty。 - -tags: - - ChatGPT - - 模型指南 - - 参数概念 - - LLM - - 生成模型 ---- - -# 模型指南 - -## ChatGPT - -- **gpt-3.5-turbo**:目前最生成速度最快的 chatgpt 模型更快,但可能会牺牲一些生成文本的质量,上下文长度为 4k。 -- **gpt-4**:ChatGPT 4.0 在语言理解和生成能力方面相对于 3.5 有所提升。它可以更好地理解上下文和语境,并生成更准确、自然的回答。这得益于 GPT-4 模型的改进,包括更好的语言建模和更深入的语义理解,但它的速度可能比其他模型慢,上下文长度为 8k。 -- **gpt-4-32k**:同 gpt-4,上下文限制增加到 32k token,同时费率更高。 - -## 模型参数概念 - -LLM 看似很神奇,但本质还是一个概率问题,神经网络根据输入的文本,从预训练的模型里面生成一堆候选词,选择概率高的作为输出,相关的参数,大多都是跟采样有关(也就是要如何从候选词里选择输出)。 - -### `temperature` - -用于控制模型输出的结果的随机性,这个值越大随机性越大。一般我们多次输入相同的 prompt 之后,模型的每次输出都不一样。 - -- 设置为 0,对每个 prompt 都生成固定的输出 -- 较低的值,输出更集中,更有确定性 -- 较高的值,输出更随机(更有创意 ) - - - 一般来说,prompt 越长,描述得越清楚,模型生成的输出质量就越好,置信度越高,这时可以适当调高 - temperature 的值;反过来,如果 prompt 很短,很含糊,这时再设置一个比较高的 temperature - 值,模型的输出就很不稳定了。 - - -
- -### `top_p` - -核采样 `top_p` 也是采样参数,跟 temperature 不一样的采样方式。模型在输出之前,会生成一堆 token,这些 token 根据质量高低排名,核采样模式中候选词列表是动态的,从 tokens 里按百分比选择候选词。 top\_p 为选择 token 引入了随机性,让其他高分的 token 有被选择的机会,不会总是选最高分的。 - -`top_p` 与随机性类似,一般来说不建议和随机性 temperature 一起更改 - -
- -### `presence_penalty` - -Presence Penalty 参数可以看作是对生成文本中重复内容的一种惩罚。当该参数设置较高时,生成模型会尽量避免产生重复的词语、短语或句子。相反,如果 Presence Penalty 参数较低,则生成的文本可能会包含更多重复的内容。通过调整 Presence Penalty 参数的值,可以实现对生成文本的原创性和多样性的控制。参数的重要性主要体现在以下几个方面: - -- 提高生成文本的独创性和多样性:在某些应用场景下,如创意写作、生成新闻标题等,需要生成的文本具有较高的独创性和多样性。通过增加 Presence Penalty 参数的值,可以有效减少生成文本中的重复内容,从而提高文本的独创性和多样性。 -- 防止生成循环和无意义的内容:在某些情况下,生成模型可能会产生循环、重复的文本,这些文本通常无法传达有效的信息。通过适当增加 Presence Penalty 参数的值,可以降低生成这类无意义内容的概率,提高生成文本的可读性和实用性。 - - - 值得注意的是,Presence Penalty 参数与其他参数(如 Temperature 和 - top-p)共同影响着生成文本的质量。对比其他参数,Presence Penalty - 参数主要关注文本的独创性和重复性,而 Temperature 和 top-p - 参数则更多地影响着生成文本的随机性和确定性。通过合理地调整这些参数,可以实现对生成文本质量的综合控制 - - -
- -### `frequency_penalty` - -是一种机制,通过对文本中频繁出现的新词汇施加惩罚,以减少模型重复同一词语的可能性,值越大,越有可能降低重复字词。 - -- `-2.0` 当早间新闻开始播出,我发现我家电视现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在现在 *(频率最高的词是 “现在”,占比 44.79%)* -- `-1.0` 他总是在清晨看新闻,在电视前看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看看 *(频率最高的词是 “看”,占比 57.69%)* -- `0.0` 当清晨的阳光洒进小餐馆时,一名疲倦的邮递员出现在门口,他的手中提着一袋信件。店主热情地为他准备了一份早餐,他在享用早餐的同时开始整理邮件。**(频率最高的词是 “的”,占比 8.45%)** -- `1.0` 一个深度睡眠的女孩被一阵温暖的阳光唤醒,她看到了早晨的第一缕阳光,周围是鸟语花香,一切都充满了生机。*(频率最高的词是 “的”,占比 5.45%)* -- `2.0` 每天早上,他都会在阳台上坐着吃早餐。在柔和的夕阳照耀下,一切看起来都非常宁静。然而有一天,当他准备端起早餐的时候,一只乐观的小鸟飞过,给他带来了一天的好心情。 *(频率最高的词是 “的”,占比 4.94%)* - -
- -### `reasoning_effort` - -`reasoning_effort` 参数用于控制推理过程的强度。此参数的设置会影响模型在生成回答时的推理深度。可选值包括 **`low`**、**`medium`** 和 **`high`**,具体含义如下: - -- **low(低)**:推理强度较低,生成速度较快,适用于需要快速响应的场景,但可能牺牲一定的推理精度。 -- **medium(中,默认值)**:平衡推理精度与响应速度,适用于大多数场景。 -- **high(高)**:推理强度较高,生成更为详细和复杂的回答,但响应时间较长,且消耗更多的 Token。 - -通过调整 `reasoning_effort` 参数,可以根据需求在生成速度与推理深度之间找到适合的平衡。例如,在对话场景中,如果更关注快速响应,可以选择低推理强度;如果需要更复杂的分析或推理,可以选择高推理强度。 - -该参数仅适用于推理模型,如 OpenAI 的 `o1`、`o1-mini`、`o3-mini` 等。 diff --git a/docs/usage/agents/prompt.mdx b/docs/usage/agents/prompt.mdx deleted file mode 100644 index bce297d49b..0000000000 --- a/docs/usage/agents/prompt.mdx +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: >- - Guide to Using Prompts in LobeChat - How to Write Effective Instructions for Generative AI - -description: >- - Learn the basic concepts of prompts and how to write well-structured and effective instructions for generative AI. Improve the quality and effectiveness of prompts to guide AI models accurately. - -tags: - - Generative AI - - Prompts - - Writing Instructions - - Structured Prompts - - Improving AI Output ---- - -# Guide to Using Prompts - -## Basic Concepts of Prompts - -Generative AI is very useful, but it requires human guidance. In most cases, generative AI can be as capable as a new intern at a company, but it needs clear instructions to perform well. The ability to guide generative AI correctly is a very powerful skill. You can guide generative AI by sending a prompt, which is usually a text instruction. A prompt is the input provided to the assistant, and it will affect the output. A good prompt should be structured, clear, concise, and directive. - -## How to Write a Well-Structured Prompt - - - A structured prompt refers to the construction of the prompt having a clear logic and structure. - For example, if you want the model to generate an article, your prompt may need to include the - article's topic, outline, and style. 
- - -Let's look at a basic discussion prompt example: - -> *"What are the most urgent environmental issues facing our planet, and what actions can individuals take to help address these issues?"* - -We can convert it into a simple prompt for the assistant to answer the following questions: placed at the front. - -```prompt -Answer the following questions: -What are the most urgent environmental issues facing our planet, and what actions can individuals take to help address these issues? -``` - -Since the results generated by this prompt are not consistent, some are only one or two sentences. A typical discussion response should have multiple paragraphs, so these results are not ideal. A good prompt should provide **specific formatting and content instructions**. You need to eliminate ambiguity in the language to improve consistency and quality. Here is a better prompt. - -```prompt -Write a highly detailed paper, including an introduction, body, and conclusion, to answer the following questions: -What are the most urgent environmental issues facing our planet, -and what actions can individuals take to help address these issues? -``` - -The second prompt generates longer output and better structure. The use of the term "paper" in the prompt is intentional, as the assistant can understand the definition of a paper, making it more likely to generate coherent, structured responses. - -## How to Improve Quality and Effectiveness - - - There are several ways to improve the quality and effectiveness of prompts: - - - **Be Clear About Your Needs:** The model's output will strive to meet your needs, so if your needs are not clear, the output may not meet expectations. - - **Use Correct Grammar and Spelling:** The model will try to mimic your language style, so if your language style is problematic, the output may also be problematic. 
- - **Provide Sufficient Contextual Information:** The model will generate output based on the contextual information you provide, so if the information is insufficient, it may not produce the desired results. - - -After formulating effective prompts for discussing issues, you now need to refine the generated results. This may involve adjusting the output to fit constraints such as word count or combining concepts from different generated results. - -A simple method of iteration is to generate multiple outputs and review them to understand the concepts and structures being used. Once the outputs have been evaluated, you can select the most suitable ones and combine them into a coherent response. Another iterative method is to start small and **gradually expand**. This requires more than one prompt: an initial prompt for drafting the initial one or two paragraphs, followed by additional prompts to expand on the content already written. Here is a potential philosophical discussion prompt: - -> *"Is mathematics an invention or a discovery? Use careful reasoning to explain your answer."* - -Add it to a simple prompt as follows: - -```prompt -Write an opening paragraph to answer the following question: -Is mathematics an invention or a discovery? Use careful reasoning to explain your answer. -``` - -After generating some responses, we found one that we like: - -```prompt -Philosophers and mathematicians have long debated whether mathematics is an invention or a discovery. -Some argue that the concepts and principles of mathematics exist independently of human thought, and therefore can be considered discovered rather than invented. -For example, the concepts of numbers, ratios, and geometric shapes are observed in the natural world and can be abstracted and systematically studied. -``` - -It's a good start. Then, use another prompt to expand: - -```prompt -I am writing a detailed essay to answer the following prompt: -Is mathematics an invention or a discovery? 
Use careful reasoning to explain your answer. Here is my current situation: Philosophers and mathematicians have long debated whether mathematics is an invention or a discovery. -Some argue that the concepts and principles of mathematics exist independently of human thought, and therefore can be considered discovered rather than invented. -For example, the concepts of numbers, ratios, and geometric shapes are observed in the natural world and can be abstracted and systematically studied. Write the next paragraph of my essay. -``` - -I used this prompt to generate several extensions and selected one that I liked, resulting in the following: - -```prompt -Philosophers and mathematicians have long debated whether mathematics is invented or discovered. -Some argue that the concepts and principles of mathematics exist independently of human thought, and therefore can be considered to be discovered rather than invented. -For example, the concepts of numbers, ratios, and geometric shapes are observed in the natural world and can be abstracted and systematically studied. -On the other hand, some believe that mathematics is largely an invention of human thought. -Mathematical principles are based on logical reasoning, which is a characteristic of human thought. -For instance, Euclidean geometry is based on abstract axioms and premises, accepted as true without the need for proof. -Therefore, geometry can be considered an invention of human thought rather than a discovery. -Similarly, mathematical formulas and equations are used to model and predict physical phenomena, which are the result of human reasoning. -``` - -Using the prompt extensions, we can iteratively write and iterate at each step. This is very useful for situations that require **generating higher quality output and incremental modifications**. 
- -## Further Reading - -- **Learn Prompting**: [https://learnprompting.org/en-US/docs/intro](https://learnprompting.org/en-US/docs/intro) diff --git a/docs/usage/agents/prompt.zh-CN.mdx b/docs/usage/agents/prompt.zh-CN.mdx deleted file mode 100644 index 5c2e7de6cf..0000000000 --- a/docs/usage/agents/prompt.zh-CN.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: 如何通过 LobeChat 写好结构化 Prompt - 提高生成式 AI 输出质量的关键 -description: 学习如何撰写结构化 Prompt 可以提高生成式 AI 输出的质量和效果。本文介绍了撰写有效 Prompt 的方法和技巧,以及如何逐步扩展和优化生成的结果。 -tags: - - 结构化 Prompt - - 生成式AI - - 提高输出质量 - - 撰写技巧 - - 逐步扩展 ---- - -# Prompt 使用指南 - -## Prompt 基本概念 - -生成式 AI 非常有用,但它需要人类指导。通常情况下,生成式 AI 能就像公司新来的实习生一样,非常有能力,但需要清晰的指示才能做得好。能够正确地指导生成式 AI 是一项非常强大的技能。你可以通过发送一个 prompt 来指导生成式 AI,这通常是一个文本指令。Prompt 是向助手提供的输入,它会影响输出结果。一个好的 Prompt 应该是结构化的,清晰的,简洁的,并且具有指向性。 - -## 如何写好一个结构化 prompt - - - 结构化 prompt 是指 prompt 的构造应该有明确的逻辑和结构。例如,如果你想让模型生成一篇文章,你的 - prompt 可能需要包括文章的主题,文章的大纲,文章的风格等信息。 - - -让我们看一个基本的讨论问题的例子: - -> *"我们星球面临的最紧迫的环境问题是什么,个人可以采取哪些措施来帮助解决这些问题?"* - -我们可以将其转化为简单的助手提示,将回答以下问题:放在前面。 - -```prompt -回答以下问题: -我们星球面临的最紧迫的环境问题是什么,个人可以采取哪些措施来帮助解决这些问题? -``` - -由于这个提示生成的结果并不一致,有些只有一两个句子。一个典型的讨论回答应该有多个段落,因此这些结果并不理想。一个好的提示应该给出**具体的格式和内容指令**。您需要消除语言中的歧义以提高一致性和质量。这是一个更好的提示。 - -```prompt -写一篇高度详细的论文,包括引言、正文和结论段,回答以下问题: -我们星球面临的最紧迫的环境问题是什么, -个人可以采取哪些措施来帮助解决这些问题? -``` - -第二个提示生成了更长的输出和更好的结构。提示中使用 “论文” 一词是有意的,因为助手可以理解论文的定义,因此更有可能生成连贯的、结构化的回答。 - -
- -## 如何提升其质量和效果 - - - 提升 prompt 质量和效果的方法主要有以下几点: - - - **尽量明确你的需求:** 模型的输出会尽可能满足你的需求,所以如果你的需求不明确,输出可能会不如预期。 - - **使用正确的语法和拼写:** 模型会尽可能模仿你的语言风格,所以如果你的语言风格有问题,输出可能也会有问题。 - - **提供足够的上下文信息:** 模型会根据你提供的上下文信息生成输出,所以如果你提供的上下文信息不足,可能无法生成你想要的结果。 - - -在为讨论问题制定有效的提示后,您现在需要细化生成的结果。这可能涉及到调整输出以符合诸如字数等限制,或将不同生成的结果的概念组合在一起。 - -迭代的一个简单方法是生成多个输出并查看它们,以了解正在使用的概念和结构。一旦评估了输出,您就可以选择最合适的输出并将它们组合成一个连贯的回答。另一种迭代的方法是逐步开始,然后**逐步扩展**。这需要不止一个提示:一个起始提示,用于撰写最初的一两段,然后是其他提示,以扩展已经写过的内容。以下是一个潜在的哲学讨论问题: - -> *"数学是发明还是发现?用仔细的推理来解释你的答案。"* - -将其添加到一个简单的提示中,如下所示: - -```prompt -写一个起始段回答以下问题: -数学是发明还是发现?用仔细的推理来解释你的答案。 -``` - -生成了一些回答,找到了一个我们喜欢的: - -```prompt -哲学家和数学家之间争论不休,数学是发明还是发现。 -一些人认为,数学的概念和原则独立于人类思维存在,因此可以被认为是被发现而不是被发明的。 -例如,数字、比率和几何形状的概念在自然界中被观察到,可以被抽象化和系统化地研究。 -``` - -是一个不错的开端。然后使用另一个提示来扩展: - -```prompt -我正在撰写一篇详细的短文,回答以下提示: -数学是发明还是发现?用仔细的推理来解释你的答案。这是我目前的情况: 哲学家和数学家之间争论不休,数学是发明还是发现。 -一些人认为,数学的概念和原则独立于人类思维存在,因此可以被认为是被发现而不是被发明的。 -例如,数字、比率和几何形状的概念在自然界中被观察到,可以被抽象化和系统化地研究。写下我文章的下一段。 -``` - -使用此提示生成了几个扩展,并选择了一个我喜欢的,最终得到了以下结果: - -```prompt -哲学家和数学家之间争论不休,数学是发明还是发现。 -一些人认为,数学的概念和原则独立于人类思维存在,因此可以被认为是被发现而不是被发明的。 -例如,数字、比率和几何形状的概念在自然界中被观察到,可以被抽象化和系统化地研究。 -另一方面,有人认为数学在很大程度上是人类思维的发明。 -数学原理基于逻辑推理,这是人类思维的一个特征。 -例如,欧几里德几何基于抽象的公理和前提,被接受为真实而无需证明。 -因此,可以认为几何是人类思维的发明,而不是一种发现。 -同样,数学公式和方程用于模拟和预测物理现象,这又是人类推理的结果。 -``` - -使用扩展提示,我们可以逐步地写作并在每个步骤上进行迭代。这对于需要**生成更高质量的输出并希望逐步修改**的情况非常有用。 - -## 扩展阅读 - -- **Learn Prompting**: [https://learnprompting.org/zh-Hans/docs/intro](https://learnprompting.org/zh-Hans/docs/intro) diff --git a/docs/usage/agents/topics.mdx b/docs/usage/agents/topics.mdx deleted file mode 100644 index 0f7bb762c5..0000000000 --- a/docs/usage/agents/topics.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: LobeChat Topic Usage Guide -description: >- - Learn how to save and manage topics during conversations in LobeChat, including saving topics, accessing the topic list, and pinning favorite topics. 
- -tags: - - Topic Usage - - Conversation Management - - Save Topic - - Topic List - - Favorite Topics ---- - -# Topic Usage Guide - -{'Topic - -- **Save Topic:** During a conversation, if you want to save the current context and start a new topic, you can click the save button next to the send button. -- **Topic List:** Clicking on a topic in the list allows for quick switching of historical conversation records and continuing the conversation. You can also use the star icon ⭐️ to pin favorite topics to the top, or use the more button on the right to rename or delete topics. diff --git a/docs/usage/agents/topics.zh-CN.mdx b/docs/usage/agents/topics.zh-CN.mdx deleted file mode 100644 index d272700ed3..0000000000 --- a/docs/usage/agents/topics.zh-CN.mdx +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: LobeChat 话题使用指南 - 保存话题、快速切换历史记录 -description: 学习如何在 LobeChat 中保存话题、快速切换历史记录,并对话题进行收藏、重命名和删除操作。 -tags: - - 话题使用指南 - - 保存话题 - - 快速切换历史记录 - - 话题收藏 - - 话题重命名 - - 话题删除 ---- - -# 话题使用指南 - -{'话题使用指南'} - -- **保存话题:** 在聊天过程中,如果想要保存当前上下文并开启新的话题,可以点击发送按钮旁边的保存按钮。 -- **话题列表:** 点击列表中的话题可以快速切换历史对话记录,并继续对话。你还可以通过点击星标图标 ⭐️ 将话题收藏置顶,或者通过右侧更多按钮对话题进行重命名和删除操作。 diff --git a/docs/usage/community/agent-market.mdx b/docs/usage/community/agent-market.mdx new file mode 100644 index 0000000000..09783f5b8c --- /dev/null +++ b/docs/usage/community/agent-market.mdx @@ -0,0 +1,44 @@ +--- +title: Assistant Marketplace +description: >- + LobeHub's Assistant Marketplace is a vibrant and innovative community that + brings together a wide range of thoughtfully designed assistants to enhance + productivity in work and learning environments. You're welcome to submit your + own assistant creations and help build a collection of useful, creative, and + cutting-edge tools. 
+tags: + - LobeHub + - LobeHub + - Assistant Marketplace + - Innovation Community + - Collaborative Space + - Assistant Creations + - Automated Internationalization + - Multilingual Versions +--- + +# Assistant Marketplace + +The LobeHub Assistant Marketplace features high-quality AI assistants created by developers and enthusiasts from around the world. Whether you're looking for a coding assistant, writing advisor, language tutor, or a consultant in a specialized field, you'll find the right tool here. These assistants are carefully crafted by community members and tested in real-world scenarios, making them ready for immediate use in both work and study settings. + +The marketplace is more than just a resource hub—it's an open platform for creativity. You can publish your own custom assistants and share your ideas and expertise with users across the globe. + +## Explore the Assistant Marketplace + +To access the Assistant Marketplace, click on "Community" → "Assistants" in the left sidebar. + +![Agent Marketplace](/blog/assetsbcd98b0913d2dfc30d5a2b5523115d33.webp) + +Assistants are organized by category, making it easy to quickly find the type of assistant you need. + +## Installing and Using Assistants + +View and install assistants + +Click on any assistant card to open its detail page. Here you'll find an overview, settings, capabilities, and version history. Once you've confirmed the assistant meets your needs, click "Add Assistant & Start Chatting" to begin. + +![Installing and Using Assistants](/blog/assets60bf3667e56862024d047444d9b4c2fb.webp) + +### Using Assistants + +After selecting "Add Assistant & Start Chatting," you can immediately begin interacting with the assistant to test its features and performance. You can customize system prompts, choose different models, configure plugins, and more. You also have the option to create a copy of any marketplace assistant, allowing you to personalize it while retaining its original capabilities. 
diff --git a/docs/usage/community/agent-market.zh-CN.mdx b/docs/usage/community/agent-market.zh-CN.mdx new file mode 100644 index 0000000000..0e854b8e48 --- /dev/null +++ b/docs/usage/community/agent-market.zh-CN.mdx @@ -0,0 +1,41 @@ +--- +title: 助手市场 +description: >- + LobeHub + 助手市场是一个充满活力和创新的社区,汇聚了众多精心设计的助手,为工作场景和学习提供便利。欢迎提交你的助手作品,共同创造更多有趣、实用且具有创新性的助手。 +tags: + - LobeHub + - LobeHub + - 助手市场 + - 创新社区 + - 协作空间 + - 助手作品 + - 自动化国际化 + - 多语言版本 +--- + +# 助手市场 + +LobeHub 的助理市场汇聚了来自全球创作者的优质 AI 助理。无论你需要编程助理、写作顾问、语言教师,还是专业领域的咨询专家,都能在这里找到合适的助理。这些助理由社区成员精心打造,经过实际使用验证,能直接投入工作和学习场景。 + +助理市场不只是获取资源的地方,更是一个开放的创作平台。你可以发布自己定制的助理,与全球用户分享你的创意和专业知识。 + +## 浏览助理市场 + +点击左侧边栏的「社区」→「助理」,进入助理市场主页。 + +![Agent 市场](/blog/assetsbcd98b0913d2dfc30d5a2b5523115d33.webp) + +助理市场按类别组织,方便你快速找到所需的助理类型。 + +## 安装和使用助理 + +查看和安装助理 + +点击任意助理卡片,进入详情页面。这里展示了助理的概览、设定、能力和版本历史等信息。确认助理符合你的需求时,可以在此页面「添加助理并会话」。 + +![安装和使用助理](/blog/assets60bf3667e56862024d047444d9b4c2fb.webp) + +### 使用助理 + +选择「添加助理并会话」后,你可以立即开始与助理对话,测试助理的功能和效果。根据需要调整助理的系统提示词、模型选择、插件配置等。你也可以基于市场助理创建副本,在保留原有能力的基础上进行个性化修改。 diff --git a/docs/usage/community/become-a-creator.mdx b/docs/usage/community/become-a-creator.mdx new file mode 100644 index 0000000000..c67dfb9e80 --- /dev/null +++ b/docs/usage/community/become-a-creator.mdx @@ -0,0 +1,27 @@ +--- +title: Community Creators +description: >- + The LobeHub Community is a vibrant and innovative space that brings together a + wide range of thoughtfully designed assistants to enhance productivity in work + and learning scenarios. You're welcome to submit your own assistant creations + and collaborate to build more interesting, practical, and creative tools. +tags: + - LobeHub + - Community Creators + - Innovation Community + - Collaborative Space +--- + +# LobeHub Community Creators + +The LobeHub Community is a vibrant and innovative space that brings together a wide range of thoughtfully designed assistants to enhance productivity in work and learning scenarios. 
You're welcome to submit your own assistant creations and collaborate to build more interesting, practical, and creative tools. + +## Join the Community + +Click on the sidebar: **Community** → **Assistants** → then click **Become a Creator** in the top right corner. + +![Fill in Creator Information](/blog/assets7bf0102f1cae47bf24aeb01eaa2796d9.webp) + +## Personal Profile + +Your personal profile will showcase your creator information and all the content you’ve published. diff --git a/docs/usage/community/become-a-creator.zh-CN.mdx b/docs/usage/community/become-a-creator.zh-CN.mdx new file mode 100644 index 0000000000..40abc9f5ec --- /dev/null +++ b/docs/usage/community/become-a-creator.zh-CN.mdx @@ -0,0 +1,25 @@ +--- +title: 社区创作者 +description: >- + LobeHub + 社区是一个充满活力和创新的社区,汇聚了众多精心设计的助手,为工作场景和学习提供便利。欢迎提交你的助手作品,共同创造更多有趣、实用且具有创新性的助手 +tags: + - LobeHub + - 社区创作者 + - 创新社区 + - 协作空间 +--- + +# LobeHub 社区创作者 + +LobeHub 社区是一个充满活力和创新的社区,汇聚了众多精心设计的助手,为工作场景和学习提供便利。欢迎提交你的助手作品,共同创造更多有趣、实用且具有创新性的助手。 + +## 加入 + +点击左侧边栏的「社区」→「助理」→右上角「成为创作者」。 + +![填写创作者信息](/blog/assets7bf0102f1cae47bf24aeb01eaa2796d9.webp) + +## 个人主页 + +你的个人主页会展示你的创作者信息和已发布的内容。 diff --git a/docs/usage/community/custom-plugin.mdx b/docs/usage/community/custom-plugin.mdx new file mode 100644 index 0000000000..08f0377362 --- /dev/null +++ b/docs/usage/community/custom-plugin.mdx @@ -0,0 +1,37 @@ +--- +title: Custom Plugins +description: >- + Learn how to install custom plugins and develop LobeHub plugins to extend the + capabilities of your AI assistant. +tags: + - Custom Plugins + - LobeHub + - Plugin Installation + - Plugin Development + - AI Assistant +--- + +# Custom Plugins + +## Installing Custom Plugins + +If you'd like to install a plugin that isn't available in the LobeHub Plugin Store—such as one you've developed yourself—you can do so by clicking on "Custom Plugin": + +
-至此你已经可以在 LobeChat 中使用 302.AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 302.AI 提供的模型进行对话了。 diff --git a/docs/usage/providers/ai360.mdx b/docs/usage/providers/ai360.mdx index 6964a09697..6fc8ea7f67 100644 --- a/docs/usage/providers/ai360.mdx +++ b/docs/usage/providers/ai360.mdx @@ -1,46 +1,47 @@ --- -title: Using the 360AI in LobeChat -description: Learn how to integrate and utilize 360AI's language model APIs in LobeChat. +title: Using 360 Zhinao in LobeHub +description: >- + Learn how to configure and use the 360 Zhinao API Key in LobeHub to start + conversations and interactions. tags: - - LobeChat - - 360AI + - LobeHub + - 360 Zhinao - API Key - Web UI --- -# Using the 360AI in LobeChat +# Using 360 Zhinao in LobeHub - + -The [360AI](https://ai.360.com/) is a cognitive general model independently developed by 360 Company, aimed at providing powerful natural language processing capabilities for enterprises and developers. This model has been upgraded to version 4.0 and supports various application scenarios, including conversational services, image generation, vector database services, and more. +[360 Zhinao](https://ai.360.com/) is a general-purpose cognitive large language model developed by Qihoo 360. It is designed to provide powerful natural language processing capabilities for businesses and developers. Now upgraded to version 4.0, it supports a wide range of applications including conversational services, image generation, vector database services, and more. -This article will guide you on how to use the 360AI in LobeChat. +This guide will walk you through how to use 360 Zhinao within LobeHub. 
- ### Step 1: Obtain the 360AI API Key + ### Step 1: Obtain a 360 Zhinao API Key - - Register and log in to the [360AI API Open Platform](https://ai.360.com/platform/keys) + - Register and log in to the [360 Zhinao API Platform](https://ai.360.com/platform/keys) - Click on the `API Keys` menu on the left - - Create an API key and copy it + - Create a new API key and copy it - {'Create + {'Create - ### Step 2: Configure 360AI in LobeChat + ### Step 2: Configure 360 Zhinao in LobeHub - - Access the `Settings` interface in LobeChat - - Under `AI Service Provider`, find the option for `360` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, find the configuration section for `360` - {'Enter + {'Enter - - Enter the API key you obtained - - Choose a 360AI model for your AI assistant to start chatting + - Paste the API key you obtained + - Choose a 360 Zhinao model for your AI assistant to start chatting - {'Select + {'Select - Please note that you may need to pay the API service provider during use, refer to the relevant - pricing policy of the 360AI. + You may need to pay for API usage depending on your usage level. Please refer to 360 Zhinao’s pricing policy for details. -You can now use the models provided by the 360AI for conversations in LobeChat. +That's it! You're now ready to use 360 Zhinao's models for conversations in LobeHub. 
diff --git a/docs/usage/providers/ai360.zh-CN.mdx b/docs/usage/providers/ai360.zh-CN.mdx index 47f45df27f..48bed81f6a 100644 --- a/docs/usage/providers/ai360.zh-CN.mdx +++ b/docs/usage/providers/ai360.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用360智脑 -description: 学习如何在 LobeChat 中配置和使用360智脑的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用360智脑 +description: 学习如何在 LobeHub 中配置和使用360智脑的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - 360智脑 - API密钥 - Web UI --- -# 在 LobeChat 中使用 360 智脑 +# 在 LobeHub 中使用 360 智脑 - + [360 智脑](https://ai.360.com/)是 360 公司自主研发的认知型通用大模型,旨在为企业和开发者提供强大的自然语言处理能力。该模型已升级至 4.0 版本,能够支持多种应用场景,包括对话服务、图片生成、向量数据库服务等。 -本文将指导你如何在 LobeChat 中使用 360 智脑。 +本文将指导你如何在 LobeHub 中使用 360 智脑。 ### 步骤一:获得 360 智脑的 API Key @@ -23,23 +23,23 @@ tags: - 点击左侧 `API Keys` 菜单 - 创建一个 API 密钥并复制 - {'创建API密钥'} + {'创建API密钥'} - ### 步骤二:在 LobeChat 中配置 360 智脑 + ### 步骤二:在 LobeHub 中配置 360 智脑 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `360` 的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 360 智脑的模型即可开始对话 - {'选择360模型并开始对话'} + {'选择360模型并开始对话'} 在使用过程中你可能需要向 API 服务提供商付费,请参考 360 智脑的相关费用政策。 -至此你已经可以在 LobeChat 中使用 360 智脑提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 360 智脑提供的模型进行对话了。 diff --git a/docs/usage/providers/aihubmix.mdx b/docs/usage/providers/aihubmix.mdx new file mode 100644 index 0000000000..8a2e6f0ac2 --- /dev/null +++ b/docs/usage/providers/aihubmix.mdx @@ -0,0 +1,98 @@ +--- +title: AiHubMix Provider Configuration +description: Learn how to configure and use the AiHubMix provider in LobeHub +tags: + - AiHubMix + - Provider Configuration + - Setup Guide +--- + +# AiHubMix Provider Configuration + +AiHubMix is an AI model aggregation platform that offers access to a variety of AI models through a unified OpenAI-compatible API. This guide will help you set up the AiHubMix provider in LobeHub. + +## Prerequisites + +Before using the AiHubMix API, you need to: + +1. **Create an AiHubMix Account** + - Visit [AiHubMix](https://lobe.li/MZmv94N) + - Sign up for an account + +2. 
**Obtain an API Key** + - Log in to your AiHubMix dashboard + - Navigate to the API settings + - Generate an API key for use with LobeHub + +## Configuration + +### Environment Variables + +Add the following environment variable to your `.env` file: + +```bash +# AiHubMix API Key (required) +AIHUBMIX_API_KEY=your_aihubmix_api_key +``` + +### Available Models + +AiHubMix provides access to a variety of popular AI models, including: + +- **GPT-4o Mini** – A cost-effective lightweight model from OpenAI +- **GPT-4o** – OpenAI’s flagship multimodal model +- **Claude 3.5 Sonnet** – Anthropic’s advanced reasoning model +- **Claude 3.5 Haiku** – A fast and efficient Claude model +- **Gemini Pro 1.5** – Google’s model with long-context support +- **DeepSeek V3** – A model with strong reasoning capabilities + +## How to Use + +1. **Configure the API Key** + - Set your AiHubMix API key in the environment variables + - Restart your LobeHub instance + +2. **Select a Model** + - Go to LobeHub settings + - Navigate to the Language Model section + - Choose AiHubMix as your provider + - Select a model from the available options + +3. **Start a Conversation** + - Create a new conversation + - Select an AiHubMix model + - Begin chatting + +## Features + +- **Multi-Model Access**: Access a variety of AI models through a single API +- **OpenAI-Compatible**: Uses the standard OpenAI API format +- **Function Calling**: Supports function calling for compatible models +- **Vision Capabilities**: Some models support image analysis +- **Model Discovery**: Automatically fetches the list of available models + +## Troubleshooting + +### Common Issues + +1. **401 Unauthorized Error** + - Verify that your API key is correct + - Ensure the API key has the necessary permissions + - Check if your account has sufficient credits + +2. 
**Model Unavailable** + - Some models may have usage restrictions + - Refer to the AiHubMix documentation for model availability + - Confirm that your account tier supports the requested model + +3. **Rate Limiting** + - AiHubMix may enforce rate limits based on your subscription plan + - Consider upgrading your plan for higher limits + +## Support + +For additional help: + +- Visit the [AiHubMix Documentation](https://docs.aihubmix.com/) +- Browse the [Model List](https://aihubmix.com/models) +- Contact the AiHubMix support team for API-related issues diff --git a/docs/usage/providers/aihubmix.zh-CN.mdx b/docs/usage/providers/aihubmix.zh-CN.mdx index 586c082b2c..6817ee90ee 100644 --- a/docs/usage/providers/aihubmix.zh-CN.mdx +++ b/docs/usage/providers/aihubmix.zh-CN.mdx @@ -1,6 +1,6 @@ --- title: AiHubMix 提供商配置 -description: 学习如何在 LobeChat 中配置和使用 AiHubMix 提供商 +description: 学习如何在 LobeHub 中配置和使用 AiHubMix 提供商 tags: - AiHubMix - 提供商配置 @@ -9,7 +9,7 @@ tags: # AiHubMix 提供商配置 -AiHubMix 是一个 AI 模型聚合平台,通过统一的 OpenAI 兼容 API 接口提供多种 AI 模型的访问服务。本指南将帮助您在 LobeChat 中设置 AiHubMix 提供商。 +AiHubMix 是一个 AI 模型聚合平台,通过统一的 OpenAI 兼容 API 接口提供多种 AI 模型的访问服务。本指南将帮助您在 LobeHub 中设置 AiHubMix 提供商。 ## 前置条件 @@ -22,7 +22,7 @@ AiHubMix 是一个 AI 模型聚合平台,通过统一的 OpenAI 兼容 API 接 2. **获取 API 密钥** - 登录您的 AiHubMix 控制台 - 导航到 API 设置 - - 生成用于 LobeChat 的 API 密钥 + - 生成用于 LobeHub 的 API 密钥 ## 配置 @@ -50,10 +50,10 @@ AiHubMix 提供多种热门 AI 模型的访问,包括: 1. **配置 API 密钥** - 在环境变量中设置您的 AiHubMix API 密钥 - - 重启您的 LobeChat 实例 + - 重启您的 LobeHub 实例 2. 
**选择模型** - - 进入 LobeChat 设置 + - 进入 LobeHub 设置 - 导航到语言模型 - 选择 AiHubMix 作为您的提供商 - 从可用模型中选择 diff --git a/docs/usage/providers/anthropic.mdx b/docs/usage/providers/anthropic.mdx index 49a403e9d9..2472cc6f40 100644 --- a/docs/usage/providers/anthropic.mdx +++ b/docs/usage/providers/anthropic.mdx @@ -1,55 +1,52 @@ --- -title: Using Anthropic Claude API Key in LobeChat +title: Using the Anthropic Claude API Key in LobeHub description: >- - Learn how to integrate Anthropic Claude API in LobeChat to enhance your AI assistant capabilities. Support Claude 3.5 sonnet / Claude 3 Opus / Claude 3 haiku - + Learn how to configure and use the Anthropic Claude API in LobeHub, including + Claude 3.5 Sonnet, Claude 3 Opus, and Claude 3 Haiku. tags: - Anthropic Claude - - API Key - - AI assistant - - Web UI + - API + - WebUI + - AI Assistant --- -# Using Anthropic Claude in LobeChat +# Using Anthropic Claude in LobeHub -{'Using +{'Using -The Anthropic Claude API is now available for everyone to use. This document will guide you on how to use [Anthropic Claude](https://www.anthropic.com/api) in LobeChat: +The Anthropic Claude API is now publicly available. This guide will walk you through how to use [Anthropic Claude](https://www.anthropic.com/api) in LobeHub: - ### Step 1: Obtain Anthropic Claude API Key + ### Step 1: Get Your Anthropic Claude API Key - - Create an [Anthropic Claude API](https://www.anthropic.com/api) account. - - Get your [API key](https://console.anthropic.com/settings/keys). + - Create an [Anthropic Claude API](https://www.anthropic.com/api) account + - Retrieve your [API key](https://console.anthropic.com/settings/keys) - {'Create + {'Create - The Claude API currently offers $5 of free credits, but it is only available in certain specific - countries/regions. You can go to Dashboard > Claim to see if it is applicable to your - country/region. + Claude API currently offers $5 in free credits. However, this is only available in select countries/regions. 
You can check your eligibility by visiting Dashboard > Claim. - - Set up your billing for the API key to work on [https://console.anthropic.com/settings/plans](https://console.anthropic.com/settings/plans) (choose the "Build" plan so you can add credits and only pay for usage). + - Set up your billing to activate the API key at [https://console.anthropic.com/settings/plans](https://console.anthropic.com/settings/plans) (Choose the "Developer" plan to add credits and pay only for what you use) - {'Set + {'Set - ### Step 2: Configure Anthropic Claude in LobeChat + ### Step 2: Configure Anthropic Claude in LobeHub - - Access the `Settings` interface in LobeChat. - - Find the setting for `Anthropic Claude` under `AI Service Provider`. + - Open the `Settings` panel in LobeHub + - Under `AI Providers`, locate the `Anthropic Claude` configuration section - {'Enter + {'Enter - - Enter the obtained API key. - - Choose an Anthropic Claude model for your AI assistant to start the conversation. + - Paste your API key into the input field + - Choose one of the available Anthropic Claude models for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider. Please refer to Anthropic Claude's - relevant pricing policies. + You may incur charges from the API provider during usage. Please refer to Anthropic Claude’s pricing policy for more details. -You can now engage in conversations using the models provided by Anthropic Claude in LobeChat. +That's it! You’re now ready to use Anthropic Claude models in LobeHub for conversations. 
diff --git a/docs/usage/providers/anthropic.zh-CN.mdx b/docs/usage/providers/anthropic.zh-CN.mdx index 34abda173f..34d817f20c 100644 --- a/docs/usage/providers/anthropic.zh-CN.mdx +++ b/docs/usage/providers/anthropic.zh-CN.mdx @@ -1,8 +1,8 @@ --- -title: 在 LobeChat 中使用 Anthropic Claude API Key +title: 在 LobeHub 中使用 Anthropic Claude API Key description: >- - 学习如何在 LobeChat 中配置和使用 Anthropic Claude API, Claude 3.5 sonnet / Claude 3 Opus / Claude 3 haiku - + 学习如何在 LobeHub 中配置和使用 Anthropic Claude API, Claude 3.5 sonnet / Claude 3 Opus / + Claude 3 haiku tags: - Anthropic Claude - API @@ -10,11 +10,11 @@ tags: - AI助手 --- -# 在 LobeChat 中使用 Anthropic Claude +# 在 LobeHub 中使用 Anthropic Claude -{'在 +{'在 -Anthropic Claude API 现在可供所有人使用,本文档将指导你如何在 LobeChat 中使用 [Anthropic Claude](https://www.anthropic.com/api): +Anthropic Claude API 现在可供所有人使用,本文档将指导你如何在 LobeHub 中使用 [Anthropic Claude](https://www.anthropic.com/api): ### 步骤一:获取 Anthropic Claude API 密钥 @@ -22,7 +22,7 @@ Anthropic Claude API 现在可供所有人使用,本文档将指导你如何 - 创建一个 [Anthropic Claude API](https://www.anthropic.com/api) 帐户 - 获取您的 [API 密钥](https://console.anthropic.com/settings/keys) - {'创建 + {'创建 Claude API 现在提供 5 美元的免费积分,但是,它仅适用于某些特定国家 / 地区,您可以转到 Dashboard > @@ -31,23 +31,23 @@ Anthropic Claude API 现在可供所有人使用,本文档将指导你如何 - 设置您的账单,让 API 密钥在 [https://console.anthropic.com/settings/plans](https://console.anthropic.com/settings/plans) 上工作(选择 “生成” 计划,以便您可以添加积分并仅为使用量付费) - {'设置您的账单'} + {'设置您的账单'} - ### 步骤二:在 LobeChat 中配置 Anthropic Claude + ### 步骤二:在 LobeHub 中配置 Anthropic Claude - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`Anthropic Claude`的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 Anthropic Claude 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Anthropic Claude 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Anthropic Claude 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Anthropic Claude 提供的模型进行对话了。 diff --git a/docs/usage/providers/azure.mdx b/docs/usage/providers/azure.mdx index 57a7316103..8091ccb099 100644 --- 
a/docs/usage/providers/azure.mdx +++ b/docs/usage/providers/azure.mdx @@ -1,57 +1,52 @@ --- -title: Using Azure OpenAI API Key in LobeChat +title: Using Azure OpenAI API Key in LobeHub description: >- - Learn how to integrate and configure Azure OpenAI in LobeChat to enhance your AI assistant capabilities. Follow these steps to obtain the API key, configure the settings, and start engaging in conversations. - + Learn how to configure and use Azure OpenAI models in LobeHub, including how + to obtain your API key and select a model. tags: - Azure OpenAI - - AI assistant - - API key - - Configuration - - Conversation models + - API Key + - Web UI --- -# Using Azure OpenAI in LobeChat +# Using Azure OpenAI in LobeHub -{'Azure +{'Using -This document will guide you on how to use [Azure OpenAI](https://oai.azure.com/) in LobeChat: +This guide will walk you through how to use [Azure OpenAI](https://oai.azure.com/) in LobeHub: - ### Step 1: Obtain Azure OpenAI API Key + ### Step 1: Obtain Your Azure OpenAI API Key - - If you haven't registered yet, you need to create an [Azure OpenAI account](https://oai.azure.com/). + - If you haven’t already, you’ll need to sign up for an [Azure OpenAI account](https://oai.azure.com/). - {'Create + {'Sign - - After registration, go to the `Deployments` page and create a new deployment with your selected model. + - Once registered, go to the `Deployments` page and create a new deployment using the model of your choice. - ![Create a new deployment with the selected model](https://github.com/lobehub/lobe-chat/assets/17870709/4fae3e6f-e680-4471-93c4-987c19d7170a) + {'Create - {'Create + - Navigate to the `Chat` page and click on `View Code` to retrieve your endpoint and API key. - - Navigate to the `Chat` page and click on `View Code` to obtain your endpoint and key. 
+ {'Go - {'Go + {'Retrieve - {'Get + ### Step 2: Configure Azure OpenAI in LobeHub - ### Step 2: Configure Azure OpenAI in LobeChat + - Open the `Settings` panel in LobeHub + - Under `AI Providers`, locate the `Azure OpenAI` configuration section - - Access the `Settings` interface in LobeChat. - - Find the setting for `Azure OpenAI` under `AI Service Provider`. + {'Enter - {'Enter + - Paste in the API key you obtained earlier + - Select an Azure OpenAI model for your AI assistant to start chatting - - Enter the API key you obtained. - - Choose an Azure OpenAI model for your AI assistant to start the conversation. - - {'Select + {'Select - During usage, you may need to pay the API service provider. Please refer to Azure OpenAI's - relevant pricing policies. + You may incur charges from the API provider while using the service. Please refer to Azure OpenAI’s pricing policy for more details. -Now you can engage in conversations using the models provided by Azure OpenAI in LobeChat. +And that’s it! You’re now ready to start chatting with models powered by Azure OpenAI in LobeHub. 
diff --git a/docs/usage/providers/azure.zh-CN.mdx b/docs/usage/providers/azure.zh-CN.mdx index bbb65cd1b3..fc4d8cc5dd 100644 --- a/docs/usage/providers/azure.zh-CN.mdx +++ b/docs/usage/providers/azure.zh-CN.mdx @@ -1,50 +1,50 @@ --- -title: 在 LobeChat 中使用 Azure OpenAI API Key -description: 学习如何在 LobeChat 中配置和使用 Azure OpenAI 模型进行对话,包括获取 API 密钥和选择模型。 +title: 在 LobeHub 中使用 Azure OpenAI API Key +description: 学习如何在 LobeHub 中配置和使用 Azure OpenAI 模型进行对话,包括获取 API 密钥和选择模型。 tags: - Azure OpenAI - API Key - Web UI --- -# 在 LobeChat 中使用 Azure OpenAI +# 在 LobeHub 中使用 Azure OpenAI -{'在 +{'在 -本文档将指导你如何在 LobeChat 中使用 [Azure OpenAI](https://oai.azure.com/): +本文档将指导你如何在 LobeHub 中使用 [Azure OpenAI](https://oai.azure.com/): ### 步骤一:获取 Azure OpenAI API 密钥 - 如果尚未注册,则必须注册 [Azure OpenAI 帐户](https://oai.azure.com/)。 - {'注册 + {'注册 - 注册完毕后,转到 `Deployments` 页面,然后使用您选择的模型创建新部署。 - {'选择的模型创建新部署'} + {'选择的模型创建新部署'} - 转到 `Chat` 页面,然后单击 `View Code` 以获取您的终结点和密钥。 - {'转到 + {'转到 - {'获取终结点和密钥'} + {'获取终结点和密钥'} - ### 步骤二:在 LobeChat 中配置 Azure OpenAI + ### 步骤二:在 LobeHub 中配置 Azure OpenAI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`Azure OpenAI`的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 Azure OpenAI 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Azure OpenAI 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Azure OpenAI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Azure OpenAI 提供的模型进行对话了。 diff --git a/docs/usage/providers/azureai.mdx b/docs/usage/providers/azureai.mdx index 68df568071..c099d7d92b 100644 --- a/docs/usage/providers/azureai.mdx +++ b/docs/usage/providers/azureai.mdx @@ -1,71 +1,71 @@ --- -title: Using Azure AI API Key in LobeChat -description: Learn how to configure and use Azure AI models in LobeChat, get the API key, and start a conversation. +title: Using Azure AI API Key in LobeHub +description: >- + Learn how to configure and use Azure AI models in LobeHub, obtain your API + key, and start chatting. 
tags: - - LobeChat + - LobeHub - Azure AI - API Key - Web UI --- -# Using Azure AI in LobeChat +# Using Azure AI in LobeHub -{'Using +{'Using -[Azure AI](https://azure.microsoft.com) is an open artificial intelligence technology platform based on the Microsoft Azure cloud platform. It provides various AI functionalities, including natural language processing, machine learning, and computer vision, helping businesses easily develop and deploy AI applications. +[Azure AI](https://azure.microsoft.com) is an open AI technology platform built on Microsoft Azure's cloud infrastructure. It offers a wide range of AI capabilities including natural language processing, machine learning, and computer vision, enabling businesses to easily develop and deploy AI-powered applications. -This document will guide you on how to integrate Azure AI models into LobeChat: +This guide will walk you through how to integrate Azure AI models into LobeHub: - ### Step 1: Deploy Azure AI Project and Model + ### Step 1: Deploy an Azure AI Project and Model - - First, visit [Azure AI Foundry](https://ai.azure.com/) and complete the registration and login process. - - After logging in, select `Browse models` on the homepage. + - First, visit [Azure AI Foundry](https://ai.azure.com/) and sign up or log in. + - Once logged in, go to the homepage and select `Browse Models`. - {'Accessing + {'Accessing - - Choose the model you want in the model marketplace. - - Enter the model details and click the `Deploy` button. + - In the model marketplace, choose the model you want to use. + - On the model details page, click the `Deploy` button. - {'Browsing + {'Browsing - In the pop-up dialog, create a new project. - {'Creating + {'Creating - For detailed configuration of Azure AI Foundry, please refer to the [official - documentation](https://learn.microsoft.com/azure/ai-foundry/model-inference/). 
+ For detailed configuration of Azure AI Foundry, please refer to the [official documentation](https://learn.microsoft.com/azure/ai-foundry/model-inference/). - ### Step 2: Obtain the Model's API Key and Endpoint + ### Step 2: Retrieve the Model's API Key and Endpoint - - In the details of the deployed model, you can find the Endpoint and API Key information. - - Copy and save the obtained information. + - In the deployed model's details page, you can find the Endpoint and API Key. + - Copy and securely save this information. - {'Obtaining + {'Retrieving - ### Step 3: Configure Azure AI in LobeChat + ### Step 3: Configure Azure AI in LobeHub - - Visit the `App Settings` and `AI Service Provider` interface in LobeChat. - - Find the settings for `Azure AI` in the list of providers. + - Go to the `App Settings` section in LobeHub and navigate to `AI Service Providers`. + - Find the `Azure AI` option in the list of providers. - {'Entering + {'Entering - - Enable the Azure AI service provider and fill in the obtained Endpoint and API Key. + - Open the Azure AI provider settings and enter the Endpoint and API Key you obtained. - For the Endpoint, you only need to fill in the first part: - `https://xxxxxx.services.ai.azure.com/models`. + Only enter the base part of the Endpoint, e.g., `https://xxxxxx.services.ai.azure.com/models`. - - Choose an Azure AI model for your assistant and start the conversation. + - Select an Azure AI model for your assistant to start chatting. - {'Selecting + {'Selecting - You may need to pay the API service provider for usage. Please refer to Azure AI's relevant pricing policies. + You may incur charges from the API service provider during usage. Please refer to Azure AI's pricing policy for more details. -Now you can use the models provided by Azure AI in LobeChat for conversations. +That's it! You're now ready to use Azure AI models for conversations in LobeHub. 
diff --git a/docs/usage/providers/azureai.zh-CN.mdx b/docs/usage/providers/azureai.zh-CN.mdx index e8ae2b9dee..52258cd059 100644 --- a/docs/usage/providers/azureai.zh-CN.mdx +++ b/docs/usage/providers/azureai.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 Azure AI API Key -description: 学习如何在 LobeChat 中配置和使用 Azure AI 模型,获取 API 密钥并开始对话。 +title: 在 LobeHub 中使用 Azure AI API Key +description: 学习如何在 LobeHub 中配置和使用 Azure AI 模型,获取 API 密钥并开始对话。 tags: - - LobeChat + - LobeHub - Azure AI - API密钥 - Web UI --- -# 在 LobeChat 中使用 Azure AI +# 在 LobeHub 中使用 Azure AI -{'在 +{'在 [Azure AI](https://azure.microsoft.com) 是一个基于 Microsoft Azure 云平台的开放式人工智能技术平台,提供包括自然语言处理、机器学习、计算机视觉等多种 AI 功能,帮助企业轻松开发和部署 AI 应用。 -本文档将指导你如何在 LobeChat 中接入 Azure AI 的模型: +本文档将指导你如何在 LobeHub 中接入 Azure AI 的模型: ### 步骤一:部署 Azure AI 项目以及模型 @@ -22,16 +22,16 @@ tags: - 首先,访问[Azure AI Foundry](https://ai.azure.com/)并完成注册登录 - 登录后在首页选择`浏览模型` - {'进入 + {'进入 - 在模型广场中选择你想要模型 - 进入模型详情,点击`部署`按钮 - {'浏览模型'} + {'浏览模型'} - 在弹出的对话框中创建一个新的项目 - {'创建新项目'} + {'创建新项目'} Azure AI Foundry @@ -43,14 +43,14 @@ tags: - 在已部署的模型详情里,可以查询到 Endpoint 以及 API Key 信息 - 复制并保存好获取的信息 - {'获取 + {'获取 - ### 步骤三:在 LobeChat 中配置 Azure AI + ### 步骤三:在 LobeHub 中配置 Azure AI - - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面 + - 访问 LobeHub 的 `应用设置` 的 `AI 服务供应商` 界面 - 在供应商列表中找到 `Azure AI` 的设置项 - {'填写 + {'填写 - 打开 Azure AI 服务商并填入获取的 Endpoint 以及 API 密钥 @@ -60,11 +60,11 @@ tags: - 为你的助手选择一个 Azure AI 模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Azure AI 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Azure AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Azure AI 提供的模型进行对话了。 diff --git a/docs/usage/providers/baichuan.mdx b/docs/usage/providers/baichuan.mdx index 2988404faa..20774d3801 100644 --- a/docs/usage/providers/baichuan.mdx +++ b/docs/usage/providers/baichuan.mdx @@ -1,45 +1,45 @@ --- -title: Using Baichuan API Key in LobeChat +title: Using Baichuan API Key in LobeHub description: >- - Learn how to integrate Baichuan AI into LobeChat for enhanced conversational experiences. 
Follow the steps to configure Baichuan AI and start using its models. - + Learn how to configure and use Baichuan's API key in LobeHub to start chatting + and interacting. tags: - - LobeChat + - LobeHub - Baichuan + - Baichuan AI - API Key - Web UI --- -# Using Baichuan in LobeChat +# Using Baichuan in LobeHub -{'Using +{'Using -This article will guide you on how to use Baichuan in LobeChat: +This guide will walk you through how to use Baichuan in LobeHub: - ### Step 1: Obtain Baichuan Intelligent API Key + ### Step 1: Obtain a Baichuan AI API Key - - Create a [Baichuan Intelligent](https://platform.baichuan-ai.com/homePage) account - - Create and obtain an [API key](https://platform.baichuan-ai.com/console/apikey) + - Create a [Baichuan AI](https://platform.baichuan-ai.com/homePage) account + - Generate and retrieve your [API Key](https://platform.baichuan-ai.com/console/apikey) - {'Create + {'Create - ### Step 2: Configure Baichuan in LobeChat + ### Step 2: Configure Baichuan in LobeHub - - Visit the `Settings` interface in LobeChat - - Find the setting for `Baichuan` under `AI Service Provider` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, locate the `Baichuan` configuration section - {'Enter + {'Enter - - Enter the obtained API key - - Choose a Baichuan model for your AI assistant to start the conversation + - Paste your API key into the input field + - Choose a Baichuan model for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider, please refer to Baichuan's relevant - pricing policies. + You may need to pay for API usage depending on your usage. Please refer to Baichuan's pricing policy for more details. -You can now use the models provided by Baichuan for conversation in LobeChat. +That's it! You're now ready to use Baichuan-powered models in LobeHub for conversations. 
diff --git a/docs/usage/providers/baichuan.zh-CN.mdx b/docs/usage/providers/baichuan.zh-CN.mdx index 5cb0c99646..abe1e8df8b 100644 --- a/docs/usage/providers/baichuan.zh-CN.mdx +++ b/docs/usage/providers/baichuan.zh-CN.mdx @@ -1,19 +1,19 @@ --- -title: 在 LobeChat 中使用百川 API Key -description: 学习如何在 LobeChat 中配置和使用百川的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用百川 API Key +description: 学习如何在 LobeHub 中配置和使用百川的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - 百川 - 百川智能 - API密钥 - Web UI --- -# 在 LobeChat 中使用百川 +# 在 LobeHub 中使用百川 -{'在 +{'在 -本文将指导你如何在 LobeChat 中使用百川: +本文将指导你如何在 LobeHub 中使用百川: ### 步骤一:获取百川智能 API 密钥 @@ -21,23 +21,23 @@ tags: - 创建一个[百川智能](https://platform.baichuan-ai.com/homePage)账户 - 创建并获取 [API 密钥](https://platform.baichuan-ai.com/console/apikey) - {'创建 + {'创建 - ### 步骤二:在 LobeChat 中配置百川 + ### 步骤二:在 LobeHub 中配置百川 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`百川`的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个百川的模型即可开始对话 - {'选择百川模型并开始对话'} + {'选择百川模型并开始对话'} 在使用过程中你可能需要向 API 服务提供商付费,请参考百川的相关费用政策。 -至此你已经可以在 LobeChat 中使用百川提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用百川提供的模型进行对话了。 diff --git a/docs/usage/providers/bedrock.mdx b/docs/usage/providers/bedrock.mdx index f782149c5b..8643daf7fe 100644 --- a/docs/usage/providers/bedrock.mdx +++ b/docs/usage/providers/bedrock.mdx @@ -1,89 +1,89 @@ --- -title: Using Amazon Bedrock API Key in LobeChat +title: Using Amazon Bedrock API Key in LobeHub description: >- - Learn how to integrate Amazon Bedrock models into LobeChat for AI-powered conversations. Follow these steps to grant access, obtain API keys, and configure Amazon Bedrock. - + Learn how to configure and use Amazon Bedrock, a fully managed foundation + model API service, in LobeHub to start chatting. 
tags: - Amazon Bedrock - - Claude 3.5 sonnect + - Claude 3.5 Sonnet - API keys - Claude 3 Opus - Web UI --- -# Using Amazon Bedrock in LobeChat +# Using Amazon Bedrock in LobeHub -{'Using +{'Using -Amazon Bedrock is a fully managed foundational model API service that allows users to access models from leading AI companies (such as AI21 Labs, Anthropic, Cohere, Meta, Stability AI) and Amazon's own foundational models. +Amazon Bedrock is a fully managed foundation model API service that allows users to access models from leading AI companies (such as AI21 Labs, Anthropic, Cohere, Meta, Stability AI) and Amazon itself via API. -This document will guide you on how to use Amazon Bedrock in LobeChat: +This guide will walk you through how to use Amazon Bedrock in LobeHub: - ### Step 1: Grant Access to Amazon Bedrock Models in AWS + ### Step 1: Enable Access to Amazon Bedrock Models in AWS - - Access and log in to the [AWS Console](https://console.aws.amazon.com/) - - Search for `bedrock` and enter the `Amazon Bedrock` service + - Visit and log in to the [AWS Console](https://console.aws.amazon.com/) + - Search for "Bedrock" and navigate to the `Amazon Bedrock` service - {'Enter + {'Accessing - - Select `Models access` from the left menu + - In the left-hand menu, select `Model access` - {'Access + {'Navigating - - Open model access permissions based on your needs + - Enable access to the models you want to use - {'Open + {'Enabling - Some models may require additional information from you + + Some models may require you to provide additional information. 
+ ### Step 2: Obtain API Access Keys - - Continue searching for IAM in the AWS console and enter the IAM service + - In the AWS Console, search for IAM and go to the IAM service - {'Enter + {'Accessing - - In the `Users` menu, create a new IAM user + - Under the `Users` section, create a new IAM user - {'Create + {'Creating - - Enter the user name in the pop-up dialog box + - In the pop-up dialog, enter a username - {'Enter + {'Entering - - Add permissions for this user or join an existing user group to ensure access to Amazon Bedrock + - Assign permissions to the user or add them to an existing group that has access to Amazon Bedrock - {'Add + {'Assigning - - Create an access key for the added user + - Create access keys for the newly added user - {'Create + {'Creating - - Copy and securely store the access key and secret access key, as they will be needed later + - Copy and securely store the Access Key ID and Secret Access Key — you’ll need them later - {'Enter + {'Accessing - Please securely store the keys as they will only be shown once. If you lose them accidentally, you - will need to create a new access key. + Store your keys securely, as they will only be shown once. If you lose them, you’ll need to generate new access keys. 
- ### Step 3: Configure Amazon Bedrock in LobeChat + ### Step 3: Configure Amazon Bedrock in LobeHub - - Access the `Settings` interface in LobeChat - - Find the setting for `Amazon Bedrock` under `AI Service Provider` and open it + - Open the `Settings` panel in LobeHub + - Under `AI Providers`, find and enable the `Amazon Bedrock` option - {'Enter + {'Entering - - Open Amazon Bedrock and enter the obtained access key and secret access key - - Choose an Amazon Bedrock model for your assistant to start the conversation + - Enter your Access Key ID and Secret Access Key + - Choose an Amazon Bedrock model for your assistant to start chatting - {'Select + {'Selecting - You may incur charges while using the API service, please refer to Amazon Bedrock's pricing - policy. + You may incur charges while using the API. Please refer to Amazon Bedrock’s pricing policy for details. -You can now engage in conversations using the models provided by Amazon Bedrock in LobeChat. +You’re all set! You can now start chatting in LobeHub using models provided by Amazon Bedrock. 
diff --git a/docs/usage/providers/bedrock.zh-CN.mdx b/docs/usage/providers/bedrock.zh-CN.mdx index 3ea7a4df52..4b7b8c98f9 100644 --- a/docs/usage/providers/bedrock.zh-CN.mdx +++ b/docs/usage/providers/bedrock.zh-CN.mdx @@ -1,6 +1,6 @@ --- -title: 在 LobeChat 中使用 Amazon Bedrock API Key -description: 学习如何在 LobeChat 中配置和使用 Amazon Bedrock,一个完全托管的基础模型API服务,以便开始对话。 +title: 在 LobeHub 中使用 Amazon Bedrock API Key +description: 学习如何在 LobeHub 中配置和使用 Amazon Bedrock,一个完全托管的基础模型API服务,以便开始对话。 tags: - Amazon Bedrock - Claude 3.5 sonnect @@ -9,13 +9,13 @@ tags: - Web UI --- -# 在 LobeChat 中使用 Amazon Bedrock +# 在 LobeHub 中使用 Amazon Bedrock -{'在 +{'在 Amazon Bedrock 是一个完全托管的基础模型 API 服务,允许用户通过 API 访问来自领先 AI 公司 (如 AI21 Labs、Anthropic、Cohere、Meta、Stability AI) 和 Amazon 自家的基础模型。 -本文档将指导你如何在 LobeChat 中使用 Amazon Bedrock: +本文档将指导你如何在 LobeHub 中使用 Amazon Bedrock: ### 步骤一:在 AWS 中打开 Amazon Bedrock 模型的访问权限 @@ -23,15 +23,15 @@ Amazon Bedrock 是一个完全托管的基础模型 API 服务,允许用户通 - 访问并登录 [AWS Console](https://console.aws.amazon.com/) - 搜索 beckrock 并进入 `Amazon Bedrock` 服务 - {'进入 + {'进入 - 在左侧菜单中选择 `Models acess` - {'进入 + {'进入 - 根据你所需要的模型,打开模型访问权限 - {'打开模型访问权限'} + {'打开模型访问权限'} 某些模型可能需要你提供额外的信息 @@ -39,47 +39,47 @@ Amazon Bedrock 是一个完全托管的基础模型 API 服务,允许用户通 - 继续在 AWS console 中搜索 IAM,进入 IAM 服务 - {'进入 + {'进入 - 在 `用户` 菜单中,创建一个新的 IAM 用户 - {'创建一个新的 + {'创建一个新的 - 在弹出的对话框中,输入用户名称 - {'输入用户名称'} + {'输入用户名称'} - 为这个用户添加权限,或者加入一个已有的用户组,确保用户拥有 Amazon Bedrock 的访问权限 - {'为用户添加权限'} + {'为用户添加权限'} - 为已添加的用户创建访问密钥 - {'创建访问密钥'} + {'创建访问密钥'} - 复制并妥善保存访问密钥以及秘密访问密钥,后续将会用到 - {'进入 + {'进入 请安全地存储密钥,因为它只会出现一次。如果您意外丢失它,您将需要创建一个新访问密钥。 - ### 步骤三:在 LobeChat 中配置 Amazon Bedrock + ### 步骤三:在 LobeHub 中配置 Amazon Bedrock - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`Amazon Bedrock`的设置项并打开 - {'LobeChat + {'LobeHub - 打开 Amazon Bedrock 并填入获得的访问密钥与秘密访问密钥 - 为你的助手选择一个 Amazone Bedrock 的模型即可开始对话 - {' + {' 在使用过程中你可能需要向 API 服务提供商付费,请参考 Amazon Bedrock 的费用政策。 -至此你已经可以在 LobeChat 中使用 Amazone Bedrock 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Amazone Bedrock 
提供的模型进行对话了。 diff --git a/docs/usage/providers/bfl.mdx b/docs/usage/providers/bfl.mdx index 1ecc1c417e..3f71a0dcb0 100644 --- a/docs/usage/providers/bfl.mdx +++ b/docs/usage/providers/bfl.mdx @@ -1,8 +1,8 @@ --- -title: Using Black Forest Labs API Key in LobeChat +title: Using Black Forest Labs API Key in LobeHub description: >- - Learn how to integrate Black Forest Labs API Key in LobeChat for AI image generation using advanced models and high-quality output. - + Learn how to configure and use the Black Forest Labs API Key in LobeHub to + generate high-quality AI images with advanced models. tags: - Black Forest Labs - Image Generation @@ -10,21 +10,21 @@ tags: - Web UI --- -# Using Black Forest Labs in LobeChat +# Using Black Forest Labs in LobeHub -{'Using +{'Using -[Black Forest Labs](https://bfl.ai/) is currently the world's top-tier AI image generation research lab, having developed the FLUX series of high-quality image generation models and the FLUX Kontext series of image editing models. This document will guide you on how to use Black Forest Labs in LobeChat: +[Black Forest Labs](https://bfl.ai/) is one of the world’s leading AI image generation labs, known for developing the FLUX series of high-quality image generation models and the FLUX Kontext series for image editing. This guide will walk you through how to use Black Forest Labs in LobeHub: - ### Step 1: Obtain Black Forest Labs API Key + ### Step 1: Get Your Black Forest Labs API Key - - Register for a [Black Forest Labs account](https://auth.bfl.ai/). - - Navigate to the [API Keys dashboard](https://dashboard.bfl.ai/api/keys) and click **Add Key** to generate a new API key. - - Copy the generated API key and keep it secure; it will only be shown once. 
+ - Sign up for a [Black Forest Labs](https://auth.bfl.ai/) account; + - Go to the [API Keys Console](https://dashboard.bfl.ai/api/keys) and click **Add Key** to create a new API key; + - Copy and securely save the generated API key — it will only be shown once. {'Open {'Create {'Retrieve - ### Step 2: Configure Black Forest Labs in LobeChat + ### Step 2: Configure Black Forest Labs in LobeHub - - Visit the `Settings` page in LobeChat. - - Under **AI Service Provider**, locate the **Black Forest Labs** configuration section. + - Open the `Settings` page in LobeHub; + - Under `AI Providers`, locate the configuration section for `Black Forest Labs`; - {'Enter + {'Enter - - Paste the API key you obtained. - - Choose a Black Forest Labs model for image generation. + - Paste the API key you obtained; + - Select a Black Forest Labs model for image generation. - {'Select + {'Select - During usage, you may incur charges according to Black Forest Labs's pricing policy. Please review Black Forest Labs's - official pricing before heavy usage. + Please note that usage may incur charges from Black Forest Labs. Be sure to review their official pricing policy before making extensive API calls. -You can now use Black Forest Labs's advanced image generation models directly within LobeChat to create stunning visual content. +And that’s it! You’re now ready to create stunning visual content in LobeHub using the advanced image generation models provided by Black Forest Labs. 
diff --git a/docs/usage/providers/bfl.zh-CN.mdx b/docs/usage/providers/bfl.zh-CN.mdx index dd4b2398fc..73c70564a4 100644 --- a/docs/usage/providers/bfl.zh-CN.mdx +++ b/docs/usage/providers/bfl.zh-CN.mdx @@ -1,8 +1,6 @@ --- -title: 在 LobeChat 中使用 Black Forest Labs API Key -description: >- - 学习如何在 LobeChat 中配置和使用 Black Forest Labs API Key,使用先进模型进行高质量 AI 图像生成。 - +title: 在 LobeHub 中使用 Black Forest Labs API Key +description: 学习如何在 LobeHub 中配置和使用 Black Forest Labs API Key,使用先进模型进行高质量 AI 图像生成。 tags: - Black Forest Labs - 图像生成 @@ -10,11 +8,11 @@ tags: - Web UI --- -# 在 LobeChat 中使用 Black Forest Labs +# 在 LobeHub 中使用 Black Forest Labs -{'在 +{'在 -[Black Forest Labs](https://bfl.ai/) 是当前世界最顶级的 AI 图像生成实验室团队,研发了 FLUX 系列高质量图像生成模型,FLUX Kontext 系列图像编辑模型。本文将指导你如何在 LobeChat 中使用 Black Forest Labs: +[Black Forest Labs](https://bfl.ai/) 是当前世界最顶级的 AI 图像生成实验室团队,研发了 FLUX 系列高质量图像生成模型,FLUX Kontext 系列图像编辑模型。本文将指导你如何在 LobeHub 中使用 Black Forest Labs: ### 步骤一:获取 Black Forest Labs API Key @@ -47,9 +45,9 @@ tags: } /> - ### 步骤二:在 LobeChat 中配置 Black Forest Labs + ### 步骤二:在 LobeHub 中配置 Black Forest Labs - - 访问 LobeChat 的 `设置` 页面; + - 访问 LobeHub 的 `设置` 页面; - 在 `AI服务商` 下找到 `Black Forest Labs` 的设置项; {'填入 @@ -64,4 +62,4 @@ tags:
-至此,你已经可以在 LobeChat 中使用 Black Forest Labs 提供的先进图像生成模型来创作精美的视觉内容了。 +至此,你已经可以在 LobeHub 中使用 Black Forest Labs 提供的先进图像生成模型来创作精美的视觉内容了。 diff --git a/docs/usage/providers/cloudflare.mdx b/docs/usage/providers/cloudflare.mdx index 7085c0e7f1..d7d4a79884 100644 --- a/docs/usage/providers/cloudflare.mdx +++ b/docs/usage/providers/cloudflare.mdx @@ -1,8 +1,10 @@ --- -title: Using Cloudflare Workers AI in LobeChat -description: Learn how to integrate and utilize Cloudflare Workers AI Models in LobeChat. +title: Using Cloudflare Workers AI in LobeHub +description: >- + Learn how to configure and use the Cloudflare Workers AI API Key in LobeHub to + start chatting and interacting. tags: - - LobeChat + - LobeHub - Cloudflare - Workers AI - Provider @@ -10,51 +12,49 @@ tags: - Web UI --- -# Using Cloudflare Workers AI in LobeChat +# Using Cloudflare Workers AI in LobeHub - + -[Cloudflare Workers AI](https://www.cloudflare.com/developer-platform/products/workers-ai/) is a service that integrates AI capabilities into the Cloudflare Workers serverless computing platform. Its core functionality lies in delivering fast, scalable computing power through Cloudflare's global network, thereby reducing operational overhead. +[Cloudflare Workers AI](https://www.cloudflare.com/developer-platform/products/workers-ai/) is a service that integrates AI capabilities into the Cloudflare Workers serverless computing platform. Its core advantage lies in delivering fast and scalable compute power through Cloudflare’s global network, significantly reducing operational overhead. -This document will guide you on how to use Cloudflare Workers AI in LobeChat: +This guide will walk you through how to use Cloudflare Workers AI in LobeHub: - ### Step 1: Obtain Your Cloudflare Workers AI API Key + ### Step 1: Obtain a Cloudflare Workers AI API Key - Visit the [Cloudflare website](https://www.cloudflare.com/) and sign up for an account. - - Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/). 
- - In the left-hand menu, locate the `AI` > `Workers AI` option. + - Log in to the [Cloudflare Dashboard](https://dash.cloudflare.com/). + - In the left-hand menu, navigate to `AI` > `Workers AI`. - {'Cloudflare + {'Cloudflare - - In the `Using REST API` section, click the `Create Workers AI API Token` button. - - In the drawer dialog, copy and save your `API token`. - - Also, copy and save your `Account ID`. + - Under the "Use REST API" section, click the `Create Workers AI API Token` button. + - In the sidebar that appears, copy and save your `API Token`. + - Also copy and save your `Account ID`. - {'Cloudflare + {'Cloudflare - - Please store your API token securely, as it will only be displayed once. If you accidentally - lose it, you will need to create a new token. + - Be sure to store your API token securely, as it will only be shown once. If you lose it, you’ll need to generate a new one. - ### Step 2: Configure Cloudflare Workers AI in LobeChat + ### Step 2: Configure Cloudflare Workers AI in LobeHub - - Go to the `Settings` interface in LobeChat. - - Under `AI Service Provider`, find the `Cloudflare` settings. + - Open the `Settings` panel in LobeHub. + - Under `AI Providers`, locate the `Cloudflare` configuration section. - {'Input + {'Enter - Enter the `API Token` you obtained. - - Input your `Account ID`. - - Choose a Cloudflare Workers AI model for your AI assistant to start the conversation. + - Enter your `Account ID`. + - Choose a Cloudflare Workers AI model for your AI assistant to start chatting. - {'Choose + {'Select - You may incur charges while using the API service, please refer to Cloudflare's pricing policy for - details. + You may incur charges from the API provider during usage. Please refer to Cloudflare’s pricing policy for details. -At this point, you can start conversing with the model provided by Cloudflare Workers AI in LobeChat. +You’re all set! You can now start using Cloudflare Workers AI models for conversations in LobeHub. 
diff --git a/docs/usage/providers/cloudflare.zh-CN.mdx b/docs/usage/providers/cloudflare.zh-CN.mdx index 73073a7cf6..c49ef36198 100644 --- a/docs/usage/providers/cloudflare.zh-CN.mdx +++ b/docs/usage/providers/cloudflare.zh-CN.mdx @@ -1,8 +1,8 @@ --- -title: 在 LobeChat 中使用 Cloudflare Workers AI -description: 学习如何在 LobeChat 中配置和使用 Cloudflare Workers AI 的 API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 Cloudflare Workers AI +description: 学习如何在 LobeHub 中配置和使用 Cloudflare Workers AI 的 API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - Cloudflare - Workers AI - 供应商 @@ -10,13 +10,13 @@ tags: - Web UI --- -# 在 LobeChat 中使用 Cloudflare Workers AI +# 在 LobeHub 中使用 Cloudflare Workers AI - + [Cloudflare Workers AI](https://www.cloudflare.com/developer-platform/products/workers-ai/) 是一种将人工智能能力集成到 Cloudflare Workers 无服务器计算平台的服务。其核心功能在于通过 Cloudflare 的全球网络提供快速、可扩展的计算能力,降低运维开销。 -本文档将指导你如何在 LobeChat 中使用 Cloudflare Workers AI: +本文档将指导你如何在 LobeHub 中使用 Cloudflare Workers AI: ### 步骤一:获取 Cloudflare Workers AI 的 API Key @@ -25,34 +25,34 @@ tags: - 登录 [Cloudflare 控制台](https://dash.cloudflare.com/). 
- 在左侧的菜单中找到 `AI` > `Workers AI` 选项。 - {'Cloudflare + {'Cloudflare - 在 `使用 REST API` 中点击 `创建 Workers AI API 令牌` 按钮 - 在弹出的侧边栏中复制并保存你的 `API 令牌` - 同时也复制并保存你的 `账户ID` - {'Cloudflare + {'Cloudflare - 请安全地存储 API 令牌,因为它只会出现一次。如果您意外丢失它,您将需要创建一个新令牌。 - ### 步骤二:在 LobeChat 中配置 Cloudflare Workers AI + ### 步骤二:在 LobeHub 中配置 Cloudflare Workers AI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `Cloudflare` 的设置项 - {'填入访问令牌'} + {'填入访问令牌'} - 填入获得的 `API 令牌` - 填入你的`账户ID` - 为你的 AI 助手选择一个 Cloudflare Workers AI 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Cloudflare 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Cloudflare Workers AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Cloudflare Workers AI 提供的模型进行对话了。 diff --git a/docs/usage/providers/comfyui.mdx b/docs/usage/providers/comfyui.mdx index 1825c25e85..309d156365 100644 --- a/docs/usage/providers/comfyui.mdx +++ b/docs/usage/providers/comfyui.mdx @@ -1,6 +1,20 @@ --- -title: Using ComfyUI for Image Generation in LobeChat -description: Learn how to configure and use ComfyUI service in LobeChat, supporting FLUX series models for high-quality image generation and editing features +title: '' +description: >- + Learn to configure ComfyUI in LobeHub for high-quality image generation and + editing. +tags: + - ComfyUI + - FLUX + - Image Generation + - AI Image Editing + - Text-to-Image +--- + +````markdown +--- +title: Using ComfyUI in LobeHub for Image Generation +description: Learn how to configure and use the ComfyUI service in LobeHub to support high-quality image generation and editing with the FLUX model series. tags: - ComfyUI - FLUX @@ -9,54 +23,54 @@ tags: - AI Image Generation --- -# Using ComfyUI in LobeChat +# Using ComfyUI in LobeHub -{'Using +{'Using -This documentation will guide you on how to use [ComfyUI](https://github.com/comfyanonymous/ComfyUI) in LobeChat for high-quality AI image generation and editing. 
+This guide will walk you through how to use [ComfyUI](https://github.com/comfyanonymous/ComfyUI) in LobeHub for high-quality AI image generation and editing. -## ComfyUI Overview +## Introduction to ComfyUI -ComfyUI is a powerful stable diffusion and flow diffusion GUI that provides a node-based workflow interface. LobeChat integrates with ComfyUI, supporting complete FLUX series models, including text-to-image generation and image editing capabilities. +ComfyUI is a powerful GUI for Stable Diffusion and Flow Diffusion, offering a node-based workflow interface. LobeHub integrates ComfyUI with full support for the FLUX model series, enabling both text-to-image generation and image editing. ### Key Features -- **Extensive Model Support**: Supports 223 models, including FLUX series (130) and SD series (93) -- **Configuration-Driven Architecture**: Registry system provides intelligent model selection -- **Multi-Format Support**: Supports .safetensors and .gguf formats with various quantization levels -- **Dynamic Precision Selection**: Supports default, fp8\_e4m3fn, fp8\_e5m2, fp8\_e4m3fn\_fast precision -- **Multiple Authentication Methods**: Supports no authentication, basic authentication, Bearer Token, and custom authentication -- **Intelligent Component Selection**: Automatically selects optimal T5, CLIP, VAE encoder combinations -- **Enterprise-Grade Optimization**: Includes NF4, SVDQuant, TorchAO, MFLUX optimization variants +- **Extensive Model Support**: Supports 223 models, including 130 FLUX and 93 SD models +- **Config-Driven Architecture**: Registry system enables intelligent model selection +- **Multi-Format Support**: Compatible with .safetensors and .gguf formats, with various quantization levels +- **Dynamic Precision Options**: Supports default, fp8_e4m3fn, fp8_e5m2, and fp8_e4m3fn_fast +- **Multiple Authentication Methods**: Supports None, Basic Auth, Bearer Token, and Custom Headers +- **Smart Component Selection**: Automatically selects optimal 
T5, CLIP, and VAE encoders +- **Enterprise-Grade Optimizations**: Includes NF4, SVDQuant, TorchAO, MFLUX variants ## Quick Start -### Step 1: Configure ComfyUI in LobeChat +### Step 1: Configure ComfyUI in LobeHub -#### 1. Open Settings Interface +#### 1. Open Settings -- Access LobeChat's `Settings` interface -- Find the `ComfyUI` setting item under `AI Providers` +- Go to the `Settings` panel in LobeHub +- Under `AI Providers`, find the `ComfyUI` section -{'ComfyUI +{'ComfyUI -#### 2. Configure Connection Parameters +#### 2. Set Connection Parameters **Basic Configuration**: -- **Server Address**: Enter ComfyUI server address, e.g., `http://localhost:8188` -- **Authentication Type**: Select appropriate authentication method (default: no authentication) +- **Server Address**: Enter your ComfyUI server address, e.g., `http://localhost:8000` +- **Authentication Type**: Choose the appropriate method (default is None) -### Step 2: Select Model and Start Generating Images +### Step 2: Select a Model and Generate Images -#### 1. Select FLUX Model +#### 1. Choose a FLUX Model -In the conversation interface: +In the chat interface: - Click the model selection button -- Select the desired FLUX model from the ComfyUI category +- Choose your desired FLUX model from the ComfyUI category -{'Select +{'Select #### 2. Text-to-Image Generation @@ -64,9 +78,9 @@ In the conversation interface: ```plaintext Generate an image: A cute orange cat sitting on a sunny windowsill, warm lighting, detailed fur texture -``` +```` -**Using FLUX Dev (High Quality Generation)**: +**Using FLUX Dev (High-Quality Generation)**: ```plaintext Generate high quality image: City skyline at sunset, cyberpunk style, neon lights, 4K high resolution, detailed architecture @@ -74,7 +88,7 @@ Generate high quality image: City skyline at sunset, cyberpunk style, neon light #### 3. 
Image Editing -**Using FLUX Kontext-dev for Image Editing**: +**Using FLUX Kontext-dev for Editing**: ```plaintext Edit this image: Change the background to a starry night sky, keep the main subject, cosmic atmosphere @@ -83,38 +97,38 @@ Edit this image: Change the background to a starry night sky, keep the main subj Then upload the original image you want to edit. - Image editing functionality requires uploading the original image first, then describing the modifications you want to make. + Image editing requires uploading the original image first, followed by a description of the desired changes. ## Authentication Configuration Guide -ComfyUI supports four authentication methods. Choose the appropriate method based on your server configuration and security requirements: +ComfyUI supports four authentication methods. Choose the one that best fits your server setup and security needs: ### No Authentication (none) -**Use Cases**: +**Best for**: -- Local development environment (localhost) -- Internal network with trusted users -- Personal single-machine deployment +- Local development (localhost) +- Trusted internal networks +- Personal single-machine setups **Configuration**: ```yaml Authentication Type: None -Server Address: http://localhost:8188 +Server Address: http://localhost:8000 ``` ### Basic Authentication (basic) -**Use Cases**: +**Best for**: -- Deployments using Nginx reverse proxy -- Team internal use requiring basic access control +- Deployments behind Nginx reverse proxy +- Internal team use with basic access control -**Configuration**: +**Setup**: -1. **Create User Password**: +1. **Create Username and Password**: ```bash # Install apache2-utils @@ -124,21 +138,21 @@ sudo apt-get install apache2-utils sudo htpasswd -c /etc/nginx/.htpasswd admin ``` -2. **LobeChat Configuration**: +2. 
**LobeHub Configuration**: ```yaml -Authentication Type: Basic Authentication -Server Address: http://your-domain.com +Authentication Type: Basic +Server Address: https://your-domain.com Username: admin Password: your_secure_password ``` ### Bearer Token (bearer) -**Use Cases**: +**Best for**: -- API-driven application integration -- Enterprise environments requiring Token authentication +- API-driven integrations +- Enterprise environments requiring token-based auth **Generate Token**: @@ -156,41 +170,41 @@ token = jwt.encode(payload, secret_key, algorithm='HS256') print(f"Bearer Token: {token}") ``` -**LobeChat Configuration**: +**LobeHub Configuration**: ```yaml Authentication Type: Bearer Token -Server Address: http://your-server:8188 -API Key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... +Server Address: https://your-domain.com +API Key: example-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... ``` ### Custom Authentication (custom) -**Use Cases**: +**Best for**: -- Integration with existing enterprise authentication systems -- Systems requiring multiple authentication headers +- Integrating with existing enterprise auth systems +- Systems requiring multiple custom headers -**LobeChat Configuration**: +**LobeHub Configuration**: ```yaml Authentication Type: Custom -Server Address: http://your-server:8188 +Server Address: https://your-domain.com Custom Headers: { "X-API-Key": "your_api_key", - "X-Client-ID": "lobechat" + "X-Client-ID": "LobeHub" } ``` -## Common Issues Resolution +## Troubleshooting ### 1. How to Install Comfy-Manager -Comfy-Manager is ComfyUI's extension manager that allows you to easily install and manage various nodes, models, and extensions. +Comfy-Manager is an extension manager for ComfyUI that simplifies installing and managing nodes, models, and extensions.
- 📦 Install Comfy-Manager Steps + 📦 Steps to Install Comfy-Manager #### Method 1: Manual Installation (Recommended) @@ -198,619 +212,45 @@ Comfy-Manager is ComfyUI's extension manager that allows you to easily install a # Navigate to ComfyUI's custom_nodes directory cd ComfyUI/custom_nodes - # Clone Comfy-Manager repository + # Clone the Comfy-Manager repository git clone https://github.com/ltdrdata/ComfyUI-Manager.git - # Restart ComfyUI server - # After restart, you'll see the Manager button in the UI + # Restart the ComfyUI server + # After restarting, you should see a "Manager" button in the UI ``` - #### Method 2: One-Click Installation Script + #### Method 2: One-Click Install Script ```bash - # Execute in ComfyUI root directory + # Run from the ComfyUI root directory curl -fsSL https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/install.sh | bash ``` #### Verify Installation - 1. Restart ComfyUI server - 2. Visit `http://localhost:8188` - 3. You should see the "Manager" button in the bottom-right corner + 1. Restart the ComfyUI server + 2. Visit `http://localhost:8000` + 3. You should see a "Manager" button in the bottom-right corner #### Using Comfy-Manager **Install Models**: - 1. Click "Manager" button + 1. Click the "Manager" button 2. Select "Install Models" - 3. Search for needed models (e.g., FLUX, SD3.5) - 4. Click "Install" to automatically download to correct directory + 3. Search for the desired model (e.g., FLUX, SD3.5) + 4. Click "Install" to download automatically **Install Node Extensions**: - 1. Click "Manager" button + 1. Click the "Manager" button 2. Select "Install Custom Nodes" - 3. Search for needed nodes (e.g., ControlNet, AnimateDiff) - 4. Click "Install" and restart server + 3. Search for nodes (e.g., ControlNet, AnimateDiff) + 4. Click "Install" and restart the server **Manage Installed Content**: - 1. Click "Manager" button + 1. Click the "Manager" button 2. Select "Installed" to view installed extensions - 3. 
Update, disable, or uninstall extensions + 3. You can update, disable, or uninstall extensions
- -### 2. How to Handle "Model not found" Errors - -When you see errors like `Model not found: flux1-dev.safetensors, flux1-krea-dev.safetensors, flux1-schnell.safetensors`, it means the required model files are missing from the server. - -
- 🔧 Resolve Model not found Errors - - #### Error Example - - ```plaintext - Model not found: flux1-dev.safetensors, flux1-krea-dev.safetensors, flux1-schnell.safetensors - ``` - - This error indicates the system expects to find these model files but couldn't locate them on the server. - - #### Resolution Methods - - **Method 1: Download using Comfy-Manager (Recommended)** - - 1. Open ComfyUI interface - 2. Click "Manager" → "Install Models" - 3. Search for the model name from the error (e.g., "flux1-dev") - 4. Click "Install" to automatically download - - **Method 2: Manual Model Download** - - 1. **Download Model Files**: - - Visit [Hugging Face](https://huggingface.co/black-forest-labs/FLUX.1-dev) or other model sources - - Download the files mentioned in the error (e.g., `flux1-dev.safetensors`) - - 2. **Place in Correct Directory**: - ```bash - # FLUX and SD3.5 main models go to - ComfyUI/models/diffusion_models/flux1-dev.safetensors - - # SD1.5 and SDXL models go to - ComfyUI/models/checkpoints/ - ``` - - 3. **Verify Files**: - ```bash - # Check if file exists - ls -la ComfyUI/models/diffusion_models/flux1-dev.safetensors - - # Check file integrity (optional) - sha256sum flux1-dev.safetensors - ``` - - 4. 
**Restart ComfyUI Server** - - **Method 3: Direct Download with wget/curl** - - ```bash - # Navigate to models directory - cd ComfyUI/models/diffusion_models/ - - # Download using wget (replace with actual download link) - wget https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors - - # Or use curl - curl -L -o flux1-dev.safetensors https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors - ``` - - #### Common Model Download Sources - - - **Hugging Face**: [https://huggingface.co/models](https://huggingface.co/models) - - **Civitai**: [https://civitai.com/models](https://civitai.com/models) - - **Official Sources**: - - FLUX: [https://huggingface.co/black-forest-labs](https://huggingface.co/black-forest-labs) - - SD3.5: [https://huggingface.co/stabilityai](https://huggingface.co/stabilityai) - - #### Prevention Measures - - 1. **Basic Model Package**: Download at least one base model - - FLUX: `flux1-schnell.safetensors` (fast) or `flux1-dev.safetensors` (high quality) - - SD3.5: `sd3.5_large.safetensors` - - 2. **Check Disk Space**: - ```bash - # Check available space - df -h ComfyUI/models/ - ``` - - 3. **Set Model Path** (optional): - If your models are stored elsewhere, create symbolic links: - ```bash - ln -s /path/to/your/models ComfyUI/models/diffusion_models/ - ``` -
- -### 3. How to Handle Missing System Component Errors - -When you see errors like `Missing VAE encoder: ae.safetensors` or other component files missing, you need to download the corresponding system components. - -
- 🛠️ Resolve Missing System Component Errors - - #### Common Component Errors - - ```plaintext - Missing VAE encoder: ae.safetensors. Please download and place it in the models/vae folder. - Missing CLIP encoder: clip_l.safetensors. Please download and place it in the models/clip folder. - Missing T5 encoder: t5xxl_fp16.safetensors. Please download and place it in the models/clip folder. - ``` - - #### Component Types Description - - | Component Type | Example Filename | Purpose | Storage Directory | - | -------------- | ------------------------------ | ----------------------- | ------------------ | - | **VAE** | ae.safetensors | Image encoding/decoding | models/vae/ | - | **CLIP** | clip\_l.safetensors | Text encoding (CLIP) | models/clip/ | - | **T5** | t5xxl\_fp16.safetensors | Text encoding (T5) | models/clip/ | - | **ControlNet** | flux-controlnet-\*.safetensors | Control networks | models/controlnet/ | - - #### Resolution Methods - - **Method 1: Use Comfy-Manager (Recommended)** - - 1. Click "Manager" → "Install Models" - 2. Select component type in "Filter" (VAE/CLIP/T5) - 3. Download corresponding component files - - **Method 2: Manual Component Download** - - ##### FLUX Required Components - - ```bash - # 1. VAE Encoder - cd ComfyUI/models/vae/ - wget https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/ae.safetensors - - # 2. CLIP-L Encoder - cd ComfyUI/models/clip/ - wget https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors - - # 3. 
T5-XXL Encoder (choose different precisions) - # FP16 version (recommended, balanced performance) - wget https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors - - # Or FP8 version (saves VRAM) - wget https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors - ``` - - ##### SD3.5 Required Components - - ```bash - # SD3.5 uses different encoders - cd ComfyUI/models/clip/ - - # CLIP-G Encoder - wget https://huggingface.co/stabilityai/stable-diffusion-3.5-large/resolve/main/text_encoders/clip_g.safetensors - - # CLIP-L Encoder - wget https://huggingface.co/stabilityai/stable-diffusion-3.5-large/resolve/main/text_encoders/clip_l.safetensors - - # T5-XXL Encoder - wget https://huggingface.co/stabilityai/stable-diffusion-3.5-large/resolve/main/text_encoders/t5xxl_fp16.safetensors - ``` - - ##### SDXL Required Components - - ```bash - # SDXL VAE - cd ComfyUI/models/vae/ - wget https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors - - # SDXL uses built-in CLIP encoders, usually no separate download needed - ``` - - #### Component Compatibility Matrix - - | Model Series | Required VAE | Required CLIP | Required T5 | Optional Components | - | ------------ | -------------- | ------------------- | ----------------------- | ------------------- | - | **FLUX** | ae.safetensors | clip\_l.safetensors | t5xxl\_fp16.safetensors | ControlNet | - | **SD3.5** | Built-in | clip\_g + clip\_l | t5xxl\_fp16 | - | - | **SDXL** | sdxl\_vae | Built-in | - | Refiner | - | **SD1.5** | vae-ft-mse | Built-in | - | ControlNet | - - #### Precision Selection Recommendations - - **T5 Encoder Precision Selection**: - - | VRAM Capacity | Recommended Version | Filename | - | ------------- | ------------------- | ------------------------------ | - | \< 12GB | FP8 Quantized | t5xxl\_fp8\_e4m3fn.safetensors | - | 12-16GB | FP16 | t5xxl\_fp16.safetensors | - | > 16GB | FP32 | t5xxl.safetensors | - - #### 
Verify Component Installation - - ```bash - # Check all required components - echo "=== VAE Components ===" - ls -la ComfyUI/models/vae/ - - echo "=== CLIP/T5 Components ===" - ls -la ComfyUI/models/clip/ - - echo "=== ControlNet Components ===" - ls -la ComfyUI/models/controlnet/ - ``` - - #### Troubleshooting - - **Issue: Still getting errors after download** - - 1. **Check File Permissions**: - ```bash - chmod 644 ComfyUI/models/vae/*.safetensors - chmod 644 ComfyUI/models/clip/*.safetensors - ``` - - 2. **Clear Cache**: - ```bash - # Clear ComfyUI cache - rm -rf ComfyUI/temp/* - rm -rf ComfyUI/__pycache__/* - ``` - - 3. **Restart Server**: - ```bash - # Fully restart ComfyUI - pkill -f "python main.py" - python main.py --listen 0.0.0.0 --port 8188 - ``` - - **Issue: Insufficient VRAM** - - Use quantized component versions: - - - T5: Use `t5xxl_fp8_e4m3fn.safetensors` instead of FP16/FP32 - - VAE: Some models support FP16 VAE versions - - **Issue: Slow Downloads** - - 1. Use mirror sources (if applicable) - 2. Use download tools (like aria2c) with resume support: - ```bash - aria2c -x 16 -s 16 -k 1M [download_link] - ``` -
- -## ComfyUI Server Installation - -
- 🚀 Install and Configure ComfyUI Server - - ### 1. Install ComfyUI - - ```bash - # Clone ComfyUI repository - git clone https://github.com/comfyanonymous/ComfyUI.git - cd ComfyUI - - # Install dependencies - pip install -r requirements.txt - - # Optional: Install JWT support (for Token authentication) - pip install PyJWT - - # Start ComfyUI server - python main.py --listen 0.0.0.0 --port 8188 - ``` - - ### 2. Download Model Files - - **Recommended Basic Configuration** (Minimal installation): - - **Main Models** (place in `models/diffusion_models/` directory): - - - `flux1-schnell.safetensors` - Fast generation (4 steps) - - `flux1-dev.safetensors` - High-quality creation (20 steps) - - **Required Components** (place in respective directories): - - - `models/vae/ae.safetensors` - VAE encoder - - `models/clip/clip_l.safetensors` - CLIP text encoder - - `models/clip/t5xxl_fp16.safetensors` - T5 text encoder - - ### 3. Verify Server Running - - Visit `http://localhost:8188` to confirm ComfyUI interface loads properly. - - - **Smart Model Selection**: LobeChat will automatically select the best model based on available model files on the server. You don't need to download all models; the system will automatically choose from available models by priority (Official > Enterprise > Community). - -
- -## Supported Models - -LobeChat's ComfyUI integration uses a configuration-driven architecture, supporting **223 models**, providing complete coverage from official models to community-optimized versions. - -### FLUX Series Recommended Parameters - -| Model Type | Recommended Steps | CFG Scale | Resolution Range | -| ----------- | ----------------- | --------- | -------------------- | -| **Schnell** | 4 steps | - | 512×512 to 1536×1536 | -| **Dev** | 20 steps | 3.5 | 512×512 to 2048×2048 | -| **Kontext** | 20 steps | 3.5 | 512×512 to 2048×2048 | -| **Krea** | 20 steps | 4.5 | 512×512 to 2048×2048 | - -### SD3.5 Series Parameters - -| Model Type | Recommended Steps | CFG Scale | Resolution Range | -| --------------- | ----------------- | --------- | -------------------- | -| **Large** | 25 steps | 7.0 | 512×512 to 2048×2048 | -| **Large Turbo** | 8 steps | 3.5 | 512×512 to 1536×1536 | -| **Medium** | 20 steps | 6.0 | 512×512 to 1536×1536 | - -
- 📋 Complete Supported Model List - - ### Model Classification System - - #### Priority 1: Official Core Models - - **FLUX.1 Official Series**: - - - `flux1-dev.safetensors` - High-quality creation model - - `flux1-schnell.safetensors` - Fast generation model - - `flux1-kontext-dev.safetensors` - Image editing model - - `flux1-krea-dev.safetensors` - Safety-enhanced model - - **SD3.5 Official Series**: - - - `sd3.5_large.safetensors` - SD3.5 large base model - - `sd3.5_large_turbo.safetensors` - Fast generation version - - `sd3.5_medium.safetensors` - Medium-scale model - - #### Priority 2: Enterprise Optimized Models (106 FLUX) - - **Quantization Optimization Series**: - - - **GGUF Quantization**: Each variant supports 11 quantization levels (F16, Q8\_0, Q6\_K, Q5\_K\_M, Q5\_K\_S, Q4\_K\_M, Q4\_K\_S, Q4\_0, Q3\_K\_M, Q3\_K\_S, Q2\_K) - - **FP8 Precision**: fp8\_e4m3fn, fp8\_e5m2 optimized versions - - **Enterprise Lightweight**: FLUX.1-lite-8B series - - **Technical Experiments**: NF4, SVDQuant, TorchAO, optimum-quanto, MFLUX optimized versions - - #### Priority 3: Community Fine-tuned Models (48 FLUX) - - **Community Optimization Series**: - - - **Jib Mix Flux** Series: High-quality mixed models - - **Real Dream FLUX** Series: Realism style - - **Vision Realistic** Series: Visual realism - - **PixelWave FLUX** Series: Pixel art optimization - - **Fluxmania** Series: Diverse style support - - ### SD Series Model Support (93 models) - - **SD3.5 Series**: 5 models - **SD1.5 Series**: 37 models (including official, quantized, and community versions) - **SDXL Series**: 50 models (including base, Refiner, and Playground models) - - ### Workflow Support - - System supports **6 workflows**: - - - **flux-dev**: High-quality creation workflow - - **flux-schnell**: Fast generation workflow - - **flux-kontext**: Image editing workflow - - **sd35**: SD3.5 dedicated workflow - - **simple-sd**: Simple SD workflow - - **index**: Workflow entry point -
- -## Performance Optimization Recommendations - -### Hardware Requirements - -**Minimum Configuration** (GGUF quantized models): - -- GPU: 6GB VRAM (using Q4 quantization) -- RAM: 12GB -- Storage: 30GB available space - -**Recommended Configuration** (standard models): - -- GPU: 12GB+ VRAM (RTX 4070 Ti or higher) -- RAM: 24GB+ -- Storage: SSD 100GB+ available space - -### VRAM Optimization Strategy - -| VRAM Capacity | Recommended Quantization | Model Example | Performance Characteristics | -| ------------- | ------------------------ | ---------------------------------- | --------------------------- | -| **6-8GB** | Q4\_0, Q4\_K\_S | `flux1-dev-Q4_0.gguf` | Minimal VRAM usage | -| **10-12GB** | Q6\_K, Q8\_0 | `flux1-dev-Q6_K.gguf` | Balance performance/quality | -| **16GB+** | FP8, FP16 | `flux1-dev-fp8-e4m3fn.safetensors` | Near-original quality | -| **24GB+** | Full model | `flux1-dev.safetensors` | Best quality | - -## Custom Model Usage - -
- 🎨 Configure Custom SD Models - - LobeChat supports using custom Stable Diffusion models. The system uses fixed filenames to identify custom models. - - ### 1. Model File Preparation - - **Required Files**: - - - **Main Model File**: `custom_sd_lobe.safetensors` - - **VAE File (Optional)**: `custom_sd_vae_lobe.safetensors` - - ### 2. Add Custom Model - - **Method 1: Rename Existing Model** - - ```bash - # Rename your model to fixed filename - mv your_custom_model.safetensors custom_sd_lobe.safetensors - - # Move to correct directory - mv custom_sd_lobe.safetensors ComfyUI/models/diffusion_models/ - ``` - - **Method 2: Create Symbolic Link (Recommended)** - - ```bash - # Create soft link for easy model switching - ln -s /path/to/your_model.safetensors ComfyUI/models/diffusion_models/custom_sd_lobe.safetensors - ``` - - ### 3. Use Custom Model - - In LobeChat, custom models will appear as: - - - **stable-diffusion-custom**: Standard custom model - - **stable-diffusion-custom-refiner**: Refiner custom model - - ### Custom Model Parameter Recommendations - - | Parameter | SD 1.5 Models | SDXL Models | - | ---------- | ------------- | ----------- | - | **steps** | 20-30 | 25-40 | - | **cfg** | 7.0 | 6.0-8.0 | - | **width** | 512 | 1024 | - | **height** | 512 | 1024 | -
- -## Troubleshooting - -### Smart Error Diagnosis System - -LobeChat integrates a smart error handling system that can automatically diagnose and provide targeted solutions. - -#### Error Types and Solutions - -| Error Type | User Prompt | Automatic Diagnosis | -| ------------------ | ---------------------------------- | --------------------------------------------------- | -| **Connection** | "Cannot connect to ComfyUI server" | Auto-detect server status and connectivity | -| **Authentication** | "API key invalid or expired" | Auto-verify authentication credentials | -| **Permissions** | "Access permissions insufficient" | Auto-check user permissions and file access | -| **Model Issues** | "Cannot find specified model file" | Auto-scan available models and suggest alternatives | -| **Configuration** | "Configuration file error" | Auto-verify config completeness and syntax | - -
- 🔍 Traditional Troubleshooting Methods - - #### 1. Connection Failure - - **Issue**: Cannot connect to ComfyUI server - - **Solution**: - - ```bash - # Confirm server running - curl http://localhost:8188/system_stats - - # Check port - netstat -tulpn | grep 8188 - ``` - - #### 2. Out of Memory - - **Issue**: Memory errors during generation - - **Solution**: - - - Lower image resolution - - Reduce generation steps - - Use quantized models - - #### 3. Authentication Failure - - **Issue**: 401 or 403 errors - - **Solution**: - - - Verify authentication configuration - - Check if Token is expired - - Confirm user permissions -
- -## Best Practices - -### Prompt Writing - -1. **Detailed Description**: Provide clear, detailed image descriptions -2. **Style Specification**: Clearly specify artistic style, color style, etc. -3. **Quality Keywords**: Add "4K", "high quality", "detailed" keywords -4. **Avoid Contradictions**: Ensure description content is logically consistent - -**Example**: - -```plaintext -A young woman with flowing long hair, wearing an elegant blue dress, standing in a cherry blossom park, -sunlight filtering through leaves, warm atmosphere, cinematic lighting, 4K high resolution, detailed, photorealistic -``` - -### Parameter Optimization - -1. **FLUX Schnell**: Suitable for quick previews, use 4-step generation -2. **FLUX Dev**: Balance quality and speed, CFG 3.5, 20 steps -3. **FLUX Krea-dev**: Safe creation, CFG 4.5, note content filtering -4. **FLUX Kontext-dev**: Image editing, strength 0.6-0.9 - - - Please note during use: - - - FLUX Dev, Krea-dev, Kontext-dev models are for non-commercial use only - - Generated content must comply with relevant laws and platform policies - - Large model generation may take considerable time, please be patient - - -## API Reference - -
- 📚 API Documentation - - ### Request Format - - ```typescript - interface ComfyUIRequest { - model: string; // Model ID, e.g., 'flux-schnell' - prompt: string; // Text prompt - width: number; // Image width - height: number; // Image height - steps: number; // Generation steps - seed: number; // Random seed - cfg?: number; // CFG Scale (Dev/Krea/Kontext specific) - strength?: number; // Edit strength (Kontext specific) - imageUrl?: string; // Input image (Kontext specific) - } - ``` - - ### Response Format - - ```typescript - interface ComfyUIResponse { - images: Array<{ - url: string; // Generated image URL - filename: string; // Filename - subfolder: string; // Subdirectory - type: string; // File type - }>; - prompt_id: string; // Prompt ID - } - ``` - - ### Error Codes - - | Error Code | Description | Resolution Suggestions | - | ---------- | ------------------------ | -------------------------------- | - | `400` | Invalid parameters | Check parameter format and range | - | `401` | Authentication failed | Verify API key and auth config | - | `403` | Insufficient permissions | Check user permissions | - | `404` | Model not found | Confirm model file exists | - | `500` | Server error | Check ComfyUI logs | -
- -You can now use ComfyUI in LobeChat for high-quality AI image generation and editing. If you encounter issues, please refer to the troubleshooting section or consult the [ComfyUI official documentation](https://github.com/comfyanonymous/ComfyUI). diff --git a/docs/usage/providers/comfyui.zh-CN.mdx b/docs/usage/providers/comfyui.zh-CN.mdx index 069bb5169c..c68b98d6ac 100644 --- a/docs/usage/providers/comfyui.zh-CN.mdx +++ b/docs/usage/providers/comfyui.zh-CN.mdx @@ -1,6 +1,6 @@ --- -title: 在 LobeChat 中使用 ComfyUI 生成图像 -description: 学习如何在 LobeChat 中配置和使用 ComfyUI 服务,支持 FLUX 系列模型的高质量图像生成和编辑功能 +title: 在 LobeHub 中使用 ComfyUI 生成图像 +description: 学习如何在 LobeHub 中配置和使用 ComfyUI 服务,支持 FLUX 系列模型的高质量图像生成和编辑功能 tags: - ComfyUI - FLUX @@ -9,15 +9,15 @@ tags: - AI 图像生成 --- -# 在 LobeChat 中使用 ComfyUI +# 在 LobeHub 中使用 ComfyUI -{'在 +{'在 -本文档将指导你如何在 LobeChat 中使用 [ComfyUI](https://github.com/comfyanonymous/ComfyUI) 进行高质量的 AI 图像生成和编辑。 +本文档将指导你如何在 LobeHub 中使用 [ComfyUI](https://github.com/comfyanonymous/ComfyUI) 进行高质量的 AI 图像生成和编辑。 ## ComfyUI 简介 -ComfyUI 是一个功能强大的稳定扩散和流扩散 GUI,提供基于节点的工作流界面。LobeChat 集成了 ComfyUI,支持完整的 FLUX 系列模型,包括文本生成图像和图像编辑功能。 +ComfyUI 是一个功能强大的稳定扩散和流扩散 GUI,提供基于节点的工作流界面。LobeHub 集成了 ComfyUI,支持完整的 FLUX 系列模型,包括文本生成图像和图像编辑功能。 ### 主要特性 @@ -31,14 +31,14 @@ ComfyUI 是一个功能强大的稳定扩散和流扩散 GUI,提供基于节 ## 快速开始 -### 步骤一:在 LobeChat 中配置 ComfyUI +### 步骤一:在 LobeHub 中配置 ComfyUI #### 1. 打开设置界面 -- 访问 LobeChat 的 `设置` 界面 +- 访问 LobeHub 的 `设置` 界面 - 在 `AI 服务商` 下找到 `ComfyUI` 的设置项 -{'ComfyUI +{'ComfyUI #### 2. 配置连接参数 @@ -56,7 +56,7 @@ ComfyUI 是一个功能强大的稳定扩散和流扩散 GUI,提供基于节 - 点击模型选择按钮 - 从 ComfyUI 分类中选择所需的 FLUX 模型 -{'选择 +{'选择 #### 2. 文本生成图像 @@ -124,7 +124,7 @@ sudo apt-get install apache2-utils sudo htpasswd -c /etc/nginx/.htpasswd admin ``` -2. **LobeChat 配置**: +2. 
**LobeHub 配置**: ```yaml 认证类型:基本认证 @@ -156,7 +156,7 @@ token = jwt.encode(payload, secret_key, algorithm='HS256') print(f"Bearer Token: {token}") ``` -**LobeChat 配置**: +**LobeHub 配置**: ```yaml 认证类型:Bearer Token @@ -171,7 +171,7 @@ API 密钥:example-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... - 集成现有企业认证系统 - 需要多重认证头的系统 -**LobeChat 配置**: +**LobeHub 配置**: ```yaml 认证类型:自定义 @@ -179,7 +179,7 @@ API 密钥:example-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... 自定义请求头: { "X-API-Key": "your_api_key", - "X-Client-ID": "lobechat" + "X-Client-ID": "LobeHub" } ``` @@ -525,13 +525,13 @@ Comfy-Manager 是 ComfyUI 的扩展管理器,让你能够轻松安装和管理 访问 `http://localhost:8000` 确认 ComfyUI 界面正常加载。 - **智能模型选择**:LobeChat 会根据服务器上可用的模型文件自动选择最佳模型。您无需下载所有模型,系统会在可用模型中按优先级(官方 > 企业 > 社区)自动选择。 + **智能模型选择**:LobeHub 会根据服务器上可用的模型文件自动选择最佳模型。您无需下载所有模型,系统会在可用模型中按优先级(官方 > 企业 > 社区)自动选择。 ## 支持的模型 -LobeChat ComfyUI 集成采用配置驱动的架构,支持 **223 个模型**,提供从官方模型到社区优化版本的全覆盖。 +LobeHub ComfyUI 集成采用配置驱动的架构,支持 **223 个模型**,提供从官方模型到社区优化版本的全覆盖。 ### FLUX 系列推荐参数 @@ -637,7 +637,7 @@ LobeChat ComfyUI 集成采用配置驱动的架构,支持 **223 个模型**,
🎨 配置自定义 SD 模型 - LobeChat 支持使用自定义的 Stable Diffusion 模型。系统使用固定的文件名来识别自定义模型。 + LobeHub 支持使用自定义的 Stable Diffusion 模型。系统使用固定的文件名来识别自定义模型。 ### 1. 模型文件准备 @@ -667,7 +667,7 @@ LobeChat ComfyUI 集成采用配置驱动的架构,支持 **223 个模型**, ### 3. 使用自定义模型 - 在 LobeChat 中,自定义模型会显示为: + 在 LobeHub 中,自定义模型会显示为: - **stable-diffusion-custom**:标准自定义模型 - **stable-diffusion-custom-refiner**:Refiner 自定义模型 @@ -686,7 +686,7 @@ LobeChat ComfyUI 集成采用配置驱动的架构,支持 **223 个模型**, ### 智能错误诊断系统 -LobeChat 集成了智能错误处理系统,能够自动诊断并提供针对性的解决方案。 +LobeHub 集成了智能错误处理系统,能够自动诊断并提供针对性的解决方案。 #### 错误类型与解决方案 @@ -813,4 +813,4 @@ sunlight filtering through leaves, warm atmosphere, cinematic lighting, 4K high | `500` | 服务器错误 | 检查 ComfyUI 日志 |
-至此你已经可以在 LobeChat 中使用 ComfyUI 进行高质量的 AI 图像生成和编辑了。如果遇到问题,请参考故障排除部分或查阅 [ComfyUI 官方文档](https://github.com/comfyanonymous/ComfyUI)。 +至此你已经可以在 LobeHub 中使用 ComfyUI 进行高质量的 AI 图像生成和编辑了。如果遇到问题,请参考故障排除部分或查阅 [ComfyUI 官方文档](https://github.com/comfyanonymous/ComfyUI)。 diff --git a/docs/usage/providers/deepseek.mdx b/docs/usage/providers/deepseek.mdx index 24f5f7f3b0..be33e6f7b8 100644 --- a/docs/usage/providers/deepseek.mdx +++ b/docs/usage/providers/deepseek.mdx @@ -1,64 +1,62 @@ --- -title: Using DeepSeek API Key in LobeChat +title: Using DeepSeek API Key in LobeHub description: >- - Learn how to use DeepSeek-V2 in LobeChat, obtain API keys. Get started with DeepSeek integration now! - + Learn how to configure and use the DeepSeek language model in LobeHub, obtain + your API key, and start chatting. tags: + - LobeHub - DeepSeek - - LobeChat - - DeepSeek-V2 + - DeepSeek R1 - API Key - Web UI --- -# Using DeepSeek in LobeChat +# Using DeepSeek in LobeHub -{'Using +{'Using -[DeepSeek](https://www.deepseek.com/) represents a cutting-edge open-source large language model. The latest versions, DeepSeek-V3 and DeepSeek-R1, have undergone substantial improvements in both architecture and performance, particularly shining in their inference capabilities. By leveraging innovative training methodologies and reinforcement learning, the model has effectively boosted its inference prowess, now nearly matching the pinnacle performance of OpenAI. +[DeepSeek](https://www.deepseek.com/) is a cutting-edge open-source large language model (LLM). The latest versions, DeepSeek-V3 and DeepSeek-R1, feature significant architectural and performance improvements, particularly in reasoning capabilities. Through innovative training methods and reinforcement learning techniques, DeepSeek has achieved near state-of-the-art performance, rivaling top-tier models from OpenAI. 
-This document will guide you on how to use DeepSeek in LobeChat: +This guide will walk you through how to use DeepSeek in LobeHub: - ### Step 1: Obtain DeepSeek API Key + ### Step 1: Get Your DeepSeek API Key - - First, you need to register and log in to the [DeepSeek](https://platform.deepseek.com/) open platform. + - First, sign up and log in to the [DeepSeek Open Platform](https://platform.deepseek.com/) - New users will receive a free quota of 500M Tokens + New users currently receive 500M tokens for free. - - Go to the `API keys` menu and click on `Create API Key`. + - Navigate to the `API keys` section and click `Create API Key` - {'Create + {'Create - - Enter the API key name in the pop-up dialog box. + - Enter a name for your API key in the pop-up dialog - {'Enter + {'Enter - - Copy the generated API key and save it securely. + - Copy the generated API key and store it securely - {'Save + {'Save - Please store the key securely as it will only appear once. If you accidentally lose it, you will - need to create a new key. + Make sure to store your key securely, as it will only be shown once. If you lose it, you’ll need to generate a new one. - ### Step 2: Configure DeepSeek in LobeChat + ### Step 2: Configure DeepSeek in LobeHub - - Access the `App Settings` interface in LobeChat. - - Find the setting for `DeepSeek` under `AI Service Provider`. + - Go to the `App Settings` page in LobeHub + - Under `AI Providers`, find the `DeepSeek` configuration section - {'Enter + {'Enter - - Open DeepSeek and enter the obtained API key. - - Choose a DeepSeek model for your assistant to start the conversation. + - Enable DeepSeek and paste in your API key + - Choose a DeepSeek model for your assistant to start chatting - {'Select + {'Select - You may need to pay the API service provider during usage, please refer to DeepSeek's relevant - pricing policies. + You may incur charges from the API provider during usage. Please refer to DeepSeek’s pricing policy for details. 
-You can now engage in conversations using the models provided by Deepseek in LobeChat. +And that’s it! You’re now ready to start chatting with DeepSeek-powered models in LobeHub. diff --git a/docs/usage/providers/deepseek.zh-CN.mdx b/docs/usage/providers/deepseek.zh-CN.mdx index 85f59698b7..653dc38fc1 100644 --- a/docs/usage/providers/deepseek.zh-CN.mdx +++ b/docs/usage/providers/deepseek.zh-CN.mdx @@ -1,21 +1,21 @@ --- -title: 在 LobeChat 中使用 DeepSeek API Key -description: 学习如何在 LobeChat 中配置和使用 DeepSeek 语言模型,获取 API 密钥并开始对话。 +title: 在 LobeHub 中使用 DeepSeek API Key +description: 学习如何在 LobeHub 中配置和使用 DeepSeek 语言模型,获取 API 密钥并开始对话。 tags: - - LobeChat + - LobeHub - DeepSeek - DeepSeek R1 - API密钥 - Web UI --- -# 在 LobeChat 中使用 DeepSeek +# 在 LobeHub 中使用 DeepSeek -{'在 +{'在 [DeepSeek](https://www.deepseek.com/) 是一款先进的开源大型语言模型(LLM)。最新的 DeepSeek-V3 和 DeepSeek-R1 在架构和性能上进行了显著优化,特别是在推理能力方面表现出色。它通过创新性的训练方法和强化学习技术,成功地提升了模型的推理能力,并且其性能已逼近 OpenAI 的顶尖水平。 -本文档将指导你如何在 LobeChat 中使用 DeepSeek: +本文档将指导你如何在 LobeHub 中使用 DeepSeek: ### 步骤一:获取 DeepSeek API 密钥 @@ -26,35 +26,35 @@ tags: - 进入 `API keys` 菜单,并点击 `创建 API Key` - {'创建 + {'创建 - 在弹出的对话框中输入 API 密钥名称 - {'填写 + {'填写 - 复制得到的 API 密钥并妥善保存 - {'保存 + {'保存 请安全地存储密钥,因为它只会出现一次。如果你意外丢失它,您将需要创建一个新密钥。 - ### 步骤二:在 LobeChat 中配置 DeepSeek + ### 步骤二:在 LobeHub 中配置 DeepSeek - - 访问 LobeChat 的 `应用设置`界面 + - 访问 LobeHub 的 `应用设置`界面 - 在 `AI 服务商` 下找到 `DeepSeek` 的设置项 - {'填写 + {'填写 - 打开 DeepSeek 并填入获取的 API 密钥 - 为你的助手选择一个 DeepSeek 模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 DeepSeek 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Deepseek 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Deepseek 提供的模型进行对话了。 diff --git a/docs/usage/providers/fal.mdx b/docs/usage/providers/fal.mdx index 4087851bee..bd0893e8af 100644 --- a/docs/usage/providers/fal.mdx +++ b/docs/usage/providers/fal.mdx @@ -1,31 +1,31 @@ --- -title: Using Fal API Key in LobeChat +title: Using Fal API Key in LobeHub description: >- - Learn how to integrate Fal API Key in LobeChat for AI image and video generation using cutting-edge 
models like FLUX, Kling, and more. - + Learn how to configure and use the Fal API Key in LobeHub to generate + AI-powered images and videos with cutting-edge models like FLUX and Kling. tags: - - Fal AI + - Fal - Image Generation - Video Generation - API Key - Web UI --- -# Using Fal in LobeChat +# Using Fal in LobeHub -{'Using +{'Using -[Fal.ai](https://fal.ai/) is a lightning-fast inference platform specialized in AI media generation, hosting state-of-the-art models for image and video creation including FLUX, Kling, HiDream, and other cutting-edge generative models. This document will guide you on how to use Fal in LobeChat: +[Fal.ai](https://fal.ai/) is a high-performance inference platform specializing in AI media generation. It offers state-of-the-art image and video generation models such as FLUX, Kling, and HiDream. This guide will walk you through how to use Fal within LobeHub: - ### Step 1: Obtain Fal API Key + ### Step 1: Obtain Your Fal API Key - - Register for a [Fal.ai account](https://fal.ai/). - - Navigate to [API Keys dashboard](https://fal.ai/dashboard/keys) and click **Add key** to create a new API key. - - Copy the generated API key and keep it secure; it will only be shown once. + - Sign up for a [Fal.ai](https://fal.ai/) account; + - Go to the [API Keys Dashboard](https://fal.ai/dashboard/keys) and click **Add key** to create a new API key; + - Copy the generated API key and store it securely — it will only be shown once. {'Open {'Create {'Retrieve - ### Step 2: Configure Fal in LobeChat + ### Step 2: Configure Fal in LobeHub - - Visit the `Settings` page in LobeChat. - - Under **AI Service Provider**, locate the **Fal** configuration section. + - Navigate to the `Settings` page in LobeHub; + - Under `AI Providers`, locate the configuration section for `Fal`; - {'Enter + {'Enter - - Paste the API key you obtained. - - Choose a Fal model (e.g. `Flux.1 Schnell`, `Flux.1 Kontext Dev`) for image or video generation. 
+ - Paste the API key you obtained earlier; + - Choose a Fal model (e.g., `Flux.1 Schnell`, `Flux.1 Kontext Dev`) for image or video generation. - {'Select + {'Select - During usage, you may incur charges according to Fal's pricing policy. Please review Fal's - official pricing before heavy usage. + Please note that using Fal may incur charges. Be sure to review Fal’s official pricing policy before making extensive API calls. -You can now use Fal's advanced image and video generation models directly within LobeChat to create stunning visual content. +And that’s it! You’re now ready to create stunning visual content in LobeHub using Fal’s advanced image and video generation models. diff --git a/docs/usage/providers/fal.zh-CN.mdx b/docs/usage/providers/fal.zh-CN.mdx index 05155c42d2..ba4cea51cb 100644 --- a/docs/usage/providers/fal.zh-CN.mdx +++ b/docs/usage/providers/fal.zh-CN.mdx @@ -1,8 +1,6 @@ --- -title: 在 LobeChat 中使用 Fal API Key -description: >- - 学习如何在 LobeChat 中配置和使用 Fal API Key,使用 FLUX、Kling 等尖端模型进行 AI 图像和视频生成。 - +title: 在 LobeHub 中使用 Fal API Key +description: 学习如何在 LobeHub 中配置和使用 Fal API Key,使用 FLUX、Kling 等尖端模型进行 AI 图像和视频生成。 tags: - Fal - 图像生成 @@ -11,11 +9,11 @@ tags: - Web UI --- -# 在 LobeChat 中使用 Fal +# 在 LobeHub 中使用 Fal -{'在 +{'在 -[Fal.ai](https://fal.ai/) 是一个专门从事 AI 媒体生成的快速推理平台,提供包括 FLUX、Kling、HiDream 等在内的最先进图像和视频生成模型。本文将指导你如何在 LobeChat 中使用 Fal: +[Fal.ai](https://fal.ai/) 是一个专门从事 AI 媒体生成的快速推理平台,提供包括 FLUX、Kling、HiDream 等在内的最先进图像和视频生成模型。本文将指导你如何在 LobeHub 中使用 Fal: ### 步骤一:获取 Fal API Key @@ -48,9 +46,9 @@ tags: } /> - ### 步骤二:在 LobeChat 中配置 Fal + ### 步骤二:在 LobeHub 中配置 Fal - - 访问 LobeChat 的 `设置` 页面; + - 访问 LobeHub 的 `设置` 页面; - 在 `AI服务商` 下找到 `Fal` 的设置项; {'填入 @@ -65,4 +63,4 @@ tags:
-至此,你已经可以在 LobeChat 中使用 Fal 提供的先进图像和视频生成模型来创作精美的视觉内容了。 +至此,你已经可以在 LobeHub 中使用 Fal 提供的先进图像和视频生成模型来创作精美的视觉内容了。 diff --git a/docs/usage/providers/fireworksai.mdx b/docs/usage/providers/fireworksai.mdx index 565fc25279..6ef8ac18fe 100644 --- a/docs/usage/providers/fireworksai.mdx +++ b/docs/usage/providers/fireworksai.mdx @@ -1,57 +1,55 @@ --- -title: Using Fireworks AI in LobeChat +title: Using Fireworks AI in LobeHub description: >- - Learn how to integrate and utilize Fireworks AI's language model APIs in LobeChat. - + Learn how to configure and use Fireworks AI's API Key in LobeHub to start + chatting and interacting. tags: - - LobeChat + - LobeHub - Fireworks AI - API Key - Web UI --- -# Using Fireworks AI in LobeChat +# Using Fireworks AI in LobeHub - + -[Fireworks.ai](https://fireworks.ai/) is a high-performance generative AI model inference platform that allows users to access and utilize various models through its API. The platform supports multiple modalities, including text and visual language models, and offers features like function calls and JSON schemas to enhance the flexibility of application development. +[Fireworks.ai](https://fireworks.ai/) is a high-performance generative AI model inference platform that allows users to access and utilize a variety of models via its API. The platform supports multiple modalities, including text and vision-language models, and offers features like function calling and JSON mode to enhance application development flexibility. -This article will guide you on how to use Fireworks AI in LobeChat. +This guide will walk you through how to use Fireworks AI within LobeHub. 
- ### Step 1: Obtain an API Key for Fireworks AI + ### Step 1: Obtain Your Fireworks AI API Key - Log in to the [Fireworks.ai Console](https://fireworks.ai/account/api-keys) - Navigate to the `User` page and click on `API Keys` - Create a new API key - {'Create + {'Create - Copy and securely save the generated API key - {'Save + {'Save - Please store the key securely, as it will appear only once. If you accidentally lose it, you will - need to create a new key. + Make sure to store your API key securely, as it will only be shown once. If you lose it, you’ll need to generate a new one. - ### Step 2: Configure Fireworks AI in LobeChat + ### Step 2: Configure Fireworks AI in LobeHub - - Access the `Settings` interface in LobeChat - - Under `AI Service Provider`, locate the settings for `Fireworks AI` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, locate the settings for `Fireworks AI` - {'Enter + {'Enter - - Enter the obtained API key - - Select a Fireworks AI model for your AI assistant to start a conversation + - Paste the API key you obtained earlier + - Choose a Fireworks AI model for your AI assistant to start chatting - {'Select + {'Select - Please note that you may need to pay fees to the API service provider during use; refer to - Fireworks AI's pricing policy for details. + Please note that usage may incur charges from the API provider. Refer to Fireworks AI’s pricing policy for more details. -You are now ready to use the models provided by Fireworks AI for conversations in LobeChat. +And that’s it! You’re now ready to start using Fireworks AI models in LobeHub for conversations and interactions. 
diff --git a/docs/usage/providers/fireworksai.zh-CN.mdx b/docs/usage/providers/fireworksai.zh-CN.mdx index 647a51f0e9..e31a67104f 100644 --- a/docs/usage/providers/fireworksai.zh-CN.mdx +++ b/docs/usage/providers/fireworksai.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 Fireworks AI -description: 学习如何在 LobeChat 中配置和使用 Fireworks AI 的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 Fireworks AI +description: 学习如何在 LobeHub 中配置和使用 Fireworks AI 的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - Fireworks AI - API密钥 - Web UI --- -# 在 LobeChat 中使用 Fireworks AI +# 在 LobeHub 中使用 Fireworks AI - + [Fireworks.ai](https://fireworks.ai/) 是一个高性能的生成式 AI 模型推理平台,允许用户通过其 API 访问和使用各种模型。该平台支持多种模态,包括文本和视觉语言模型,并提供函数调用和 JSON 模式等功能,以增强应用开发的灵活性。 -本文将指导你如何在 LobeChat 中使用 Fireworks AI。 +本文将指导你如何在 LobeHub 中使用 Fireworks AI。 ### 步骤一:获得 Fireworks AI 的 API Key @@ -23,31 +23,31 @@ tags: - 进入 `User` 页面,点击 `API Keys` - 创建一个新的 API 密钥 - {'创建 + {'创建 - 复制并保存生成的 API 密钥 - {'保存 + {'保存 请安全地存储密钥,因为它只会出现一次。如果您意外丢失它,您将需要创建一个新密钥。 - ### 步骤二:在 LobeChat 中配置 Fireworks AI + ### 步骤二:在 LobeHub 中配置 Fireworks AI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `Fireworks AI` 的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 Fireworks AI 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Fireworks AI 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Fireworks AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Fireworks AI 提供的模型进行对话了。 diff --git a/docs/usage/providers/giteeai.mdx b/docs/usage/providers/giteeai.mdx index 4d48cef369..ba5b14e103 100644 --- a/docs/usage/providers/giteeai.mdx +++ b/docs/usage/providers/giteeai.mdx @@ -1,58 +1,56 @@ --- -title: Using Gitee AI in LobeChat +title: Using Gitee AI in LobeHub description: >- - Learn how to configure and use Gitee AI's API Key in LobeChat to start conversations and interactions. - + Learn how to configure and use Gitee AI's API Key in LobeHub to start chatting + and interacting. 
tags: - - LobeChat + - LobeHub - Gitee AI - API Key - Web UI --- -# Using Gitee AI in LobeChat +# Using Gitee AI in LobeHub - + -[Gitee AI](https://ai.gitee.com/) is an open-source platform based on Git code hosting technology, specifically designed for AI application scenarios. It aims to provide developers and businesses with a one-stop solution for AI application development services, including model experience, inference, fine-tuning, and deployment. +[Gitee AI](https://ai.gitee.com/) is an open-source platform built on Git-based code hosting technology, designed specifically for artificial intelligence (AI) applications. It aims to provide developers and enterprises with an all-in-one AI development service, including model testing, inference, fine-tuning, and deployment. -This article will guide you on how to use Gitee AI in LobeChat. +This guide will walk you through how to use Gitee AI within LobeHub. - ### Step 1: Obtain the Gitee AI API Key + ### Step 1: Obtain Your Gitee AI API Key - Register and log in to the [Gitee AI official website](https://ai.gitee.com/) - - Purchase and recharge `Serverless API` from your dashboard + - In the dashboard, purchase and top up the `Serverless API` - {'Gitee + {'Gitee - - In `Settings`, click on the `Access Tokens` section + - Go to the `Settings` section and click on `Access Tokens` - Create a new access token - - Save the access token in the pop-up window + - Save the token from the pop-up window - {'Gitee + {'Gitee - Please keep the access token safe as it will only appear once. If you accidentally lose it, you - will need to create a new one. + Make sure to save the access token shown in the pop-up window. It will only be displayed once. If you lose it, you’ll need to generate a new one. 
- ### Step 2: Configure Gitee AI in LobeChat + ### Step 2: Configure Gitee AI in LobeHub - - Access the `Settings` page in LobeChat - - Under `AI Service Provider`, find the settings for `Gitee AI` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, find the configuration section for `Gitee AI` - {'Enter + {'Enter - - Enter the obtained API key - - Select a Gitee AI model for your AI assistant to begin chatting + - Paste the API key you obtained earlier + - Choose a Gitee AI model for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to make payments to the API service provider; please refer to Gitee - AI's relevant pricing policy. + You may need to pay for API usage depending on your usage. Please refer to Gitee AI’s pricing policy for more details. -Now you can start having conversations using the models provided by Gitee AI in LobeChat! +And that’s it! You’re now ready to use Gitee AI models for conversations in LobeHub. diff --git a/docs/usage/providers/giteeai.zh-CN.mdx b/docs/usage/providers/giteeai.zh-CN.mdx index bf7d84fc70..5612358878 100644 --- a/docs/usage/providers/giteeai.zh-CN.mdx +++ b/docs/usage/providers/giteeai.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 Gitee AI -description: 学习如何在 LobeChat 中配置和使用 Gitee AI 的 API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 Gitee AI +description: 学习如何在 LobeHub 中配置和使用 Gitee AI 的 API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - Gitee AI - API密钥 - Web UI --- -# 在 LobeChat 中使用 Gitee AI +# 在 LobeHub 中使用 Gitee AI - + [Gitee AI](https://ai.gitee.com/) 是一个基于 Git 代码托管技术的开源平台,专为人工智能(AI)应用场景设计。它旨在为开发者和企业提供一站式的 AI 应用开发服务,包括模型体验、推理、微调和部署等功能。 -本文将指导你如何在 LobeChat 中使用 Gitee AI。 +本文将指导你如何在 LobeHub 中使用 Gitee AI。 ### 步骤一:获取 Gitee AI 的 API 密钥 @@ -22,33 +22,33 @@ tags: - 注册并登录 [Gitee AI 官网](https://ai.gitee.com/) - 在工作台中购买并充值 `Serverless API` - {'Gitee + {'Gitee - 在 `设置` 中点击 `访问令牌` 界面 - 创建一个新的访问令牌 - 在弹出窗口中保存访问令牌 - {'Gitee + {'Gitee 妥善保存弹窗中的访问令牌,它只会出现一次,如果不小心丢失了,你需要重新创建一个访问令牌。 - 
### 步骤二:在 LobeChat 中配置 Gitee AI + ### 步骤二:在 LobeHub 中配置 Gitee AI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `Gitee AI` 的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 Gitee AI 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Gitee AI 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Gitee AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Gitee AI 提供的模型进行对话了。 diff --git a/docs/usage/providers/github.mdx b/docs/usage/providers/github.mdx index 80877584a0..cf706be5b6 100644 --- a/docs/usage/providers/github.mdx +++ b/docs/usage/providers/github.mdx @@ -1,67 +1,69 @@ --- -title: Using GitHub Models in LobeChat -description: Learn how to integrate and utilize GitHub Models in LobeChat. +title: Using GitHub Models in LobeHub +description: >- + Learn how to configure and use GitHub's API Key in LobeHub to start + conversations and interactions. tags: - - LobeChat + - LobeHub - GitHub - GitHub Models - API Key - Web UI --- -# Using GitHub Models in LobeChat +# Using GitHub Models in LobeHub - + -[GitHub Models](https://github.com/marketplace/models) is a new feature recently launched by GitHub, designed to provide developers with a free platform to access and experiment with various AI models. GitHub Models offers an interactive sandbox environment where users can test different model parameters and prompts, and observe the responses of the models. The platform supports advanced language models, including OpenAI's GPT-4o, Meta's Llama 3.1, and Mistral's Large 2, covering a wide range of applications from large-scale language models to task-specific models. +[GitHub Models](https://github.com/marketplace/models) is a new feature recently launched by GitHub, designed to provide developers with a free platform to access and experiment with various AI models. GitHub Models offers an interactive sandbox environment where users can test different model parameters and prompts to observe the model's responses. 
The platform supports a range of advanced language models, including OpenAI's GPT-4o, Meta's Llama 3.1, and Mistral's Large 2, covering a wide spectrum of use cases from large language models to task-specific models. -This article will guide you on how to use GitHub Models in LobeChat. +This guide will walk you through how to use GitHub Models within LobeHub. -## Rate Limits for GitHub Models +## GitHub Models Rate Limits -Currently, the usage of the Playground and free API is subject to limits on the number of requests per minute, the number of requests per day, the number of tokens per request, and the number of concurrent requests. If you hit the rate limit, you will need to wait for the limit to reset before making further requests. The rate limits vary for different models (low, high, and embedding models). For model type information, please refer to the GitHub Marketplace. +Currently, usage of the Playground and free API is subject to limits on requests per minute, daily requests, tokens per request, and concurrent requests. If you hit a rate limit, you’ll need to wait for it to reset before making additional requests. Rate limits vary depending on the model type (low, high, or embedding models). For details on model types, refer to the GitHub Marketplace. -{'GitHub +{'GitHub - These limits are subject to change at any time. For specific information, please refer to the - [GitHub Official - Documentation](https://docs.github.com/en/github-models/prototyping-with-ai-models#rate-limits). + These limits are subject to change. For the most up-to-date information, please refer to the [official GitHub documentation](https://docs.github.com/en/github-models/prototyping-with-ai-models#rate-limits). --- -## Configuration Guide for GitHub Models +## GitHub Models Configuration Guide ### Step 1: Obtain a GitHub Access Token - - Log in to GitHub and open the [Access Tokens](https://github.com/settings/tokens) page. 
+ - Log in to GitHub and navigate to the [Personal Access Tokens](https://github.com/settings/tokens) page. - Create and configure a new access token. - {'Creating + {'Create - - Copy and save the generated token from the results returned. + - Copy and securely save the generated token from the result page. - {'Saving + {'Save - - During the testing phase of GitHub Models, users must apply to join the [waitlist](https://github.com/marketplace/models/waitlist/join) in order to gain access. + - During the GitHub Models testing phase, you must apply to join the [waitlist](https://github.com/marketplace/models/waitlist/join) to gain access. - - Please store the access token securely, as it will only be displayed once. If you accidentally lose it, you will need to create a new token. + ``` + - Be sure to store your access token securely, as it will only be shown once. If you lose it, you’ll need to generate a new one. + ``` - ### Step 2: Configure GitHub Models in LobeChat + ### Step 2: Configure GitHub Models in LobeHub - - Navigate to the `Settings` interface in LobeChat. - - Under `AI Service Provider`, find the GitHub settings. + - Open the `Settings` panel in LobeHub. + - Under `AI Providers`, locate the `GitHub` configuration section. - {'Entering + {'Enter - - Enter the access token you obtained. - - Select a GitHub model for your AI assistant to start the conversation. + - Paste the access token you obtained earlier. + - Choose a GitHub model for your AI assistant to start chatting. - {'Selecting + {'Select -You are now ready to use the models provided by GitHub for conversations within LobeChat. +And that’s it! You’re now ready to start using GitHub-provided models in LobeHub for conversations and interactions. 
diff --git a/docs/usage/providers/github.zh-CN.mdx b/docs/usage/providers/github.zh-CN.mdx index 689a978007..04a11b9f14 100644 --- a/docs/usage/providers/github.zh-CN.mdx +++ b/docs/usage/providers/github.zh-CN.mdx @@ -1,27 +1,27 @@ --- -title: 在 LobeChat 中使用 GitHub Models -description: 学习如何在 LobeChat 中配置和使用 GitHub 的 API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 GitHub Models +description: 学习如何在 LobeHub 中配置和使用 GitHub 的 API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - GitHub - GitHub Models - API密钥 - Web UI --- -# 在 LobeChat 中使用 GitHub Models +# 在 LobeHub 中使用 GitHub Models - + [GitHub Models](https://github.com/marketplace/models) 是 GitHub 最近推出的一项新功能,旨在为开发者提供一个免费的平台来访问和实验多种 AI 模型。GitHub Models 提供了一个互动沙盒环境,用户可以在此测试不同的模型参数和提示语,观察模型的响应。该平台支持多种先进的语言模型,包括 OpenAI 的 GPT-4o、Meta 的 Llama 3.1 和 Mistral 的 Large 2 等,覆盖了从大规模语言模型到特定任务模型的广泛应用。 -本文将指导你如何在 LobeChat 中使用 GitHub Models。 +本文将指导你如何在 LobeHub 中使用 GitHub Models。 ## GitHub Models 速率限制 当前 Playground 和免费 API 的使用受到每分钟请求数、每日请求数、每个请求的令牌数以及并发请求数的限制。若达到速率限制,则需等待限制重置后方可继续发出请求。不同模型(低、高及嵌入模型)的速率限制有所不同。 模型类型信息请参阅 GitHub Marketplace。 -{'GitHub +{'GitHub 这些限制可能随时更改,具体信息请参考 [GitHub @@ -38,11 +38,11 @@ tags: - 登录 GitHub 并打开 [访问令牌](https://github.com/settings/tokens) 页面 - 创建并设置一个新的访问令牌 - {'创建访问令牌'} + {'创建访问令牌'} - 在返回的结果中复制并保存生成的令牌 - {'保存访问令牌'} + {'保存访问令牌'} - GitHub Models 测试期间,要使用 GitHub Models,用户需要申请加入[等待名单(waitlist)](https://github.com/marketplace/models/waitlist/join) 通过后才能获得访问权限。 @@ -50,17 +50,17 @@ tags: - 请安全地存储访问令牌,因为它只会出现一次。如果您意外丢失它,您将需要创建一个新令牌。 - ### 步骤二:在 LobeChat 中配置 GitHub Models + ### 步骤二:在 LobeHub 中配置 GitHub Models - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `GitHub` 的设置项 - {'填入访问令牌'} + {'填入访问令牌'} - 填入获得的访问令牌 - 为你的 AI 助手选择一个 GitHub 的模型即可开始对话 - {'选择 + {'选择 -至此你已经可以在 LobeChat 中使用 GitHub 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 GitHub 提供的模型进行对话了。 diff --git a/docs/usage/providers/google.mdx b/docs/usage/providers/google.mdx index 10d9769c73..457dc38dcb 100644 --- a/docs/usage/providers/google.mdx +++ 
b/docs/usage/providers/google.mdx @@ -1,55 +1,54 @@ --- -title: Using Google Gemini API Key in LobeChat +title: Using Google Gemini API Key in LobeHub description: >- - Learn how to integrate and utilize Google Gemini AI in LobeChat to enhance your conversational experience. Follow these steps to configure Google Gemini and start leveraging its powerful capabilities. - + This guide will walk you through how to configure and use Google Gemini, a + powerful language model developed by Google AI, within LobeHub. tags: - Google Gemini - - AI integration - - Google AI Studio + - Google AI + - API Key - Web UI --- -# Using Google Gemini in LobeChat +# Using Google Gemini in LobeHub -{'Using +{'Using -Gemini AI is a set of large language models (LLMs) created by Google AI, known for its cutting-edge advancements in multimodal understanding and processing. It is essentially a powerful artificial intelligence tool capable of handling various tasks involving different types of data, not just text. +Gemini AI is a suite of large language models (LLMs) developed by Google AI, renowned for its cutting-edge capabilities in multimodal understanding and processing. It is a powerful AI tool designed to handle a wide range of tasks involving various types of data—not just text. 
-This document will guide you on how to use Google Gemini in LobeChat: +This guide will show you how to use Google Gemini in LobeHub: - ### Step 1: Obtain Google API Key + ### Step 1: Obtain a Google API Key - - Visit and log in to [Google AI Studio](https://aistudio.google.com/) - - Navigate to `Get API Key` in the menu and click on `Create API Key` + - Visit and sign in to [Google AI Studio](https://aistudio.google.com/) + - In the "Get API Key" menu, click on "Create API Key" - {'Generate + {'Generate - - Select a project and create an API key, or create one in a new project + - Choose an existing project or create a new one to generate your API key - {'Enter + {'Enter - Copy the API key from the pop-up dialog - {'Copy + {'Copy - ### Step 2: Configure OpenAI in LobeChat + ### Step 2: Configure Google Gemini in LobeHub - - Go to the `Settings` interface in LobeChat - - Find the setting for `Google Gemini` under `AI Service Provider` + - Go to the Settings page in LobeHub + - Under "AI Providers", locate the "Google Gemini" section - {'Enter + {'Enter - - Enable Google Gemini and enter the obtained API key - - Choose a Gemini model for your assistant to start the conversation + - Enable Google Gemini and paste in your API key + - Choose a Gemini model for your assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider, please refer to Google Gemini's - pricing policy. + You may incur charges from the API provider while using the service. Please refer to Google Gemini’s pricing policy for details. -Congratulations! You can now use Google Gemini in LobeChat. +And that’s it — you’re now ready to use Google Gemini in LobeHub! 
diff --git a/docs/usage/providers/google.zh-CN.mdx b/docs/usage/providers/google.zh-CN.mdx index bc38a3785a..1c57361c18 100644 --- a/docs/usage/providers/google.zh-CN.mdx +++ b/docs/usage/providers/google.zh-CN.mdx @@ -1,6 +1,6 @@ --- -title: 在 LobeChat 中使用 Google Gemini API Key -description: 本文将指导你如何在 LobeChat 中配置并使用 Google Gemini,一个由 Google AI 创建的强大语言模型。 +title: 在 LobeHub 中使用 Google Gemini API Key +description: 本文将指导你如何在 LobeHub 中配置并使用 Google Gemini,一个由 Google AI 创建的强大语言模型。 tags: - Google Gemini - Google AI @@ -8,13 +8,13 @@ tags: - Web UI --- -# 在 LobeChat 中使用 Google Gemini +# 在 LobeHub 中使用 Google Gemini -{'在 +{'在 Gemini AI 是由 Google AI 创建的一组大型语言模型(LLM),以其在多模式理解和处理方面的尖端进步而闻名。它本质上是一个强大的人工智能工具,可以处理涉及不同类型数据的各种任务,而不仅仅是文本。 -本文档将指导你如何在 LobeChat 中使用 Google Gemini: +本文档将指导你如何在 LobeHub 中使用 Google Gemini: ### 步骤一:获取 Google 的 API 密钥 @@ -22,31 +22,31 @@ Gemini AI 是由 Google AI 创建的一组大型语言模型(LLM),以其 - 访问并登录 [Google AI Studio](https://aistudio.google.com/) - 在 `获取 API 密钥` 菜单中 `创建 API 密钥` - {'生成 + {'生成 - 选择一个项目并创建 API 密钥,或者在新项目中创建 API 密钥 - {'输入 + {'输入 - 在弹出的对话框中复制 API 密钥 - {'复制 + {'复制 - ### 步骤二:在 LobeChat 中配置 OpenAI + ### 步骤二:在 LobeHub 中配置 OpenAI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`Google Gemini`的设置项 - {'LobeChat + {'LobeHub - 打开 Google Gemini 并填入获得的 API 密钥 - 为你的助手选择一个 Gemini 的模型即可开始对话 - {' + {' 在使用过程中你可能需要向 API 服务提供商付费,请参考 Google Gemini 的费用政策。 -至此,你已经可以在 LobeChat 中使用 Google Gemini 啦。 +至此,你已经可以在 LobeHub 中使用 Google Gemini 啦。 diff --git a/docs/usage/providers/groq.mdx b/docs/usage/providers/groq.mdx index add4015330..63e84663dd 100644 --- a/docs/usage/providers/groq.mdx +++ b/docs/usage/providers/groq.mdx @@ -1,55 +1,50 @@ --- -title: Using Groq API Key in LobeChat +title: Using the Groq API Key in LobeHub description: >- - Learn how to obtain GroqCloud API keys and configure Groq in LobeChat for optimal performance. - + Learn how to obtain a GroqCloud API Key and configure Groq in LobeHub to + experience its powerful performance. 
tags: - - LPU Inference Engine - - GroqCloud - LLAMA3 - Qwen2 - API keys - Web UI + - API Key --- -# Using Groq in LobeChat +# Using Groq in LobeHub -{'Using +{'Using -Groq's [LPU Inference Engine](https://wow.groq.com/news_press/groq-lpu-inference-engine-leads-in-first-independent-llm-benchmark/) has excelled in the latest independent Large Language Model (LLM) benchmark, redefining the standard for AI solutions with its remarkable speed and efficiency. By integrating LobeChat with Groq Cloud, you can now easily leverage Groq's technology to accelerate the operation of large language models in LobeChat. +Groq’s [LPU Inference Engine](https://wow.groq.com/news_press/groq-lpu-inference-engine-leads-in-first-independent-llm-benchmark/) has demonstrated outstanding performance in the latest independent large language model (LLM) benchmarks, redefining the standards for AI solutions with its incredible speed and efficiency. With the integration of Groq Cloud into LobeHub, you can now easily harness Groq’s technology to accelerate LLM performance within LobeHub. - Groq's LPU Inference Engine achieved a sustained speed of 300 tokens per second in internal - benchmark tests, and according to benchmark tests by ArtificialAnalysis.ai, Groq outperformed - other providers in terms of throughput (241 tokens per second) and total time to receive 100 - output tokens (0.8 seconds). + In internal benchmarks, Groq’s LPU Inference Engine consistently achieved speeds of 300 tokens per second. According to ArtificialAnalysis.ai, Groq outperforms other providers in both throughput (241 tokens per second) and total time to receive 100 output tokens (0.8 seconds). -This document will guide you on how to use Groq in LobeChat: +This guide will walk you through how to use Groq in LobeHub: - ### Obtaining GroqCloud API Keys + ### Step 1: Get Your GroqCloud API Key - First, you need to obtain an API Key from the [GroqCloud Console](https://console.groq.com/). 
+ First, visit the [GroqCloud Console](https://console.groq.com/) to obtain your API Key. - {'Get + {'Get - Create an API Key in the `API Keys` menu of the console. + In the console, navigate to the `API Keys` section and create a new API Key. - {'Save + {'Save - Safely store the key from the pop-up as it will only appear once. If you accidentally lose it, you - will need to create a new key. + Make sure to save the key shown in the popup — it will only be displayed once. If you lose it, you’ll need to generate a new one. - ### Configure Groq in LobeChat + ### Step 2: Configure Groq in LobeHub - You can find the Groq configuration option in `Settings` -> `AI Service Provider`, where you can input the API Key you just obtained. + Go to `Settings` -> `AI Providers` in LobeHub, and find the configuration section for Groq. Paste the API Key you just obtained. - {'Groq + {'Groq -Next, select a Groq-supported model in the assistant's model options, and you can experience the powerful performance of Groq in LobeChat. +Next, in the assistant’s model selection menu, choose a model supported by Groq to start experiencing Groq’s powerful performance in LobeHub. - - ### 步骤二:在 LobeChat 中配置 Nvidia NIM + ### 步骤二:在 LobeHub 中配置 Nvidia NIM - - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面 + - 访问 LobeHub 的 `应用设置` 的 `AI 服务供应商` 界面 - 在供应商列表中找到 ` Nvidia NIM` 的设置项 - {'填写 + {'填写 - 打开 Nvidia NIM 服务商并填入获取的 API 密钥 - 为你的助手选择一个 Nvidia NIM 模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Nvidia NIM 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Nvidia NIM 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Nvidia NIM 提供的模型进行对话了。 diff --git a/docs/usage/providers/ollama.mdx b/docs/usage/providers/ollama.mdx index a1c9233388..5250c77e2d 100644 --- a/docs/usage/providers/ollama.mdx +++ b/docs/usage/providers/ollama.mdx @@ -1,98 +1,98 @@ --- -title: Using Ollama in LobeChat +title: Using Ollama in LobeHub description: >- - Learn how to use Ollama in LobeChat, run LLM locally, and experience cutting-edge AI usage. 
- + Learn how to use Ollama in LobeHub to run large language models locally and + experience cutting-edge AI capabilities. tags: - Ollama - - Local LLM - - Ollama WebUI - Web UI - API Key + - Local LLM + - Ollama WebUI --- -# Using Ollama in LobeChat +# Using Ollama in LobeHub -{'Using +{'Using -Ollama is a powerful framework for running large language models (LLMs) locally, supporting various language models including Llama 2, Mistral, and more. Now, LobeChat supports integration with Ollama, meaning you can easily enhance your application by using the language models provided by Ollama in LobeChat. +Ollama is a powerful framework for running large language models (LLMs) locally. It supports a variety of models, including Llama 2, Mistral, and more. LobeHub now integrates seamlessly with Ollama, allowing you to leverage these models directly within your chat interface. -This document will guide you on how to use Ollama in LobeChat: +This guide will walk you through how to use Ollama in LobeHub: -
-至此你已经可以在 LobeChat 中使用七牛云提供的大模型进行对话了。 +至此你已经可以在 LobeHub 中使用七牛云提供的大模型进行对话了。 diff --git a/docs/usage/providers/qwen.mdx b/docs/usage/providers/qwen.mdx index 6f946df8c5..2fffefa1ce 100644 --- a/docs/usage/providers/qwen.mdx +++ b/docs/usage/providers/qwen.mdx @@ -1,64 +1,63 @@ --- -title: Using Qwen2 API Key in LobeChat +title: Using Tongyi Qianwen API Key in LobeHub description: >- - Learn how to integrate and utilize Tongyi Qianwen, a powerful language model by Alibaba Cloud, in LobeChat for various tasks. Follow the steps to activate the service, obtain the API key, and configure Tongyi Qianwen for seamless interaction. - + Learn how to configure and use Alibaba Cloud's Tongyi Qianwen model in + LobeHub, offering powerful natural language understanding and generation + capabilities. tags: + - LobeHub - Tongyi Qianwen - - Alibaba Cloud - DashScope - API key - Web UI --- -# Using Tongyi Qianwen in LobeChat +# Using Tongyi Qianwen in LobeHub -{'Using +{'Using -[Tongyi Qianwen](https://tongyi.aliyun.com/) is a large-scale language model independently developed by Alibaba Cloud, with powerful natural language understanding and generation capabilities. It can answer various questions, create text content, express opinions, write code, and play a role in multiple fields. +[Tongyi Qianwen](https://tongyi.aliyun.com/) is a large-scale language model developed by Alibaba Cloud, known for its powerful natural language understanding and generation capabilities. It can answer questions, generate content, express opinions, write code, and more—making it useful across a wide range of applications. -This document will guide you on how to use Tongyi Qianwen in LobeChat: +This guide will walk you through how to use Tongyi Qianwen in LobeHub: - ### Step 1: Activate DashScope Model Service + ### Step 1: Enable the DashScope Model Service - - Visit and log in to Alibaba Cloud's [DashScope](https://dashscope.console.aliyun.com/) platform. 
- - If it is your first time, you need to activate the DashScope service. - - If you have already activated it, you can skip this step. + - Visit and log in to the [DashScope](https://dashscope.console.aliyun.com/) platform by Alibaba Cloud. + - If this is your first time, you’ll need to activate the DashScope service. + - If you’ve already enabled it, you can skip this step. - {'Activate + {'Enable - ### Step 2: Obtain DashScope API Key + ### Step 2: Obtain a DashScope API Key - - Go to the `API-KEY` interface and create an API key. + - Navigate to the API-KEY section and create a new API key. - {'Create + {'Create - - Copy the API key from the pop-up dialog box and save it securely. + - Copy the API key from the pop-up dialog and store it securely. - {'Copy + {'Copy - Please store the key securely as it will only appear once. If you accidentally lose it, you will - need to create a new key. + Make sure to store your API key securely, as it will only be shown once. If you lose it, you’ll need to generate a new one. - ### Step 3: Configure Tongyi Qianwen in LobeChat + ### Step 3: Configure Tongyi Qianwen in LobeHub - - Visit the `Settings` interface in LobeChat. - - Find the setting for `Tongyi Qianwen` under `AI Service Provider`. + - Open the Settings panel in LobeHub. + - Under AI Providers, locate the configuration section for Tongyi Qianwen. - {'Enter + {'Enter - - Open Tongyi Qianwen and enter the obtained API key. - - Choose a Qwen model for your AI assistant to start the conversation. + - Enable Tongyi Qianwen and paste in your API key. + - Choose a Qwen model for your AI assistant to start chatting. - {'Select + {'Select - During usage, you may need to pay the API service provider. Please refer to Tongyi Qianwen's - relevant pricing policies. + Please note that usage may incur charges from the API provider. Refer to Tongyi Qianwen’s pricing policy for details. -You can now engage in conversations using the models provided by Tongyi Qianwen in LobeChat. 
+You’re all set! You can now start using Tongyi Qianwen’s models in LobeHub for intelligent conversations. diff --git a/docs/usage/providers/qwen.zh-CN.mdx b/docs/usage/providers/qwen.zh-CN.mdx index 7dd95be765..f32f9fbe69 100644 --- a/docs/usage/providers/qwen.zh-CN.mdx +++ b/docs/usage/providers/qwen.zh-CN.mdx @@ -1,8 +1,8 @@ --- -title: 在 LobeChat 中使用通义千问 API Key -description: 学习如何在 LobeChat 中配置和使用阿里云的通义千问模型,提供强大的自然语言理解和生成能力。 +title: 在 LobeHub 中使用通义千问 API Key +description: 学习如何在 LobeHub 中配置和使用阿里云的通义千问模型,提供强大的自然语言理解和生成能力。 tags: - - LobeChat + - LobeHub - 通义千问 - DashScope - DashScope @@ -10,13 +10,13 @@ tags: - Web UI --- -# 在 LobeChat 中使用通义千问 +# 在 LobeHub 中使用通义千问 -{'在 +{'在 [通义千问](https://tongyi.aliyun.com/)是阿里云自主研发的超大规模语言模型,具有强大的自然语言理解和生成能力。它可以回答各种问题、创作文字内容、表达观点看法、撰写代码等,在多个领域发挥作用。 -本文档将指导你如何在 LobeChat 中使用通义千问: +本文档将指导你如何在 LobeHub 中使用通义千问: ### 步骤一:开通 DashScope 模型服务 @@ -25,37 +25,37 @@ tags: - 初次进入时需要开通 DashScope 服务 - 若你已开通,可跳过该步骤 - {'开通 + {'开通 ### 步骤二:获取 DashScope API 密钥 - 进入`API-KEY` 界面,并创建一个 API 密钥 - {'创建通义千问 + {'创建通义千问 - 在弹出的对话框中复制 API 密钥,并妥善保存 - {'复制通义千问 + {'复制通义千问 请安全地存储密钥,因为它只会出现一次。如果您意外丢失它,您将需要创建一个新密钥。 - ### 步骤三:在 LobeChat 中配置通义千问 + ### 步骤三:在 LobeHub 中配置通义千问 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`通义千问`的设置项 - {'填写 + {'填写 - 打开通义千问并填入获得的 API 密钥 - 为你的 AI 助手选择一个 Qwen 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考通义千问的相关费用政策。 -至此你已经可以在 LobeChat 中使用通义千问提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用通义千问提供的模型进行对话了。 diff --git a/docs/usage/providers/sambanova.mdx b/docs/usage/providers/sambanova.mdx index 50f2c48084..31f2479a0c 100644 --- a/docs/usage/providers/sambanova.mdx +++ b/docs/usage/providers/sambanova.mdx @@ -1,51 +1,52 @@ --- -title: Using SambaNova API Key in LobeChat -description: Learn how to configure and use SambaNova models in LobeChat, obtain an API key, and start a conversation. 
+title: Using the SambaNova API Key in LobeHub +description: >- + Learn how to configure and use SambaNova models in LobeHub, obtain your API + key, and start chatting. tags: - - LobeChat + - LobeHub - SambaNova - API Key - Web UI --- -# Using SambaNova in LobeChat +# Using SambaNova in LobeHub -{'Using +{'Using -[SambaNova](https://sambanova.ai/) is a company based in Palo Alto, California, USA, focused on developing high-performance AI hardware and software solutions. It provides fast AI model training, fine-tuning, and inference capabilities, especially suitable for large-scale generative AI models. +[SambaNova](https://sambanova.ai/) is a company based in Palo Alto, California, specializing in high-performance AI hardware and software solutions. It offers fast AI model training, fine-tuning, and inference capabilities, particularly well-suited for large-scale generative AI models. -This document will guide you on how to use SambaNova in LobeChat: +This guide will walk you through how to use SambaNova in LobeHub: ### Step 1: Obtain a SambaNova API Key - - First, you need to register and log in to [SambaNova Cloud](https://cloud.sambanova.ai/) - - Create an API key in the `APIs` page + - First, sign up and log in to [SambaNova Cloud](https://cloud.sambanova.ai/) + - Navigate to the `APIs` page and create a new API key - {'Obtain + {'Obtain - - Copy the obtained API key and save it securely + - Copy the generated API key and store it securely - Please save the generated API Key securely, as it will only appear once. If you accidentally lose - it, you will need to create a new API key. + Make sure to save your API key securely. It will only be shown once. If you lose it, you’ll need to generate a new one. 
- ### Step 2: Configure SambaNova in LobeChat + ### Step 2: Configure SambaNova in LobeHub - - Access the `Application Settings` interface of LobeChat - - Find the `SambaNova` setting item under `AI Service Provider` + - Go to the `App Settings` section in LobeHub + - Under `AI Providers`, locate the `SambaNova` configuration option - {'Fill + {'Enter - - Turn on SambaNova and fill in the obtained API key - - Select a SambaNova model for your assistant to start the conversation + - Enable SambaNova and paste in your API key + - Choose a SambaNova model for your assistant to start chatting - {'Select + {'Select - You may need to pay the API service provider during use, please refer to SambaNova's related fee policies. + Please note that you may incur charges from the API provider. Refer to SambaNova’s pricing policy for more details. -Now you can use the models provided by SambaNova in LobeChat to conduct conversations. +You're now ready to start chatting with models powered by SambaNova in LobeHub. 
diff --git a/docs/usage/providers/sambanova.zh-CN.mdx b/docs/usage/providers/sambanova.zh-CN.mdx index 0412ab0cdb..75443ce7b3 100644 --- a/docs/usage/providers/sambanova.zh-CN.mdx +++ b/docs/usage/providers/sambanova.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 SambaNova API Key -description: 学习如何在 LobeChat 中配置和使用 SambaNova 模型,获取 API 密钥并开始对话。 +title: 在 LobeHub 中使用 SambaNova API Key +description: 学习如何在 LobeHub 中配置和使用 SambaNova 模型,获取 API 密钥并开始对话。 tags: - - LobeChat + - LobeHub - SambaNova - API密钥 - Web UI --- -# 在 LobeChat 中使用 SambaNova +# 在 LobeHub 中使用 SambaNova -{'在 +{'在 [SambaNova](https://sambanova.ai/) 是一家位于美国加利福尼亚州帕洛阿尔托的公司,专注于开发高性能 AI 硬件和软件解决方案,提供快速的 AI 模型训练、微调和推理能力,尤其适用于大规模生成式 AI 模型。 -本文档将指导你如何在 LobeChat 中使用 SambaNova: +本文档将指导你如何在 LobeHub 中使用 SambaNova: ### 步骤一:获取 SambaNova API 密钥 @@ -22,7 +22,7 @@ tags: - 首先,你需要注册并登录 [SambaNova Cloud](https://cloud.sambanova.ai/) - 在 `APIs` 页面中创建一个 API 密钥 - {'获取 + {'获取 - 复制得到的 API 密钥并妥善保存 @@ -30,21 +30,21 @@ tags: 请妥善保存生成的 API Key,它只会出现一次,如果不小心丢失了,你需要重新创建一个 API key
- ### 步骤二:在 LobeChat 中配置 SambaNova + ### 步骤二:在 LobeHub 中配置 SambaNova - - 访问 LobeChat 的 `应用设置`界面 + - 访问 LobeHub 的 `应用设置`界面 - 在 `AI 服务商` 下找到 `SambaNova` 的设置项 - {'填写 + {'填写 - 打开 SambaNova 并填入获取的 API 密钥 - 为你的助手选择一个 SambaNova 模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 SambaNova 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 SambaNova 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 SambaNova 提供的模型进行对话了。 diff --git a/docs/usage/providers/sensenova.mdx b/docs/usage/providers/sensenova.mdx index 22712e388e..1d09226654 100644 --- a/docs/usage/providers/sensenova.mdx +++ b/docs/usage/providers/sensenova.mdx @@ -1,58 +1,56 @@ --- -title: Using SenseNova in LobeChat +title: Using SenseTime SenseNova in LobeHub description: >- - Learn how to configure and use SenseNova's API Key in LobeChat to start conversations and interactions. - + Learn how to configure and use your SenseTime SenseNova API Key in LobeHub to + start chatting and interacting. tags: - - LobeChat - - SenseNova + - LobeHub + - SenseTime SenseNova - API Key - Web UI --- -# Using SenseNova in LobeChat +# Using SenseTime SenseNova in LobeHub - + -[SenseNova](https://platform.sensenova.cn/home) is a large model system introduced by SenseTime, aimed at promoting the rapid iteration and practical application of artificial intelligence (AI) technology. +[SenseTime SenseNova](https://platform.sensenova.cn/home) is a large-scale AI model ecosystem developed by SenseTime, designed to accelerate the iteration and real-world application of artificial intelligence technologies. -This article will guide you on how to use SenseNova in LobeChat. +This guide will walk you through how to use SenseNova within LobeHub. - ### Step 1: Obtain the API Key for SenseNova + ### Step 1: Obtain Your SenseNova API Key - - Register and log in to the [SenseCore Development Platform](https://www.sensecore.cn/product/aistudio). - - Locate the `SenseNova Large Model` in the product menu and activate the service. 
+ - Register and log in to the [SenseCore AI Studio](https://www.sensecore.cn/product/aistudio) + - In the product menu, locate and activate the `SenseNova Large Model` service - {'Activate + {'Activate - - Go to the [AccessKey Management](https://console.sensecore.cn/iam/Security/access-key) page. - - Create an access key. - - Save the Access Key ID and secret in the pop-up window. + - Go to the [AccessKey Management](https://console.sensecore.cn/iam/Security/access-key) page + - Create a new access key + - Save the AccessKey ID and Secret Token from the pop-up window - {'Save + {'Save - Please keep the access key from the pop-up window secure, as it will only appear once. If you lose - it, you will need to create a new access key. + Make sure to save the access key shown in the pop-up window. It will only be displayed once. If you lose it, you’ll need to generate a new one. - ### Step 2: Configure SenseNova in LobeChat + ### Step 2: Configure SenseNova in LobeHub - - Access the `Settings` interface on LobeChat. - - Find the setting for `SenseNova` under `AI Service Provider`. + - Open the `Settings` panel in LobeHub + - Under `AI Providers`, find the configuration section for `SenseTime SenseNova` - {'Enter + {'Enter - - Input the obtained `Access Key ID` and `Access Key Secret`. - - Choose a SenseNova model for your AI assistant and start the conversation. + - Enter your `AccessKey ID` and `AccessKey Secret` + - Choose a SenseNova model for your AI assistant to start chatting - {'Choose + {'Select - During usage, you may need to pay the API service provider, please refer to the relevant fee - policy for SenseNova. + You may incur charges when using the API services. Please refer to SenseNova’s pricing policy for more details. -You can now have conversations using the models provided by SenseNova in LobeChat. +And that’s it! You’re now ready to use SenseTime SenseNova models in LobeHub for intelligent conversations. 
diff --git a/docs/usage/providers/sensenova.zh-CN.mdx b/docs/usage/providers/sensenova.zh-CN.mdx index 7b96ed715e..133ab1f939 100644 --- a/docs/usage/providers/sensenova.zh-CN.mdx +++ b/docs/usage/providers/sensenova.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用商汤日日新 -description: 学习如何在 LobeChat 中配置和使用商汤日日新的 API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用商汤日日新 +description: 学习如何在 LobeHub 中配置和使用商汤日日新的 API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - 商汤日日新 - API密钥 - Web UI --- -# 在 LobeChat 中使用商汤日日新 +# 在 LobeHub 中使用商汤日日新 - + [商汤日日新](https://platform.sensenova.cn/home) 是商汤科技(SenseTime)推出的一个大模型体系,旨在推动人工智能(AI)技术的快速迭代和应用落地。 -本文将指导你如何在 LobeChat 中使用商汤日日新。 +本文将指导你如何在 LobeHub 中使用商汤日日新。 ### 步骤一:获取商汤日日新的 API 密钥 @@ -22,33 +22,33 @@ tags: - 注册并登录 [万象模型开发平台](https://www.sensecore.cn/product/aistudio) - 在产品菜单中找到 `日日新大模型` 并开通服务 - {'开通日日新大模型'} + {'开通日日新大模型'} - 进入 [AccessKey 访问密钥](https://console.sensecore.cn/iam/Security/access-key) 页面 - 创建一个访问密钥 - 在弹出窗口中保存访问密钥 ID 和令牌 - {'保存访问密钥'} + {'保存访问密钥'} 妥善保存弹窗中的访问密钥,它只会出现一次,如果不小心丢失了,你需要重新创建一个访问密钥。 - ### 步骤二:在 LobeChat 中配置商汤日日新 + ### 步骤二:在 LobeHub 中配置商汤日日新 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `商汤日日新` 的设置项 - {'填入访问密钥'} + {'填入访问密钥'} - 填入获得的 `AccessKey ID` 和 `AccessKey Secret` - 为你的 AI 助手选择一个商汤日日新的模型即可开始对话 - {'选择商汤日日新模型并开始对话'} + {'选择商汤日日新模型并开始对话'} 在使用过程中你可能需要向 API 服务提供商付费,请参考商汤日日新的相关费用政策。 -至此你已经可以在 LobeChat 中使用商汤日日新提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用商汤日日新提供的模型进行对话了。 diff --git a/docs/usage/providers/siliconcloud.mdx b/docs/usage/providers/siliconcloud.mdx index f6a12644a3..5b62c2d50e 100644 --- a/docs/usage/providers/siliconcloud.mdx +++ b/docs/usage/providers/siliconcloud.mdx @@ -1,48 +1,47 @@ --- -title: Using SiliconCloud in LobeChat +title: Using SiliconCloud in LobeHub description: >- - Learn how to integrate and utilize SiliconCloud's language model APIs in LobeChat. - + Learn how to configure and use SiliconCloud's API Key in LobeHub to start + chatting and interacting. 
tags: - - LobeChat + - LobeHub - SiliconCloud - API Key - Web UI --- -# Using SiliconCloud in LobeChat +# Using SiliconCloud in LobeHub - + [SiliconCloud](https://siliconflow.cn/) is an AI service platform based on open-source foundational models, offering a variety of generative AI (GenAI) services. -This article will guide you on how to use SiliconCloud in LobeChat. +This guide will walk you through how to use SiliconCloud within LobeHub. - ### Step 1: Obtain the API Key from SiliconCloud + ### Step 1: Obtain an API Key from SiliconCloud - - Sign up and log in to [SiliconCloud](https://cloud.siliconflow.cn/account/ak) - - Click on the `API Keys` menu on the left side - - Create an API Key and copy it + - Register and log in to [SiliconCloud](https://cloud.siliconflow.cn/account/ak) + - Click on the `API Key` menu on the left sidebar + - Create a new API key and copy it - {'Create + {'Create - ### Step 2: Configure SiliconCloud in LobeChat + ### Step 2: Configure SiliconCloud in LobeHub - - Go to the `Settings` page in LobeChat - - Under `AI Service Provider`, find the setting for `SiliconFlow` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, find the configuration section for `SiliconFlow` - {'Enter + {'Enter - - Enter the API Key you obtained - - Choose a SiliconCloud model for your AI assistant to start the conversation + - Paste the API key you obtained + - Choose a model from SiliconCloud for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider, so please refer to SiliconCloud's - relevant pricing policy. + You may need to pay the API service provider during usage. Please refer to SiliconCloud’s pricing policy for details. -At this point, you can start chatting using the models provided by SiliconCloud in LobeChat. +That's it! You're now ready to use SiliconCloud's models for conversations in LobeHub. 
diff --git a/docs/usage/providers/siliconcloud.zh-CN.mdx b/docs/usage/providers/siliconcloud.zh-CN.mdx index 270a689ac4..911caf5ed3 100644 --- a/docs/usage/providers/siliconcloud.zh-CN.mdx +++ b/docs/usage/providers/siliconcloud.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 SiliconCloud -description: 学习如何在 LobeChat 中配置和使用 SiliconCloud 的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 SiliconCloud +description: 学习如何在 LobeHub 中配置和使用 SiliconCloud 的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - SiliconCloud - API密钥 - Web UI --- -# 在 LobeChat 中使用 SiliconCloud +# 在 LobeHub 中使用 SiliconCloud - + [SiliconCloud](https://siliconflow.cn/) 是一个基于开源基础模型的人工智能服务平台,提供多种生成式 AI(GenAI)服务。 -本文将指导你如何在 LobeChat 中使用 SiliconCloud。 +本文将指导你如何在 LobeHub 中使用 SiliconCloud。 ### 步骤一:获得 SiliconCloud 的 API Key @@ -23,23 +23,23 @@ tags: - 点击左侧 `API 密钥` 菜单 - 创建一个 API 密钥并复制 - {'创建API密钥'} + {'创建API密钥'} - ### 步骤二:在 LobeChat 中配置 SiliconCloud + ### 步骤二:在 LobeHub 中配置 SiliconCloud - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `SiliconFlow` 的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 SiliconCloud 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 SiliconCloud 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 SiliconCloud 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 SiliconCloud 提供的模型进行对话了。 diff --git a/docs/usage/providers/spark.mdx b/docs/usage/providers/spark.mdx index 948da8098a..676e3eba37 100644 --- a/docs/usage/providers/spark.mdx +++ b/docs/usage/providers/spark.mdx @@ -1,51 +1,51 @@ --- -title: Using iFLYTEK Spark in LobeChat -description: Learn how to integrate and utilize iFLYTEK's Spark model APIs in LobeChat. +title: Using iFLYTEK Spark in LobeHub +description: >- + Learn how to configure and use the iFLYTEK Spark API Key in LobeHub to start + conversations and interactions. 
tags: - - LobeChat - - iFLYTEK - - Spark + - LobeHub + - iFLYTEK Spark - API Key - Web UI --- -# Using iFLYTEK Spark in LobeChat +# Using iFLYTEK Spark in LobeHub - + -[iFLYTEK Spark](https://xinghuo.xfyun.cn/) is a powerful AI model launched by iFLYTEK, equipped with cross-domain knowledge and language understanding capabilities, able to perform various tasks such as Q\&A, conversations, and literary creation. +[iFLYTEK Spark](https://xinghuo.xfyun.cn/) is a powerful AI large language model developed by iFLYTEK. It features cross-domain knowledge and language understanding capabilities, and can perform a variety of tasks such as Q\&A, conversation, and creative writing. -This guide will instruct you on how to use iFLYTEK Spark in LobeChat. +This guide will walk you through how to use iFLYTEK Spark in LobeHub. - ### Step 1: Obtain the iFLYTEK Spark API Key + ### Step 1: Obtain an API Key for iFLYTEK Spark - Register and log in to the [iFLYTEK Open Platform](https://console.xfyun.cn/) - - Create an application + - Create a new application - {'Create + {'Create - - Select a large model to view details - - Copy the `API Password` from the top right corner under the HTTP service interface authentication information + - Select a large model and view its details + - Copy the `API Password` from the top-right corner under the HTTP service authentication section - {'Copy + {'Copy - ### Step 2: Configure iFLYTEK Spark in LobeChat + ### Step 2: Configure iFLYTEK Spark in LobeHub - - Access the `Settings` menu in LobeChat - - Find the iFLYTEK Spark settings under `AI Service Provider` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, find the configuration section for `iFLYTEK Spark` - {'Enter + {'Enter - - Input the obtained API Key - - Choose an iFLYTEK Spark model for your AI assistant to start the conversation + - Paste the API Key you obtained + - Choose an iFLYTEK Spark model for your AI assistant to start chatting - {'Select + {'Select - During usage, 
you may need to pay the API service provider, please refer to the relevant pricing - policy of iFLYTEK Spark. + You may need to pay for API usage depending on your usage level. Please refer to iFLYTEK Spark's pricing policy for more details. -Now you can use the models provided by iFLYTEK Spark for conversations in LobeChat. +That's it! You're now ready to use iFLYTEK Spark's models for conversations in LobeHub. diff --git a/docs/usage/providers/spark.zh-CN.mdx b/docs/usage/providers/spark.zh-CN.mdx index 7498a78c59..ecd618d5e8 100644 --- a/docs/usage/providers/spark.zh-CN.mdx +++ b/docs/usage/providers/spark.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用讯飞星火 -description: 学习如何在 LobeChat 中配置和使用讯飞星火的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用讯飞星火 +description: 学习如何在 LobeHub 中配置和使用讯飞星火的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - 讯飞星火 - API密钥 - Web UI --- -# 在 LobeChat 中使用讯飞星火 +# 在 LobeHub 中使用讯飞星火 - + [讯飞星火](https://xinghuo.xfyun.cn/)是科大讯飞推出的一款强大的 AI 大模型,具备跨领域的知识和语言理解能力,能够执行问答、对话和文学创作等多种任务。 -本文将指导你如何在 LobeChat 中使用讯飞星火。 +本文将指导你如何在 LobeHub 中使用讯飞星火。 ### 步骤一:获得讯飞星火的 API Key @@ -22,28 +22,28 @@ tags: - 注册并登录 [讯飞开放平台](https://console.xfyun.cn/) - 创建一个应用 - {'创建应用'} + {'创建应用'} - 选择一个大模型查看详情 - 复制右上角 http 服务接口认证信息中的 `API Password` - {'复制 + {'复制 - ### 步骤二:在 LobeChat 中配置讯飞星火 + ### 步骤二:在 LobeHub 中配置讯飞星火 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `讯飞星火` 的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个讯飞星火的模型即可开始对话 - {'选择讯飞星火模型并开始对话'} + {'选择讯飞星火模型并开始对话'} 在使用过程中你可能需要向 API 服务提供商付费,请参考讯飞星火的相关费用政策。 -至此你已经可以在 LobeChat 中使用讯飞星火提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用讯飞星火提供的模型进行对话了。 diff --git a/docs/usage/providers/stepfun.mdx b/docs/usage/providers/stepfun.mdx index 83099426e2..28d18d4ebe 100644 --- a/docs/usage/providers/stepfun.mdx +++ b/docs/usage/providers/stepfun.mdx @@ -1,47 +1,46 @@ --- -title: Using Stepfun API Key in LobeChat +title: Using the Stepfun API Key in LobeHub description: >- - Learn how to integrate Stepfun AI models into LobeChat for engaging 
conversations. Obtain Stepfun API key, configure Stepfun in LobeChat settings, and select a model to start chatting. - + Learn how to configure and use Stepfun's AI models in LobeHub, including how + to obtain an API key and select a model to start chatting. tags: - Stepfun - API key - Web UI --- -# Using Stepfun in LobeChat +# Using Stepfun in LobeHub -{'Using +{'Using -[Stepfun](https://www.stepfun.com/) is a startup focusing on the research and development of Artificial General Intelligence (AGI). They have released the Step-1 billion-parameter language model, Step-1V billion-parameter multimodal model, and the Step-2 trillion-parameter MoE language model preview. +[Stepfun](https://www.stepfun.com/) is a startup focused on the development of Artificial General Intelligence (AGI). They have released several large-scale models, including the Step-1 language model with hundreds of billions of parameters, the Step-1V multimodal model, and a preview version of the Step-2 trillion-parameter MoE language model. 
-This document will guide you on how to use Stepfun in LobeChat: +This guide will walk you through how to use Stepfun in LobeHub: - ### Step 1: Obtain Stepfun API Key + ### Step 1: Get Your Stepfun API Key - - Visit and log in to the [Stepfun Open Platform](https://platform.stepfun.com/) - - Go to the `API Key` menu, where the system has already created an API key for you - - Copy the created API key + - Visit and log in to the [Stepfun Developer Platform](https://platform.stepfun.com/) + - Navigate to the `API Key` section — an API key will be automatically generated for you + - Copy the generated API key - {'Obtain + {'Get - ### Step 2: Configure Stepfun in LobeChat + ### Step 2: Configure Stepfun in LobeHub - - Visit the `Settings` interface in LobeChat - - Find the setting for Stepfun under `AI Service Provider` + - Open the `Settings` panel in LobeHub + - Under `AI Providers`, locate the `Stepfun` configuration section - {'Enter + {'Enter - - Open Stepfun and enter the obtained API key - - Choose a Stepfun model for your AI assistant to start the conversation + - Enable Stepfun and paste in your API key + - Choose a Stepfun model for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider, please refer to Stepfun's relevant - pricing policies. + You may need to pay for API usage depending on your usage level. Please refer to Stepfun’s pricing policy for more details. -You can now use the models provided by Stepfun to have conversations in LobeChat. +And that’s it! You’re now ready to start chatting with AI models powered by Stepfun in LobeHub. 
diff --git a/docs/usage/providers/stepfun.zh-CN.mdx b/docs/usage/providers/stepfun.zh-CN.mdx index ad74a5d507..fea8cca8ad 100644 --- a/docs/usage/providers/stepfun.zh-CN.mdx +++ b/docs/usage/providers/stepfun.zh-CN.mdx @@ -1,19 +1,19 @@ --- -title: 在 LobeChat 中使用 Stepfun 阶跃星辰 API Key -description: 学习如何在 LobeChat 中配置和使用 Stepfun 阶跃星辰的人工智能模型,包括获取 API Key 和选择模型开始对话。 +title: 在 LobeHub 中使用 Stepfun 阶跃星辰 API Key +description: 学习如何在 LobeHub 中配置和使用 Stepfun 阶跃星辰的人工智能模型,包括获取 API Key 和选择模型开始对话。 tags: - Stepfun 阶跃星辰 - API key - Web UI --- -# 在 LobeChat 中使用 Stepfun 阶跃星辰 +# 在 LobeHub 中使用 Stepfun 阶跃星辰 -{'在 +{'在 [Stepfun 阶跃星辰](https://www.stepfun.com/)是一家专注于通用人工智能 (AGI) 研发的创业公司,目前已推出 Step-1 千亿参数语言大模型、Step-1V 千亿参数多模态大模型,以及 Step-2 万亿参数 MoE 语言大模型预览版。 -本文档将指导你如何在 LobeChat 中使用 Stepfun 阶跃星辰: +本文档将指导你如何在 LobeHub 中使用 Stepfun 阶跃星辰: ### 步骤一:获取 Stepfun 阶跃星辰 API 密钥 @@ -22,23 +22,23 @@ tags: - 进入`接口密钥`菜单,系统已为你创建好 API 密钥 - 复制已创建的 API 密钥 - {'获取 + {'获取 - ### 步骤二:在 LobeChat 中配置 Stepfun Stepfun 阶跃星辰 + ### 步骤二:在 LobeHub 中配置 Stepfun Stepfun 阶跃星辰 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到` Stepfun 阶跃星辰`的设置项 - {'填写 + {'填写 - 打开 Stepfun 阶跃星辰并填入获得的 API 密钥 - 为你的 AI 助手选择一个 Stepfun 阶跃星辰的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Stepfun 阶跃星辰的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Stepfun 阶跃星辰提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Stepfun 阶跃星辰提供的模型进行对话了。 diff --git a/docs/usage/providers/taichu.mdx b/docs/usage/providers/taichu.mdx index a8f20b9d32..4a7db278ee 100644 --- a/docs/usage/providers/taichu.mdx +++ b/docs/usage/providers/taichu.mdx @@ -1,45 +1,45 @@ --- -title: Using Taichu API Key in LobeChat +title: Using Zidong Taichu API Key in LobeHub description: >- - Learn how to integrate Taichu AI into LobeChat for enhanced conversational experiences. Follow the steps to configure Taichu AI and start using its models. - + Learn how to configure and use the Zidong Taichu API Key in LobeHub to start + conversations and interactions. 
tags: - - LobeChat + - LobeHub - Taichu + - Zidong Taichu - API Key - Web UI --- -# Using Taichu in LobeChat +# Using Zidong Taichu in LobeHub -{'Using +{'Using -This article will guide you on how to use Taichu in LobeChat: +This guide will walk you through how to use Zidong Taichu in LobeHub: - ### Step 1: Obtain Taichu API Key + ### Step 1: Obtain a Zidong Taichu API Key - - Create an account on [Taichu](https://ai-maas.wair.ac.cn/) - - Create and obtain an [API key](https://ai-maas.wair.ac.cn/#/settlement/api/key) + - Create a [Zidong Taichu](https://ai-maas.wair.ac.cn/) account + - Generate and retrieve your [API Key](https://ai-maas.wair.ac.cn/#/settlement/api/key) - {'Create + {'Create - ### Step 2: Configure Taichu in LobeChat + ### Step 2: Configure Zidong Taichu in LobeHub - - Go to the `Settings` interface in LobeChat - - Find the setting for `Taichu` under `AI Service Provider` + - Go to the LobeHub `Settings` page + - Under `AI Providers`, locate the `Zidong Taichu` configuration section - {'Enter + {'Enter - - Enter the obtained API key - - Choose a Purple Taichu model for your AI assistant to start the conversation + - Paste the API Key you obtained + - Choose a Zidong Taichu model for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider, please refer to Taichu's relevant - pricing policies. + You may need to pay for API usage depending on the provider’s pricing policy. Please refer to Zidong Taichu’s official pricing for details. -Now you can start conversing with the models provided by Taichu in LobeChat. +Now you're all set to start using Zidong Taichu models for conversations in LobeHub. 
diff --git a/docs/usage/providers/taichu.zh-CN.mdx b/docs/usage/providers/taichu.zh-CN.mdx index c7149ac10c..f00ef529ec 100644 --- a/docs/usage/providers/taichu.zh-CN.mdx +++ b/docs/usage/providers/taichu.zh-CN.mdx @@ -1,19 +1,19 @@ --- -title: 在 LobeChat 中使用紫东太初 API Key -description: 学习如何在 LobeChat 中配置和使用紫东太初的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用紫东太初 API Key +description: 学习如何在 LobeHub 中配置和使用紫东太初的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - 太初 - 紫东太初 - API密钥 - Web UI --- -# 在 LobeChat 中使用紫东太初 +# 在 LobeHub 中使用紫东太初 -{'在 +{'在 -本文将指导你如何在 LobeChat 中使用紫东太初: +本文将指导你如何在 LobeHub 中使用紫东太初: ### 步骤一:获取紫东太初 API 密钥 @@ -21,23 +21,23 @@ tags: - 创建一个[紫东太初](https://ai-maas.wair.ac.cn/)账户 - 创建并获取 [API 密钥](https://ai-maas.wair.ac.cn/#/settlement/api/key) - {'创建 + {'创建 - ### 步骤二:在 LobeChat 中配置紫东太初 + ### 步骤二:在 LobeHub 中配置紫东太初 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`紫东太初`的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个紫东太初的模型即可开始对话 - {'选择太初模型并开始对话'} + {'选择太初模型并开始对话'} 在使用过程中你可能需要向 API 服务提供商付费,请参考紫东太初的相关费用政策。 -至此你已经可以在 LobeChat 中使用紫东太初提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用紫东太初提供的模型进行对话了。 diff --git a/docs/usage/providers/tencentcloud.mdx b/docs/usage/providers/tencentcloud.mdx index 12284f8677..cee36703ea 100644 --- a/docs/usage/providers/tencentcloud.mdx +++ b/docs/usage/providers/tencentcloud.mdx @@ -1,49 +1,51 @@ --- -title: Using Tencent Cloud API Key in LobeChat -description: Learn how to configure and use Tencent Cloud AI models in LobeChat, obtain an API key, and start a conversation. +title: Using Tencent Cloud API Key in LobeHub +description: >- + Learn how to configure and use Tencent Cloud AI models in LobeHub, obtain your + API key, and start chatting. 
tags: - - LobeChat + - LobeHub - Tencent Cloud - API Key - Web UI --- -# Using Tencent Cloud in LobeChat +# Using Tencent Cloud in LobeHub -{'Using +{'Using -[Tencent Cloud](https://cloud.tencent.com/) is the cloud computing service brand of Tencent, specializing in providing cloud computing services for enterprises and developers. Tencent Cloud provides a series of AI large model solutions, through which AI models can be connected stably and efficiently. +[Tencent Cloud](https://cloud.tencent.com/) is the cloud computing service brand under Tencent, offering a wide range of cloud solutions for businesses and developers. Tencent Cloud provides a suite of AI large model services that allow for stable and efficient integration with AI models. -This document will guide you on how to connect Tencent Cloud's AI models in LobeChat: +This guide will walk you through how to integrate Tencent Cloud's AI models into LobeHub: - ### Step 1: Obtain the Tencent Cloud API Key + ### Step 1: Obtain a Tencent Cloud API Key - - First, visit [Tencent Cloud](https://cloud.tencent.com/) and complete the registration and login. - - Enter the Tencent Cloud Console and navigate to [Large-scale Knowledge Engine Atomic Capability](https://console.cloud.tencent.com/lkeap). - - Activate the Large-scale Knowledge Engine, which requires real-name authentication during the activation process. + - First, visit [Tencent Cloud](https://cloud.tencent.com/) and complete the registration and login process. + - Go to the Tencent Cloud Console and navigate to the [Atomic Capabilities of Knowledge Engine](https://console.cloud.tencent.com/lkeap). + - Enable the Large Model Knowledge Engine. Real-name verification is required during activation. - {'Enter + {'Navigate - - In the `Access via OpenAI SDK` option, click the `Create API Key` button to create a new API Key. - - You can view and manage the created API Keys in `API Key Management`. - - Copy and save the created API Key. 
+ - Under the "Access via OpenAI SDK" section, click the Create API Key button to generate a new API key. + - You can view and manage your API keys in the API Key Management section. + - Copy and securely save your newly created API key. - ### Step 2: Configure Tencent Cloud in LobeChat + ### Step 2: Configure Tencent Cloud in LobeHub - - Visit the `Application Settings` and `AI Service Provider` interface of LobeChat. - - Find the `Tencent Cloud` settings item in the list of providers. + - Open LobeHub and go to App Settings > AI Service Providers. + - Find the Tencent Cloud option in the list of providers. - {'Fill + {'Enter - - Open the Tencent Cloud provider and fill in the obtained API Key. - - Select a Tencent Cloud model for your assistant to start the conversation. + - Enable the Tencent Cloud provider and paste in your API key. + - Choose a Tencent Cloud model for your assistant to start chatting. - {'Select + {'Select - You may need to pay the API service provider during use, please refer to Tencent Cloud's relevant fee policy. + You may incur charges when using services from the API provider. Please refer to Tencent Cloud’s pricing policy for details. -You can now use the models provided by Tencent Cloud in LobeChat to have conversations. +That's it! You’re now ready to use Tencent Cloud’s AI models in LobeHub for conversations. 
diff --git a/docs/usage/providers/tencentcloud.zh-CN.mdx b/docs/usage/providers/tencentcloud.zh-CN.mdx index e90d39ecdd..8bad25fd87 100644 --- a/docs/usage/providers/tencentcloud.zh-CN.mdx +++ b/docs/usage/providers/tencentcloud.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用腾讯云 API Key -description: 学习如何在 LobeChat 中配置和使用腾讯云 AI 模型,获取 API 密钥并开始对话。 +title: 在 LobeHub 中使用腾讯云 API Key +description: 学习如何在 LobeHub 中配置和使用腾讯云 AI 模型,获取 API 密钥并开始对话。 tags: - - LobeChat + - LobeHub - 腾讯云 - API密钥 - Web UI --- -# 在 LobeChat 中使用腾讯云 +# 在 LobeHub 中使用腾讯云 -{'在 +{'在 [腾讯云(Tencent Cloud)](https://cloud.tencent.com/)是腾讯公司旗下的云计算服务品牌,专门为企业和开发者提供云计算服务。腾讯云提供了一系列 AI 大模型解决方案,通过这些工具可以稳定高效接入 AI 模型。 -本文档将指导你如何在 LobeChat 中接入腾讯云的 AI 模型: +本文档将指导你如何在 LobeHub 中接入腾讯云的 AI 模型: ### 步骤一:获取腾讯云 API 密钥 @@ -23,27 +23,27 @@ tags: - 进入腾讯云控制台并导航至[知识引擎原子能力](https://console.cloud.tencent.com/lkeap) - 开通大模型知识引擎,开通过程需要实名认证 - {'进入知识引擎原子能力页面'} + {'进入知识引擎原子能力页面'} - 在`使用OpenAI SDK方式接入`选项中,点击 `创建 API Key` 按钮,创建一个新的 API Key - 在 `API key 管理` 中可以查看和管理已创建的 API Key - 复制并保存创建好的 API Key - ### 步骤二:在 LobeChat 中配置腾讯云 + ### 步骤二:在 LobeHub 中配置腾讯云 - - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面 + - 访问 LobeHub 的 `应用设置` 的 `AI 服务供应商` 界面 - 在供应商列表中找到 `腾讯云` 的设置项 - {'填写腾讯云 + {'填写腾讯云 - 打开腾讯云服务商并填入获取的 API 密钥 - 为你的助手选择一个腾讯云模型即可开始对话 - {'选择腾讯云模型'} + {'选择腾讯云模型'} 在使用过程中你可能需要向 API 服务提供商付费,请参考腾讯云的相关费用政策。 -至此你已经可以在 LobeChat 中使用腾讯云提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用腾讯云提供的模型进行对话了。 diff --git a/docs/usage/providers/togetherai.mdx b/docs/usage/providers/togetherai.mdx index aebca3be51..82d061f314 100644 --- a/docs/usage/providers/togetherai.mdx +++ b/docs/usage/providers/togetherai.mdx @@ -1,50 +1,50 @@ --- -title: Using Together AI in LobeChat API Key +title: Using the Together AI API Key in LobeHub description: >- - Learn how to integrate Together AI into LobeChat, obtain the API key, configure settings, and start conversations with AI models. - + Learn how to configure and use the Together AI API key in LobeHub to start + chatting and interacting. 
tags: + - LobeHub - Together AI - - API key + - API Key - Web UI --- -# Using Together AI in LobeChat +# Using Together AI in LobeHub -{'Using +{'Using -[together.ai](https://www.together.ai/) is a platform focused on the field of Artificial Intelligence Generated Content (AIGC), founded in June 2022. It is dedicated to building a cloud platform for running, training, and fine-tuning open-source models, providing scalable computing power at prices lower than mainstream vendors. +[together.ai](https://www.together.ai/) is a platform focused on the field of generative AI (AIGC), founded in June 2022. It aims to build a cloud platform for running, training, and fine-tuning open-source models, offering scalable compute power at prices lower than mainstream providers. -This document will guide you on how to use Together AI in LobeChat: +This guide will walk you through how to use Together AI in LobeHub: - ### Step 1: Obtain the API Key for Together AI + ### Step 1: Get Your Together AI API Key - Visit and log in to [Together AI API](https://api.together.ai/) - - Upon initial login, the system will automatically create an API key for you and provide a $5.0 credit + - Upon your first login, the system will automatically generate an API key for you and grant you a $5.00 free credit - {'Obtain + {'Get - - If you haven't saved it, you can also view the API key at any time in the `API Key` interface under `Settings` + - If you didn’t save it, you can always view it later under the API Key section in the Settings menu - {'View + {'View - ### Step 2: Configure Together AI in LobeChat + ### Step 2: Configure Together AI in LobeHub - - Visit the `Settings` interface in LobeChat - - Find the setting for `together.ai` under `AI Service Provider` + - Go to the Settings page in LobeHub + - Under the AI Providers section, find the settings for together.ai - {'Enter + {'Enter - - Open together.ai and enter the obtained API key - - Choose a Together AI model for your assistant to start 
the conversation + - Enable together.ai and paste in your API key + - Choose a Together AI model for your assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider, please refer to Together AI's pricing - policy. + You may need to pay for API usage depending on your usage. Please refer to Together AI’s pricing policy for more details. -You can now engage in conversations using the models provided by Together AI in LobeChat. +And that’s it! You’re now ready to use Together AI models for conversations in LobeHub. diff --git a/docs/usage/providers/togetherai.zh-CN.mdx b/docs/usage/providers/togetherai.zh-CN.mdx index 4396f112e0..851491346e 100644 --- a/docs/usage/providers/togetherai.zh-CN.mdx +++ b/docs/usage/providers/togetherai.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 Together AI API Key -description: 学习如何在 LobeChat 中配置和使用 Together AI 的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 Together AI API Key +description: 学习如何在 LobeHub 中配置和使用 Together AI 的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - Together AI - API密钥 - Web UI --- -# 在 LobeChat 中使用 Together AI +# 在 LobeHub 中使用 Together AI -{'在 +{'在 [together.ai](https://www.together.ai/) 是一家专注于生成式人工智能 (AIGC) 领域的平台,成立于 2022 年 6 月。 它致力于构建用于运行、训练和微调开源模型的云平台,以低于主流供应商的价格提供可扩展的计算能力。 -本文档将指导你如何在 LobeChat 中使用 Together AI: +本文档将指导你如何在 LobeHub 中使用 Together AI: ### 步骤一:获取 Together AI 的 API 密钥 @@ -22,27 +22,27 @@ tags: - 访问并登录 [Together AI API](https://api.together.ai/) - 初次登录时系统会自动为你创建好 API 密钥并赠送 $5.0 的额度 - {'获得 + {'获得 - 如果你没有保存,也可以在后续任意时间,通过 `设置` 中的 `API 密钥` 界面查看 - {'查看 + {'查看 - ### 步骤二:在 LobeChat 中配置 Together AI + ### 步骤二:在 LobeHub 中配置 Together AI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`together.ai`的设置项 - {'LobeChat + {'LobeHub - 打开 together.ai 并填入获得的 API 密钥 - 为你的助手选择一个 Together AI 的模型即可开始对话 - {' + {' 在使用过程中你可能需要向 API 服务提供商付费,请参考 Together AI 的费用政策。 -至此你已经可以在 LobeChat 中使用 Together AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Together AI 提供的模型进行对话了。 
diff --git a/docs/usage/providers/upstage.mdx b/docs/usage/providers/upstage.mdx index 0e88f4807b..d06ec1f167 100644 --- a/docs/usage/providers/upstage.mdx +++ b/docs/usage/providers/upstage.mdx @@ -1,47 +1,48 @@ --- -title: Using Upstage in LobeChat -description: Learn how to integrate and utilize Upstage's language model APIs in LobeChat. +title: Using Upstage in LobeHub +description: >- + Learn how to configure and use Upstage's API Key in LobeHub to start + conversations and interactions. tags: - - LobeChat + - LobeHub - Upstage - API Key - Web UI --- -# Using Upstage in LobeChat +# Using Upstage in LobeHub - + -[Upstage](https://www.upstage.ai/) is a platform that offers AI models and services, focusing on applications in natural language processing and machine learning. It allows developers to access its powerful AI capabilities through APIs, supporting various tasks such as text generation and conversational systems. +[Upstage](https://www.upstage.ai/) is a platform that provides AI models and services, with a focus on natural language processing and machine learning applications. It allows developers to integrate powerful AI capabilities via API, supporting a variety of tasks such as text generation, conversational systems, and more. -This article will guide you on how to use Upstage in LobeChat. +This guide will walk you through how to use Upstage within LobeHub. 
- ### Step 1: Obtain an Upstage API Key + ### Step 1: Obtain an API Key from Upstage - - Register and log in to the [Upstage Console](https://console.upstage.ai/home) - - Navigate to the `API Keys` page + - Sign up and log in to the [Upstage Console](https://console.upstage.ai/home) + - Navigate to the `API Keys` section - Create a new API key - - Copy and save the generated API key + - Copy and securely save the generated API key - {'Save + {'Save - ### Step 2: Configure Upstage in LobeChat + ### Step 2: Configure Upstage in LobeHub - - Access the `Settings` interface in LobeChat - - Locate the `Upstage` settings under `AI Service Provider` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, locate the `Upstage` configuration section - {'Enter + {'Enter - - Enter the obtained API key - - Select an Upstage model for your AI assistant to start the conversation + - Paste the API key you obtained earlier + - Choose an Upstage model for your AI assistant to start chatting - {'Select + {'Select - Please note that you may need to pay the API service provider for usage. Refer to Upstage's - pricing policy for more information. + You may incur charges for API usage. Please refer to Upstage’s pricing policy for more details. -You can now use the models provided by Upstage for conversations in LobeChat. +And that’s it! You’re now ready to use Upstage’s models for conversations in LobeHub. 
diff --git a/docs/usage/providers/upstage.zh-CN.mdx b/docs/usage/providers/upstage.zh-CN.mdx index c8a251ee71..66c03554c9 100644 --- a/docs/usage/providers/upstage.zh-CN.mdx +++ b/docs/usage/providers/upstage.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 Upstage -description: 学习如何在 LobeChat 中配置和使用 Upstage 的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 Upstage +description: 学习如何在 LobeHub 中配置和使用 Upstage 的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - Upstage - API密钥 - Web UI --- -# 在 LobeChat 中使用 Upstage +# 在 LobeHub 中使用 Upstage - + [Upstage](https://www.upstage.ai/) 是一个提供 AI 模型和服务的平台,专注于自然语言处理和机器学习应用。它允许开发者通过 API 接入其强大的 AI 功能,支持多种任务,如文本生成、对话系统等。 -本文将指导你如何在 LobeChat 中使用 Upstage。 +本文将指导你如何在 LobeHub 中使用 Upstage。 ### 步骤一:获得 Upstage 的 API Key @@ -24,23 +24,23 @@ tags: - 创建一个新的 API 密钥 - 复制并保存生成的 API 密钥 - {'保存 + {'保存 - ### 步骤二:在 LobeChat 中配置 Upstage + ### 步骤二:在 LobeHub 中配置 Upstage - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `Upstage` 的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 Upstage 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Upstage 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Upstage 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Upstage 提供的模型进行对话了。 diff --git a/docs/usage/providers/vercel-ai-gateway.mdx b/docs/usage/providers/vercel-ai-gateway.mdx index 0454edc4b4..98fc678d13 100644 --- a/docs/usage/providers/vercel-ai-gateway.mdx +++ b/docs/usage/providers/vercel-ai-gateway.mdx @@ -1,59 +1,57 @@ --- -title: Using Vercel AI Gateway in LobeChat -description: >- - Learn how to integrate and utilize Vercel AI Gateway's unified API in LobeChat. 
- +title: Using Vercel AI Gateway in LobeHub +description: Learn how to integrate and use the unified API of Vercel AI Gateway in LobeHub tags: - - LobeChat + - LobeHub - Vercel AI Gateway - API Key - - Web UI + - Web Interface --- -# Using Vercel AI Gateway in LobeChat +# Using Vercel AI Gateway in LobeHub -[Vercel AI Gateway](https://vercel.com/ai-gateway) is a unified API that provides access to 100+ AI models through a single endpoint. It offers features like budget management, usage monitoring, load balancing, and fallback handling. +[Vercel AI Gateway](https://vercel.com/ai-gateway) is a unified API that provides access to over 100 AI models through a single endpoint. It offers features such as budget management, usage monitoring, load balancing, and fallback handling. -This article will guide you on how to use Vercel AI Gateway in LobeChat. +This guide will walk you through how to use Vercel AI Gateway within LobeHub. ### Step 1: Create an API Key in Vercel AI Gateway - - Go to [Vercel Dashboard](https://vercel.com/dashboard) - - Click on the **AI Gateway** tab on the left side - - Click on **API keys** in the left sidebar - - Click **Create key** and then **Create key** in the dialog to complete + - Go to the [Vercel Dashboard](https://vercel.com/dashboard) + - Click on the **AI Gateway** tab on the left sidebar + - Select **API Keys** from the sidebar + - Click **Create Key**, then confirm by clicking **Create Key** in the dialog - ### Step 2: Configure Vercel AI Gateway in LobeChat + ### Step 2: Configure Vercel AI Gateway in LobeHub - - Go to the `Settings` page in LobeChat - - Under `AI Service Provider`, find the setting for `Vercel AI Gateway` - - Enter the API Key you obtained - - Choose a model from Vercel AI Gateway for your AI assistant to start the conversation + - Navigate to the `Settings` page in LobeHub + - Under `AI Service Provider`, find the `Vercel AI Gateway` section + - Enter the API key you obtained + - Choose a model from Vercel AI 
Gateway and start chatting with the AI assistant - During usage, you may need to pay the API service provider, so please refer to Vercel AI Gateway's - [pricing policy](https://vercel.com/docs/ai-gateway/models). + You may incur charges from the API service provider during usage. Please refer to the + [Vercel AI Gateway pricing policy](https://vercel.com/docs/ai-gateway/models) for details. -At this point, you can start chatting using the models provided by Vercel AI Gateway in LobeChat. +That's it! You can now start chatting in LobeHub using models provided by Vercel AI Gateway. ## Model Selection -Vercel AI Gateway supports various model providers including: +Vercel AI Gateway supports a variety of model providers, including: -- **OpenAI**: `openai/gpt-4o`, `openai/gpt-4o-mini`, `openai/o1`, etc. -- **Anthropic**: `anthropic/claude-3-5-sonnet`, `anthropic/claude-3-opus`, etc. -- **Google**: `google/gemini-2.5-pro`, `google/gemini-2.0-flash`, etc. -- **DeepSeek**: `deepseek/deepseek-chat`, `deepseek/deepseek-reasoner`, etc. -- And many more... +- **OpenAI**: `openai/gpt-4o`, `openai/gpt-4o-mini`, `openai/o1`, and more +- **Anthropic**: `anthropic/claude-3-5-sonnet`, `anthropic/claude-3-opus`, and more +- **Google**: `google/gemini-2.5-pro`, `google/gemini-2.0-flash`, and more +- **DeepSeek**: `deepseek/deepseek-chat`, `deepseek/deepseek-reasoner`, and more +- And many others... -For a complete list of supported models, visit [Vercel AI Gateway Models](https://vercel.com/ai-gateway/models). +To view the full list of supported models, visit the [Vercel AI Gateway Models](https://vercel.com/ai-gateway/models) page. ## API Configuration -Vercel AI Gateway uses OpenAI-compatible API format. The base URL is: +Vercel AI Gateway uses an OpenAI-compatible API format. 
The base URL is: ``` https://ai-gateway.vercel.sh/v1 diff --git a/docs/usage/providers/vercel-ai-gateway.zh-CN.mdx b/docs/usage/providers/vercel-ai-gateway.zh-CN.mdx index 9d7385fd09..752be46294 100644 --- a/docs/usage/providers/vercel-ai-gateway.zh-CN.mdx +++ b/docs/usage/providers/vercel-ai-gateway.zh-CN.mdx @@ -1,19 +1,18 @@ --- -title: 在 LobeChat 中使用 Vercel AI Gateway -description: 了解如何在 LobeChat 中集成和使用 Vercel AI Gateway 的统一 API - +title: 在 LobeHub 中使用 Vercel AI Gateway +description: 了解如何在 LobeHub 中集成和使用 Vercel AI Gateway 的统一 API tags: - - LobeChat + - LobeHub - Vercel AI Gateway - API 密钥 - Web 界面 --- -# 在 LobeChat 中使用 Vercel AI Gateway +# 在 LobeHub 中使用 Vercel AI Gateway [Vercel AI Gateway](https://vercel.com/ai-gateway) 是一个统一的 API,通过单一端点提供对 100+ AI 模型的访问。它提供预算管理、使用监控、负载均衡和回退处理等功能。 -本文将指导您如何在 LobeChat 中使用 Vercel AI Gateway。 +本文将指导您如何在 LobeHub 中使用 Vercel AI Gateway。 ### 第一步:在 Vercel AI Gateway 中创建 API 密钥 @@ -23,9 +22,9 @@ tags: - 点击左侧边栏的 **API 密钥** - 点击 **创建密钥**,然后在对话框中点击 **创建密钥** 完成创建 - ### 第二步:在 LobeChat 中配置 Vercel AI Gateway + ### 第二步:在 LobeHub 中配置 Vercel AI Gateway - - 进入 LobeChat 的 `设置` 页面 + - 进入 LobeHub 的 `设置` 页面 - 在 `AI 服务提供商` 下,找到 `Vercel AI Gateway` 设置 - 输入您获得的 API 密钥 - 选择 Vercel AI Gateway 的模型,开始与 AI 助手对话 @@ -36,7 +35,7 @@ tags: -至此,您可以在 LobeChat 中使用 Vercel AI Gateway 提供的模型开始聊天了。 +至此,您可以在 LobeHub 中使用 Vercel AI Gateway 提供的模型开始聊天了。 ## 模型选择 diff --git a/docs/usage/providers/vertexai.mdx b/docs/usage/providers/vertexai.mdx index f1caf944e9..c02c0372fd 100644 --- a/docs/usage/providers/vertexai.mdx +++ b/docs/usage/providers/vertexai.mdx @@ -1,59 +1,61 @@ --- -title: Using Vertex AI API Key in LobeChat -description: Learn how to configure and use Vertex AI models in LobeChat, get an API key, and start a conversation. +title: Using Vertex AI API Key in LobeHub +description: >- + Learn how to configure and use Vertex AI models in LobeHub, obtain your API + key, and start chatting. 
tags: - - LobeChat + - LobeHub - Vertex AI - API Key - Web UI --- -# Using Vertex AI in LobeChat +# Using Vertex AI in LobeHub -{'Using +{'Using -[Vertex AI](https://cloud.google.com/vertex-ai) is a fully managed, integrated AI development platform from Google Cloud, designed for building and deploying generative AI. It provides easy access to Vertex AI Studio, Agent Builder, and over 160 foundational models for AI development. +[Vertex AI](https://cloud.google.com/vertex-ai) is a fully managed, integrated AI development platform from Google Cloud, designed for building and deploying generative AI applications. It provides easy access to Vertex AI Studio, Agent Builder, and over 160 foundation models for your AI development needs. -This document will guide you on how to connect Vertex AI models in LobeChat: +This guide will walk you through how to integrate Vertex AI models into LobeHub: - ### Step 1: Prepare a Vertex AI Project + ### Step 1: Set Up Your Vertex AI Project - - First, visit [Google Cloud](https://console.cloud.google.com/) and complete the registration and login process. + - First, visit the [Google Cloud Console](https://console.cloud.google.com/) and sign in or create an account. - Create a new Google Cloud project or select an existing one. - - Go to the [Vertex AI Console](https://console.cloud.google.com/vertex-ai). - - Ensure that the Vertex AI API service is enabled for the project. + - Navigate to the [Vertex AI Console](https://console.cloud.google.com/vertex-ai). + - Ensure that the Vertex AI API is enabled for your project. - {'Accessing + {'Accessing - ### Step 2: Set Up API Access Permissions + ### Step 2: Configure API Access - - Go to the Google Cloud [IAM Management page](https://console.cloud.google.com/iam-admin/serviceaccounts) and navigate to `Service Accounts`. - - Create a new service account and assign a role permission to it, such as `Vertex AI User`. 
+ - Go to the [IAM & Admin Service Accounts page](https://console.cloud.google.com/iam-admin/serviceaccounts) in Google Cloud and navigate to the "Service Accounts" section. + - Create a new service account and assign it a role such as Vertex AI User. - {'Creating + {'Creating - - On the service account management page, find the service account you just created, click `Keys`, and create a new JSON format key. - - After successful creation, the key file will be automatically saved to your computer in JSON format. Please keep it safe. + - In the service account management page, find the newly created account, click on "Keys", and generate a new key in JSON format. + - Once created, the key file will be automatically downloaded to your computer. Be sure to store it securely. - {'Creating + {'Creating - ### Step 3: Configure Vertex AI in LobeChat + ### Step 3: Configure Vertex AI in LobeHub - - Visit the `App Settings` and then the `AI Service Provider` interface in LobeChat. - - Find the settings item for `Vertex AI` in the list of providers. + - Open LobeHub and go to the App Settings > AI Service Providers section. + - Find the Vertex AI option in the list of providers. - {'Entering + {'Entering - - Open the Vertex AI service provider settings. - - Fill the entire content of the JSON format key you just obtained into the API Key field. - - Select a Vertex AI model for your assistant to start the conversation. + - Open the Vertex AI provider settings. + - Paste the entire contents of the JSON key file into the API Key field. + - Choose a Vertex AI model for your assistant to start chatting. - {'Selecting + {'Selecting - You may need to pay the API service provider during usage. Please refer to Google Cloud's relevant fee policies. + You may incur charges when using the API services. Please refer to Google Cloud’s pricing documentation for more details. -Now you can use the models provided by Vertex AI for conversations in LobeChat. +You're all set! 
You can now start using Vertex AI models in LobeHub for your conversations. diff --git a/docs/usage/providers/vertexai.zh-CN.mdx b/docs/usage/providers/vertexai.zh-CN.mdx index 41c8677421..1e21e656e5 100644 --- a/docs/usage/providers/vertexai.zh-CN.mdx +++ b/docs/usage/providers/vertexai.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 Vertex AI API Key -description: 学习如何在 LobeChat 中配置和使用 Vertex AI 模型,获取 API 密钥并开始对话。 +title: 在 LobeHub 中使用 Vertex AI API Key +description: 学习如何在 LobeHub 中配置和使用 Vertex AI 模型,获取 API 密钥并开始对话。 tags: - - LobeChat + - LobeHub - Vertex AI - API密钥 - Web UI --- -# 在 LobeChat 中使用 Vertex AI +# 在 LobeHub 中使用 Vertex AI -{'在 +{'在 [Vertex AI](https://cloud.google.com/vertex-ai) 是 Google Cloud 的一款全面托管、集成的 AI 开发平台,旨在构建与应用生成式 AI。你可轻松访问 Vertex AI Studio、Agent Builder 以及超过 160 种基础模型,进行 AI 开发。 -本文档将指导你如何在 LobeChat 中接入 Vertex AI 的模型: +本文档将指导你如何在 LobeHub 中接入 Vertex AI 的模型: ### 步骤一:准备 Vertex AI 项目 @@ -24,36 +24,36 @@ tags: - 进入 [Vertex AI 控制台](https://console.cloud.google.com/vertex-ai) - 确认该项目已开通 Vertex AI API 服务 - {'进入 + {'进入 ### 步骤二:设置 API 访问权限 - 进入 Google Cloud [IAM 管理页面](https://console.cloud.google.com/iam-admin/serviceaccounts),并导航至`服务账号` - 创建一个新的服务账号,并为其分配一个角色权限,例如 `Vertex AI User` - {'创建服务账号'} + {'创建服务账号'} - 在服务账号管理页面找到刚刚创建的服务账号,点击`密钥`并创建一个新的 JSON 格式密钥 - 创建成功后,密钥文件将会以 JSON 文件的格式自动保存到你的电脑上,请妥善保存 - {'创建密钥'} + {'创建密钥'} - ### 步骤三:在 LobeChat 中配置 Vertex AI + ### 步骤三:在 LobeHub 中配置 Vertex AI - - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面 + - 访问 LobeHub 的 `应用设置` 的 `AI 服务供应商` 界面 - 在供应商列表中找到 `Vertex AI` 的设置项 - {'填写 + {'填写 - 打开 Vertex AI 服务供应商 - 将刚刚获取的 JSON 格式的全部内容填入 API Key 字段中 - 为你的助手选择一个 Vertex AI 模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 Google Cloud 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 Vertex AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 Vertex AI 提供的模型进行对话了。 diff --git a/docs/usage/providers/vllm.mdx b/docs/usage/providers/vllm.mdx index 15309e6706..e3c8c827b9 100644 --- a/docs/usage/providers/vllm.mdx +++ b/docs/usage/providers/vllm.mdx @@ -1,25 +1,27 @@ --- 
-title: Using vLLM API Key in LobeChat -description: Learn how to configure and use the vLLM language model in LobeChat, obtain an API key, and start a conversation. +title: Using vLLM API Key in LobeHub +description: >- + Learn how to configure and use vLLM language models in LobeHub, obtain an API + key, and start chatting. tags: - - LobeChat + - LobeHub - vLLM - API Key - Web UI --- -# Using vLLM in LobeChat +# Using vLLM in LobeHub -{'Using +{'Using -[vLLM](https://github.com/vllm-project/vllm) is an open-source local large language model (LLM) deployment tool that allows users to efficiently run LLM models on local devices and provides an OpenAI API-compatible service interface. +[vLLM](https://github.com/vllm-project/vllm) is an open-source local deployment tool for large language models (LLMs). It allows users to efficiently run LLMs on their local machines and provides an OpenAI-compatible API interface. -This document will guide you on how to use vLLM in LobeChat: +This guide will walk you through how to use vLLM in LobeHub: - ### Step 1: Preparation + ### Step 1: Prerequisites - vLLM has certain requirements for hardware and software environments. Be sure to configure according to the following requirements: + vLLM has specific hardware and software requirements. Please ensure your environment meets the following: | Hardware Requirements | | | --------------------- | ----------------------------------------------------------------------- | @@ -33,7 +35,7 @@ This document will guide you on how to use vLLM in LobeChat: ### Step 2: Install vLLM - If you are using an NVIDIA GPU, you can directly install vLLM using `pip`. However, it is recommended to use `uv` here, which is a very fast Python environment manager, to create and manage the Python environment. Please follow the [documentation](https://docs.astral.sh/uv/#getting-started) to install uv. 
After installing uv, you can use the following command to create a new Python environment and install vLLM: + If you're using an NVIDIA GPU, you can install vLLM directly via `pip`. However, we recommend using `uv`, a fast Python environment manager, to create and manage your Python environments. Follow the [official guide](https://docs.astral.sh/uv/#getting-started) to install uv. Once installed, you can create a new Python environment and install vLLM with the following commands: ```shell uv venv myenv --python 3.12 --seed @@ -41,13 +43,13 @@ This document will guide you on how to use vLLM in LobeChat: uv pip install vllm ``` - Another method is to use `uv run` with the `--with [dependency]` option, which allows you to run commands such as `vllm serve` without creating an environment: + Alternatively, you can use `uv run` with the `--with [dependency]` option to run commands like `vllm serve` without creating a dedicated environment: ```shell uv run --with vllm vllm --help ``` - You can also use [conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) to create and manage your Python environment. + You can also use [conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) to manage your Python environment: ```shell conda create -n myenv python=3.12 -y @@ -56,46 +58,43 @@ This document will guide you on how to use vLLM in LobeChat: ``` - For non-CUDA platforms, please refer to the [official - documentation](https://docs.vllm.ai/en/latest/getting_started/installation/index.html#installation-index) - to learn how to install vLLM. + For non-CUDA platforms, please refer to the [official documentation](https://docs.vllm.ai/en/latest/getting_started/installation/index.html#installation-index) for installation instructions. - ### Step 3: Start Local Service + ### Step 3: Start the Local Server - vLLM can be deployed as an OpenAI API protocol-compatible server. 
By default, it will start the server at `http://localhost:8000`. You can specify the address using the `--host` and `--port` parameters. The server currently runs only one model at a time. + vLLM can be deployed as a server compatible with the OpenAI API protocol. By default, it starts at `http://localhost:8000`. You can customize the address using the `--host` and `--port` parameters. Note that the server can only run one model at a time. - The following command will start a vLLM server and run the `Qwen2.5-1.5B-Instruct` model: + The following command starts a vLLM server running the `Qwen2.5-1.5B-Instruct` model: ```shell vllm serve Qwen/Qwen2.5-1.5B-Instruct ``` - You can enable the server to check the API key in the header by passing the parameter `--api-key` or the environment variable `VLLM_API_KEY`. If not set, no API Key is required to access. + To enable API key authentication, you can pass the `--api-key` parameter or set the `VLLM_API_KEY` environment variable. If not set, the server will be accessible without an API key. - For more detailed vLLM server configuration, please refer to the [official - documentation](https://docs.vllm.ai/en/latest/). + For more detailed server configuration options, refer to the [official vLLM documentation](https://docs.vllm.ai/en/latest/). - ### Step 4: Configure vLLM in LobeChat + ### Step 4: Configure vLLM in LobeHub - - Access the `Application Settings` interface of LobeChat. - - Find the `vLLM` settings item under `AI Service Provider`. + - Open the `App Settings` panel in LobeHub + - Under `AI Providers`, locate the `vLLM` configuration section - {'Fill + {'Enter - - Open the vLLM service provider and fill in the API service address and API Key. + - Enable the vLLM provider and enter the API service URL and API key - * If your vLLM is not configured with an API Key, please leave the API Key blank. \* If your vLLM - is running locally, please make sure to turn on `Client Request Mode`. 
+ \* If your vLLM server is not configured with an API key, leave the API key field blank.\ + \* If your vLLM server is running locally, make sure to enable "Client Request Mode". - - Add the model you are running to the model list below. - - Select a vLLM model to run for your assistant and start the conversation. + - Add the model you are running to the model list below + - Assign the vLLM model to your assistant to start chatting - {'Select + {'Select -Now you can use the models provided by vLLM in LobeChat to have conversations. +You're now ready to use vLLM-powered models in LobeHub for conversations. diff --git a/docs/usage/providers/vllm.zh-CN.mdx b/docs/usage/providers/vllm.zh-CN.mdx index eebcbf3eb0..982873bc83 100644 --- a/docs/usage/providers/vllm.zh-CN.mdx +++ b/docs/usage/providers/vllm.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 vLLM API Key -description: 学习如何在 LobeChat 中配置和使用 vLLM 语言模型,获取 API 密钥并开始对话。 +title: 在 LobeHub 中使用 vLLM API Key +description: 学习如何在 LobeHub 中配置和使用 vLLM 语言模型,获取 API 密钥并开始对话。 tags: - - LobeChat + - LobeHub - vLLM - API密钥 - Web UI --- -# 在 LobeChat 中使用 vLLM +# 在 LobeHub 中使用 vLLM -{'在 +{'在 [vLLM](https://github.com/vllm-project/vllm)是一个开源的本地大型语言模型(LLM)部署工具,允许用户在本地设备上高效运行 LLM 模型,并提供兼容 OpenAI API 的服务接口。 -本文档将指导你如何在 LobeChat 中使用 vLLM: +本文档将指导你如何在 LobeHub 中使用 vLLM: ### 步骤一:准备工作 @@ -77,12 +77,12 @@ tags: 更详细的 vLLM 服务器配置,请参考[官方文档](https://docs.vllm.ai/en/latest/) - ### 步骤四:在 LobeChat 中配置 vLLM + ### 步骤四:在 LobeHub 中配置 vLLM - - 访问 LobeChat 的 `应用设置`界面 + - 访问 LobeHub 的 `应用设置`界面 - 在 `AI 服务商` 下找到 `vLLM` 的设置项 - {'填写 + {'填写 - 打开 vLLM 服务商并填入 API 服务地址以及 API Key @@ -94,7 +94,7 @@ tags: - 在下方的模型列表中添加你运行的模型 - 为你的助手选择一个 vLLM 运行的模型即可开始对话 - {'选择 + {'选择 -至此你已经可以在 LobeChat 中使用 vLLM 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 vLLM 提供的模型进行对话了。 diff --git a/docs/usage/providers/volcengine.mdx b/docs/usage/providers/volcengine.mdx index e7d61eb306..7674471fac 100644 --- a/docs/usage/providers/volcengine.mdx +++ b/docs/usage/providers/volcengine.mdx @@ -1,49 
+1,50 @@ --- -title: Using the Volcano Engine API Key in LobeChat -description: Learn how to configure and use the Volcano Engine AI model in LobeChat, obtain API keys, and start conversations. +title: Using Volcengine API Key in LobeHub +description: >- + Learn how to configure and use Volcengine AI models in LobeHub, obtain your + API key, and start chatting. tags: - - LobeChat + - LobeHub - Volcengine - Doubao - API Key - Web UI --- -# Using Volcengine in LobeChat +# Using Volcengine in LobeHub -{'Using +{'Using -[Volcengine](https://www.volcengine.com/) is a cloud service platform under ByteDance that provides large language model (LLM) services through "Volcano Ark," supporting multiple mainstream models such as Baichuan Intelligent, Mobvoi, and more. +[Volcengine](https://www.volcengine.com/) is a cloud service platform under ByteDance. Through its "Volcano Ark" platform, it provides large language model (LLM) services, supporting several mainstream models such as Baichuan Intelligence and Mobvoi. -This document will guide you on how to use Volcengine in LobeChat: +This guide will walk you through how to use Volcengine in LobeHub: - ### Step 1: Obtain the Volcengine API Key + ### Step 1: Obtain a Volcengine API Key - First, visit the [Volcengine official website](https://www.volcengine.com/) and complete the registration and login process. - - Access the Volcengine console and navigate to [Volcano Ark](https://console.volcengine.com/ark/). + - Navigate to the [Volcano Ark Console](https://console.volcengine.com/ark/) - {'Entering + {'Accessing - - Go to the `API Key Management` menu and click `Create API Key`. - - Copy and save the created API Key. + - Go to the `API Key Management` section and click `Create API Key` + - Copy and securely save your newly created API Key - ### Step 2: Configure Volcengine in LobeChat + ### Step 2: Configure Volcengine in LobeHub - - Navigate to the `Application Settings` page in LobeChat and select `AI Service Providers`. 
- - Find the `Volcengine` option in the provider list. + - Open the `App Settings` in LobeHub and go to the `AI Service Providers` section + - Find the `Volcengine` option in the list of providers - {'Entering + {'Enter - - Open the Volcengine service provider and enter the obtained API Key. - - Choose a Volcengine model for your assistant to start the conversation. + - Enable the Volcengine provider and paste in your API Key + - Choose a Volcengine model for your assistant to start chatting - {'Selecting + {'Select - During usage, you may need to pay the API service provider, so please refer to Volcengine's - pricing policy. + You may incur charges when using the API services. Please refer to Volcengine’s pricing policy for details. -You can now use the models provided by Volcengine for conversations in LobeChat. +You're now ready to start chatting with models powered by Volcengine in LobeHub. diff --git a/docs/usage/providers/volcengine.zh-CN.mdx b/docs/usage/providers/volcengine.zh-CN.mdx index 9cc835ecd3..fadfc572aa 100644 --- a/docs/usage/providers/volcengine.zh-CN.mdx +++ b/docs/usage/providers/volcengine.zh-CN.mdx @@ -1,21 +1,21 @@ --- -title: 在 LobeChat 中使用火山引擎 API Key -description: 学习如何在 LobeChat 中配置和使用火山引擎 AI 模型,获取 API 密钥并开始对话。 +title: 在 LobeHub 中使用火山引擎 API Key +description: 学习如何在 LobeHub 中配置和使用火山引擎 AI 模型,获取 API 密钥并开始对话。 tags: - - LobeChat + - LobeHub - 火山引擎 - 豆包 - API密钥 - Web UI --- -# 在 LobeChat 中使用火山引擎 +# 在 LobeHub 中使用火山引擎 -{'在 +{'在 [火山引擎](https://www.volcengine.com/)是字节跳动旗下的云服务平台,通过 "火山方舟" 提供大型语言模型 (LLM) 服务,支持多个主流模型如百川智能、Mobvoi 等。 -本文档将指导你如何在 LobeChat 中使用火山引擎: +本文档将指导你如何在 LobeHub 中使用火山引擎: ### 步骤一:获取火山引擎 API 密钥 @@ -23,26 +23,26 @@ tags: - 首先,访问[火山引擎官网](https://www.volcengine.com/)并完成注册登录 - 进入火山引擎控制台并导航至[火山方舟](https://console.volcengine.com/ark/) - {'进入火山方舟API管理页面'} + {'进入火山方舟API管理页面'} - 进入 `API key 管理` 菜单,并点击 `创建 API Key` - 复制并保存创建好的 API Key - ### 步骤二:在 LobeChat 中配置火山引擎 + ### 步骤二:在 LobeHub 中配置火山引擎 - - 访问 LobeChat 的 `应用设置` 的 `AI 服务供应商` 界面 + - 访问 LobeHub 
的 `应用设置` 的 `AI 服务供应商` 界面 - 在供应商列表中找到 `火山引擎` 的设置项 - {'填写火山引擎 + {'填写火山引擎 - 打开火山引擎服务商并填入获取的 API 密钥 - 为你的助手选择一个火山引擎模型即可开始对话 - {'选择火山引擎模型'} + {'选择火山引擎模型'} 在使用过程中你可能需要向 API 服务提供商付费,请参考火山引擎的相关费用政策。 -至此你已经可以在 LobeChat 中使用火山引擎提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用火山引擎提供的模型进行对话了。 diff --git a/docs/usage/providers/wenxin.mdx b/docs/usage/providers/wenxin.mdx index 4a461dc07b..253cb5e8ec 100644 --- a/docs/usage/providers/wenxin.mdx +++ b/docs/usage/providers/wenxin.mdx @@ -1,59 +1,58 @@ --- -title: Using Wenxin Qianfan in LobeChat +title: Using Wenxin Qianfan in LobeHub description: >- - Learn how to integrate and utilize Wenxin Qianfan's language model APIs in LobeChat. - + Learn how to configure and use the Wenxin Qianfan API Key in LobeHub to start + conversations and interactions. tags: - - LobeChat - - 百度 - - 文心千帆 - - API密钥 + - LobeHub + - Baidu + - Wenxin Qianfan + - API Key - Web UI --- -# Using Wenxin Qianfan in LobeChat +# Using Wenxin Qianfan in LobeHub - + -[Wenxin Qianfan](https://qianfan.cloud.baidu.com/) is an artificial intelligence large language model platform launched by Baidu, supporting a variety of application scenarios, including literary creation, commercial copywriting, and mathematical logic reasoning. The platform features deep semantic understanding and generation capabilities across modalities and languages, and it is widely utilized in fields such as search Q\&A, content creation, and smart office applications. +[Wenxin Qianfan](https://qianfan.cloud.baidu.com/) is a large language model platform developed by Baidu. It supports a wide range of applications, including creative writing, business copy generation, and mathematical logic reasoning. The platform features deep semantic understanding and generation across modalities and languages, and is widely used in search Q\&A, content creation, and intelligent office scenarios. -This article will guide you on how to use Wenxin Qianfan in LobeChat. 
+This guide will walk you through how to use Wenxin Qianfan in LobeHub. - ### Step 1: Obtain the Wenxin Qianfan API Key + ### Step 1: Obtain a Wenxin Qianfan API Key - Register and log in to the [Baidu AI Cloud Console](https://console.bce.baidu.com/) - Navigate to `Baidu AI Cloud Qianfan ModelBuilder` - - Select `API Key` from the left menu + - In the left-hand menu, select `API Key` - {'API + {'API - - Click `Create API Key` - - In `Service`, select `Qianfan ModelBuilder` - - In `Resource`, choose `All Resources` + - Click "Create API Key" + - Under `Service`, select `Qianfan ModelBuilder` + - Under `Resource`, choose `All Resources` - Click the `Confirm` button - - Copy the `API Key` and keep it safe + - Copy the generated `API Key` and store it securely - {'Create + {'Create - {'Copy + {'Copy - ### Step 2: Configure Wenxin Qianfan in LobeChat + ### Step 2: Configure Wenxin Qianfan in LobeHub - - Go to the `Settings` page of LobeChat - - Under `AI Service Provider`, find the `Wenxin Qianfan` settings + - Open the `Settings` page in LobeHub + - Under `AI Providers`, locate the configuration section for `Wenxin Qianfan` - {'Enter + {'Enter - - Enter the obtained `API Key` - - Select a Wenxin Qianfan model for your AI assistant, and you're ready to start chatting! + - Paste the API Key you obtained earlier + - Choose a Wenxin Qianfan model for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider. Please refer to Wenxin Qianfan's - relevant fee policy. + You may incur charges from the API service provider during usage. Please refer to Wenxin Qianfan’s pricing policy for details. -You can now use the models provided by Wenxin Qianfan for conversations in LobeChat. +Now you're all set to start using Wenxin Qianfan models for conversations in LobeHub. 
diff --git a/docs/usage/providers/wenxin.zh-CN.mdx b/docs/usage/providers/wenxin.zh-CN.mdx index 4816474c99..7a0901c343 100644 --- a/docs/usage/providers/wenxin.zh-CN.mdx +++ b/docs/usage/providers/wenxin.zh-CN.mdx @@ -1,21 +1,21 @@ --- -title: 在 LobeChat 中使用文心千帆 -description: 学习如何在 LobeChat 中配置和使用文心千帆的API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用文心千帆 +description: 学习如何在 LobeHub 中配置和使用文心千帆的API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - 百度 - 文心千帆 - API密钥 - Web UI --- -# 在 LobeChat 中使用文心千帆 +# 在 LobeHub 中使用文心千帆 - + [文心千帆](https://qianfan.cloud.baidu.com/)是百度推出的一个人工智能大语言模型平台,支持多种应用场景,包括文学创作、商业文案生成、数理逻辑推算等。该平台具备跨模态、跨语言的深度语义理解与生成能力,广泛应用于搜索问答、内容创作和智能办公等领域。 -本文将指导你如何在 LobeChat 中使用文心千帆。 +本文将指导你如何在 LobeHub 中使用文心千帆。 ### 步骤一:获得文心千帆的 API Key @@ -24,7 +24,7 @@ tags: - 进入 `百度智能云千帆 ModelBuilder` - 在左侧菜单中选择 `API Key` - {'API + {'API - 点击创建 API Key - 在 `服务` 中选择 `千帆ModelBuilder` @@ -32,25 +32,25 @@ tags: - 点击 `确定` 按钮 - 复制 `API Key` 并妥善保存 - {'创建密钥'} + {'创建密钥'} - {'复制密钥'} + {'复制密钥'} - ### 步骤二:在 LobeChat 中配置文心千帆 + ### 步骤二:在 LobeHub 中配置文心千帆 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `文心千帆` 的设置项 - {'填入 + {'填入 - 填入获得的 `API Key` - 为你的 AI 助手选择一个文心千帆的模型即可开始对话 - {'选择文心千帆模型并开始对话'} + {'选择文心千帆模型并开始对话'} 在使用过程中你可能需要向 API 服务提供商付费,请参考文心千帆的相关费用政策。 -至此你已经可以在 LobeChat 中使用文心千帆提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用文心千帆提供的模型进行对话了。 diff --git a/docs/usage/providers/xai.mdx b/docs/usage/providers/xai.mdx index 6a3607d813..221b3ac419 100644 --- a/docs/usage/providers/xai.mdx +++ b/docs/usage/providers/xai.mdx @@ -1,53 +1,51 @@ --- -title: Using xAI in LobeChat +title: Using xAI in LobeHub description: >- - Learn how to configure and use xAI's API Key in LobeChat to start conversations and interactions. - + Learn how to configure and use xAI's API Key in LobeHub to start chatting and + interacting. 
tags: - - LobeChat + - LobeHub - xAI - API Key - Web UI --- -# Using xAI in LobeChat +# Using xAI in LobeHub - + -[xAI](https://x.ai/) is an artificial intelligence company founded by Elon Musk in 2023, aimed at exploring and understanding the true nature of the universe. The company's mission is to solve complex scientific and mathematical problems using AI technology and to advance the field of artificial intelligence. +[xAI](https://x.ai/) is an artificial intelligence company founded by Elon Musk in 2023, with the mission of exploring and understanding the true nature of the universe. The company aims to solve complex scientific and mathematical problems and advance the development of AI technologies. -This article will guide you on how to use xAI in LobeChat. +This guide will walk you through how to use xAI within LobeHub. - ### Step 1: Obtain an API Key from xAI + ### Step 1: Obtain an xAI API Key - - Register and login to the [xAI console](https://console.x.ai/) - - Create an API token - - Copy and save the API token + - Sign up and log in to the [xAI Console](https://console.x.ai/) + - Create a new API Token + - Copy and securely save your API Token - {'xAI + {'xAI - Make sure to securely save the API token displayed in the popup; it only appears once. If you - accidentally lose it, you will need to create a new API token. + Make sure to save the API token shown in the popup — it will only be displayed once. If you lose it, you’ll need to generate a new one. 
- ### Step 2: Configure xAI in LobeChat + ### Step 2: Configure xAI in LobeHub - - Go to the `Settings` menu in LobeChat - - Locate the `xAI` settings under `AI Service Provider` + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, locate the `xAI` configuration section - {'Enter + {'Enter - - Enter the API key you obtained - - Select an xAI model for your AI assistant to start a conversation + - Paste the API Key you obtained + - Choose an xAI model for your AI assistant to start chatting - {'Select + {'Select - During use, you may need to pay the API service provider, so please refer to xAI's relevant - pricing policies. + Please note that usage may incur charges from the API provider. Refer to xAI’s pricing policy for more details. -You are now ready to engage in conversations using the models provided by xAI in LobeChat. +You’re now ready to start using xAI-powered models in LobeHub for conversations and interactions. diff --git a/docs/usage/providers/xai.zh-CN.mdx b/docs/usage/providers/xai.zh-CN.mdx index 6edc911810..bfeb3c8aed 100644 --- a/docs/usage/providers/xai.zh-CN.mdx +++ b/docs/usage/providers/xai.zh-CN.mdx @@ -1,20 +1,20 @@ --- -title: 在 LobeChat 中使用 xAI -description: 学习如何在 LobeChat 中配置和使用 xAI 的 API Key,以便开始对话和交互。 +title: 在 LobeHub 中使用 xAI +description: 学习如何在 LobeHub 中配置和使用 xAI 的 API Key,以便开始对话和交互。 tags: - - LobeChat + - LobeHub - xAI - API密钥 - Web UI --- -# 在 LobeChat 中使用 xAI +# 在 LobeHub 中使用 xAI - + [xAI](https://x.ai/) 是由埃隆・马斯克于 2023 年成立的一家人工智能公司,旨在探索和理解宇宙的真实本质。该公司的目标是通过人工智能技术解决复杂的科学和数学问题,并推动人工智能的发展。 -本文将指导你如何在 LobeChat 中使用 xAI。 +本文将指导你如何在 LobeHub 中使用 xAI。 ### 步骤一:获取 xAI 的 API 密钥 @@ -23,27 +23,27 @@ tags: - 创建一个 API Token - 复制并保存 API Token - {'xAI + {'xAI 妥善保存弹窗中的 API 令牌,它只会出现一次,如果不小心丢失了,你需要重新创建一个 API 令牌。 - ### 步骤二:在 LobeChat 中配置 xAI + ### 步骤二:在 LobeHub 中配置 xAI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到 `xAI` 的设置项 - {'填入 + {'填入 - 填入获得的 API 密钥 - 为你的 AI 助手选择一个 xAI 的模型即可开始对话 - {'选择 + {'选择 在使用过程中你可能需要向 API 服务提供商付费,请参考 
xAI 的相关费用政策。 -至此你已经可以在 LobeChat 中使用 xAI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用 xAI 提供的模型进行对话了。 diff --git a/docs/usage/providers/zeroone.mdx b/docs/usage/providers/zeroone.mdx index 40cab37e77..deba73b081 100644 --- a/docs/usage/providers/zeroone.mdx +++ b/docs/usage/providers/zeroone.mdx @@ -1,57 +1,59 @@ --- -title: Using 01 AI API Key in LobeChat +title: Using the 01.AI API Key in LobeHub description: >- - Learn how to integrate and use 01 AI in LobeChat with step-by-step instructions. Obtain an API key, configure 01 AI, and start conversations with AI models. - + Learn how to configure and use AI models provided by 01.AI (Zero One AI) in + LobeHub. Get your API key, enter it in the settings, choose a model, and start + chatting with your AI assistant. tags: + - LobeHub - 01.AI + - Zero One AI - Web UI - - API key - - AI models + - API Key + - Configuration Guide --- -# Using 01 AI in LobeChat +# Using Zero One AI in LobeHub -{'Using +{'Using -[01 AI](https://www.01.ai/) is a global company dedicated to AI 2.0 large model technology and applications. Its billion-parameter Yi-Large closed-source model, when evaluated on Stanford University's English ranking AlpacaEval 2.0, is on par with GPT-4. +[01.AI (Zero One AI)](https://www.01.ai/) is a global company focused on AI 2.0 large language models and their applications. Its proprietary Yi-Large model, with over 100 billion parameters, has achieved top rankings alongside GPT-4 on Stanford’s English benchmark AlpacaEval 2.0. 
-This document will guide you on how to use 01 AI in LobeChat: +This guide will walk you through how to use 01.AI in LobeHub: - ### Step 1: Obtain 01 AI API Key + ### Step 1: Get Your 01.AI API Key - - Register and log in to the [01 AI Large Model Open Platform](https://platform.lingyiwanwu.com/) - - Go to the `Dashboard` and access the `API Key Management` menu - - A system-generated API key has been created for you automatically, or you can create a new one on this interface + - Register and log in to the [01.AI Developer Platform](https://platform.lingyiwanwu.com/) + - Go to the `Dashboard` and navigate to the `API Key Management` section + - An API key is automatically generated for you. You can also create a new one if needed - {'Create + {'Create - - Account verification is required for first-time use + - First-time users will need to complete account verification - {'Complete + {'Complete - - Click on the created API key - - Copy and save the API key in the pop-up dialog box + - Click on the API key you created + - In the pop-up dialog, copy and save your API key securely - {'Save + {'Save - ### Step 2: Configure 01 AI in LobeChat + ### Step 2: Configure 01.AI in LobeHub - - Access the `Settings` interface in LobeChat - - Find the setting for `01 AI` under `AI Service Provider` + - Open the `Settings` panel in LobeHub + - Under `AI Providers`, locate the configuration section for `01.AI` - {'Enter + {'Enter - - Open 01 AI and enter the obtained API key - - Choose a 01.AI model for your AI assistant to start the conversation + - Enable 01.AI and paste in your API key + - Choose a model from 01.AI for your AI assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider. Please refer to 01 AI's relevant fee - policies. + You may incur charges when using the API. Please refer to 01.AI’s pricing policy for more details. -You can now use the models provided by 01 AI for conversations in LobeChat. +And that’s it! 
You’re now ready to use 01.AI’s models in LobeHub for intelligent conversations. diff --git a/docs/usage/providers/zeroone.zh-CN.mdx b/docs/usage/providers/zeroone.zh-CN.mdx index 3f05c31eae..e4b132e622 100644 --- a/docs/usage/providers/zeroone.zh-CN.mdx +++ b/docs/usage/providers/zeroone.zh-CN.mdx @@ -1,10 +1,8 @@ --- -title: 在 LobeChat 中使用 01.AI 零一万物 API Key -description: >- - 学习如何在 LobeChat 中配置并使用 01.AI 零一万物提供的 AI 模型进行对话。获取 API 密钥、填入设置项、选择模型,开始与 AI 助手交流。 - +title: 在 LobeHub 中使用 01.AI 零一万物 API Key +description: 学习如何在 LobeHub 中配置并使用 01.AI 零一万物提供的 AI 模型进行对话。获取 API 密钥、填入设置项、选择模型,开始与 AI 助手交流。 tags: - - LobeChat + - LobeHub - 01.AI - Zero One AI - 零一万物 @@ -13,13 +11,13 @@ tags: - 配置指南 --- -# 在 LobeChat 中使用零一万物 +# 在 LobeHub 中使用零一万物 -{'在 +{'在 [零一万物](https://www.01.ai/)是一家致力于 AI 2.0 大模型技术和应用的全球公司,其发布的千亿参数的 Yi-Large 闭源模型,在斯坦福大学的英语排行 AlpacaEval 2.0 上,与 GPT-4 互有第一。 -本文档将指导你如何在 LobeChat 中使用零一万物: +本文档将指导你如何在 LobeHub 中使用零一万物: ### 步骤一:获取零一万物 API 密钥 @@ -28,32 +26,32 @@ tags: - 进入`工作台`并访问`API Key管理`菜单 - 系统已为你自动创建了一个 API 密钥,你也可以在此界面创建新的 API 密钥 - {'创建零一万物 + {'创建零一万物 - 初次使用时需要完成账号认证 - {'完成账号认证'} + {'完成账号认证'} - 点击创建好的 API 密钥 - 在弹出的对话框中复制并保存 API 密钥 - {'保存 + {'保存 - ### 步骤二:在 LobeChat 中配置零一万物 + ### 步骤二:在 LobeHub 中配置零一万物 - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`零一万物`的设置项 - {'填入 + {'填入 - 打开零一万物并填入获得的 API 密钥 - 为你的 AI 助手选择一个 01.AI 的模型即可开始对话 - {'选择01.AI模型并开始对话'} + {'选择01.AI模型并开始对话'} 在使用过程中你可能需要向 API 服务提供商付费,请参考零一万物的相关费用政策。 -至此你已经可以在 LobeChat 中使用零一万物提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用零一万物提供的模型进行对话了。 diff --git a/docs/usage/providers/zhipu.mdx b/docs/usage/providers/zhipu.mdx index e38d3491f7..f5d0537adf 100644 --- a/docs/usage/providers/zhipu.mdx +++ b/docs/usage/providers/zhipu.mdx @@ -1,8 +1,8 @@ --- -title: Using Zhipu ChatGLM API Key in LobeChat +title: Using Zhipu ChatGLM API Key in LobeHub description: >- - Learn how to integrate and utilize Zhipu AI models in LobeChat for enhanced conversational experiences. 
Obtain the API key, configure settings, and start engaging with cognitive intelligence. - + Learn how to configure and use Zhipu AI's API Key in LobeHub to start chatting + with models provided by Zhipu AI. tags: - Zhipu AI - ChatGLM @@ -10,39 +10,38 @@ tags: - Web UI --- -# Using Zhipu ChatGLM in LobeChat +# Using Zhipu ChatGLM in LobeHub -{'Using +{'Using -[Zhipu AI](https://www.zhipuai.cn/) is a high-tech company originating from the Department of Computer Science at Tsinghua University. Established in 2019, the company focuses on natural language processing, machine learning, and big data analysis, dedicated to expanding the boundaries of artificial intelligence technology in the field of cognitive intelligence. +[Zhipu AI](https://www.zhipuai.cn/) is a high-tech company founded in 2019, originating from the Department of Computer Science at Tsinghua University. It focuses on natural language processing, machine learning, and big data analytics, aiming to push the boundaries of AI technology in the field of cognitive intelligence. 
-This document will guide you on how to use Zhipu AI in LobeChat: +This guide will walk you through how to use Zhipu AI in LobeHub: - ### Step 1: Obtain the API Key for Zhipu AI + ### Step 1: Get Your Zhipu AI API Key - Visit and log in to the [Zhipu AI Open Platform](https://open.bigmodel.cn/) - - Upon initial login, the system will automatically create an API key for you and gift you a resource package of 25M Tokens - - Navigate to the `API Key` section at the top to view your API key + - Upon your first login, the system will automatically generate an API key for you and grant a free 25M token package + - Navigate to the top menu and click on `API Key` to view your key - {'Obtaining + {'Get - ### Step 2: Configure Zhipu AI in LobeChat + ### Step 2: Configure Zhipu AI in LobeHub - - Visit the `Settings` interface in LobeChat - - Under `AI Service Provider`, locate the settings for Zhipu AI + - Go to the `Settings` page in LobeHub + - Under `AI Providers`, find the configuration section for `Zhipu AI` - {'Enter + {'Enter - - Open Zhipu AI and enter the obtained API key - - Choose a Zhipu AI model for your assistant to start the conversation + - Enable Zhipu AI and paste in your API key + - Choose a Zhipu AI model for your assistant to start chatting - {'Select + {'Select - During usage, you may need to pay the API service provider, please refer to Zhipu AI's pricing - policy. + You may need to pay for API usage depending on your usage. Please refer to Zhipu AI’s pricing policy for more details. -You can now engage in conversations using the models provided by Zhipu AI in LobeChat. +And that’s it! You’re now ready to start chatting with models provided by Zhipu AI in LobeHub. 
diff --git a/docs/usage/providers/zhipu.zh-CN.mdx b/docs/usage/providers/zhipu.zh-CN.mdx index 8c649654e9..86f10e14ad 100644 --- a/docs/usage/providers/zhipu.zh-CN.mdx +++ b/docs/usage/providers/zhipu.zh-CN.mdx @@ -1,6 +1,6 @@ --- -title: 在 LobeChat 中使用智谱 ChatGLM API Key -description: 学习如何在 LobeChat 中配置和使用智谱AI的 API Key,开始与智谱AI提供的模型进行对话。 +title: 在 LobeHub 中使用智谱 ChatGLM API Key +description: 学习如何在 LobeHub 中配置和使用智谱AI的 API Key,开始与智谱AI提供的模型进行对话。 tags: - 智谱AI - ChatGLM @@ -8,13 +8,13 @@ tags: - Web UI --- -# 在 LobeChat 中使用智谱 ChatGLM +# 在 LobeHub 中使用智谱 ChatGLM -{'在 +{'在 [智谱 AI](https://www.zhipuai.cn/) 是一家源自清华大学计算机系技术成果的高科技公司,成立于 2019 年,专注于自然语言处理、机器学习和大数据分析,致力于在认知智能领域拓展人工智能技术的边界。 -本文档将指导你如何在 LobeChat 中使用智谱 AI: +本文档将指导你如何在 LobeHub 中使用智谱 AI: ### 步骤一:获取智谱 AI 的 API 密钥 @@ -23,23 +23,23 @@ tags: - 初次登录时系统会自动为你创建好 API 密钥并赠送 25M Tokens 的资源包 - 进入顶部的 `API密钥` 可以查看你的 API - {'获得智谱AI + {'获得智谱AI - ### 步骤二:在 LobeChat 中配置智谱 AI + ### 步骤二:在 LobeHub 中配置智谱 AI - - 访问 LobeChat 的`设置`界面 + - 访问 LobeHub 的`设置`界面 - 在`AI 服务商`下找到`智谱AI`的设置项 - {'LobeChat + {'LobeHub - 打开智谱 AI 并填入获得的 API 密钥 - 为你的助手选择一个智谱 AI 的模型即可开始对话 - {' + {' 在使用过程中你可能需要向 API 服务提供商付费,请参考智谱 AI 的费用政策。 -至此你已经可以在 LobeChat 中使用智谱 AI 提供的模型进行对话了。 +至此你已经可以在 LobeHub 中使用智谱 AI 提供的模型进行对话了。 diff --git a/docs/usage/start.mdx b/docs/usage/start.mdx index 8e2addf4cf..3632411b89 100644 --- a/docs/usage/start.mdx +++ b/docs/usage/start.mdx @@ -1,62 +1,33 @@ --- -title: Get started with LobeChat -description: >- - Explore the exciting features in LobeChat, including Vision Model, TTS & STT, Local LLMs, and Multi AI Providers. Discover more about Agent Market, Plugin System, and Personalization. 
- +title: Getting Started +description: Getting started with LobeHub tags: - - Feature Overview - - Vision Model - - TTS & STT - - Local LLMs - - Multi AI Providers - - Agent Market + - LobeHub + - LobeHub + - Features + - Visual Recognition + - Voice Conversations + - AI Providers + - Assistant Marketplace + - Local Large Language Models - Plugin System --- -# ✨ Feature Overview +# LobeHub User Guide -## 2024 Overview +Welcome to the official LobeHub User Guide. -{'LobeChat + + For self-hosting assistance, please visit the [Self-Hosting Guide](/docs/self-hosting/start). For developer resources, check out the [Developer Guide](/docs/development/start). + - - +## Getting Started - +- [Create Your First Agent](/docs/usage/getting-started/page) +- [Create Your First Team](/docs/usage/getting-started/lobe-ai) +- [Explore the LobeHub Community](/docs/usage/community/agent-market) +- [Migrate from v1.x Local Database to v2.x (Cloud / Self-hosted)](/docs/usage/migrate-from-local-database) - +## Help & Support - - - - - - - -
- -## 2023 Overview - -{'LobeChat - - - - - - - - - - - - - - - - - - - - - - +If you need assistance, please refer to our [Help & Support](/docs/usage/help) section. diff --git a/docs/usage/start.zh-CN.mdx b/docs/usage/start.zh-CN.mdx index 31771469ea..23bfc5e697 100644 --- a/docs/usage/start.zh-CN.mdx +++ b/docs/usage/start.zh-CN.mdx @@ -1,8 +1,9 @@ --- -title: 开始使用 LobeChat -description: 了解 LobeChat 的功能特性,包括视觉识别、语音会话、多 AI 服务商等,体验助手市场、本地大语言模型、插件系统等功能。 +title: 开始 +description: 上手使用 LobeHub tags: - - LobeChat + - LobeHub + - LobeHub - 功能特性 - 视觉识别 - 语音会话 @@ -12,50 +13,21 @@ tags: - 插件系统 --- -# ✨ LobeChat 功能特性一览 +# LobeHub 用户指南 -## 2024 特性一览 +欢迎来到 LobeHub 官方用户指南。 -{'LobeChat + + 欲获取自部署帮助,请访问[私有化部署指南](/docs/self-hosting/start)。欲获取开发者指南,请访问[开发指南](/docs/development/start) + - - +## 开始 - +- [创建你的第一个 Agent](/docs/usage/getting-started/page) +- [创建你的第一个 Team](/docs/usage/getting-started/lobe-ai) +- [探索 LobeHub 社区](/docs/usage/community/agent-market) +- [从 v1.x 本地数据库迁移到 v2.x(云端 / 自部署)](/docs/usage/migrate-from-local-database) - +## 帮助与支持 - - - - - - - -
- -## 2023 特性一览 - -{'LobeChat - - - - - - - - - - - - - - - - - - - - - - +如果你需要帮助,可以参考 [帮助与支持](/docs/usage/help)。 diff --git a/docs/usage/tools-calling.mdx b/docs/usage/tools-calling.mdx deleted file mode 100644 index acc025dc99..0000000000 --- a/docs/usage/tools-calling.mdx +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Tools Calling -description: Discover the best tools to enhance your calling experience and productivity. -tags: - - Calling Tools - - Productivity - - Communication ---- - -# Tools Calling - -TODO diff --git a/docs/usage/tools-calling.zh-CN.mdx b/docs/usage/tools-calling.zh-CN.mdx deleted file mode 100644 index 92ef954d45..0000000000 --- a/docs/usage/tools-calling.zh-CN.mdx +++ /dev/null @@ -1,248 +0,0 @@ ---- -title: 大模型工具调用(Tools Calling)评测 -description: 基于 LobeChat 测试主流支持工具调用(Tools Calling) 的大模型,并客观呈现评测结果 -tags: - - Tools Calling - - Benchmark - - Function Calling 评测 - - 工具调用 - - LobeChat 插件 ---- - -# 大模型工具调用(Tools Calling)评测 - -Tools Calling 是大语言模型的高级能力。你可以通过在 API 请求中传入一组工具列表,让模型智能地选择具体使用哪个工具,并在返回请求中输出工具调用的 JSON 参数。 - - - 如果你之前没有了解过 Tools Calling, 可以查看 [Function Call: Chat - 应用的插件基石与交互技术的变革黎明](https://lobehub.com/zh/blog/openai-function-call) 这篇文章。 - - -随着社区中越来越多的大语言模型支持了 Tools Calling 能力,同时得益于 LobeChat 的 Agent Runtime 架构,我们几乎实现了所有主流大语言模型( OpenAI 、Claude 、Gemini 等等)的 Tools Calling 调用能力。 - -LobeChat 的插件实现基于模型的 Tools Calling 能力,模型本身的 Tools Calling 能力决定插件调用是否正常。作为上层应用,我们针对各个模型的 Tools Calling 做了较为完善的测试,以便帮助我们的用户了解现有的模型能力,更好地进行抉择。 - -## 评测任务介绍 - -我们基于实际真实的用户场景出发构建了两大组测试任务,第一组为简单的调用指令(天气查询),第二组为复杂调用指令(文生图)。这两组指令的系统描述如下: - - - - ```md - ## Tools - - You can use these tools below: - - ### Realtime Weather - - Get realtime weather information - - The APIs you can use: - - #### `realtime-weather____fetchCurrentWeather` - - 获取当前天气情况 - ``` - - - - ```md - ## Tools - - You can use these tools below: - - ### DALL·E 3 - - Whenever a description of an image is given, use lobe-image-designer to create the images and then summarize the prompts used to generate the 
images in plain text. If the user does not ask for a specific number of images, default to creating four captions to send to lobe-image-designer that are written to be as diverse as possible. - - All captions sent to lobe-image-designer must abide by the following policies: - - 1. If the description is not in English, then translate it. - 2. Do not create more than 4 images, even if the user requests more. - 3. Don't create images of politicians or other public figures. Recommend other ideas instead. - 4. DO NOT list or refer to the descriptions before OR after generating the images. They should ONLY ever be written out ONCE, in the `prompts` field of the request. You do not need to ask for permission to generate, just do it! - 5. Always mention the image type (photo, oil painting, watercolor painting, illustration, cartoon, drawing, vector, render, etc.) at the beginning of the caption. Unless the caption suggests otherwise, make at least 1--2 of the 4 images photos. - 6. Diversify depictions of ALL images with people to include DESCENT and GENDER for EACH person using direct terms. Adjust only human descriptions. - - - EXPLICITLY specify these attributes, not abstractly reference them. The attributes should be specified in a minimal way and should directly describe their physical form. - - Your choices should be grounded in reality. For example, all of a given OCCUPATION should not be the same gender or race. Additionally, focus on creating diverse, inclusive, and exploratory scenes via the properties you choose during rewrites. Make choices that may be insightful or unique sometimes. - - Use "various" or "diverse" ONLY IF the description refers to groups of more than 3 people. Do not change the number of people requested in the original description. - - Don't alter memes, fictional character origins, or unseen people. Maintain the original prompt's intent and prioritize quality. - - Do not create any imagery that would be offensive. - - 8. 
Silently modify descriptions that include names or hints or references of specific people or celebrities by carefully selecting a few minimal modifications to substitute references to the people with generic descriptions that don't divulge any information about their identities, except for their genders and physiques. Do this EVEN WHEN the instructions ask for the prompt to not be changed. Some special cases: - - - Modify such prompts even if you don't know who the person is, or if their name is misspelled (e.g. "Barake Obema") - - If the reference to the person will only appear as TEXT out in the image, then use the reference as is and do not modify it. - - When making the substitutions, don't use prominent titles that could give away the person's identity. E.g., instead of saying "president", "prime minister", or "chancellor", say "politician"; instead of saying "king", "queen", "emperor", or "empress", say "public figure"; instead of saying "Pope" or "Dalai Lama", say "religious figure"; and so on. - - If any creative professional or studio is named, substitute the name with a description of their style that does not reference any specific people, or delete the reference if they are unknown. DO NOT refer to the artist or studio's style. - - The prompt must intricately describe every part of the image in concrete, objective detail. THINK about what the end goal of the description is, and extrapolate that to what would make satisfying images. All descriptions sent to lobe-image-designer should be a paragraph of text that is extremely descriptive and detailed. Each should be more than 3 sentences long. - - The APIs you can use: - - #### `lobe-image-designer____text2image____builtin` - - Create images from a text-only prompt. 
- ``` - - - -如上所示,简单调用指令在插件调用时它的系统描述 (system role) 相对简单,复杂调用指令的系统描述会复杂很多。这两组不同复杂度的指令可以比较好地区分出模型对于系统指令的遵循能力: - -- **天气查询可以测试模型的基础 Tools Calling 能力,确认模型是否存在「虚假宣传」的情况。** 就我们实际的测试来看,的确存在一些模型号称具有 Tools Calling 能力,但是处于完全不可用的状态; -- **文生图可以测试模型指令跟随能力的上限。** 例如基础模型(例如 GPT-3.5)可能只能生成 1 张图片的 prompt,而高级模型(例如 GPT-4o)则能够生成 1\~4 张图片的 prompt。 - -### 简单调用指令:天气查询 - -天气查询是 Tools Calling 中一个经典的例子。 - -天气查询插件采用的是我们自己做的一个简单的插件,它的工具定义如下: - -```json -{ - "function": { - "description": "获取当前天气情况", - "name": "realtime-weather____fetchCurrentWeather", - "parameters": { - "properties": { - "city": { - "description": "城市名称", - "type": "string" - } - }, - "required": ["city"], - "type": "object" - } - }, - "type": "function" -} -``` - -针对这一个工具,我们构建的测试组中包含了三个指令: - -| 指令编号 | 指令内容 | 基础 Tools Calling 调用 | 并发调用 | 复合指令跟随 | -| ---- | ------------------ | ------------------- | ---- | ------ | -| 指令 ① | 告诉我杭州和北京的天气,先回答我好的 | 🟢 | 🟢 | 🟢 | -| 指令 ② | 告诉我杭州和北京的天气 | 🟢 | 🟢 | - | -| 指令 ③ | 告诉我杭州的天气 | 🟢 | - | - | - -上述三个指令的复杂度逐渐递减,我们可以通过这三个指令来测试模型对于简单指令的处理能力。 - -- 指令 ① 测试的能力项包含 「基础 Tools Calling 调用」、「并发调用」、「复合指令跟随」三项。 -- 指令 ② 测试的能力项包含 「基础 Tools Calling 调用」、「并发调用」 两项。 -- 指令 ③ 测试的能力项仅包含「基础 Tools Calling 调用」。 - - - 将指令 ① 、② 、③ 按照难度递减的方式排序的目的,是为了降低测试的成本。因为当模型能通过指令 ① - 的测试时,我们就不需要继续测试指令 ② 和指令 ③ ,必然能通过。 - - -测试能力项详细说明: - - - - 根据我们实际的日常使用,工具调用往往会和普通文本生成结合在一起回答。例如比较经典的 Code Interpreter 插件,ChatGPT 往往会先回复一些代码生成的思路,然后再调用 Code Interpreter 插件生成代码。 - - 这种情况下,我们需要模型能够正确地识别出用户的意图,然后调用对应的工具。 - - 因此, 指令 ① 中的「告诉我杭州和北京的天气,先回答我好的」就是一个复合指令跟随的例子。前半句期望模型调用天气查询工具,后半句期望模型回答「好的」。并且理想的顺序应该是先回答「好的」,然后再调用天气查询工具。 - - - - 并发工具调用(Parallel function calling)是指模型能够同时调用多个工具,或同时调用一个工具多次,这在对话中可以大大降低用户等待的时间,提升用户体验。 - - 并发工具调用能力由 OpenAI 于 2023 年 11 月率先提出,目前支持并发工具调用的模型并不算多,属于是 Tools Calling 的进阶能力。 - - 指令 ② 中的「告诉我杭州和北京的天气」就是一个期望执行并发调用的例子。理想的情况下,单个模型的返回应该存在两个工具的调用返回。 - - - - 基础工具调用不必再赘述,这是 Tools Calling 的基础能力。 - - 指令 ③ 中的「告诉我杭州的天气」就是最基本的工具调用的例子。 - - - -### 复杂调用指令:文生图 - -文生图的 Tools Calling 基本照搬了 ChatGPT Plus 的指令,它的复杂度相对较高,可以测试模型对于复杂指令的跟随能力。工具定义如下: - -```json 
-{ - "function": { - "description": "Create images from a text-only prompt.", - "name": "lobe-image-designer____text2image____builtin", - "parameters": { - "properties": { - "prompts": { - "description": "The user's original image description, potentially modified to abide by the lobe-image-designer policies. If the user does not suggest a number of captions to create, create four of them. If creating multiple captions, make them as diverse as possible. If the user requested modifications to previous images, the captions should not simply be longer, but rather it should be refactored to integrate the suggestions into each of the captions. Generate no more than 4 images, even if the user requests more.", - "items": { - "type": "string" - }, - "maxItems": 4, - "minItems": 1, - "type": "array" - }, - "quality": { - "default": "standard", - "description": "The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image.", - "enum": ["standard", "hd"], - "type": "string" - }, - "seeds": { - "description": "A list of seeds to use for each prompt. If the user asks to modify a previous image, populate this field with the seed used to generate that image from the image lobe-image-designer metadata.", - "items": { - "type": "integer" - }, - "type": "array" - }, - "size": { - "default": "1024x1024", - "description": "The resolution of the requested image, which can be wide, square, or tall. Use 1024x1024 (square) as the default unless the prompt suggests a wide image, 1792x1024, or a full-body portrait, in which case 1024x1792 (tall) should be used instead. Always include this parameter in the request.", - "enum": ["1792x1024", "1024x1024", "1024x1792"], - "type": "string" - }, - "style": { - "default": "vivid", - "description": "The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. 
Natural causes the model to produce more natural, less hyper-real looking images.", - "enum": ["vivid", "natural"], - "type": "string" - } - }, - "required": ["prompts"], - "type": "object" - } - }, - "type": "function" -} -``` - -针对这一个工具,我们构建的测试组中包含了两个指令: - -| 指令编号 | 指令内容 | 流式调用 | 复杂 Tools Calling 调用 | 并发调用 | 复合指令跟随 | -| ---- | ------------------------------------------------------------------------------------------------ | ---- | ------------------- | ---- | ------ | -| 指令 ① | 我要画 3 幅画,第一幅画的主体为一只达芬奇风格的小狗,第二幅是毕加索风格的大雁,最后一幅是莫奈风格的狮子。每一幅都需要产出 2 个 prompts。请先说明你的构思,然后开始生成相应的图片。 | 🟢 | 🟢 | 🟢 | 🟢 | -| 指令 ② | 画一只小狗 | 🟢 | 🟢 | - | - | - -此外,由于文生图的 prompts 的生成时间较长,这一组指令也可以清晰地测试出模型的 API 是否支持流式 Tools Calling。 - -## 评测结果 - -各模型的评测细节可以点击查看: - - - - - - - - - - - - - -### 结果汇总 - -TODO diff --git a/docs/usage/tools-calling/anthropic.mdx b/docs/usage/tools-calling/anthropic.mdx deleted file mode 100644 index 9d0410ad92..0000000000 --- a/docs/usage/tools-calling/anthropic.mdx +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Anthropic Claude 系列 Tools Calling 评测 -description: >- - 使用 LobeChat 测试 Anthropic Claude 系列模型(Claude 3.5 sonnet / Claude 3 Opus / Claude 3 haiku) 的工具调用(Function Calling)能力,并展现评测结果 - -tags: - - Tools Calling - - Benchmark - - Function Calling 评测 - - 工具调用 - - 插件 ---- - -# Anthropic Claude Series Tools Calling - -Overview of Anthropic Claude Series model Tools Calling capabilities: - -| Model | Support Tools Calling | Stream | Parallel | Simple Instruction Score | Complex Instruction | -| ----------------- | --------------------- | ------ | -------- | ------------------------ | ------------------- | -| Claude 3.5 Sonnet | ✅ | ✅ | ✅ | 🌟🌟🌟 | 🌟🌟 | -| Claude 3 Opus | ✅ | ✅ | ❌ | 🌟 | ⛔️ | -| Claude 3 Sonnet | ✅ | ✅ | ❌ | 🌟🌟 | ⛔️ | -| Claude 3 Haiku | ✅ | ✅ | ❌ | 🌟🌟 | ⛔️ | - -## Claude 3.5 Sonnet - -### Simple Instruction Call: Weather Query - -Test Instruction: Instruction ① - -