feat(MiniMax Chat Model Node): Add MiniMax Chat Model sub-node (#28305)

This commit is contained in:
Dawid Myslak 2026-04-14 16:29:50 +02:00 committed by GitHub
parent 1042350f4e
commit bd927d9350
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 476 additions and 0 deletions

View file

@ -0,0 +1,85 @@
import type {
IAuthenticateGeneric,
ICredentialTestRequest,
ICredentialType,
INodeProperties,
} from 'n8n-workflow';
/**
 * Credential type for the MiniMax AI platform.
 *
 * MiniMax operates two regional deployments; the user selects a region and
 * the hidden `url` property resolves it to the matching base URL via an
 * expression, so consuming nodes only need to reference `$credentials.url`.
 */
export class MinimaxApi implements ICredentialType {
	name = 'minimaxApi';

	displayName = 'MiniMax';

	documentationUrl = 'minimax';

	properties: INodeProperties[] = [
		{
			displayName: 'API Key',
			name: 'apiKey',
			type: 'string',
			// Mask the key in the editor UI.
			typeOptions: { password: true },
			required: true,
			default: '',
		},
		{
			// The region determines which MiniMax deployment the key belongs to.
			displayName: 'Region',
			name: 'region',
			type: 'options',
			options: [
				{
					name: 'International',
					value: 'international',
					description: 'platform.minimax.io - international endpoint',
				},
				{
					name: 'China',
					value: 'china',
					description: 'platform.minimaxi.com - mainland China endpoint',
				},
			],
			default: 'international',
		},
		{
			// Hidden, derived field: the base URL resolved from the selected
			// region. `$self` refers to the other values of this credential.
			displayName: 'Base URL',
			name: 'url',
			type: 'hidden',
			default:
				'={{ $self.region === "china" ? "https://api.minimaxi.com/v1" : "https://api.minimax.io/v1" }}',
		},
	];

	// Every request authenticates with a Bearer token header.
	authenticate: IAuthenticateGeneric = {
		type: 'generic',
		properties: {
			headers: {
				Authorization: '=Bearer {{$credentials.apiKey}}',
			},
		},
	};

	// Credential test: a cheap authenticated listing call. MiniMax signals
	// errors inside the response body (`base_resp.status_code`) rather than via
	// HTTP status, so failures are matched with responseSuccessBody rules.
	// 1004/2049 appear to be MiniMax auth-error codes — meanings inferred from
	// the messages below; confirm against the MiniMax API error reference.
	test: ICredentialTestRequest = {
		request: {
			baseURL: '={{ $credentials.url }}',
			url: '/files/list',
			qs: { purpose: 'voice_clone' },
		},
		rules: [
			{
				type: 'responseSuccessBody',
				properties: {
					key: 'base_resp.status_code',
					value: 1004,
					message: 'Authentication failed. Please check your API key.',
				},
			},
			{
				type: 'responseSuccessBody',
				properties: {
					key: 'base_resp.status_code',
					value: 2049,
					message: 'Invalid API key. Please verify your key matches the selected region.',
				},
			},
		],
	};
}

View file

@ -103,6 +103,7 @@ function getInputs(
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
'@n8n/n8n-nodes-langchain.lmChatGoogleVertex',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatMinimax',
'@n8n/n8n-nodes-langchain.lmChatMoonshot',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
'@n8n/n8n-nodes-langchain.lmChatDeepSeek',
@ -134,6 +135,7 @@ function getInputs(
'@n8n/n8n-nodes-langchain.lmChatAwsBedrock',
'@n8n/n8n-nodes-langchain.lmChatLemonade',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatMinimax',
'@n8n/n8n-nodes-langchain.lmChatMoonshot',
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',

View file

@ -0,0 +1,184 @@
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import {
getProxyAgent,
makeN8nLlmFailedAttemptHandler,
N8nLlmTracing,
getConnectionHintNoticeField,
} from '@n8n/ai-utilities';
import {
NodeConnectionTypes,
type INodeType,
type INodeTypeDescription,
type ISupplyDataFunctions,
type SupplyData,
} from 'n8n-workflow';
import type { OpenAICompatibleCredential } from '../../../types/types';
import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
/**
 * MiniMax Chat Model sub-node.
 *
 * Supplies a LangChain chat model backed by MiniMax's OpenAI-compatible
 * chat-completions API, for consumption by downstream AI chains/agents.
 */
export class LmChatMinimax implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'MiniMax Chat Model',
		name: 'lmChatMinimax',
		icon: 'file:minimax.svg',
		group: ['transform'],
		version: [1],
		description: 'For advanced usage with an AI chain',
		defaults: {
			name: 'MiniMax Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models', 'Root Nodes'],
				'Language Models': ['Chat Models (Recommended)'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatminimax/',
					},
				],
			},
			alias: ['minimax'],
		},
		// Sub-node: no main inputs; emits a single AI language-model connection.
		inputs: [],
		outputs: [NodeConnectionTypes.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'minimaxApi',
				required: true,
			},
		],
		requestDefaults: {
			ignoreHttpStatusErrors: true,
			baseURL: '={{ $credentials?.url }}',
		},
		properties: [
			getConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),
			{
				displayName: 'Model',
				name: 'model',
				type: 'options',
				description:
					'The model which will generate the completion. <a href="https://platform.minimax.io/docs/api-reference/text-openai-api">Learn more</a>.',
				options: [
					{ name: 'MiniMax-M2', value: 'MiniMax-M2' },
					{ name: 'MiniMax-M2.1', value: 'MiniMax-M2.1' },
					{ name: 'MiniMax-M2.1-Highspeed', value: 'MiniMax-M2.1-highspeed' },
					{ name: 'MiniMax-M2.5', value: 'MiniMax-M2.5' },
					{ name: 'MiniMax-M2.5-Highspeed', value: 'MiniMax-M2.5-highspeed' },
					{ name: 'MiniMax-M2.7', value: 'MiniMax-M2.7' },
					{ name: 'MiniMax-M2.7-Highspeed', value: 'MiniMax-M2.7-highspeed' },
				],
				default: 'MiniMax-M2.7',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Hide Thinking',
						name: 'hideThinking',
						default: true,
						type: 'boolean',
						description:
							'Whether to strip chain-of-thought reasoning from the response, returning only the final answer',
					},
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxTokens',
						default: -1,
						description:
							'The maximum number of tokens to generate in the completion. The limit depends on the selected model.',
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: 360000,
						description: 'Maximum amount of time a request is allowed to take in milliseconds',
						type: 'number',
					},
					{
						displayName: 'Max Retries',
						name: 'maxRetries',
						default: 2,
						description: 'Maximum number of retries to attempt',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};

	/**
	 * Builds and returns the configured ChatOpenAI instance pointed at the
	 * MiniMax base URL from the credentials.
	 *
	 * @param itemIndex Index of the item whose parameters to read.
	 * @returns The model wrapped as SupplyData for the AiLanguageModel output.
	 */
	async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = await this.getCredentials<OpenAICompatibleCredential>('minimaxApi');
		const modelName = this.getNodeParameter('model', itemIndex) as string;
		const options = this.getNodeParameter('options', itemIndex, {}) as {
			hideThinking?: boolean;
			maxTokens?: number;
			maxRetries: number;
			timeout: number;
			temperature?: number;
			topP?: number;
		};

		// `hideThinking` is a node-level toggle (not a ChatOpenAI field), and
		// `timeout`/`maxRetries` are passed explicitly below. Destructure them
		// out so only genuine model options are spread into the constructor —
		// previously `...options` leaked `hideThinking` onto the model instance.
		const { hideThinking = true, timeout, maxRetries, ...modelOptions } = options;

		// Route requests to the region-specific base URL, honouring any proxy
		// and applying the user-selected timeout to header and body phases.
		const configuration: ClientOptions = {
			baseURL: credentials.url,
			fetchOptions: {
				dispatcher: getProxyAgent(credentials.url, {
					headersTimeout: timeout,
					bodyTimeout: timeout,
				}),
			},
		};

		const model = new ChatOpenAI({
			apiKey: credentials.apiKey,
			model: modelName,
			...modelOptions,
			timeout,
			maxRetries: maxRetries ?? 2,
			configuration,
			callbacks: [new N8nLlmTracing(this)],
			// Ask MiniMax to split chain-of-thought from the answer so only the
			// final answer is returned (see the Hide Thinking option above).
			modelKwargs: hideThinking ? { reasoning_split: true } : undefined,
			onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
		});

		return {
			response: model,
		};
	}
}

View file

@ -0,0 +1,10 @@
<svg width="40" height="40" viewBox="0 0 490.16 411.7" fill="none" xmlns="http://www.w3.org/2000/svg">
<!-- Node icon: MiniMax wordmark drawn as a single path, filled with a
     three-stop pink-to-red linear gradient defined below. -->
<defs>
<linearGradient id="minimax-grad" y1="205.85" x2="490.16" y2="205.85" gradientUnits="userSpaceOnUse">
<stop offset="0" stop-color="#e4177f"/>
<stop offset="0.5" stop-color="#e73562"/>
<stop offset="1" stop-color="#e94e4a"/>
</linearGradient>
</defs>
<path fill="url(#minimax-grad)" d="M233.45,40.81a17.55,17.55,0,1,0-35.1,0V331.56a40.82,40.82,0,0,1-81.63,0V145a17.55,17.55,0,1,0-35.09,0v79.06a40.82,40.82,0,0,1-81.63,0V195.42a11.63,11.63,0,0,1,23.26,0v28.66a17.55,17.55,0,0,0,35.1,0V145A40.82,40.82,0,0,1,140,145V331.56a17.55,17.55,0,0,0,35.1,0V217.5h0V40.81a40.81,40.81,0,1,1,81.62,0V281.56a11.63,11.63,0,1,1-23.26,0Zm215.9,63.4A40.86,40.86,0,0,0,408.53,145V300.85a17.55,17.55,0,0,1-35.09,0v-260a40.82,40.82,0,0,0-81.63,0V370.89a17.55,17.55,0,0,1-35.1,0V330a11.63,11.63,0,1,0-23.26,0v40.86a40.81,40.81,0,0,0,81.62,0V40.81a17.55,17.55,0,0,1,35.1,0v260a40.82,40.82,0,0,0,81.63,0V145a17.55,17.55,0,1,1,35.1,0V281.56a11.63,11.63,0,0,0,23.26,0V145A40.85,40.85,0,0,0,449.35,104.21Z"/>
</svg>

After

Width:  |  Height:  |  Size: 1.1 KiB

View file

@ -0,0 +1,193 @@
/* eslint-disable n8n-nodes-base/node-filename-against-convention */
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
/* eslint-disable @typescript-eslint/unbound-method */
import { ChatOpenAI } from '@langchain/openai';
import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing, getProxyAgent } from '@n8n/ai-utilities';
import { createMockExecuteFunction } from 'n8n-nodes-base/test/nodes/Helpers';
import type { INode, ISupplyDataFunctions } from 'n8n-workflow';
import { LmChatMinimax } from '../LmChatMinimax.node';
// Auto-mock the heavy external dependencies so the node can be exercised in
// isolation (no real LangChain client or n8n AI utilities are constructed).
jest.mock('@langchain/openai');
jest.mock('@n8n/ai-utilities');

// Typed handles to the mocked exports, used in the assertions below.
const MockedChatOpenAI = jest.mocked(ChatOpenAI);
const MockedN8nLlmTracing = jest.mocked(N8nLlmTracing);
const mockedMakeN8nLlmFailedAttemptHandler = jest.mocked(makeN8nLlmFailedAttemptHandler);
const mockedGetProxyAgent = jest.mocked(getProxyAgent);
describe('LmChatMinimax', () => {
	let node: LmChatMinimax;

	// Minimal workflow-node definition used to build mocked contexts.
	const mockNodeDef: INode = {
		id: '1',
		name: 'MiniMax Chat Model',
		typeVersion: 1,
		type: '@n8n/n8n-nodes-langchain.lmChatMinimax',
		position: [0, 0],
		parameters: {},
	};

	// Builds a mocked ISupplyDataFunctions context with stubbed credentials and
	// node parameters, and (re)wires the mocked ai-utilities helpers. Called
	// inside each test, after beforeEach has cleared previous mock state.
	const setupMockContext = (nodeOverrides: Partial<INode> = {}) => {
		const nodeDef = { ...mockNodeDef, ...nodeOverrides };
		const ctx = createMockExecuteFunction<ISupplyDataFunctions>(
			{},
			nodeDef,
		) as jest.Mocked<ISupplyDataFunctions>;
		// Credentials as the MinimaxApi credential type would resolve them.
		ctx.getCredentials = jest.fn().mockResolvedValue({
			apiKey: 'test-minimax-key',
			url: 'https://api.minimax.io/v1',
		});
		ctx.getNode = jest.fn().mockReturnValue(nodeDef);
		// Default parameters; individual tests override getNodeParameter.
		ctx.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
			if (paramName === 'model') return 'MiniMax-M2.7';
			if (paramName === 'options') return {};
			return undefined;
		});
		MockedN8nLlmTracing.mockImplementation(() => ({}) as unknown as N8nLlmTracing);
		mockedMakeN8nLlmFailedAttemptHandler.mockReturnValue(jest.fn());
		mockedGetProxyAgent.mockReturnValue({} as any);
		return ctx;
	};

	beforeEach(() => {
		node = new LmChatMinimax();
		jest.clearAllMocks();
	});

	describe('node description', () => {
		it('should have correct node properties', () => {
			expect(node.description).toMatchObject({
				displayName: 'MiniMax Chat Model',
				name: 'lmChatMinimax',
				group: ['transform'],
				version: [1],
			});
		});

		it('should require minimaxApi credentials', () => {
			expect(node.description.credentials).toEqual([{ name: 'minimaxApi', required: true }]);
		});

		it('should output ai_languageModel', () => {
			// NodeConnectionTypes.AiLanguageModel serializes to this string.
			expect(node.description.outputs).toEqual(['ai_languageModel']);
			expect(node.description.outputNames).toEqual(['Model']);
		});
	});

	describe('supplyData', () => {
		it('should create ChatOpenAI with Minimax base URL', async () => {
			const ctx = setupMockContext();
			const result = await node.supplyData.call(ctx, 0);
			expect(ctx.getCredentials).toHaveBeenCalledWith('minimaxApi');
			expect(MockedChatOpenAI).toHaveBeenCalledWith(
				expect.objectContaining({
					apiKey: 'test-minimax-key',
					model: 'MiniMax-M2.7',
					maxRetries: 2,
					callbacks: expect.arrayContaining([expect.any(Object)]),
					onFailedAttempt: expect.any(Function),
					configuration: expect.objectContaining({
						baseURL: 'https://api.minimax.io/v1',
					}),
				}),
			);
			expect(result).toEqual({ response: expect.any(Object) });
		});

		it('should pass options to ChatOpenAI', async () => {
			const ctx = setupMockContext();
			// Override parameters so every user-facing option is exercised.
			ctx.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
				if (paramName === 'model') return 'MiniMax-M2.5';
				if (paramName === 'options')
					return {
						temperature: 0.5,
						maxTokens: 2000,
						topP: 0.9,
						timeout: 60000,
						maxRetries: 5,
					};
				return undefined;
			});
			await node.supplyData.call(ctx, 0);
			expect(MockedChatOpenAI).toHaveBeenCalledWith(
				expect.objectContaining({
					model: 'MiniMax-M2.5',
					temperature: 0.5,
					maxTokens: 2000,
					topP: 0.9,
					timeout: 60000,
					maxRetries: 5,
				}),
			);
		});

		it('should set reasoning_split by default (hideThinking defaults to true)', async () => {
			const ctx = setupMockContext();
			await node.supplyData.call(ctx, 0);
			expect(MockedChatOpenAI).toHaveBeenCalledWith(
				expect.objectContaining({
					modelKwargs: { reasoning_split: true },
				}),
			);
		});

		it('should not set reasoning_split when hideThinking is false', async () => {
			const ctx = setupMockContext();
			ctx.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
				if (paramName === 'model') return 'MiniMax-M2.7';
				if (paramName === 'options') return { hideThinking: false };
				return undefined;
			});
			await node.supplyData.call(ctx, 0);
			expect(MockedChatOpenAI).toHaveBeenCalledWith(
				expect.objectContaining({
					modelKwargs: undefined,
				}),
			);
		});

		it('should configure proxy agent with credentials URL', async () => {
			const ctx = setupMockContext();
			await node.supplyData.call(ctx, 0);
			// With no timeout option set, both timeouts are passed as undefined.
			expect(mockedGetProxyAgent).toHaveBeenCalledWith(
				'https://api.minimax.io/v1',
				expect.objectContaining({
					headersTimeout: undefined,
					bodyTimeout: undefined,
				}),
			);
		});

		it('should configure proxy agent with custom timeout', async () => {
			const ctx = setupMockContext();
			ctx.getNodeParameter = jest.fn().mockImplementation((paramName: string) => {
				if (paramName === 'model') return 'MiniMax-M2.7';
				if (paramName === 'options') return { timeout: 120000 };
				return undefined;
			});
			await node.supplyData.call(ctx, 0);
			// The user timeout must reach both undici timeout knobs.
			expect(mockedGetProxyAgent).toHaveBeenCalledWith(
				'https://api.minimax.io/v1',
				expect.objectContaining({
					headersTimeout: 120000,
					bodyTimeout: 120000,
				}),
			);
		});
	});
});

View file

@ -63,6 +63,7 @@
"dist/credentials/MotorheadApi.credentials.js",
"dist/credentials/MilvusApi.credentials.js",
"dist/credentials/MistralCloudApi.credentials.js",
"dist/credentials/MinimaxApi.credentials.js",
"dist/credentials/MoonshotApi.credentials.js",
"dist/credentials/LemonadeApi.credentials.js",
"dist/credentials/OllamaApi.credentials.js",
@ -123,6 +124,7 @@
"dist/nodes/llms/LmChatGoogleVertex/LmChatGoogleVertex.node.js",
"dist/nodes/llms/LmChatGroq/LmChatGroq.node.js",
"dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js",
"dist/nodes/llms/LmChatMinimax/LmChatMinimax.node.js",
"dist/nodes/llms/LmChatMoonshot/LmChatMoonshot.node.js",
"dist/nodes/llms/LMChatLemonade/LmChatLemonade.node.js",
"dist/nodes/llms/LMChatOllama/LmChatOllama.node.js",