updated models template mappers. added lfm2.5vl450m to transformers 5.3.0 whitelist (#4939)

* updated models template mappers. added lfm2.5vl450m to transformers 5.3.0 whitelist

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
This commit is contained in:
Roland Tannous 2026-04-09 23:36:42 +04:00 committed by GitHub
parent d5525e8bbb
commit bcf4fd6bd3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 69 additions and 4 deletions

View file

@ -215,6 +215,21 @@ TEMPLATE_TO_MODEL_MAPPER = {
"google/gemma-3n-E2B-it",
"unsloth/gemma-3n-E2B-it-unsloth-bnb-4bit",
),
"gemma-4": (
"unsloth/gemma-4-E2B-it",
"google/gemma-4-E2B-it",
"unsloth/gemma-4-E4B-it",
"google/gemma-4-E4B-it",
"unsloth/gemma-4-E2B-it-unsloth-bnb-4bit",
"unsloth/gemma-4-E4B-it-unsloth-bnb-4bit",
),
"gemma-4-thinking": (
"unsloth/gemma-4-26B-A4B-it",
"google/gemma-4-26B-A4B-it",
"unsloth/gemma-4-31B-it",
"unsloth/gemma-4-31B-it-unsloth-bnb-4bit",
"google/gemma-4-31B-it",
),
"qwen2.5": (
"unsloth/Qwen2.5-0.5B-Instruct-unsloth-bnb-4bit",
"unsloth/Qwen2.5-0.5B-Instruct",
@ -399,6 +414,12 @@ TEMPLATE_TO_MODEL_MAPPER = {
"THUDM/GLM-4.7-Flash",
"unsloth/GLM-4.7-Flash-bnb-4bit",
),
"lfm-2": (
"unsloth/LFM2-1.2B",
"LiquidAI/LFM2-1.2B",
"unsloth/LFM2-1.2B-unsloth-bnb-4bit",
),
"lfm-2.5": ("unsloth/LFM2.5-1.2B-Instruct",),
}
MODEL_TO_TEMPLATE_MAPPER = {}
@ -414,6 +435,14 @@ for key, values in TEMPLATE_TO_MODEL_MAPPER.items():
TEMPLATE_TO_RESPONSES_MAPPER = {
"gemma-4-thinking": {
"instruction": "<|turn>user\n",
"response": "<|turn>model\n",
},
"gemma-4": {
"instruction": "<|turn>user\n",
"response": "<|turn>model\n",
},
"gemma-3": {
"instruction": "<start_of_turn>user\n",
"response": "<start_of_turn>model\n",
@ -514,6 +543,10 @@ TEMPLATE_TO_RESPONSES_MAPPER = {
"instruction": "<|im_start|>user\n",
"response": "<|im_start|>assistant\n",
},
"lfm-2.5": {
"instruction": "<|im_start|>user\n",
"response": "<|im_start|>assistant\n",
},
"starling": {
"instruction": "GPT4 Correct User: ",
"response": "GPT4 Correct Assistant: ",

View file

@ -52,6 +52,7 @@ TRANSFORMERS_5_MODEL_SUBSTRINGS: tuple[str, ...] = (
"qwen3.5", # Qwen3.5 family (35B-A3B, etc.)
"qwen3-next", # Qwen3-Next and variants
"tiny_qwen3_moe", # imdatta0/tiny_qwen3_moe_2.8B_0.7B
"lfm2.5-vl-450m", # LiquidAI/LFM2.5-VL-450M
)
# Lowercase substrings for models that require transformers 5.5.0 (checked first).

View file

@ -22,6 +22,39 @@ __all__ = [
__INT_TO_FLOAT_MAPPER = \
{
"unsloth/gemma-4-E2B-it-unsloth-bnb-4bit" : (
"unsloth/gemma-4-E2B-it",
"google/gemma-4-E2B-it",
),
"unsloth/gemma-4-E4B-it-unsloth-bnb-4bit" : (
"unsloth/gemma-4-E4B-it",
"google/gemma-4-E4B-it",
),
"unsloth/gemma-4-26B-A4B-it" : (
"unsloth/gemma-4-26B-A4B-it",
"google/gemma-4-26B-A4B-it",
),
"unsloth/gemma-4-31B-it" : (
"unsloth/gemma-4-31B-it",
"google/gemma-4-31B-it",
),
"unsloth/gemma-4-E2B" : (
"unsloth/gemma-4-E2B",
"google/gemma-4-E2B",
),
"unsloth/gemma-4-E4B" : (
"unsloth/gemma-4-E4B",
"google/gemma-4-E4B",
),
"unsloth/gemma-4-26B-A4B" : (
"unsloth/gemma-4-26B-A4B",
"google/gemma-4-26B-A4B",
),
"unsloth/gemma-4-31B" : (
"unsloth/gemma-4-31B",
"google/gemma-4-31B",
),
"unsloth/mistral-7b-bnb-4bit" : (
"unsloth/mistral-7b",
"mistralai/Mistral-7B-v0.1",

View file

@ -1978,13 +1978,11 @@ OLLAMA_TEMPLATE_TO_MODEL_MAPPER = {
),
"gemma4": (
"unsloth/gemma-4-E2B-it",
"unsloth/gemma-4-E2B",
"unsloth/gemma-4-E2B-it-unsloth-bnb-4bit",
"unsloth/gemma-4-E4B-it",
"unsloth/gemma-4-E4B",
"unsloth/gemma-4-E4B-it-unsloth-bnb-4bit",
"unsloth/gemma-4-31B-it",
"unsloth/gemma-4-31B",
"unsloth/gemma-4-26B-A4B-it",
"unsloth/gemma-4-26B-A4B",
),
"gemma3n": (
"unsloth/gemma-3n-E4B-it-unsloth-bnb-4bit",