* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Bug fix

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* torch_dtype

* Update rl.py

* Fix CE Loss

* Versioning

* Update loader.py

* Update loader.py

* extract_model_type_from_config

* Model types

* Update loader.py

* get_transformers_model_type

* Update loader.py

* Update loader.py

* Update loader.py

* Update rl.py

* Update pyproject.toml

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Versioning

* Update _utils.py

* Update _utils.py

* Update _utils.py

* Update _utils.py

* Update vision.py

* Update vision.py

* Fix DataParallel

* Update _utils.py

* Update rl.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update mapper.py

* Versioning

* Update loader.py

* Update loader.py

* Update rl.py

* Versioning

* Update _utils.py

* Fix auto_mapping

* Update loader.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update loader.py

* Message

* Update vision.py

* Update loader.py

* Update vision.py

* cache_implementation

* Update vision.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* Update vision.py

* Save max_seq_length

* Update _utils.py

* Update rl.py

* Update vision.py

* Update llama.py

* Mistral3 vllm (#3349)

* [WIP] use vLLM for vision language models

* Update README.md

Editing icon sizes

* Update README.md

Updating icon sizes

* Update README.md (#2885)

* MoE kernels AGPLv3

* versioning

* Many bug fixes (#2908)

* add deepseek v3

* add deepseek r1 base

* add deepseek r1 zero

* add deepseek distill llama

* add deepseek distill models

* remove redundant code when constructing model names

* add mistral small to registry

* rename model registration methods

* rename deepseek registration methods

* refactor naming for mistral and phi

* add global register models

* refactor model registration tests for new registry apis

* add model search method

* remove deprecated registration api

* add quant type test

* add registry readme

* make llama registration more specific

* clear registry when executing individual model registration file

* more registry readme updates

* Update _auto_install.py

* Llama4

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Synthetic data

* Update mapper.py

* Xet and Synthetic

* Update synthetic.py

* Update loader.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update pyproject.toml

* Delete .gitignore

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update _utils.py

* Update pyproject.toml

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update chat_templates.py

* Sesame force float16 / float32

* Fix Sesame

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* is_multimodal

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* UNSLOTH_DISABLE_STATIC_GENERATION

* Update vision.py

* Auto vision detection

* Sesame

* Whisper

* Update loader.py

* Update loader.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update _utils.py

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* logging

* Update pyproject.toml

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* logits / temperature

* Update rl_replacements.py

* Update pyproject.toml

* Update rl_replacements.py

* Update rl_replacements.py

* Debugging only

* Update llama.py

* Update llama.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Generic efficient GRPO

* Update rl_replacements.py

* Update rl_replacements.py

* Remove debugging

* Update rl_replacements.py

* Update rl_replacements.py

* Update vision.py

* Update llama.py

* Update rl_replacements.py

* versioning

* Update _utils.py

* Update vision.py

* Update mapper.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update loader.py

* Update vision.py

* Update loader.py

* Update _utils.py

* Update vision.py

* gradient checkpointing

* Gemma 3N fixes

* Update loader.py

* Versioning

* Gemma 3N fixes

* Update vision.py

* Update vision.py

* Update loader.py

* Update vision.py

* Fix setup.py

* setup.py

* Prints

* Update setup.py

* Update setup.py

* Update setup.py

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update vision.py

* Update vision.py

* Update pyproject.toml

* Update vision.py

* Update _utils.py

* Update __init__.py

* Update __init__.py

---------

Co-authored-by: jeromeku <jerome.ku@gmail.com>
Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com>

* silently skip falcon h1 import if transformers_version < 4.53.0 (#2912)

* Dynamically adjust get_per_token_logps function and patch as well (#2911)

* add intel gpu with vllm support (#2903)

* [bugs] fix for causal mask (#2868)

* fix for causal mask

* use un_casual in sdpa

* add missing mask

* fix for type

* Explicitly check if xformers exists for attention (#2889)

* Update __init__.py

* Update llama.py

* if mlp doesn't exist in layer module check for feed_forward name for falcon h1 (#2913)

* Move inputs to right devices. (#2919)

* Move tensors to right devices

* fix multi gpu for non mistral models

* multi GPU RoPE for gemma2

* Finish up multi GPU inference

* Make multiGPU rope a list

* Remove unnecessary transfer to CPU

* Remove unnecessary move to CPU

* Do not move inputs to device yet

will be handled separately in another PR

* Move inputs to appropriate decoder device

* Make device count global variable

* Cleanup RoPE device code

* Fixup num_gpu to device count

* Cleanup device counts

* Use device index for RoPE get_cache

* Do not typecast

* Use tuple instead of list for tensors. Use device index directly

* fixup move to device logic

* WIP VLM vLLM

* Make vLLM patch a function

* Add save and load lora functions

* Make fast_inference setup depend on the flag

* Improve fast inference patching mechanism

* Make vision setting depend on checks in fastbasemodel

* Check LoRA and vLLM intercompatibility for vision models

* Comment pointing to vLLM LoRA check

* Improve lora validation on vLLM

* Error out on no vLLM and increase max lora rank

* Bug fixes (#3017)

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update pyproject.toml

* Delete .gitignore

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update _utils.py

* Update pyproject.toml

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update chat_templates.py

* Sesame force float16 / float32

* Fix Sesame

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* is_multimodal

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* UNSLOTH_DISABLE_STATIC_GENERATION

* Update vision.py

* Auto vision detection

* Sesame

* Whisper

* Update loader.py

* Update loader.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update _utils.py

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* logging

* Update pyproject.toml

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* logits / temperature

* Update rl_replacements.py

* Update pyproject.toml

* Update rl_replacements.py

* Update rl_replacements.py

* Debugging only

* Update llama.py

* Update llama.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Generic efficient GRPO

* Update rl_replacements.py

* Update rl_replacements.py

* Remove debugging

* Update rl_replacements.py

* Update rl_replacements.py

* Update vision.py

* Update llama.py

* Update rl_replacements.py

* versioning

* Update _utils.py

* Update vision.py

* Update mapper.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update loader.py

* Update vision.py

* Update loader.py

* Update _utils.py

* Update vision.py

* gradient checkpointing

* Gemma 3N fixes

* Update loader.py

* Versioning

* Gemma 3N fixes

* Update vision.py

* Update vision.py

* Update loader.py

* Update vision.py

* Fix setup.py

* setup.py

* Prints

* Update setup.py

* Update setup.py

* Update setup.py

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update vision.py

* Update vision.py

* Update pyproject.toml

* Update vision.py

* Update _utils.py

* Update __init__.py

* Update __init__.py

* Small fixes

* Update vision.py

* Update vision.py

* versioning

* Update __init__.py

* Update llama.py

* Update rl.py

* Update rl.py

* Update _utils.py

* Update vision.py

* Update vision.py

* compiler stance

* Update _utils.py

* Update pyproject.toml

* Update pyproject.toml

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Revert "Revert "Add Qwen2.5-VL-32B-Instruct mapping to fix quantized model me…" (#2990)

This reverts commit 4021da634a.

* skip_guard_eval_unsafe fix

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update llama.py

* Update llama.py

* Fix `quantization_method`

* versioning

* fix for causal mask (#3011)

* [intel] add for intel path for llama.py (#3012)

* fix for intel path

* remove unuse code

* Update unsloth/models/llama.py

---------

Co-authored-by: Daniel Han <danielhanchen@gmail.com>

* Update llama.py

* Fix Gemma 2 (#3024)

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update pyproject.toml

* Delete .gitignore

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update _utils.py

* Update pyproject.toml

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update chat_templates.py

* Sesame force float16 / float32

* Fix Sesame

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* is_multimodal

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* UNSLOTH_DISABLE_STATIC_GENERATION

* Update vision.py

* Auto vision detection

* Sesame

* Whisper

* Update loader.py

* Update loader.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update _utils.py

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* logging

* Update pyproject.toml

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* logits / temperature

* Update rl_replacements.py

* Update pyproject.toml

* Update rl_replacements.py

* Update rl_replacements.py

* Debugging only

* Update llama.py

* Update llama.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Generic efficient GRPO

* Update rl_replacements.py

* Update rl_replacements.py

* Remove debugging

* Update rl_replacements.py

* Update rl_replacements.py

* Update vision.py

* Update llama.py

* Update rl_replacements.py

* versioning

* Update _utils.py

* Update vision.py

* Update mapper.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update loader.py

* Update vision.py

* Update loader.py

* Update _utils.py

* Update vision.py

* gradient checkpointing

* Gemma 3N fixes

* Update loader.py

* Versioning

* Gemma 3N fixes

* Update vision.py

* Update vision.py

* Update loader.py

* Update vision.py

* Fix setup.py

* setup.py

* Prints

* Update setup.py

* Update setup.py

* Update setup.py

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update vision.py

* Update vision.py

* Update pyproject.toml

* Update vision.py

* Update _utils.py

* Update __init__.py

* Update __init__.py

* Small fixes

* Update vision.py

* Update vision.py

* versioning

* Update __init__.py

* Update llama.py

* Update rl.py

* Update rl.py

* Update _utils.py

* Update vision.py

* Update vision.py

* compiler stance

* Update _utils.py

* Update pyproject.toml

* Update pyproject.toml

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Revert "Revert "Add Qwen2.5-VL-32B-Instruct mapping to fix quantized model me…" (#2990)

This reverts commit 4021da634a.

* skip_guard_eval_unsafe fix

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update llama.py

* Update llama.py

* Fix `quantization_method`

* versioning

* Update _utils.py

* Update _utils.py

* Update _utils.py

* falcon force float32 on sm<75 machines (#3026)

* Fix torch compile issues (#3028)

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update pyproject.toml

* Delete .gitignore

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update _utils.py

* Update pyproject.toml

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update chat_templates.py

* Sesame force float16 / float32

* Fix Sesame

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* is_multimodal

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update vision.py

* UNSLOTH_DISABLE_STATIC_GENERATION

* Update vision.py

* Auto vision detection

* Sesame

* Whisper

* Update loader.py

* Update loader.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

* Update _utils.py

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* logging

* Update pyproject.toml

* Update rl.py

* versioning

* Update rl.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* logits / temperature

* Update rl_replacements.py

* Update pyproject.toml

* Update rl_replacements.py

* Update rl_replacements.py

* Debugging only

* Update llama.py

* Update llama.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Generic efficient GRPO

* Update rl_replacements.py

* Update rl_replacements.py

* Remove debugging

* Update rl_replacements.py

* Update rl_replacements.py

* Update vision.py

* Update llama.py

* Update rl_replacements.py

* versioning

* Update _utils.py

* Update vision.py

* Update mapper.py

* Update loader.py

* Update mapper.py

* Update vision.py

* Update loader.py

* Update vision.py

* Update loader.py

* Update _utils.py

* Update vision.py

* gradient checkpointing

* Gemma 3N fixes

* Update loader.py

* Versioning

* Gemma 3N fixes

* Update vision.py

* Update vision.py

* Update loader.py

* Update vision.py

* Fix setup.py

* setup.py

* Prints

* Update setup.py

* Update setup.py

* Update setup.py

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update pyproject.toml

* Update vision.py

* Update vision.py

* Update pyproject.toml

* Update vision.py

* Update _utils.py

* Update __init__.py

* Update __init__.py

* Small fixes

* Update vision.py

* Update vision.py

* versioning

* Update __init__.py

* Update llama.py

* Update rl.py

* Update rl.py

* Update _utils.py

* Update vision.py

* Update vision.py

* compiler stance

* Update _utils.py

* Update pyproject.toml

* Update pyproject.toml

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Update rl_replacements.py

* Revert "Revert "Add Qwen2.5-VL-32B-Instruct mapping to fix quantized model me…" (#2990)

This reverts commit 4021da634a.

* skip_guard_eval_unsafe fix

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update synthetic.py

* Update llama.py

* Update llama.py

* Fix `quantization_method`

* versioning

* Update _utils.py

* Update _utils.py

* Update _utils.py

* check stride

* Cleanup

* Update rope_embedding.py

* Update gemma2.py

* Fix `set_stance`

* Update pyproject.toml

* Update _utils.py

* Fixup patch vllm

* Disable mllama

* Use variables to decide VLM support

* Better attn_impl handling

* Patch TF protobuf incompatability

* Torch 2.8 (#3186)

* Fix mamba

* Update loader.py

* Update vision.py

* Update loader.py

* Filter vLLM standby logs (#3131)

* filter vLLM standby logs

* safeguard standby logger patch

* Update unsloth/models/_utils.py

* Update unsloth/models/_utils.py

* Update unsloth/models/_utils.py

---------

Co-authored-by: Daniel Han <danielhanchen@gmail.com>

* Update loader.py

* Add scaler

* Update llama.py

* Update _utils.py

* Versioning

* GPT OSS fix

* GPT OSS fix

* Update loader.py

* Update vision.py

* Update vision.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update llama.py

* Update llama.py

* Update llama.py

* Versioning

* Update mapper.py

* Update vision.py

* Update vision.py

* Update vision.py

* Upcast norms

* Update loader.py

* Update vision.py

* Upcast layernorms

* Update llama.py

* Update llama.py

* Update llama.py

* Update llama.py

* Update llama.py

* Update llama.py

* Update save.py

* Update rl.py

* Update pyproject.toml

* Update rl.py

* Update rl_replacements.py

* Update rl.py

* Update rl.py

* Update rl.py

* Update _utils.py

* Update __init__.py

* Torch 2.8

* Update rl_replacements.py

---------

Co-authored-by: Datta Nimmaturi <venkatadattasainimmaturi@gmail.com>

* Update _auto_install.py

* Update pyproject.toml

* Update rl.py

* Protobuf issue

* Update pyproject.toml

* Fix extras transformers typo in pyproject.toml

* Update _utils.py

* Bug fixes (#3195)

* Fix mamba

* Update loader.py

* Update vision.py

* Update loader.py

* Filter vLLM standby logs (#3131)

* filter vLLM standby logs

* safeguard standby logger patch

* Update unsloth/models/_utils.py

* Update unsloth/models/_utils.py

* Update unsloth/models/_utils.py

---------

Co-authored-by: Daniel Han <danielhanchen@gmail.com>

* Update loader.py

* Add scaler

* Update llama.py

* Update _utils.py

* Versioning

* GPT OSS fix

* GPT OSS fix

* Update loader.py

* Update vision.py

* Update vision.py

* Update loader.py

* Update vision.py

* Update vision.py

* Update llama.py

* Update llama.py

* Update llama.py

* Versioning

* Update mapper.py

* Update vision.py

* Update vision.py

* Update vision.py

* Upcast norms

* Update loader.py

* Update vision.py

* Upcast layernorms

* Update llama.py

* Update llama.py

* Update llama.py

* Update llama.py

* Update llama.py

* Update llama.py

* Update save.py

* Update rl.py

* Update pyproject.toml

* Update rl.py

* Update rl_replacements.py

* Update rl.py

* Update rl.py

* Update rl.py

* Update _utils.py

* Update __init__.py

* Torch 2.8

* Update rl_replacements.py

* Update loader.py

* UNSLOTH_ENABLE_CCE

* Fix

* Update loader.py

* Update loader.py

* Update __init__.py

* Update __init__.py

* Update __init__.py

* Update __init__.py

* Import fixes

* Update loader.py

* Fix aimv2 issue

* Update loader.py

* Update import_fixes.py

* Update import_fixes.py

* Update loader.py

* Update loader.py

* Update loader.py

* Upgrade

* Update loader.py

* Update loader.py

* Update loader.py

* Update loader.py

---------

Co-authored-by: Datta Nimmaturi <venkatadattasainimmaturi@gmail.com>

* allow float32 dtype in FastLanguageModel (#3204)

* Update loader.py

* Update vision.py

* Suppress message and use unsloth sampling params

* Use trl sampling params for now

* Improve error message

* fixup quantized fast inference model name

* Add mistral 3 support

---------

Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com>
Co-authored-by: Daniel Han <danielhanchen@gmail.com>
Co-authored-by: jeromeku <jerome.ku@gmail.com>
Co-authored-by: DoubleMathew <mmathew23@gmail.com>
Co-authored-by: Lei Zhenyuan <zhenyuan.lei@intel.com>
Co-authored-by: parth2510 <parthguptapg7326@gmail.com>

* Set padding to 0

* Fix patch

* fixup patch (#3359)

Co-authored-by: Datta Nimmaturi <venkatadattasainimmaturi@gmail.com>

* Update vision.py

* Versioning

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* MXFP4 dequant

* Update loader.py

* Update vision.py

* load_in_16bit

* Update vision.py

* Update vision.py

* Update vision.py

* Update rl.py

* Update vision.py

* offload_embedding

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update rl_replacements.py

* Update loader.py

* Fix padding issue

* Update pyproject.toml

* Update _utils.py

* Update pyproject.toml

* Update _utils.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* Update vision.py

* New models

* Update llama.py

* Versioning

* Update _utils.py

* Update llama.py

* Update _utils.py

* Update llama.py

* Fix AMD

* Update _utils.py

* Update llama.py

* Update vision.py

* DEVICE_TYPE_TORCH

* Update __init__.py

* Update __init__.py

* Update _utils.py

* Move DEVICE_TYPE

* Update rl_replacements.py

* Update loader.py

* AMD install script

* Move AMD

* Update _amd_install.sh

* Update pyproject.toml

* Update pyproject.toml

* Delete _amd_install.sh

* Update device_type.py

* Update loader.py

* Update _utils.py

* Update _utils.py

* Update _utils.py

* Update _utils.py

* Update _utils.py

* Update tokenizer_utils.py

* Versioning

* Update pyproject.toml

* Update loader.py

* Update _utils.py

* Update pyproject.toml

* Update pyproject.toml

* Update _utils.py

* Update pyproject.toml

* Update _utils.py

* Update _utils.py

* Update loader.py

* Update _utils.py

* Update _utils.py

* local_files_only

* Cut Cross Entropy

* Update llama.py

* Update vision.py

* Update vision.py

* Update vision.py

* Qwen 3 VL vLLM (#3489)

* Update __init__.py

* patch_torchao

* torchao_logger

* Update rl_replacements.py

* Fix

* Update rl.py

* Update rl.py

* Update rl.py

* Update rl.py

* Update _utils.py

* Versioning

---------

Co-authored-by: Datta Nimmaturi <venkatadattasainimmaturi@gmail.com>
Co-authored-by: Michael Han <107991372+shimmyshimmer@users.noreply.github.com>
Co-authored-by: jeromeku <jerome.ku@gmail.com>
Co-authored-by: DoubleMathew <mmathew23@gmail.com>
Co-authored-by: Lei Zhenyuan <zhenyuan.lei@intel.com>
Co-authored-by: parth2510 <parthguptapg7326@gmail.com>
This commit is contained in:
Daniel Han 2025-11-03 06:47:26 -08:00 committed by GitHub
parent c449c7b06e
commit a9ff4e23c9
6 changed files with 30 additions and 24 deletions

View file

@ -55,11 +55,11 @@ huggingfacenotorch = [
"hf_transfer",
"diffusers",
"transformers>=4.51.3,!=4.52.0,!=4.52.1,!=4.52.2,!=4.52.3,!=4.53.0,!=4.54.0,!=4.55.0,!=4.55.1,!=4.57.0,<=4.57.2",
"trl>=0.18.2,!=0.19.0,<=0.23.0",
"trl>=0.18.2,!=0.19.0,<=0.24.0",
]
huggingface = [
"unsloth[huggingfacenotorch]",
"unsloth_zoo>=2025.10.13",
"unsloth_zoo>=2025.11.1",
"torchvision",
"unsloth[triton]",
]
@ -489,7 +489,7 @@ colab-ampere-torch220 = [
"flash-attn>=2.6.3 ; ('linux' in sys_platform)",
]
colab-new = [
"unsloth_zoo>=2025.10.13",
"unsloth_zoo>=2025.11.1",
"packaging",
"tyro",
"transformers>=4.51.3,!=4.52.0,!=4.52.1,!=4.52.2,!=4.52.3,!=4.53.0,!=4.54.0,!=4.55.0,!=4.55.1,!=4.57.0,<=4.57.2",
@ -507,7 +507,7 @@ colab-new = [
]
colab-no-deps = [
"accelerate>=0.34.1",
"trl>=0.18.2,!=0.19.0,<=0.23.0",
"trl>=0.18.2,!=0.19.0,<=0.24.0",
"peft>=0.7.1",
"xformers ; ('linux' in sys_platform or sys_platform == 'win32') and (platform_machine == 'AMD64' or platform_machine == 'x86_64')",
"bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0",

View file

@ -29,7 +29,6 @@ already_imported = [mod for mod in critical_modules if mod in sys.modules]
# their code at import time. If they're imported first, the original (slower,
# more memory-intensive) implementations will be used instead of Unsloth's
# optimized versions, potentially causing OOM errors or slower training.
if already_imported:
# stacklevel=2 makes warning point to user's import line rather than this library code,
# showing them exactly where to fix the import order in their script
@ -40,7 +39,7 @@ if already_imported:
f"Please restructure your imports with 'import unsloth' at the top of your file.",
stacklevel = 2,
)
pass
del already_imported, critical_modules
# Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so
# enabling it will require much more work, so we have to prioritize. Please understand!
@ -57,14 +56,12 @@ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
# Log Unsloth is being used
os.environ["UNSLOTH_IS_PRESENT"] = "1"
import importlib.util
from pathlib import Path
from importlib.metadata import version as importlib_version
from importlib.metadata import PackageNotFoundError
# Check for unsloth_zoo
try:
unsloth_zoo_version = importlib_version("unsloth_zoo")
if Version(unsloth_zoo_version) < Version("2025.10.13"):
if Version(unsloth_zoo_version) < Version("2025.11.1"):
print(
"Unsloth: Please update Unsloth and Unsloth-Zoo to the latest version!\n"\
"Do this via `pip install --upgrade --force-reinstall --no-cache-dir --no-deps unsloth unsloth_zoo`"
@ -82,7 +79,7 @@ except PackageNotFoundError:
raise ImportError(f"Unsloth: Please install unsloth_zoo via `pip install unsloth_zoo` then retry!")
except:
raise
pass
del PackageNotFoundError, importlib_version
# Try importing PyTorch and check version
try:
@ -92,8 +89,8 @@ except ModuleNotFoundError:
"Unsloth: Pytorch is not installed. Go to https://pytorch.org/.\n"\
"We have some installation instructions on our Github page."
)
except Exception as exception:
raise exception
except:
raise
pass
from unsloth_zoo.device_type import (
@ -130,7 +127,7 @@ if DEVICE_TYPE == "cuda":
else:
def is_bf16_supported(): return SUPPORTS_BFLOAT16
torch.cuda.is_bf16_supported = is_bf16_supported
pass
del major_version, minor_version
elif DEVICE_TYPE == "hip":
SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported()
elif DEVICE_TYPE == "xpu":
@ -179,6 +176,8 @@ if DEVICE_TYPE == "cuda":
latest_cuda = np.argsort([float(find_number.search(x).group(1)) for x in possible_cudas])[::-1][0]
latest_cuda = possible_cudas[latest_cuda]
os.system(f"ldconfig /usr/local/{latest_cuda}")
del find_number, latest_cuda
del possible_cudas, find_cuda
pass
importlib.reload(bnb)
@ -200,7 +199,7 @@ if DEVICE_TYPE == "cuda":
"Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"\
"Unsloth will still run for now, but maybe it might crash - let's hope it works!"
)
pass
del libcuda_dirs
elif DEVICE_TYPE == "hip":
# NO-OP for rocm device
pass

View file

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2025.10.12"
__version__ = "2025.11.1"
__all__ = [
"SUPPORTS_BFLOAT16",
@ -235,6 +235,9 @@ del transformers_training_args_logger
# No label_names provided for model class
from transformers.trainer import logger as transformers_trainer_logger
transformers_trainer_logger.addFilter(HideLoggingMessage("No label_names"))
# The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config.
transformers_trainer_logger.addFilter(HideLoggingMessage("The tokenizer has new"))
del transformers_trainer_logger
# Using the default loss: `ForCausalLMLoss`.

View file

@ -752,11 +752,11 @@ def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"):
" scale_rewards = False\n"\
"elif loss_type.lower() == 'dapo':\n"\
" if mask_truncated_completions != True:\n"\
" print('Unsloth: The DAPO paper recommends `mask_truncated_completions = True`')\n"\
" print('Unsloth: The DAPO paper recommends `mask_truncated_completions = True` - we will set it.')\n"\
" if epsilon_high != 0.28:\n"\
" print('Unsloth: The DAPO paper recommends `epsilon_high = 0.28`')\n"\
" print('Unsloth: The DAPO paper recommends `epsilon_high = 0.28` - we will set it.')\n"\
" if beta != 0.0:\n"\
" print('Unsloth: The DAPO paper recommends setting `beta = 0.0` to remove the KL term')\n"\
" print('Unsloth: The DAPO paper recommends setting `beta = 0.0` to remove the KL term - we will set it.')\n"\
" mask_truncated_completions = True\n"\
" epsilon_high = 0.28\n"\
" beta = 0.0\n"\
@ -1085,10 +1085,13 @@ def patch_functions(RLTrainer, trainer_file, RLTrainer_name, all_imports, import
)
# Replace self.llm.generate and self.llm.chat
lora_name = trainer_file + "_lora_model"
if "CUDA_VISIBLE_DEVICES" in os.environ:
lora_name = trainer_file + "_lora_model_' + " + "(os.environ.get('CUDA_VISIBLE_DEVICES', '0').replace(',',''))"
else:
lora_name = trainer_file + "_lora_model'"
source = re.sub(
r"(self\.llm\.(?:generate|chat)\([^\)]{1,})\)",
r"\1, lora_request = self.model.load_lora('" + lora_name + r"', load_tensors = True))",
r"\1, lora_request = self.model.load_lora('" + lora_name + r", load_tensors = True))",
source
)
# Prefer using unsloth's sampling params and fallback to trl's if not found

View file

@ -216,17 +216,17 @@ def grpo_trainer__generate_and_score_completions(function_name, function):
replacement_lines = """
batch_size = self.args.per_device_train_batch_size if mode == "train" else self.args.per_device_eval_batch_size
try:
#TRL 0.23.1 and below path
# TRL 0.23.1 and below path
if not has_images:
# Left pad prompt before calculation old and ref hidden states
prompt_completion_ids = left_pack_padding(prompt_completion_ids, self.processing_class.pad_token_id)
self.model.for_training()
except:
#TRL 0.24.0 and below path
except:
# TRL 0.24.0 and below path
if images is None:
# Left pad prompt before calculation old and ref hidden states
prompt_completion_ids = left_pack_padding(prompt_completion_ids, self.processing_class.pad_token_id)
self.model.for_training()"""
self.model.for_training()"""
function = function.replace(line_to_replace, replacement_lines)

View file

@ -91,6 +91,7 @@ VLLM_SUPPORTED_VLM = [
"qwen2_5_vl",
"gemma3",
"mistral3",
"qwen3_vl",
]
VLLM_NON_LORA_VLM = [
"mllama",