ultralytics/tests/test_engine.py
fatih akyon 2283438618
Simplify engine resume tests (#24183)
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
2026-04-10 14:51:02 +08:00

186 lines
6.9 KiB
Python

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
import sys
from types import SimpleNamespace
from unittest import mock
import pytest
import torch
from tests import MODEL, SOURCE
from ultralytics import YOLO
from ultralytics.cfg import get_cfg
from ultralytics.engine.exporter import Exporter
from ultralytics.models.yolo import classify, detect, segment
from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
def test_func(*args, **kwargs):
    """Callback stub; registration in callback lists is asserted by the engine tests."""
    print("callback test passed")
def test_export():
    """Register a callback on an Exporter, export a model, and run inference with the result."""
    exporter = Exporter()
    exporter.add_callback("on_export_start", test_func)
    assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
    exported = exporter(model=YOLO("yolo26n.yaml").model)
    YOLO(exported)(SOURCE)  # exported model inference
def test_detect():
    """Run detection train/val/predict end-to-end and confirm resuming a finished run raises."""
    train_args = {"data": "coco8.yaml", "model": "yolo26n.yaml", "imgsz": 32, "epochs": 1, "save": False}
    cfg = get_cfg(DEFAULT_CFG)
    cfg.data = "coco8.yaml"
    cfg.imgsz = 32

    # Trainer: register a callback, verify registration, then train one epoch.
    trainer = detect.DetectionTrainer(overrides=train_args)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator: score the best checkpoint produced by the trainer above.
    validator = detect.DetectionValidator(args=cfg)
    validator.add_callback("on_val_start", test_func)
    assert test_func in validator.callbacks["on_val_start"], "callback test failed"
    validator(model=trainer.best)  # validate best.pt

    # Predictor: run inference on bundled assets with sys.argv emptied.
    predictor = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
    predictor.add_callback("on_predict_start", test_func)
    assert test_func in predictor.callbacks["on_predict_start"], "callback test failed"
    with mock.patch.object(sys, "argv", []):  # confirm there is no issue with sys.argv being empty
        results = predictor(source=ASSETS, model=MODEL)
        assert len(results), "predictor test failed"

    # Resuming a completed run must fail with an AssertionError.
    with pytest.raises(AssertionError):
        detect.DetectionTrainer(overrides={**train_args, "resume": trainer.last}).train()
def test_segment():
    """Run segmentation train/val/predict end-to-end and confirm resuming a finished run raises."""
    train_args = {
        "data": "coco8-seg.yaml",
        "model": "yolo26n-seg.yaml",
        "imgsz": 32,
        "epochs": 1,
        "save": False,
        "mask_ratio": 1,
        "overlap_mask": False,
    }
    cfg = get_cfg(DEFAULT_CFG)
    cfg.data = "coco8-seg.yaml"
    cfg.imgsz = 32

    # Trainer: register a callback, verify registration, then train one epoch.
    trainer = segment.SegmentationTrainer(overrides=train_args)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator: score the best checkpoint produced by the trainer above.
    validator = segment.SegmentationValidator(args=cfg)
    validator.add_callback("on_val_start", test_func)
    assert test_func in validator.callbacks["on_val_start"], "callback test failed"
    validator(model=trainer.best)  # validate best.pt

    # Predictor: run inference on bundled assets with pretrained seg weights.
    predictor = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
    predictor.add_callback("on_predict_start", test_func)
    assert test_func in predictor.callbacks["on_predict_start"], "callback test failed"
    results = predictor(source=ASSETS, model=WEIGHTS_DIR / "yolo26n-seg.pt")
    assert len(results), "predictor test failed"

    # Resuming a completed run must fail with an AssertionError.
    with pytest.raises(AssertionError):
        segment.SegmentationTrainer(overrides={**train_args, "resume": trainer.last}).train()
def test_classify():
    """Run classification train/val/predict end-to-end on a tiny dataset."""
    train_args = {"data": "imagenet10", "model": "yolo26n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
    cfg = get_cfg(DEFAULT_CFG)
    cfg.data = "imagenet10"
    cfg.imgsz = 32

    # Trainer: register a callback, verify registration, then train one epoch.
    trainer = classify.ClassificationTrainer(overrides=train_args)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator: score the best checkpoint produced by the trainer above.
    validator = classify.ClassificationValidator(args=cfg)
    validator.add_callback("on_val_start", test_func)
    assert test_func in validator.callbacks["on_val_start"], "callback test failed"
    validator(model=trainer.best)

    # Predictor: run inference on bundled assets with the best checkpoint.
    predictor = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
    predictor.add_callback("on_predict_start", test_func)
    assert test_func in predictor.callbacks["on_predict_start"], "callback test failed"
    results = predictor(source=ASSETS, model=trainer.best)
    assert len(results), "predictor test failed"
def test_nan_recovery():
    """Train briefly while poisoning the loss with NaN and confirm training still completes."""
    state = {"injected": False}

    def inject_nan(trainer):
        """Multiply the running loss by NaN exactly once at epoch 1 to exercise recovery."""
        if trainer.epoch == 1 and trainer.tloss is not None and not state["injected"]:
            trainer.tloss *= torch.tensor(float("nan"))
            state["injected"] = True

    trainer = detect.DetectionTrainer(
        overrides={"data": "coco8.yaml", "model": "yolo26n.yaml", "imgsz": 32, "epochs": 3}
    )
    trainer.add_callback("on_train_batch_end", inject_nan)
    trainer.train()  # must survive the injected NaN and finish all epochs
    assert state["injected"], "NaN injection failed"
def test_train_reuses_loaded_checkpoint_model(monkeypatch):
    """Ensure train() hands an already-loaded checkpoint model to the trainer instead of re-parsing the source."""
    model = YOLO("yolo26n.yaml")
    model.ckpt = {"checkpoint": True}
    model.ckpt_path = "/tmp/fake.pt"
    model.overrides["model"] = "ul://glenn-jocher/m2/exp-14"
    loaded_model = model.model
    recorded = {}

    class StubTrainer:
        """Minimal trainer double that records what model.train() passes in."""

        def __init__(self, overrides=None, _callbacks=None):
            self.overrides = overrides
            self.callbacks = _callbacks
            self.model = None
            self.validator = SimpleNamespace(metrics=None)
            self.best = MODEL.parent / "nonexistent-best.pt"
            self.last = MODEL
            recorded["trainer"] = self

        def get_model(self, cfg=None, weights=None, verbose=True):
            recorded["cfg"] = cfg
            recorded["weights"] = weights
            return loaded_model

        def train(self):
            return None

    # Stub out network/update checks and the trainer/checkpoint machinery.
    monkeypatch.setattr("ultralytics.engine.model.checks.check_pip_update_available", lambda: None)
    monkeypatch.setattr(model, "_smart_load", lambda key: StubTrainer)
    monkeypatch.setattr(
        "ultralytics.engine.model.load_checkpoint",
        lambda path: (loaded_model, {"checkpoint": True}),
    )

    model.train(data="coco8.yaml", epochs=1)

    # The trainer must receive the in-memory model object, not a re-parsed copy.
    assert recorded["trainer"].model is loaded_model
    assert recorded["cfg"] == loaded_model.yaml
    assert recorded["weights"] is loaded_model