Add onnxslim>=0.1.59 to TOML export dependencies (#21302)

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
inisis 2025-07-08 01:15:12 +08:00 committed by GitHub
parent a520e48ba2
commit ceeb58317b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 3 additions and 2 deletions

View file

@@ -93,6 +93,7 @@ dev = [
]
export = [
"onnx>=1.12.0,<1.18.0", # ONNX export
"onnxslim>=0.1.59", # ONNX model optimization
"coremltools>=8.0; platform_system != 'Windows' and python_version <= '3.13'", # CoreML supported on macOS and Linux
"scikit-learn>=1.3.2; platform_system != 'Windows' and python_version <= '3.13'", # CoreML k-means quantization
"openvino>=2024.0.0", # OpenVINO export

View file

@@ -574,7 +574,7 @@ class Exporter:
"""Export YOLO model to ONNX format."""
requirements = ["onnx>=1.12.0,<1.18.0"]
if self.args.simplify:
requirements += ["onnxslim>=0.1.56", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
requirements += ["onnxslim>=0.1.59", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
check_requirements(requirements)
import onnx # noqa
@@ -952,7 +952,7 @@ class Exporter:
"ai-edge-litert>=1.2.0,<1.4.0", # required by 'onnx2tf' package
"onnx>=1.12.0,<1.18.0",
"onnx2tf>=1.26.3",
"onnxslim>=0.1.56",
"onnxslim>=0.1.59",
"onnxruntime-gpu" if cuda else "onnxruntime",
"protobuf>=5",
),