feat: 📦 update onnx version to 1.20.0 (#22867)

Signed-off-by: Onuralp SEZER <onuralp@ultralytics.com>
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
Onuralp SEZER 2025-12-15 19:21:24 +00:00 committed by GitHub
parent d461859adb
commit c3b3500f7a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 3 additions and 5 deletions

View file

@@ -35,8 +35,6 @@ RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config && \
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Pip install numpy, onnxruntime-gpu, torch, torchvision and ultralytics, then remove build files
# Prevent onnx upgrade to 1.20.0 which has issues with tflite export on Jetson-jetpack6 environment
RUN sed -i -e 's/"onnx>=1.12.0/"onnx>=1.12.0,<1.20.0/' pyproject.toml
RUN python3 -m pip install --upgrade pip uv && \
uv pip install --system \
numpy==1.26.4 \

View file

@@ -90,7 +90,7 @@ dev = [
export = [
"numpy<2.0.0", # TF 2.20 compatibility
"onnx>=1.12.0; platform_system != 'Darwin'", # ONNX export
"onnx>=1.12.0,<1.18.0; platform_system == 'Darwin'", # TF inference hanging on MacOS
"onnx>=1.12.0,<1.18.0; platform_system == 'Darwin'", # TF inference hanging on MacOS (tested up to onnx==1.20.0)
"coremltools>=9.0; platform_system != 'Windows' and python_version <= '3.13'", # CoreML supported on macOS and Linux
"scikit-learn>=1.3.2; platform_system != 'Windows' and python_version <= '3.13'", # CoreML k-means quantization
"openvino>=2024.0.0", # OpenVINO export

View file

@@ -670,7 +670,7 @@ class Exporter:
@try_export
def export_onnx(self, prefix=colorstr("ONNX:")):
"""Export YOLO model to ONNX format."""
requirements = ["onnx>=1.12.0,<=1.19.1"] # pin until onnx_graphsurgeon supports onnx>=1.20
requirements = ["onnx>=1.12.0,<2.0.0"]
if self.args.simplify:
requirements += ["onnxslim>=0.1.71", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
check_requirements(requirements)
@@ -1051,7 +1051,7 @@ class Exporter:
"sng4onnx>=1.0.1", # required by 'onnx2tf' package
"onnx_graphsurgeon>=0.3.26", # required by 'onnx2tf' package
"ai-edge-litert>=1.2.0" + (",<1.4.0" if MACOS else ""), # required by 'onnx2tf' package
"onnx>=1.12.0,<=1.19.1", # pin until onnx_graphsurgeon releases onnx>=1.20 fix
"onnx>=1.12.0,<2.0.0",
"onnx2tf>=1.26.3",
"onnxslim>=0.1.71",
"onnxruntime-gpu" if cuda else "onnxruntime",