Dockerfile improvements (#23090)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
Glenn Jocher 2025-12-31 11:23:49 +01:00 committed by GitHub
parent 5d13afc552
commit abd4c52dd9
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 39 additions and 38 deletions

View file

@@ -40,7 +40,7 @@ RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config && \
sed -i'' -e 's/"opencv-python/"opencv-python-headless/' pyproject.toml
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Install pip packages
# Install pip packages (uv already installed in base image)
RUN uv pip install --system -e "." albumentations faster-coco-eval wandb && \
# Remove extra build files \
rm -rf tmp /root/.config/Ultralytics/persistent_cache.json

View file

@@ -20,13 +20,11 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
/root/.config/Ultralytics/
# Install linux packages
# pkg-config and libhdf5-dev (not included) are needed to build 'h5py==3.11.0' aarch64 wheel required by 'tensorflow'
# gnupg required for Edge TPU install
RUN apt update && \
apt upgrade -y && \
apt install -y --no-install-recommends \
# TensorFlow on aarch64 may require pkg-config and libhdf5-dev if h5py builds from source.
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3-pip git zip unzip wget curl htop gcc libgl1 libglib2.0-0 gnupg && \
apt clean && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Create working directory
@@ -39,7 +37,7 @@ RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config && \
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
# Install pip packages, create python symlink, and remove build files
RUN pip install uv && \
RUN python3 -m pip install uv && \
uv pip install --system -e ".[export]" --break-system-packages && \
# Creates a symbolic link to make 'python' point to 'python3'
ln -sf /usr/bin/python3 /usr/bin/python && \

View file

@@ -20,11 +20,13 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
# Install linux packages and conda packages
RUN apt-get update && \
apt-get install -y --no-install-recommends libgl1 && \
apt-get clean && \
# Install conda packages
# mkl required to fix 'OSError: libmkl_intel_lp64.so.2: cannot open shared object file: No such file or directory'
conda config --set solver libmamba && \
conda install pytorch torchvision pytorch-cuda=12.1 -c pytorch -c nvidia && \
conda install -c conda-forge ultralytics mkl && \
conda install -y pytorch torchvision pytorch-cuda=12.1 -c pytorch -c nvidia && \
conda install -y -c conda-forge ultralytics mkl && \
conda clean -afy && \
# Remove extra build files
rm -rf /var/lib/apt/lists/* /root/.config/Ultralytics/persistent_cache.json

View file

@@ -6,7 +6,7 @@
FROM ultralytics/ultralytics:latest
# Install export dependencies and run exports to AutoInstall packages
# Numpy 1.26.4 required due to TF export bug with torch 2.8
# Numpy 1.26.4 required for TensorFlow export compatibility
# Note tensorrt installed on-demand as depends on runtime environment CUDA version
RUN uv pip install --system -e ".[export]" "onnxruntime-gpu" paddlepaddle x2paddle numpy==1.26.4 && \
# Run exports to AutoInstall packages \

View file

@@ -25,8 +25,9 @@ RUN wget -q -O - https://repo.download.nvidia.com/jetson/jetson-ota-public.asc |
# gnupg required for Edge TPU install
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git python3.8 python3.8-dev python3-pip python3-libnvinfer libopenmpi-dev libopenblas-base libomp-dev gcc \
&& rm -rf /var/lib/apt/lists/*
git python3.8 python3.8-dev python3-pip python3-libnvinfer libopenmpi-dev libopenblas-base libomp-dev gcc && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Create symbolic links for python3.8 and pip3
RUN ln -sf /usr/bin/python3.8 /usr/bin/python3 && \

View file

@@ -21,6 +21,7 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git python3-pip libopenmpi-dev libopenblas-base libomp-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Create working directory

View file

@@ -23,6 +23,7 @@ RUN dpkg -i cuda-keyring_1.1-1_all.deb && \
apt-get update && \
apt-get install -y --no-install-recommends \
git python3-pip libopenmpi-dev libopenblas-base libomp-dev libcusparselt0 libcusparselt-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* cuda-keyring_1.1-1_all.deb
# Create working directory

View file

@@ -15,7 +15,7 @@ RUN uv pip install --system jupyterlab && \
rm -rf tmp /root/.config/Ultralytics/persistent_cache.json
# Start JupyterLab with tutorial notebook
ENTRYPOINT ["/usr/local/bin/jupyter", "lab", "--allow-root", "--ip=*", "/ultralytics/examples/tutorial.ipynb"]
ENTRYPOINT ["/usr/local/bin/jupyter", "lab", "--allow-root", "--ip=0.0.0.0", "/ultralytics/examples/tutorial.ipynb"]
# Usage Examples -------------------------------------------------------------------------------------------------------

View file

@@ -14,16 +14,17 @@ ENV RUNNER_ALLOW_RUNASROOT=1 \
# Set the working directory
WORKDIR /actions-runner
# Download and unpack the latest runner from https://github.com/actions/runner and install dependencies
# Download and unpack the runner from https://github.com/actions/runner and install dependencies
RUN FILENAME=actions-runner-linux-x64-${RUNNER_VERSION}.tar.gz && \
curl -o "$FILENAME" -L "https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/${FILENAME}" && \
curl -fLso "$FILENAME" "https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/${FILENAME}" && \
tar xzf "$FILENAME" && \
rm "$FILENAME" && \
# Install runner dependencies \
uv pip install --system pytest-cov && \
./bin/installdependencies.sh && \
apt-get update && \
apt-get -y install libicu-dev && \
apt-get install -y --no-install-recommends libicu-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# JSON ENTRYPOINT command to configure and start runner with default TOKEN and NAME

View file

@@ -24,13 +24,13 @@ Ultralytics is a [computer vision](https://www.ultralytics.com/glossary/computer
Installing the Ultralytics package is straightforward using pip:
```
```bash
pip install ultralytics
```
For the latest development version, install directly from the GitHub repository:
```
```bash
pip install git+https://github.com/ultralytics/ultralytics.git
```
@@ -168,7 +168,7 @@ Ultralytics YOLO boasts a rich set of features for advanced computer vision task
- Pretrained Models: Access a variety of [pretrained models](https://docs.ultralytics.com/models/) that balance speed and accuracy for different use cases.
- Custom Training: Easily fine-tune models on custom datasets with the flexible [training pipeline](https://docs.ultralytics.com/modes/train/).
- Wide [Deployment Options](https://docs.ultralytics.com/guides/model-deployment-options/): Export models to various formats like TensorRT, ONNX, and CoreML for deployment across different platforms.
- Extensive Documentation: Benefit from comprehensive [documentation](https://docs.ultralytics.com/) and a supportive community to guide you through your computer vision journey.
- Extensive Documentation: Benefit from comprehensive [documentation](https://docs.ultralytics.com/) and a supportive community for your computer vision workflows.
### How can I improve the performance of my YOLO model?

View file

@@ -95,7 +95,7 @@ For a detailed understanding of the model training process and best practices, r
## Keep Learning about JupyterLab
If you're excited to learn more about JupyterLab, here are some great resources to get you started:
If you want to learn more about JupyterLab, here are resources to get you started:
- [**JupyterLab Documentation**](https://jupyterlab.readthedocs.io/en/stable/getting_started/starting.html): Dive into the official JupyterLab Documentation to explore its features and capabilities. It's a great way to understand how to use this powerful tool to its fullest potential.
- [**Try It With Binder**](https://mybinder.org/v2/gh/jupyterlab/jupyterlab-demo/HEAD?urlpath=lab/tree/demo): Experiment with JupyterLab without installing anything by using Binder, which lets you launch a live JupyterLab instance directly in your browser. It's a great way to start experimenting immediately.

View file

@@ -202,4 +202,4 @@ Learn more about evaluation metrics like [Precision](https://www.ultralytics.com
<p align="center"><img width="1000" src="https://github.com/ultralytics/docs/releases/download/0/gcp-running-docker.avif" alt="Running YOLOv5 inside a Docker container on GCP"></p>
Congratulations! You have successfully set up and run YOLOv5 within a Docker container.
You have successfully set up and run YOLOv5 within a Docker container.

View file

@@ -1,6 +1,6 @@
# YOLO-Series ONNXRuntime Rust Demo for Core YOLO Tasks
This repository provides a [Rust](https://rust-lang.org/) demo showcasing key [Ultralytics YOLO](https://docs.ultralytics.com/) series tasks such as [Classification](https://docs.ultralytics.com/tasks/classify/), [Segmentation](https://docs.ultralytics.com/tasks/segment/), [Detection](https://docs.ultralytics.com/tasks/detect/), [Pose Estimation](https://docs.ultralytics.com/tasks/pose/), and Oriented Bounding Box ([OBB](https://docs.ultralytics.com/tasks/obb/)) detection using the [ONNXRuntime](https://github.com/microsoft/onnxruntime). It supports various YOLO models (v5 through 11) across multiple computer vision tasks.
This repository provides a [Rust](https://rust-lang.org/) demo showcasing key [Ultralytics YOLO](https://docs.ultralytics.com/) series tasks such as [Classification](https://docs.ultralytics.com/tasks/classify/), [Segmentation](https://docs.ultralytics.com/tasks/segment/), [Detection](https://docs.ultralytics.com/tasks/detect/), [Pose Estimation](https://docs.ultralytics.com/tasks/pose/), and Oriented Bounding Box ([OBB](https://docs.ultralytics.com/tasks/obb/)) detection using the [ONNXRuntime](https://github.com/microsoft/onnxruntime). It supports various YOLO models (YOLOv5 through YOLO11) across multiple computer vision tasks.
## ✨ Introduction

View file

@@ -105,6 +105,6 @@ Contributions are welcome! If you find any issues or have suggestions for improv
---
For more resources, explore the [Ultralytics documentation](https://docs.ultralytics.com/), [Ultralytics blog](https://www.ultralytics.com/blog), and [Ultralytics HUB](https://docs.ultralytics.com/hub/).
For more resources, explore the [Ultralytics documentation](https://docs.ultralytics.com/) and [Ultralytics blog](https://www.ultralytics.com/blog).
**We encourage your contributions to help improve this project.**

View file

@@ -63,18 +63,15 @@ char* YOLO_V8::PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oIm
case YOLO_DETECT_V8_HALF:
case YOLO_POSE_V8_HALF://LetterBox
{
if (iImg.cols >= iImg.rows)
{
resizeScales = iImg.cols / (float)iImgSize.at(0);
cv::resize(oImg, oImg, cv::Size(iImgSize.at(0), int(iImg.rows / resizeScales)));
}
else
{
resizeScales = iImg.rows / (float)iImgSize.at(0);
cv::resize(oImg, oImg, cv::Size(int(iImg.cols / resizeScales), iImgSize.at(1)));
}
cv::Mat tempImg = cv::Mat::zeros(iImgSize.at(0), iImgSize.at(1), CV_8UC3);
oImg.copyTo(tempImg(cv::Rect(0, 0, oImg.cols, oImg.rows)));
int new_h = iImgSize.at(0);
int new_w = iImgSize.at(1);
float r = min(new_w / (float)iImg.cols, new_h / (float)iImg.rows);
int resized_w = static_cast<int>(iImg.cols * r);
int resized_h = static_cast<int>(iImg.rows * r);
resizeScales = 1.0f / r;
cv::resize(oImg, oImg, cv::Size(resized_w, resized_h));
cv::Mat tempImg = cv::Mat::zeros(new_h, new_w, CV_8UC3);
oImg.copyTo(tempImg(cv::Rect(0, 0, resized_w, resized_h)));
oImg = tempImg;
break;
}
@@ -85,7 +82,7 @@ char* YOLO_V8::PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oIm
int m = min(h, w);
int top = (h - m) / 2;
int left = (w - m) / 2;
cv::resize(oImg(cv::Rect(left, top, m, m)), oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));
cv::resize(oImg(cv::Rect(left, top, m, m)), oImg, cv::Size(iImgSize.at(1), iImgSize.at(0)));
break;
}
}
@@ -335,7 +332,7 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::
char* YOLO_V8::WarmUpSession() {
clock_t starttime_1 = clock();
cv::Mat iImg = cv::Mat(cv::Size(imgSize.at(0), imgSize.at(1)), CV_8UC3);
cv::Mat iImg = cv::Mat(cv::Size(imgSize.at(1), imgSize.at(0)), CV_8UC3);
cv::Mat processedImg;
PreProcess(iImg, imgSize, processedImg);
if (modelType < 4)