
Add files via upload

Mengqi Lei, 2 months ago
parent
commit
9aa453f025
79 changed files with 11588 additions and 2 deletions
  1. README.md (+219, -2)
  2. assets/framework.png (BIN)
  3. assets/hyperedge.png (BIN)
  4. assets/icon.png (BIN)
  5. assets/vis.png (BIN)
  6. docker/Dockerfile (+92, -0)
  7. docker/Dockerfile-arm64 (+58, -0)
  8. docker/Dockerfile-conda (+50, -0)
  9. docker/Dockerfile-cpu (+62, -0)
  10. docker/Dockerfile-jetson-jetpack4 (+70, -0)
  11. docker/Dockerfile-jetson-jetpack5 (+57, -0)
  12. docker/Dockerfile-jetson-jetpack6 (+58, -0)
  13. docker/Dockerfile-jupyter (+33, -0)
  14. docker/Dockerfile-python (+59, -0)
  15. docker/Dockerfile-runner (+44, -0)
  16. examples/README.md (+40, -0)
  17. examples/RTDETR-ONNXRuntime-Python/README.md (+43, -0)
  18. examples/RTDETR-ONNXRuntime-Python/main.py (+222, -0)
  19. examples/YOLO-Series-ONNXRuntime-Rust/Cargo.toml (+14, -0)
  20. examples/YOLO-Series-ONNXRuntime-Rust/README.md (+94, -0)
  21. examples/YOLO-Series-ONNXRuntime-Rust/src/main.rs (+236, -0)
  22. examples/YOLOv8-Action-Recognition/action_recognition.py (+464, -0)
  23. examples/YOLOv8-Action-Recognition/readme.md (+116, -0)
  24. examples/YOLOv8-Action-Recognition/requirements.txt (+4, -0)
  25. examples/YOLOv8-CPP-Inference/CMakeLists.txt (+28, -0)
  26. examples/YOLOv8-CPP-Inference/README.md (+50, -0)
  27. examples/YOLOv8-CPP-Inference/inference.cpp (+185, -0)
  28. examples/YOLOv8-CPP-Inference/inference.h (+52, -0)
  29. examples/YOLOv8-CPP-Inference/main.cpp (+70, -0)
  30. examples/YOLOv8-LibTorch-CPP-Inference/CMakeLists.txt (+47, -0)
  31. examples/YOLOv8-LibTorch-CPP-Inference/README.md (+35, -0)
  32. examples/YOLOv8-LibTorch-CPP-Inference/main.cc (+260, -0)
  33. examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt (+99, -0)
  34. examples/YOLOv8-ONNXRuntime-CPP/README.md (+120, -0)
  35. examples/YOLOv8-ONNXRuntime-CPP/inference.cpp (+375, -0)
  36. examples/YOLOv8-ONNXRuntime-CPP/inference.h (+94, -0)
  37. examples/YOLOv8-ONNXRuntime-CPP/main.cpp (+193, -0)
  38. examples/YOLOv8-ONNXRuntime-Rust/Cargo.toml (+24, -0)
  39. examples/YOLOv8-ONNXRuntime-Rust/README.md (+212, -0)
  40. examples/YOLOv8-ONNXRuntime-Rust/src/cli.rs (+87, -0)
  41. examples/YOLOv8-ONNXRuntime-Rust/src/lib.rs (+160, -0)
  42. examples/YOLOv8-ONNXRuntime-Rust/src/main.rs (+28, -0)
  43. examples/YOLOv8-ONNXRuntime-Rust/src/model.rs (+651, -0)
  44. examples/YOLOv8-ONNXRuntime-Rust/src/ort_backend.rs (+553, -0)
  45. examples/YOLOv8-ONNXRuntime-Rust/src/yolo_result.rs (+235, -0)
  46. examples/YOLOv8-ONNXRuntime/README.md (+43, -0)
  47. examples/YOLOv8-ONNXRuntime/main.py (+229, -0)
  48. examples/YOLOv8-OpenCV-ONNX-Python/README.md (+19, -0)
  49. examples/YOLOv8-OpenCV-ONNX-Python/main.py (+130, -0)
  50. examples/YOLOv8-OpenVINO-CPP-Inference/CMakeLists.txt (+21, -0)
  51. examples/YOLOv8-OpenVINO-CPP-Inference/README.md (+69, -0)
  52. examples/YOLOv8-OpenVINO-CPP-Inference/inference.cc (+175, -0)
  53. examples/YOLOv8-OpenVINO-CPP-Inference/inference.h (+59, -0)
  54. examples/YOLOv8-OpenVINO-CPP-Inference/main.cc (+41, -0)
  55. examples/YOLOv8-Region-Counter/readme.md (+128, -0)
  56. examples/YOLOv8-Region-Counter/yolov8_region_counter.py (+253, -0)
  57. examples/YOLOv8-SAHI-Inference-Video/readme.md (+69, -0)
  58. examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py (+108, -0)
  59. examples/YOLOv8-Segmentation-ONNXRuntime-Python/README.md (+63, -0)
  60. examples/YOLOv8-Segmentation-ONNXRuntime-Python/main.py (+338, -0)
  61. examples/YOLOv8-TFLite-Python/README.md (+55, -0)
  62. examples/YOLOv8-TFLite-Python/main.py (+221, -0)
  63. examples/heatmaps.ipynb (+186, -0)
  64. examples/hub.ipynb (+115, -0)
  65. examples/object_counting.ipynb (+200, -0)
  66. examples/object_tracking.ipynb (+245, -0)
  67. examples/tutorial.ipynb (+665, -0)
  68. mkdocs.yml (+769, -0)
  69. pyproject.toml (+186, -0)
  70. requirements.txt (+20, -0)
  71. tests/__init__.py (+22, -0)
  72. tests/conftest.py (+83, -0)
  73. tests/test_cli.py (+122, -0)
  74. tests/test_cuda.py (+155, -0)
  75. tests/test_engine.py (+131, -0)
  76. tests/test_exports.py (+216, -0)
  77. tests/test_integrations.py (+150, -0)
  78. tests/test_python.py (+615, -0)
  79. tests/test_solutions.py (+94, -0)

+ 219 - 2
README.md

@@ -1,2 +1,219 @@
-# yolov13
-Official implementation of YOLOv13.
+<p align="center">
+    <img src="assets/icon.png" width="110" style="margin-bottom: 0.2;"/>
+</p>
+<h2 align="center">YOLOv13: Real-Time Object Detection with Hypergraph-Enhanced Adaptive Visual Perception</h2>
+
+<p align="center">
+  <a href="https://github.com/iMoonLab">
+    <img src="https://img.shields.io/badge/iMoonLab-Homepage-blueviolet.svg" alt="iMoonLab">
+  </a>
+</p>
+
+  
+<div align="center">
+    <img src="assets/framework.png" width="96%" height="96%">
+</div>
+
+
+<h2>Table of Contents</h2>
+
+- [Technical Briefing 💡](#technical-briefing-)
+- [Main Results 🏆](#main-results-)
+  - [1. MS COCO Benchmark](#1-ms-coco-benchmark)
+  - [2. Visualizations](#2-visualizations)
+  - [3. Computational Efficiency](#3-computational-efficiency)
+- [Quick Start 🚀](#quick-start-)
+  - [1. Install Dependencies](#1-install-dependencies)
+  - [2. Validation](#2-validation)
+  - [3. Training](#3-training)
+  - [4. Prediction](#4-prediction)
+  - [5. Export](#5-export)
+- [Related Projects 🔗](#related-projects-)
+- [Cite YOLOv13 📝](#cite-yolov13-)
+
+
+
+## Technical Briefing 💡
+
+
+**Introducing YOLOv13**, the next-generation real-time detector with cutting-edge performance and efficiency. The YOLOv13 family includes four variants: Nano, Small, Large, and X-Large, powered by:
+
+* **HyperACE: Hypergraph-based Adaptive Correlation Enhancement**
+
+  * Treats pixels in multi-scale feature maps as hypergraph vertices.
+  * Adopts a learnable hyperedge construction module to adaptively explore higher-order correlations among vertices.
+  * A message-passing module with linear complexity then aggregates multi-scale features under the guidance of these higher-order correlations, enabling effective visual perception of complex scenes (a minimal sketch of this idea follows the feature list below).
+
+* **FullPAD: Full-Pipeline Aggregation-and-Distribution Paradigm**
+
+  * Uses HyperACE to aggregate multi-scale backbone features and extract high-order correlations in the hypergraph space.
+  * The FullPAD paradigm then forwards these correlation-enhanced features through three separate tunnels: to the connection between the backbone and the neck, to the internal layers of the neck, and to the connection between the neck and the head. In this way, YOLOv13 achieves fine-grained information flow and representational synergy across the entire pipeline.
+  * FullPAD significantly improves gradient propagation and enhances detection performance.
+
+* **Lightweight Convolution Replacement**
+
+  * Replaces large-kernel convolutions with blocks built on depthwise separable convolutions (DSConv, DS-Bottleneck, DS-C3k, DS-C3k2), preserving the receptive field while greatly reducing parameters and computation (a DSConv sketch also follows the feature list below).
+  * Achieves faster inference speed without sacrificing accuracy.
+
+* **State-of-the-Art Performance**
+
+  * Demonstrates significant mAP gains of YOLOv13-S over YOLOv12-S and earlier versions on the MS COCO benchmark.
+  * Maintains a lightweight model size, ideal for mobile and embedded deployment. Specifically, the FLOPs of the Nano and Small variants are the lowest in the YOLO series.
+
+> YOLOv13 seamlessly combines hypergraph computation with end-to-end information collaboration to deliver a more accurate, robust, and efficient real-time detection solution.
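+
+The HyperACE module itself is not reproduced in this README, but the core idea above (treating feature-map pixels as hypergraph vertices, softly assigning them to learnable hyperedges, and passing messages vertex → hyperedge → vertex with linear complexity) can be sketched roughly as follows. All class, parameter, and shape choices in this snippet are illustrative assumptions, not the repository's actual implementation.
+
+```python
+# Illustrative sketch of hypergraph-style message passing (NOT the official HyperACE code).
+import torch
+import torch.nn as nn
+
+
+class ToyHyperACE(nn.Module):
+    """Toy adaptive hyperedge aggregation over flattened multi-scale features."""
+
+    def __init__(self, channels: int, num_hyperedges: int = 8):
+        super().__init__()
+        # Learnable "hyperedge construction": soft participation of each vertex in each hyperedge
+        self.edge_proj = nn.Linear(channels, num_hyperedges)
+        self.vertex_update = nn.Linear(channels, channels)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        # x: (B, N, C), where N is the number of vertices (pixels gathered from multi-scale maps)
+        a = self.edge_proj(x).softmax(dim=1)                     # (B, N, E): soft vertex-to-hyperedge assignment
+        edge_feat = torch.einsum("bne,bnc->bec", a, x)           # hyperedge features: weighted vertex aggregation
+        vertex_msg = torch.einsum("bne,bec->bnc", a, edge_feat)  # distribute hyperedge messages back to vertices
+        return x + self.vertex_update(vertex_msg)                # residual vertex update, O(N * E * C) overall
+
+
+feats = torch.randn(2, 4096, 256)     # e.g. a 64x64 feature map flattened into 4096 vertices
+print(ToyHyperACE(256)(feats).shape)  # torch.Size([2, 4096, 256])
+```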
+
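+Similarly, the depthwise separable building block can be sketched as a depthwise convolution followed by a pointwise convolution. The class name and layer choices below are illustrative assumptions, not the repository's DSConv definition.
+
+```python
+# Illustrative depthwise separable convolution block (NOT the repository's DSConv implementation).
+import torch
+import torch.nn as nn
+
+
+class ToyDSConv(nn.Module):
+    def __init__(self, c_in: int, c_out: int, k: int = 3, s: int = 1):
+        super().__init__()
+        # Depthwise: one k x k filter per input channel (groups=c_in) keeps the spatial receptive field
+        self.dw = nn.Conv2d(c_in, c_in, k, s, k // 2, groups=c_in, bias=False)
+        # Pointwise: a 1 x 1 convolution mixes channels cheaply
+        self.pw = nn.Conv2d(c_in, c_out, 1, bias=False)
+        self.bn = nn.BatchNorm2d(c_out)
+        self.act = nn.SiLU()
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.act(self.bn(self.pw(self.dw(x))))
+
+
+x = torch.randn(1, 64, 80, 80)
+print(ToyDSConv(64, 128)(x).shape)  # torch.Size([1, 128, 80, 80])
+```
+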
+
+
+## Main Results 🏆
+
+### 1. MS COCO Benchmark
+
+**Table 1. Quantitative comparison with other state-of-the-art real-time object detectors on the MS COCO dataset**
+
+
+| **Method** | **FLOPs (G)** | **Parameters (M)** | **AP<sub>50:95</sub><sup>val</sup>** | **AP<sub>50</sub><sup>val</sup>** | **AP<sub>75</sub><sup>val</sup>** | **Latency (ms)** |
+| :--- | :---: | :---: | :---: | :---: | :---: | :---: |
+| YOLOv6-3.0-N | 11.4 | 4.7 | 37.0 | 52.7 | – | 2.74 |
+| Gold-YOLO-N | 12.1 | 5.6 | 39.6 | 55.7 | – | 2.97 |
+| YOLOv8-N | 8.7 | 3.2 | 37.4 | 52.6 | 40.5 | 1.77 |
+| YOLOv10-N | 6.7 | 2.3 | 38.5 | 53.8 | 41.7 | 1.84 |
+| YOLO11-N | 6.5 | 2.6 | 38.6 | 54.2 | 41.6 | 1.53 |
+| YOLOv12-N | 6.5 | 2.6 | 40.1 | 56.0 | 43.4 | 1.83 |
+| **YOLOv13-N** | **6.4** | **2.5** | **41.6** | **57.8** | **45.1** | **1.97** |
+|   |   |   |   |   |   |   |
+| YOLOv6-3.0-S | 45.3 | 18.5 | 44.3 | 61.2 | – | 3.42 |
+| Gold-YOLO-S | 46.0 | 21.5 | 45.4 | 62.5 | – | 3.82 |
+| YOLOv8-S | 28.6 | 11.2 | 45.0 | 61.8 | 48.7 | 2.33 |
+| RT-DETR-R18 | 60.0 | 20.0 | 46.5 | 63.8 | – | 4.58 |
+| RT-DETRv2-R18 | 60.0 | 20.0 | 47.9 | 64.9 | – | 4.58 |
+| YOLOv9-S | 26.4 | 7.1 | 46.8 | 63.4 | 50.7 | 3.44 |
+| YOLOv10-S | 21.6 | 7.2 | 46.3 | 63.0 | 50.4 | 2.53 |
+| YOLO11-S | 21.5 | 9.4 | 45.8 | 62.6 | 49.8 | 2.56 |
+| YOLOv12-S | 21.4 | 9.3 | 47.1 | 64.2 | 51.0 | 2.82 |
+| **YOLOv13-S** | **20.8** | **9.0** | **48.0** | **65.2** | **52.0** | **2.98** |
+|   |   |   |   |   |   |   |
+| YOLOv6-3.0-L | 150.7 | 59.6 | 51.8 | 69.2 | – | 9.01 |
+| Gold-YOLO-L | 151.7 | 75.1 | 51.8 | 68.9 | – | 10.69 |
+| YOLOv8-L | 165.2 | 43.7 | 53.0 | 69.8 | 57.7 | 8.13 |
+| RT-DETR-R50 | 136.0 | 42.0 | 53.1 | 71.3 | – | 6.93 |
+| RT-DETRv2-R50 | 136.0 | 42.0 | 53.4 | 71.6 | – | 6.93 |
+| YOLOv9-C | 102.1 | 25.3 | 53.0 | 70.2 | 57.8 | 6.64 |
+| YOLOv10-L | 120.3 | 24.4 | 53.2 | 70.1 | 57.2 | 7.31 |
+| YOLO11-L | 86.9 | 25.3 | 52.3 | 69.2 | 55.7 | 6.23 |
+| YOLOv12-L | 88.9 | 26.4 | 53.0 | 70.0 | 57.9 | 7.10 |
+| **YOLOv13-L** | **88.4** | **27.6** | **53.4** | **70.9** | **58.1** | **8.63** |
+|   |   |   |   |   |   |   |
+| YOLOv8-X | 257.8 | 68.2 | 54.0 | 71.0 | 58.8 | 12.83 |
+| RT-DETR-R101 | 259.0 | 76.0 | 54.3 | 72.7 | – | 13.51 |
+| RT-DETRv2-R101| 259.0 | 76.0 | 54.3 | 72.8 | – | 13.51 |
+| YOLOv10-X | 160.4 | 29.5 | 54.4 | 71.3 | 59.3 | 10.70 |
+| YOLO11-X | 194.9 | 56.9 | 54.2 | 71.0 | 59.1 | 11.35 |
+| YOLOv12-X | 199.0 | 59.1 | 54.4 | 71.1 | 59.3 | 12.46 |
+| **YOLOv13-X** | **199.2** | **64.0** | **54.8** | **72.0** | **59.8** | **14.67** |
+
+
+### 2. Visualizations
+
+<div>
+    <img src="assets/vis.png" width="100%" height="100%">
+</div>
+
+**Visualization examples of YOLOv10-N/S, YOLO11-N/S, YOLOv12-N/S, and YOLOv13-N/S.**
+
+
+### 3. Computational Efficiency
+<div>
+    <img src="assets/hyperedge.png" width="60%" height="60%">
+</div>
+
+**Representative visualization examples of adaptive hyperedges. The hyperedges in the first and second columns mainly focus on the high-order interactions among objects in the foreground, while the third column mainly focuses on the high-order interactions between the background and part of the foreground. These hyperedge visualizations intuitively reflect the high-order visual associations modeled by YOLOv13.**
+
+
+
+## Quick Start 🚀
+
+### 1. Install Dependencies
+
+```bash
+# Download the Flash Attention wheel (optional, enables Flash Attention acceleration)
+wget https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.3/flash_attn-2.7.3+cu11torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
+conda create -n yolov13 python=3.11
+conda activate yolov13
+pip install -r requirements.txt
+pip install flash_attn-2.7.3+cu11torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl  # install the downloaded Flash Attention wheel
+pip install -e .
+```
+YOLOv13 supports Flash Attention acceleration.
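+
+After installation, a quick sanity check can confirm that PyTorch and the package import correctly. This is a minimal sketch, assuming the editable install above succeeded; `yolov13n.yaml` follows the naming used in the training example below.
+
+```python
+import torch
+from ultralytics import YOLO
+
+print(f"PyTorch {torch.__version__}, CUDA available: {torch.cuda.is_available()}")
+model = YOLO("yolov13n.yaml")  # Build the nano model from its config to verify the install
+model.info()                   # Print a layer/parameter summary
+```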
+
+### 2. Validation
+[`YOLOv13-N`](https://github.com/iMoonLab/yolov13/releases/download/yolov13/yolov12n.pt)
+[`YOLOv13-S`](https://github.com/iMoonLab/yolov13/releases/download/yolov13/yolov12s.pt)
+[`YOLOv13-L`](https://github.com/iMoonLab/yolov13/releases/download/yolov13/yolov12l.pt)
+[`YOLOv13-X`](https://github.com/iMoonLab/yolov13/releases/download/yolov13/yolov12x.pt)
+
+Use the following code to validate the YOLOv13 models on the COCO dataset. Make sure to replace `{n/s/l/x}` with the desired model scale (nano, small, large, or extra-large).
+```python
+from ultralytics import YOLO
+
+model = YOLO('yolov12{n/s/l/x}.pt')  # Replace with the desired model scale (the released weight files use these names)
+metrics = model.val(data='coco.yaml')  # Evaluate on the COCO validation set
+```
+
+### 3. Training
+
+Use the following code to train the YOLOv13 models. Make sure to replace `yolov13n.yaml` with the desired model configuration file path, and `coco.yaml` with your COCO dataset configuration file.
+```python
+from ultralytics import YOLO
+
+model = YOLO('yolov13n.yaml')
+
+# Train the model
+results = model.train(
+  data='coco.yaml',
+  epochs=600, 
+  batch=256, 
+  imgsz=640,
+  scale=0.5,  # S:0.9; L:0.9; X:0.9
+  mosaic=1.0,
+  mixup=0.0,  # S:0.05; L:0.15; X:0.2
+  copy_paste=0.1,  # S:0.15; L:0.5; X:0.6
+  device="0,1,2,3",
+)
+
+# Evaluate model performance on the validation set
+metrics = model.val(data='coco.yaml')
+
+# Perform object detection on an image
+results = model("path/to/your/image.jpg")
+results[0].show()
+
+```
+
+
+### 4. Prediction
+Use the following code to perform object detection using the YOLOv13 models. Make sure to replace `{n/s/l/x}` with the desired model scale.
+```python
+from ultralytics import YOLO
+
+model = YOLO('yolov13{n/s/l/x}.pt')  # Replace with the desired model scale
+results = model.predict("path/to/your/image.jpg")  # Run inference on an image
+results[0].show()  # Display the annotated result
+```
+
+### 5. Export
+Use the following code to export the YOLOv13 models to ONNX or TensorRT format. Make sure to replace `{n/s/l/x}` with the desired model scale.
+```python
+from ultralytics import YOLO
+model = YOLO('yolov13{n/s/l/x}.pt')  # Replace with the desired model scale
+model.export(format="engine", half=True)  # or format="onnx"
+```
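+
+For reference, an exported ONNX file can then be loaded directly with ONNX Runtime, as the projects under `examples/` do. A minimal sketch, assuming the export above produced `yolov13n.onnx`:
+
+```python
+import numpy as np
+import onnxruntime as ort
+
+session = ort.InferenceSession("yolov13n.onnx", providers=["CPUExecutionProvider"])
+inp = session.get_inputs()[0]
+dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)  # NCHW input at the 640x640 training resolution
+outputs = session.run(None, {inp.name: dummy})
+print([o.shape for o in outputs])  # Inspect raw output shapes before writing post-processing
+```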
+
+## Related Projects 🔗
+
+- The code is based on [Ultralytics](https://github.com/ultralytics/ultralytics). Thanks for their excellent work!
+- Other wonderful works about Hypergraph Computation:
+  - "Hypergraph Neural Networks": [[paper](https://arxiv.org/abs/1809.09401)]
+  - "HGNN+: General Hypergraph Nerual Networks": [[paper](https://ieeexplore.ieee.org/abstract/document/9795251)]
+  - "SoftHGNN: Soft Hypergraph Neural Networks for General Visual Recognition": [[paper](https://arxiv.org/abs/2505.15325)] [[code](https://github.com/Mengqi-Lei/SoftHGNN)]
+
+## Cite YOLOv13 📝
+```bibtex
+Coming soon...
+```
+

BIN
assets/framework.png


BIN
assets/hyperedge.png


BIN
assets/icon.png


BIN
assets/vis.png


+ 92 - 0
docker/Dockerfile

@@ -0,0 +1,92 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:latest image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference
+
+# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch or nvcr.io/nvidia/pytorch:23.03-py3
+FROM pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime
+
+# Set environment variables
+# Avoid DDP error "MKL_THREADING_LAYER=INTEL is incompatible with libgomp.so.1 library" https://github.com/pytorch/pytorch/issues/37377
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1 \
+    MKL_THREADING_LAYER=GNU \
+    OMP_NUM_THREADS=1 
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Install linux packages
+# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
+# libsm6 required by libqxcb to create QT-based windows for visualization; set 'QT_DEBUG_PLUGINS=1' to test in docker
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    gcc git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 libsm6 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Security updates
+# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
+RUN apt upgrade --no-install-recommends -y openssl tar
+
+# Create working directory
+WORKDIR /ultralytics
+
+# Copy contents and configure git
+COPY . .
+RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Install pip packages
+RUN pip install uv
+# Note -cu12 must be used with tensorrt
+RUN uv pip install --system -e ".[export]" tensorrt-cu12 "albumentations>=1.4.6" comet pycocotools
+
+# Run exports to AutoInstall packages
+# Edge TPU export fails the first time so is run twice here
+RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 || yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
+# Requires <= Python 3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
+RUN uv pip install --system "paddlepaddle>=2.6.0" x2paddle
+# Fix error: `np.bool` was a deprecated alias for the builtin `bool` segmentation error in Tests
+RUN uv pip install --system numpy==1.23.5
+
+# Remove extra build files
+RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest && sudo docker build -f docker/Dockerfile -t $t . && sudo docker push $t
+
+# Pull and Run with access to all GPUs
+# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
+
+# Pull and Run with access to GPUs 2 and 3 (inside container CUDA devices will appear as 0 and 1)
+# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus '"device=2,3"' $t
+
+# Pull and Run with local directory access
+# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/shared/datasets:/datasets $t
+
+# Kill all
+# sudo docker kill $(sudo docker ps -q)
+
+# Kill all image-based
+# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/ultralytics:latest)
+
+# DockerHub tag update
+# t=ultralytics/ultralytics:latest tnew=ultralytics/ultralytics:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew
+
+# Clean up
+# sudo docker system prune -a --volumes
+
+# Update Ubuntu drivers
+# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
+
+# DDP test
+# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
+
+# GCP VM from Image
+# docker.io/ultralytics/ultralytics:latest

+ 58 - 0
docker/Dockerfile-arm64

@@ -0,0 +1,58 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image is aarch64-compatible for Apple M1, M2, M3, Raspberry Pi and other ARM architectures
+
+# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu with "FROM arm64v8/ubuntu:22.04" (deprecated)
+# Start FROM Debian image for arm64v8 https://hub.docker.com/r/arm64v8/debian (new)
+FROM arm64v8/debian:bookworm-slim
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Install linux packages
+# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
+# pkg-config and libhdf5-dev (not included) are needed to build 'h5py==3.11.0' aarch64 wheel required by 'tensorflow'
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    python3-pip git zip unzip wget curl htop gcc libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create working directory
+WORKDIR /ultralytics
+
+# Copy contents and configure git
+COPY . .
+RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Install pip packages
+RUN pip install uv
+RUN uv pip install --system -e ".[export]" --break-system-packages
+
+# Creates a symbolic link to make 'python' point to 'python3'
+RUN ln -sf /usr/bin/python3 /usr/bin/python
+
+# Remove extra build files
+RUN rm -rf /root/.config/Ultralytics/persistent_cache.json
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-arm64 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-arm64 -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-arm64 && sudo docker run -it --ipc=host $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host $t
+
+# Pull and Run with local volume mounted
+# t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t

+ 50 - 0
docker/Dockerfile-conda

@@ -0,0 +1,50 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:latest-conda image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image is optimized for Ultralytics Anaconda (https://anaconda.org/conda-forge/ultralytics) installation and usage
+
+# Start FROM miniconda3 image https://hub.docker.com/r/continuumio/miniconda3
+FROM continuumio/miniconda3:latest
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Install linux packages
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    libgl1 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy contents
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Install conda packages
+# mkl required to fix 'OSError: libmkl_intel_lp64.so.2: cannot open shared object file: No such file or directory'
+RUN conda config --set solver libmamba && \
+    conda install pytorch torchvision pytorch-cuda=12.1 -c pytorch -c nvidia && \
+    conda install -c conda-forge ultralytics mkl
+    # conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=12.1 ultralytics mkl
+
+# Remove extra build files
+RUN rm -rf /root/.config/Ultralytics/persistent_cache.json
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-conda && sudo docker build -f docker/Dockerfile-cpu -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-conda && sudo docker run -it --ipc=host $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host $t
+
+# Pull and Run with local volume mounted
+# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t

+ 62 - 0
docker/Dockerfile-cpu

@@ -0,0 +1,62 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments
+
+# Use official Python base image for reproducibility (3.11.10 for export and 3.12.6 for inference)
+FROM python:3.11.10-slim-bookworm
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Install linux packages
+# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create working directory
+WORKDIR /ultralytics
+
+# Copy contents and configure git
+COPY . .
+RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Install pip packages
+RUN pip install uv
+RUN uv pip install --system -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-first-match
+
+# Run exports to AutoInstall packages
+RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
+# Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
+RUN uv pip install --system "paddlepaddle>=2.6.0" x2paddle
+
+# Remove extra build files
+RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json
+
+# Set default command to bash
+CMD ["/bin/bash"]
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-cpu && sudo docker build -f docker/Dockerfile-cpu -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-cpu && sudo docker run -it --ipc=host --name NAME $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host --name NAME $t
+
+# Pull and Run with local volume mounted
+# t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t

+ 70 - 0
docker/Dockerfile-jetson-jetpack4

@@ -0,0 +1,70 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:jetson-jetpack4 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Supports JetPack4.x for YOLO11 on Jetson Nano, TX2, Xavier NX, AGX Xavier
+
+# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-cuda
+FROM nvcr.io/nvidia/l4t-cuda:10.2.460-runtime
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Add NVIDIA repositories for TensorRT dependencies
+RUN wget -q -O - https://repo.download.nvidia.com/jetson/jetson-ota-public.asc | apt-key add - && \
+  echo "deb https://repo.download.nvidia.com/jetson/common r32.7 main" > /etc/apt/sources.list.d/nvidia-l4t-apt-source.list && \
+  echo "deb https://repo.download.nvidia.com/jetson/t194 r32.7 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    git python3.8 python3.8-dev python3-pip python3-libnvinfer libopenmpi-dev libopenblas-base libomp-dev gcc \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create symbolic links for python3.8 and pip3
+RUN ln -sf /usr/bin/python3.8 /usr/bin/python3
+RUN ln -s /usr/bin/pip3 /usr/bin/pip
+
+# Create working directory
+WORKDIR /ultralytics
+
+# Copy contents and configure git
+COPY . .
+RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Download onnxruntime-gpu 1.8.0 and tensorrt 8.2.0.6
+# Other versions can be seen in https://elinux.org/Jetson_Zoo and https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048
+ADD https://nvidia.box.com/shared/static/gjqofg7rkg97z3gc8jeyup6t8n9j8xjw.whl onnxruntime_gpu-1.8.0-cp38-cp38-linux_aarch64.whl
+ADD https://forums.developer.nvidia.com/uploads/short-url/hASzFOm9YsJx6VVFrDW1g44CMmv.whl tensorrt-8.2.0.6-cp38-none-linux_aarch64.whl
+
+# Install pip packages
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install uv
+RUN uv pip install --system \
+    onnxruntime_gpu-1.8.0-cp38-cp38-linux_aarch64.whl \
+    tensorrt-8.2.0.6-cp38-none-linux_aarch64.whl \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-1.11.0a0+gitbc2c6ed-cp38-cp38-linux_aarch64.whl \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.12.0a0+9b5a3fe-cp38-cp38-linux_aarch64.whl
+RUN uv pip install --system -e ".[export]"
+
+# Remove extra build files
+RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-jetson-jetpack4 -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker run -it --ipc=host $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker pull $t && sudo docker run -it --ipc=host $t
+
+# Pull and Run with NVIDIA runtime
+# t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t

+ 57 - 0
docker/Dockerfile-jetson-jetpack5

@@ -0,0 +1,57 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:jetson-jetpack5 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Supports JetPack5.1.2 for YOLO11 on Jetson Xavier NX, AGX Xavier, AGX Orin, Orin Nano and Orin NX
+
+# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-jetpack
+FROM nvcr.io/nvidia/l4t-jetpack:r35.4.1
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Install dependencies
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    git python3-pip libopenmpi-dev libopenblas-base libomp-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create working directory
+WORKDIR /ultralytics
+
+# Copy contents and configure git
+COPY . .
+RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Pip install onnxruntime-gpu, torch, torchvision and ultralytics
+RUN python3 -m pip install --upgrade pip uv
+RUN uv pip install --system \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/onnxruntime_gpu-1.18.0-cp38-cp38-linux_aarch64.whl \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-2.1.0a0+41361538.nv23.06-cp38-cp38-linux_aarch64.whl \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.16.2+c6f3977-cp38-cp38-linux_aarch64.whl
+
+RUN uv pip install --system -e ".[export]"
+
+# Remove extra build files
+RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-jetson-jetpack5 -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker run -it --ipc=host $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker pull $t && sudo docker run -it --ipc=host $t
+
+# Pull and Run with NVIDIA runtime
+# t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t

+ 58 - 0
docker/Dockerfile-jetson-jetpack6

@@ -0,0 +1,58 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:jetson-jetpack6 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Supports JetPack6.1 for YOLO11 on Jetson AGX Orin, Orin NX and Orin Nano Series
+
+# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-jetpack
+FROM nvcr.io/nvidia/l4t-jetpack:r36.4.0
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Install dependencies
+ADD https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb .
+RUN dpkg -i cuda-keyring_1.1-1_all.deb && \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+    git python3-pip libopenmpi-dev libopenblas-base libomp-dev libcusparselt0 libcusparselt-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create working directory
+WORKDIR /ultralytics
+
+# Copy contents and configure git
+COPY . .
+RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Pip install onnxruntime-gpu, torch, torchvision and ultralytics
+RUN python3 -m pip install --upgrade pip uv
+RUN uv pip install --system \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/onnxruntime_gpu-1.20.0-cp310-cp310-linux_aarch64.whl \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-2.5.0a0+872d972e41.nv24.08-cp310-cp310-linux_aarch64.whl \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.20.0a0+afc54f7-cp310-cp310-linux_aarch64.whl
+RUN uv pip install --system -e ".[export]"
+
+# Remove extra build files
+RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-jetson-jetpack6 -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker run -it --ipc=host $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker pull $t && sudo docker run -it --ipc=host $t
+
+# Pull and Run with NVIDIA runtime
+# t=ultralytics/ultralytics:latest-jetson-jetpack6 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t

+ 33 - 0
docker/Dockerfile-jupyter

@@ -0,0 +1,33 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:latest-jupyter image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image provides JupyterLab interface for interactive YOLO development and includes tutorial notebooks
+
+# Start from Python-based Ultralytics image for full Python environment
+FROM ultralytics/ultralytics:latest-python
+
+# Install JupyterLab for interactive development
+RUN uv pip install --system jupyterlab
+
+# Create persistent data directory structure
+RUN mkdir /data
+
+# Configure YOLO directories
+RUN mkdir /data/{datasets,weights,runs} && \
+    yolo settings datasets_dir="/data/datasets" weights_dir="/data/weights" runs_dir="/data/runs"
+
+# Start JupyterLab with tutorial notebook
+ENTRYPOINT ["/usr/local/bin/jupyter", "lab", "--allow-root", "--ip=*", "/ultralytics/examples/tutorial.ipynb"]
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-jupyter && sudo docker build -f docker/Dockerfile-jupyter -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-jupyter && sudo docker run -it --ipc=host -p 8888:8888 $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-jupyter && sudo docker pull $t && sudo docker run -it --ipc=host -p 8888:8888 $t
+
+# Pull and Run with local volume mounted
+# t=ultralytics/ultralytics:latest-jupyter && sudo docker pull $t && sudo docker run -it --ipc=host -p 8888:8888 -v "$(pwd)"/datasets:/data/datasets $t

+ 59 - 0
docker/Dockerfile-python

@@ -0,0 +1,59 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments
+
+# Use official Python base image for reproducibility (3.11.10 for export and 3.12.6 for inference)
+FROM python:3.11.10-slim-bookworm
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1
+
+# Downloads to user config dir
+ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
+    https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
+    /root/.config/Ultralytics/
+
+# Install linux packages
+# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create working directory
+WORKDIR /ultralytics
+
+# Copy contents and configure git
+COPY . .
+RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
+ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .
+
+# Install pip packages
+RUN pip install uv
+RUN uv pip install --system -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu --index-strategy unsafe-first-match
+
+# Run exports to AutoInstall packages
+RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
+# Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
+RUN uv pip install --system "paddlepaddle>=2.6.0" x2paddle
+
+# Remove extra build files
+RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-python && sudo docker build -f docker/Dockerfile-python -t $t . && sudo docker push $t
+
+# Run
+# t=ultralytics/ultralytics:latest-python && sudo docker run -it --ipc=host $t
+
+# Pull and Run
+# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host $t
+
+# Pull and Run with local volume mounted
+# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/datasets $t

+ 44 - 0
docker/Dockerfile-runner

@@ -0,0 +1,44 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Builds GitHub actions CI runner image for deployment to DockerHub https://hub.docker.com/r/ultralytics/ultralytics
+# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference tests
+
+# Start FROM Ultralytics GPU image
+FROM ultralytics/ultralytics:latest
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    PIP_NO_CACHE_DIR=1 \
+    PIP_BREAK_SYSTEM_PACKAGES=1 \
+    RUNNER_ALLOW_RUNASROOT=1 \
+    DEBIAN_FRONTEND=noninteractive
+
+# Set the working directory
+WORKDIR /actions-runner
+
+# Download and unpack the latest runner from https://github.com/actions/runner
+RUN FILENAME=actions-runner-linux-x64-2.320.0.tar.gz && \
+    curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.320.0/$FILENAME && \
+    tar xzf $FILENAME && \
+    rm $FILENAME
+
+# Install runner dependencies
+RUN uv pip install --system pytest-cov
+RUN ./bin/installdependencies.sh && \
+    apt-get -y install libicu-dev
+
+# Inline ENTRYPOINT command to configure and start runner with default TOKEN and NAME
+ENTRYPOINT sh -c './config.sh --url https://github.com/ultralytics/ultralytics \
+                              --token ${GITHUB_RUNNER_TOKEN:-TOKEN} \
+                              --name ${GITHUB_RUNNER_NAME:-NAME} \
+                              --labels gpu-latest \
+                              --replace && \
+                  ./run.sh'
+
+# Usage Examples -------------------------------------------------------------------------------------------------------
+
+# Build and Push
+# t=ultralytics/ultralytics:latest-runner && sudo docker build -f docker/Dockerfile-runner -t $t . && sudo docker push $t
+
+# Pull and Run in detached mode with access to GPUs 0 and 1
+# t=ultralytics/ultralytics:latest-runner && sudo docker run -d -e GITHUB_RUNNER_TOKEN=TOKEN -e GITHUB_RUNNER_NAME=NAME --ipc=host --gpus '"device=0,1"' $t

+ 40 - 0
examples/README.md

@@ -0,0 +1,40 @@
+## Ultralytics Examples
+
+This directory features a collection of real-world applications and walkthroughs, provided as either Python files or notebooks. Explore the examples below to see how YOLO can be integrated into various applications.
+
+### Ultralytics YOLO Example Applications
+
+| Title                                                                                                                                     | Format             | Contributor                                                                               |
+| ----------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | ----------------------------------------------------------------------------------------- |
+| [YOLO ONNX Detection Inference with C++](./YOLOv8-CPP-Inference)                                                                          | C++/ONNX           | [Justas Bartnykas](https://github.com/JustasBart)                                         |
+| [YOLO OpenCV ONNX Detection Python](./YOLOv8-OpenCV-ONNX-Python)                                                                          | OpenCV/Python/ONNX | [Farid Inawan](https://github.com/frdteknikelektro)                                       |
+| [YOLO C# ONNX-Runtime](https://github.com/dme-compunet/YoloSharp)                                                                         | .NET/ONNX-Runtime  | [Compunet](https://github.com/dme-compunet)                                               |
+| [YOLO .Net ONNX Detection C#](https://www.nuget.org/packages/Yolov8.Net)                                                                  | C# .Net            | [Samuel Stainback](https://github.com/sstainba)                                           |
+| [YOLOv8 on NVIDIA Jetson(TensorRT and DeepStream)](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/)                            | Python             | [Lakshantha](https://github.com/lakshanthad)                                              |
+| [YOLOv8 ONNXRuntime Python](./YOLOv8-ONNXRuntime)                                                                                         | Python/ONNXRuntime | [Semih Demirel](https://github.com/semihhdemirel)                                         |
+| [RTDETR ONNXRuntime Python](./RTDETR-ONNXRuntime-Python)                                                                                  | Python/ONNXRuntime | [Semih Demirel](https://github.com/semihhdemirel)                                         |
+| [YOLOv8 ONNXRuntime CPP](./YOLOv8-ONNXRuntime-CPP)                                                                                        | C++/ONNXRuntime    | [DennisJcy](https://github.com/DennisJcy), [Onuralp Sezer](https://github.com/onuralpszr) |
+| [RTDETR ONNXRuntime C#](https://github.com/Kayzwer/yolo-cs/blob/master/RTDETR.cs)                                                         | C#/ONNX            | [Kayzwer](https://github.com/Kayzwer)                                                     |
+| [YOLOv8 SAHI Video Inference](https://github.com/RizwanMunawar/ultralytics/blob/main/examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py) | Python             | [Muhammad Rizwan Munawar](https://github.com/RizwanMunawar)                               |
+| [YOLOv8 Region Counter](https://github.com/RizwanMunawar/ultralytics/blob/main/examples/YOLOv8-Region-Counter/yolov8_region_counter.py)   | Python             | [Muhammad Rizwan Munawar](https://github.com/RizwanMunawar)                               |
+| [YOLOv8 Segmentation ONNXRuntime Python](./YOLOv8-Segmentation-ONNXRuntime-Python)                                                        | Python/ONNXRuntime | [jamjamjon](https://github.com/jamjamjon)                                                 |
+| [YOLOv8 LibTorch CPP](./YOLOv8-LibTorch-CPP-Inference)                                                                                    | C++/LibTorch       | [Myyura](https://github.com/Myyura)                                                       |
+| [YOLOv8 OpenCV INT8 TFLite Python](./YOLOv8-TFLite-Python)                                                                                | Python             | [Wamiq Raza](https://github.com/wamiqraza)                                                |
+| [YOLOv8 All Tasks ONNXRuntime Rust](./YOLOv8-ONNXRuntime-Rust)                                                                            | Rust/ONNXRuntime   | [jamjamjon](https://github.com/jamjamjon)                                                 |
+| [YOLOv8 OpenVINO CPP](./YOLOv8-OpenVINO-CPP-Inference)                                                                                    | C++/OpenVINO       | [Erlangga Yudi Pradana](https://github.com/rlggyp)                                        |
+| [YOLOv5-YOLO11 ONNXRuntime Rust](./YOLO-Series-ONNXRuntime-Rust)                                                                          | Rust/ONNXRuntime   | [jamjamjon](https://github.com/jamjamjon)                                                 |
+
+### How to Contribute
+
+We greatly appreciate contributions from the community, including examples, applications, and guides. If you'd like to contribute, please follow these guidelines:
+
+1. **Create a pull request (PR)** with the title prefix `[Example]`, adding your new example folder to the `examples/` directory within the repository.
+2. **Ensure your project adheres to the following standards:**
+   - Makes use of the `ultralytics` package.
+   - Includes a `README.md` with clear instructions for setting up and running the example.
+   - Avoids adding large files or dependencies unless they are absolutely necessary for the example.
+   - Contributors should be willing to provide support for their examples and address related issues.
+
+For more detailed information and guidance on contributing, please visit our [contribution documentation](https://docs.ultralytics.com/help/contributing/).
+
+If you encounter any questions or concerns regarding these guidelines, feel free to open a PR or an issue in the repository, and we will assist you in the contribution process.

+ 43 - 0
examples/RTDETR-ONNXRuntime-Python/README.md

@@ -0,0 +1,43 @@
+# RTDETR - ONNX Runtime
+
+This project implements RTDETR using ONNX Runtime.
+
+## Installation
+
+To run this project, you need to install the required dependencies. The following instructions will guide you through the installation process.
+
+### Installing Required Dependencies
+
+You can install the required dependencies by running the following command:
+
+```bash
+pip install -r requirements.txt
+```
+
+### Installing `onnxruntime-gpu`
+
+If you have an NVIDIA GPU and want to leverage GPU acceleration, you can install the onnxruntime-gpu package using the following command:
+
+```bash
+pip install onnxruntime-gpu
+```
+
+Note: Make sure you have the appropriate GPU drivers installed on your system.
+
+### Installing `onnxruntime` (CPU version)
+
+If you don't have an NVIDIA GPU or prefer to use the CPU version of onnxruntime, you can install the onnxruntime package using the following command:
+
+```bash
+pip install onnxruntime
+```
+
+### Usage
+
+After successfully installing the required packages, you can run the RTDETR implementation using the following command:
+
+```bash
+python main.py --model rtdetr-l.onnx --img image.jpg --conf-thres 0.5 --iou-thres 0.5
+```
+
+Make sure to replace `rtdetr-l.onnx` with the path to your RTDETR ONNX model file, `image.jpg` with the path to your input image, and adjust the confidence threshold (`--conf-thres`) and IoU threshold (`--iou-thres`) values as needed.
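+
+The `RTDETR` class defined in `main.py` (shown below) can also be used programmatically; a minimal sketch, with placeholder file paths:
+
+```python
+# Programmatic use of the RTDETR class from main.py (paths are placeholders).
+import cv2
+
+from main import RTDETR
+
+detector = RTDETR("rtdetr-l.onnx", "image.jpg", conf_thres=0.5, iou_thres=0.5)
+annotated = detector.main()           # preprocess -> ONNX inference -> postprocess
+cv2.imwrite("output.jpg", annotated)  # save the annotated image instead of displaying it
+```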

+ 222 - 0
examples/RTDETR-ONNXRuntime-Python/main.py

@@ -0,0 +1,222 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+
+import cv2
+import numpy as np
+import onnxruntime as ort
+import torch
+
+from ultralytics.utils import ASSETS, yaml_load
+from ultralytics.utils.checks import check_requirements, check_yaml
+
+
+class RTDETR:
+    """RTDETR object detection model class for handling inference and visualization."""
+
+    def __init__(self, model_path, img_path, conf_thres=0.5, iou_thres=0.5):
+        """
+        Initializes the RTDETR object with the specified parameters.
+
+        Args:
+            model_path: Path to the ONNX model file.
+            img_path: Path to the input image.
+            conf_thres: Confidence threshold for object detection.
+            iou_thres: IoU threshold for non-maximum suppression
+        """
+        self.model_path = model_path
+        self.img_path = img_path
+        self.conf_thres = conf_thres
+        self.iou_thres = iou_thres
+
+        # Set up the ONNX runtime session with CUDA and CPU execution providers
+        self.session = ort.InferenceSession(model_path, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
+        self.model_input = self.session.get_inputs()
+        self.input_height = self.model_input[0].shape[2]  # NCHW input: shape[2] is height
+        self.input_width = self.model_input[0].shape[3]  # NCHW input: shape[3] is width
+
+        # Load class names from the COCO dataset YAML file
+        self.classes = yaml_load(check_yaml("coco8.yaml"))["names"]
+
+        # Generate a color palette for drawing bounding boxes
+        self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))
+
+    def draw_detections(self, box, score, class_id):
+        """
+        Draws bounding boxes and labels on the input image based on the detected objects.
+
+        Args:
+            box: Detected bounding box.
+            score: Corresponding detection score.
+            class_id: Class ID for the detected object.
+
+        Returns:
+            None
+        """
+        # Extract the coordinates of the bounding box
+        x1, y1, x2, y2 = box
+
+        # Retrieve the color for the class ID
+        color = self.color_palette[class_id]
+
+        # Draw the bounding box on the image
+        cv2.rectangle(self.img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
+
+        # Create the label text with class name and score
+        label = f"{self.classes[class_id]}: {score:.2f}"
+
+        # Calculate the dimensions of the label text
+        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
+
+        # Calculate the position of the label text
+        label_x = x1
+        label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
+
+        # Draw a filled rectangle as the background for the label text
+        cv2.rectangle(
+            self.img,
+            (int(label_x), int(label_y - label_height)),
+            (int(label_x + label_width), int(label_y + label_height)),
+            color,
+            cv2.FILLED,
+        )
+
+        # Draw the label text on the image
+        cv2.putText(
+            self.img, label, (int(label_x), int(label_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA
+        )
+
+    def preprocess(self):
+        """
+        Preprocesses the input image before performing inference.
+
+        Returns:
+            image_data: Preprocessed image data ready for inference.
+        """
+        # Read the input image using OpenCV
+        self.img = cv2.imread(self.img_path)
+
+        # Get the height and width of the input image
+        self.img_height, self.img_width = self.img.shape[:2]
+
+        # Convert the image color space from BGR to RGB
+        img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
+
+        # Resize the image to match the input shape
+        img = cv2.resize(img, (self.input_width, self.input_height))
+
+        # Normalize the image data by dividing it by 255.0
+        image_data = np.array(img) / 255.0
+
+        # Transpose the image to have the channel dimension as the first dimension
+        image_data = np.transpose(image_data, (2, 0, 1))  # Channel first
+
+        # Expand the dimensions of the image data to match the expected input shape
+        image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
+
+        # Return the preprocessed image data
+        return image_data
+
+    def bbox_cxcywh_to_xyxy(self, boxes):
+        """
+        Converts bounding boxes from (center x, center y, width, height) format to (x_min, y_min, x_max, y_max) format.
+
+        Args:
+            boxes (numpy.ndarray): An array of shape (N, 4) where each row represents
+                                a bounding box in (cx, cy, w, h) format.
+
+        Returns:
+            numpy.ndarray: An array of shape (N, 4) where each row represents
+                        a bounding box in (x_min, y_min, x_max, y_max) format.
+        """
+        # Calculate half width and half height of the bounding boxes
+        half_width = boxes[:, 2] / 2
+        half_height = boxes[:, 3] / 2
+
+        # Calculate the coordinates of the bounding boxes
+        x_min = boxes[:, 0] - half_width
+        y_min = boxes[:, 1] - half_height
+        x_max = boxes[:, 0] + half_width
+        y_max = boxes[:, 1] + half_height
+
+        # Return the bounding boxes in (x_min, y_min, x_max, y_max) format
+        return np.column_stack((x_min, y_min, x_max, y_max))
+
+    def postprocess(self, model_output):
+        """
+        Postprocesses the model output to extract detections and draw them on the input image.
+
+        Args:
+            model_output: Output of the model inference.
+
+        Returns:
+            np.array: Annotated image with detections.
+        """
+        # Squeeze the model output to remove unnecessary dimensions
+        outputs = np.squeeze(model_output[0])
+
+        # Extract bounding boxes and scores from the model output
+        boxes = outputs[:, :4]
+        scores = outputs[:, 4:]
+
+        # Get the class labels and scores for each detection
+        labels = np.argmax(scores, axis=1)
+        scores = np.max(scores, axis=1)
+
+        # Apply confidence threshold to filter out low-confidence detections
+        mask = scores > self.conf_thres
+        boxes, scores, labels = boxes[mask], scores[mask], labels[mask]
+
+        # Convert bounding boxes to (x_min, y_min, x_max, y_max) format
+        boxes = self.bbox_cxcywh_to_xyxy(boxes)
+
+        # Scale bounding boxes to match the original image dimensions
+        boxes[:, 0::2] *= self.img_width
+        boxes[:, 1::2] *= self.img_height
+
+        # Draw detections on the image
+        for box, score, label in zip(boxes, scores, labels):
+            self.draw_detections(box, score, label)
+
+        # Return the annotated image
+        return self.img
+
+    def main(self):
+        """
+        Executes the detection on the input image using the ONNX model.
+
+        Returns:
+            np.array: Output image with annotations.
+        """
+        # Preprocess the image for model input
+        image_data = self.preprocess()
+
+        # Run the model inference
+        model_output = self.session.run(None, {self.model_input[0].name: image_data})
+
+        # Process and return the model output
+        return self.postprocess(model_output)
+
+
+if __name__ == "__main__":
+    # Set up argument parser for command-line arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model", type=str, default="rtdetr-l.onnx", help="Path to the ONNX model file.")
+    parser.add_argument("--img", type=str, default=str(ASSETS / "bus.jpg"), help="Path to the input image.")
+    parser.add_argument("--conf-thres", type=float, default=0.5, help="Confidence threshold for object detection.")
+    parser.add_argument("--iou-thres", type=float, default=0.5, help="IoU threshold for non-maximum suppression.")
+    args = parser.parse_args()
+
+    # Check for dependencies and set up ONNX runtime
+    check_requirements("onnxruntime-gpu" if torch.cuda.is_available() else "onnxruntime")
+
+    # Create the detector instance with specified parameters
+    detection = RTDETR(args.model, args.img, args.conf_thres, args.iou_thres)
+
+    # Perform detection and get the output image
+    output_image = detection.main()
+
+    # Display the annotated output image
+    cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
+    cv2.imshow("Output", output_image)
+    cv2.waitKey(0)

+ 14 - 0
examples/YOLO-Series-ONNXRuntime-Rust/Cargo.toml

@@ -0,0 +1,14 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+[package]
+name = "YOLO-ONNXRuntime-Rust"
+version = "0.1.0"
+edition = "2021"
+authors = ["Jamjamjon <xxyydzml@outlook.com>"]
+
+[dependencies]
+anyhow = "1.0.92"
+clap = "4.5.20"
+tracing = "0.1.40"
+tracing-subscriber = "0.3.18"
+usls = { version = "0.0.19", features = ["auto"] }

+ 94 - 0
examples/YOLO-Series-ONNXRuntime-Rust/README.md

@@ -0,0 +1,94 @@
+# YOLO-Series ONNXRuntime Rust Demo for Core YOLO Tasks
+
+This repository provides a Rust demo for key YOLO-Series tasks such as `Classification`, `Segmentation`, `Detection`, `Pose Detection`, and `OBB` using ONNXRuntime. It supports various YOLO models (v5 - 11) across multiple vision tasks.
+
+## Introduction
+
+- This example leverages the latest versions of both ONNXRuntime and YOLO models.
+- We utilize the [usls](https://github.com/jamjamjon/usls/tree/main) crate to streamline YOLO model inference, providing efficient data loading, visualization, and optimized inference performance.
+
+## Features
+
+- **Extensive Model Compatibility**: Supports `YOLOv5`, `YOLOv6`, `YOLOv7`, `YOLOv8`, `YOLOv9`, `YOLOv10`, `YOLO11`, `YOLO-world`, `RTDETR`, and others, covering a wide range of YOLO versions.
+- **Versatile Task Coverage**: Includes `Classification`, `Segmentation`, `Detection`, `Pose`, and `OBB`.
+- **Precision Flexibility**: Works with `FP16` and `FP32` ONNX models.
+- **Execution Providers**: Accelerated support for `CPU`, `CUDA`, `CoreML`, and `TensorRT`.
+- **Dynamic Input Shapes**: Dynamically adjusts to variable `batch`, `width`, and `height` dimensions for flexible model input.
+- **Flexible Data Loading**: The `DataLoader` handles images, folders, videos, and video streams.
+- **Real-Time Display and Video Export**: `Viewer` provides real-time frame visualization and video export functions, similar to OpenCV’s `imshow()` and `imwrite()`.
+- **Enhanced Annotation and Visualization**: The `Annotator` facilitates comprehensive result rendering, with support for bounding boxes (HBB), oriented bounding boxes (OBB), polygons, masks, keypoints, and text labels.
+
+## Setup Instructions
+
+### 1. ONNXRuntime Linking
+
+<details>
+<summary>You have two options to link the ONNXRuntime library:</summary>
+
+- **Option 1: Manual Linking**
+
+  - For detailed setup, consult the [ONNX Runtime linking documentation](https://ort.pyke.io/setup/linking).
+  - **Linux or macOS**:
+    1. Download the ONNX Runtime package from the [Releases page](https://github.com/microsoft/onnxruntime/releases).
+    2. Set up the library path by exporting the `ORT_DYLIB_PATH` environment variable:
+       ```shell
+       export ORT_DYLIB_PATH=/path/to/onnxruntime/lib/libonnxruntime.so.1.19.0
+       ```
+
+- **Option 2: Automatic Download**
+  - Use the `--features auto` flag to handle downloading automatically:
+    ```shell
+    cargo run -r --example yolo --features auto
+    ```
+
+</details>
+
+### 2. \[Optional\] Install CUDA, CuDNN, and TensorRT
+
+- The CUDA execution provider requires CUDA version `12.x`.
+- The TensorRT execution provider requires both CUDA `12.x` and TensorRT `10.x`.
+
+### 3. \[Optional\] Install ffmpeg
+
+To view video frames and save video inferences, install `rust-ffmpeg`. For instructions, see:  
+[https://github.com/zmwangx/rust-ffmpeg/wiki/Notes-on-building#dependencies](https://github.com/zmwangx/rust-ffmpeg/wiki/Notes-on-building#dependencies)
+
+## Get Started
+
+```Shell
+# customized
+cargo run -r -- --task detect --ver v8 --nc 6 --model xxx.onnx  # YOLOv8
+
+# Classify
+cargo run -r -- --task classify --ver v5 --scale s --width 224 --height 224 --nc 1000  # YOLOv5
+cargo run -r -- --task classify --ver v8 --scale n --width 224 --height 224 --nc 1000  # YOLOv8
+cargo run -r -- --task classify --ver v11 --scale n --width 224 --height 224 --nc 1000  # YOLO11
+
+# Detect
+cargo run -r -- --task detect --ver v5 --scale n  # YOLOv5
+cargo run -r -- --task detect --ver v6 --scale n  # YOLOv6
+cargo run -r -- --task detect --ver v7 --scale t  # YOLOv7
+cargo run -r -- --task detect --ver v8 --scale n  # YOLOv8
+cargo run -r -- --task detect --ver v9 --scale t  # YOLOv9
+cargo run -r -- --task detect --ver v10 --scale n  # YOLOv10
+cargo run -r -- --task detect --ver v11 --scale n  # YOLO11
+cargo run -r -- --task detect --ver rtdetr --scale l  # RTDETR
+
+# Pose
+cargo run -r -- --task pose --ver v8 --scale n   # YOLOv8-Pose
+cargo run -r -- --task pose --ver v11 --scale n  # YOLO11-Pose
+
+# Segment
+cargo run -r -- --task segment --ver v5 --scale n  # YOLOv5-Segment
+cargo run -r -- --task segment --ver v8 --scale n  # YOLOv8-Segment
+cargo run -r -- --task segment --ver v11 --scale n  # YOLO11-Segment
+cargo run -r -- --task segment --ver v8 --model yolo/FastSAM-s-dyn-f16.onnx  # FastSAM
+
+# OBB
+cargo run -r -- --ver v8 --task obb --scale n --width 1024 --height 1024 --source images/dota.png  # YOLOv8-Obb
+cargo run -r -- --ver v11 --task obb --scale n --width 1024 --height 1024 --source images/dota.png  # YOLO11-Obb
+```
+
+**Run `cargo run -- --help` to see all available options.**
+
+For more details, please refer to [usls-yolo](https://github.com/jamjamjon/usls/tree/main/examples/yolo).

+ 236 - 0
examples/YOLO-Series-ONNXRuntime-Rust/src/main.rs

@@ -0,0 +1,236 @@
+use anyhow::Result;
+use clap::Parser;
+
+use usls::{
+    models::YOLO, Annotator, DataLoader, Device, Options, Viewer, Vision, YOLOScale, YOLOTask,
+    YOLOVersion, COCO_SKELETONS_16,
+};
+
+#[derive(Parser, Clone)]
+#[command(author, version, about, long_about = None)]
+pub struct Args {
+    /// Path to the ONNX model
+    #[arg(long)]
+    pub model: Option<String>,
+
+    /// Input source path
+    #[arg(long, default_value_t = String::from("../../ultralytics/assets/bus.jpg"))]
+    pub source: String,
+
+    /// YOLO Task
+    #[arg(long, value_enum, default_value_t = YOLOTask::Detect)]
+    pub task: YOLOTask,
+
+    /// YOLO Version
+    #[arg(long, value_enum, default_value_t = YOLOVersion::V8)]
+    pub ver: YOLOVersion,
+
+    /// YOLO Scale
+    #[arg(long, value_enum, default_value_t = YOLOScale::N)]
+    pub scale: YOLOScale,
+
+    /// Batch size
+    #[arg(long, default_value_t = 1)]
+    pub batch_size: usize,
+
+    /// Minimum input width
+    #[arg(long, default_value_t = 224)]
+    pub width_min: isize,
+
+    /// Input width
+    #[arg(long, default_value_t = 640)]
+    pub width: isize,
+
+    /// Maximum input width
+    #[arg(long, default_value_t = 1024)]
+    pub width_max: isize,
+
+    /// Minimum input height
+    #[arg(long, default_value_t = 224)]
+    pub height_min: isize,
+
+    /// Input height
+    #[arg(long, default_value_t = 640)]
+    pub height: isize,
+
+    /// Maximum input height
+    #[arg(long, default_value_t = 1024)]
+    pub height_max: isize,
+
+    /// Number of classes
+    #[arg(long, default_value_t = 80)]
+    pub nc: usize,
+
+    /// Class confidence
+    #[arg(long)]
+    pub confs: Vec<f32>,
+
+    /// Enable TensorRT support
+    #[arg(long)]
+    pub trt: bool,
+
+    /// Enable CUDA support
+    #[arg(long)]
+    pub cuda: bool,
+
+    /// Enable CoreML support
+    #[arg(long)]
+    pub coreml: bool,
+
+    /// Use TensorRT half precision
+    #[arg(long)]
+    pub half: bool,
+
+    /// Device ID to use
+    #[arg(long, default_value_t = 0)]
+    pub device_id: usize,
+
+    /// Enable performance profiling
+    #[arg(long)]
+    pub profile: bool,
+
+    /// Disable contour drawing, for saving time
+    #[arg(long)]
+    pub no_contours: bool,
+
+    /// Show result
+    #[arg(long)]
+    pub view: bool,
+
+    /// Do not save output
+    #[arg(long)]
+    pub nosave: bool,
+}
+
+fn main() -> Result<()> {
+    let args = Args::parse();
+
+    // logger
+    if args.profile {
+        tracing_subscriber::fmt()
+            .with_max_level(tracing::Level::INFO)
+            .init();
+    }
+
+    // model path
+    let path = match &args.model {
+        None => format!(
+            "yolo/{}-{}-{}.onnx",
+            args.ver.name(),
+            args.scale.name(),
+            args.task.name()
+        ),
+        Some(x) => x.to_string(),
+    };
+
+    // saveout
+    let saveout = match &args.model {
+        None => format!(
+            "{}-{}-{}",
+            args.ver.name(),
+            args.scale.name(),
+            args.task.name()
+        ),
+        Some(x) => {
+            let p = std::path::PathBuf::from(&x);
+            p.file_stem().unwrap().to_str().unwrap().to_string()
+        }
+    };
+
+    // device
+    let device = if args.cuda {
+        Device::Cuda(args.device_id)
+    } else if args.trt {
+        Device::Trt(args.device_id)
+    } else if args.coreml {
+        Device::CoreML(args.device_id)
+    } else {
+        Device::Cpu(args.device_id)
+    };
+
+    // build options
+    let options = Options::new()
+        .with_model(&path)?
+        .with_yolo_version(args.ver)
+        .with_yolo_task(args.task)
+        .with_device(device)
+        .with_trt_fp16(args.half)
+        .with_ixx(0, 0, (1, args.batch_size as _, 4).into())
+        .with_ixx(0, 2, (args.height_min, args.height, args.height_max).into())
+        .with_ixx(0, 3, (args.width_min, args.width, args.width_max).into())
+        .with_confs(if args.confs.is_empty() {
+            &[0.2, 0.15]
+        } else {
+            &args.confs
+        })
+        .with_nc(args.nc)
+        .with_find_contours(!args.no_contours) // find contours or not
+        // .with_names(&COCO_CLASS_NAMES_80)  // detection class names
+        // .with_names2(&COCO_KEYPOINTS_17) // keypoints class names
+        // .exclude_classes(&[0])
+        // .retain_classes(&[0, 5])
+        .with_profile(args.profile);
+
+    // build model
+    let mut model = YOLO::new(options)?;
+
+    // build dataloader
+    let dl = DataLoader::new(&args.source)?
+        .with_batch(model.batch() as _)
+        .build()?;
+
+    // build annotator
+    let annotator = Annotator::default()
+        .with_skeletons(&COCO_SKELETONS_16)
+        .without_masks(true) // no masks plotting when doing segment task
+        .with_bboxes_thickness(3)
+        .with_keypoints_name(false) // do not draw keypoint names
+        .with_saveout_subs(&["YOLO"])
+        .with_saveout(&saveout);
+
+    // build viewer
+    let mut viewer = if args.view {
+        Some(Viewer::new().with_delay(5).with_scale(1.).resizable(true))
+    } else {
+        None
+    };
+
+    // run & annotate
+    for (xs, _paths) in dl {
+        let ys = model.forward(&xs, args.profile)?;
+        let images_plotted = annotator.plot(&xs, &ys, !args.nosave)?;
+
+        // show image
+        match &mut viewer {
+            Some(viewer) => viewer.imshow(&images_plotted)?,
+            None => continue,
+        }
+
+        // check out window and key event
+        match &mut viewer {
+            Some(viewer) => {
+                if !viewer.is_open() || viewer.is_key_pressed(usls::Key::Escape) {
+                    break;
+                }
+            }
+            None => continue,
+        }
+
+        // write video
+        if !args.nosave {
+            match &mut viewer {
+                Some(viewer) => viewer.write_batch(&images_plotted)?,
+                None => continue,
+            }
+        }
+    }
+
+    // finish video write
+    if !args.nosave {
+        if let Some(viewer) = &mut viewer {
+            viewer.finish_write()?;
+        }
+    }
+
+    Ok(())
+}

+ 464 - 0
examples/YOLOv8-Action-Recognition/action_recognition.py

@@ -0,0 +1,464 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+import time
+from collections import defaultdict
+from typing import List, Optional, Tuple
+from urllib.parse import urlparse
+
+import cv2
+import numpy as np
+import torch
+from transformers import AutoModel, AutoProcessor
+
+from ultralytics import YOLO
+from ultralytics.data.loaders import get_best_youtube_url
+from ultralytics.utils.plotting import Annotator
+from ultralytics.utils.torch_utils import select_device
+
+
+class TorchVisionVideoClassifier:
+    """Classifies videos using pretrained TorchVision models; see https://pytorch.org/vision/stable/."""
+
+    from torchvision.models.video import (
+        MViT_V1_B_Weights,
+        MViT_V2_S_Weights,
+        R3D_18_Weights,
+        S3D_Weights,
+        Swin3D_B_Weights,
+        Swin3D_T_Weights,
+        mvit_v1_b,
+        mvit_v2_s,
+        r3d_18,
+        s3d,
+        swin3d_b,
+        swin3d_t,
+    )
+
+    model_name_to_model_and_weights = {
+        "s3d": (s3d, S3D_Weights.DEFAULT),
+        "r3d_18": (r3d_18, R3D_18_Weights.DEFAULT),
+        "swin3d_t": (swin3d_t, Swin3D_T_Weights.DEFAULT),
+        "swin3d_b": (swin3d_b, Swin3D_B_Weights.DEFAULT),
+        "mvit_v1_b": (mvit_v1_b, MViT_V1_B_Weights.DEFAULT),
+        "mvit_v2_s": (mvit_v2_s, MViT_V2_S_Weights.DEFAULT),
+    }
+
+    def __init__(self, model_name: str, device: str or torch.device = ""):
+        """
+        Initialize the VideoClassifier with the specified model name and device.
+
+        Args:
+            model_name (str): The name of the model to use.
+            device (str or torch.device, optional): The device to run the model on. Defaults to "".
+
+        Raises:
+            ValueError: If an invalid model name is provided.
+        """
+        if model_name not in self.model_name_to_model_and_weights:
+            raise ValueError(f"Invalid model name '{model_name}'. Available models: {self.available_model_names()}")
+        model, self.weights = self.model_name_to_model_and_weights[model_name]
+        self.device = select_device(device)
+        self.model = model(weights=self.weights).to(self.device).eval()
+
+    @staticmethod
+    def available_model_names() -> List[str]:
+        """
+        Get the list of available model names.
+
+        Returns:
+            list: List of available model names.
+        """
+        return list(TorchVisionVideoClassifier.model_name_to_model_and_weights.keys())
+
+    def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: list = None) -> torch.Tensor:
+        """
+        Preprocess a list of crops for video classification.
+
+        Args:
+            crops (List[np.ndarray]): List of crops to preprocess. Each crop should have dimensions (H, W, C)
+            input_size (tuple, optional): The target input size for the model. Defaults to (224, 224).
+
+        Returns:
+            torch.Tensor: Preprocessed crops as a tensor with dimensions (1, T, C, H, W).
+        """
+        if input_size is None:
+            input_size = [224, 224]
+        from torchvision.transforms import v2
+
+        transform = v2.Compose(
+            [
+                v2.ToDtype(torch.float32, scale=True),
+                v2.Resize(input_size, antialias=True),
+                v2.Normalize(mean=self.weights.transforms().mean, std=self.weights.transforms().std),
+            ]
+        )
+
+        processed_crops = [transform(torch.from_numpy(crop).permute(2, 0, 1)) for crop in crops]
+        return torch.stack(processed_crops).unsqueeze(0).permute(0, 2, 1, 3, 4).to(self.device)
+
+    def __call__(self, sequences: torch.Tensor):
+        """
+        Perform inference on the given sequences.
+
+        Args:
+            sequences (torch.Tensor): The input sequences for the model. The expected input dimensions are
+                                      (B, T, C, H, W) for batched video frames or (T, C, H, W) for single video frames.
+
+        Returns:
+            torch.Tensor: The model's output.
+        """
+        with torch.inference_mode():
+            return self.model(sequences)
+
+    def postprocess(self, outputs: torch.Tensor) -> Tuple[List[str], List[float]]:
+        """
+        Postprocess the model's batch output.
+
+        Args:
+            outputs (torch.Tensor): The model's output.
+
+        Returns:
+            List[str]: The predicted labels.
+            List[float]: The predicted confidences.
+        """
+        pred_labels = []
+        pred_confs = []
+        for output in outputs:
+            pred_class = output.argmax(0).item()
+            pred_label = self.weights.meta["categories"][pred_class]
+            pred_labels.append(pred_label)
+            pred_conf = output.softmax(0)[pred_class].item()
+            pred_confs.append(pred_conf)
+
+        return pred_labels, pred_confs
+
+
+class HuggingFaceVideoClassifier:
+    """Zero-shot video classifier using Hugging Face models for various devices."""
+
+    def __init__(
+        self,
+        labels: List[str],
+        model_name: str = "microsoft/xclip-base-patch16-zero-shot",
+        device: str or torch.device = "",
+        fp16: bool = False,
+    ):
+        """
+        Initialize the HuggingFaceVideoClassifier with the specified model name.
+
+        Args:
+            labels (List[str]): List of labels for zero-shot classification.
+            model_name (str): The name of the model to use. Defaults to "microsoft/xclip-base-patch16-zero-shot".
+            device (str or torch.device, optional): The device to run the model on. Defaults to "".
+            fp16 (bool, optional): Whether to use FP16 for inference. Defaults to False.
+        """
+        self.fp16 = fp16
+        self.labels = labels
+        self.device = select_device(device)
+        self.processor = AutoProcessor.from_pretrained(model_name)
+        model = AutoModel.from_pretrained(model_name).to(self.device)
+        if fp16:
+            model = model.half()
+        self.model = model.eval()
+
+    def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: list = None) -> torch.Tensor:
+        """
+        Preprocess a list of crops for video classification.
+
+        Args:
+            crops (List[np.ndarray]): List of crops to preprocess. Each crop should have dimensions (H, W, C)
+            input_size (tuple, optional): The target input size for the model. Defaults to (224, 224).
+
+        Returns:
+            torch.Tensor: Preprocessed crops as a tensor (1, T, C, H, W).
+        """
+        if input_size is None:
+            input_size = [224, 224]
+        from torchvision import transforms
+
+        transform = transforms.Compose(
+            [
+                transforms.Lambda(lambda x: x.float() / 255.0),
+                transforms.Resize(input_size),
+                transforms.Normalize(
+                    mean=self.processor.image_processor.image_mean, std=self.processor.image_processor.image_std
+                ),
+            ]
+        )
+
+        processed_crops = [transform(torch.from_numpy(crop).permute(2, 0, 1)) for crop in crops]  # (T, C, H, W)
+        output = torch.stack(processed_crops).unsqueeze(0).to(self.device)  # (1, T, C, H, W)
+        if self.fp16:
+            output = output.half()
+        return output
+
+    def __call__(self, sequences: torch.Tensor) -> torch.Tensor:
+        """
+        Perform inference on the given sequences.
+
+        Args:
+            sequences (torch.Tensor): The input sequences for the model. Batched video frames with shape (B, T, H, W, C).
+
+        Returns:
+            torch.Tensor: The model's output.
+        """
+        input_ids = self.processor(text=self.labels, return_tensors="pt", padding=True)["input_ids"].to(self.device)
+
+        inputs = {"pixel_values": sequences, "input_ids": input_ids}
+
+        with torch.inference_mode():
+            outputs = self.model(**inputs)
+
+        return outputs.logits_per_video
+
+    def postprocess(self, outputs: torch.Tensor) -> Tuple[List[List[str]], List[List[float]]]:
+        """
+        Postprocess the model's batch output.
+
+        Args:
+            outputs (torch.Tensor): The model's output.
+
+        Returns:
+            List[List[str]]: The predicted top-2 labels for each video.
+            List[List[float]]: The predicted top-2 confidences for each video.
+        """
+        pred_labels = []
+        pred_confs = []
+
+        with torch.no_grad():
+            logits_per_video = outputs  # Assuming outputs is already the logits tensor
+            probs = logits_per_video.softmax(dim=-1)  # Use softmax to convert logits to probabilities
+
+        for prob in probs:
+            top2_indices = prob.topk(2).indices.tolist()
+            top2_labels = [self.labels[idx] for idx in top2_indices]
+            top2_confs = prob[top2_indices].tolist()
+            pred_labels.append(top2_labels)
+            pred_confs.append(top2_confs)
+
+        return pred_labels, pred_confs
+
+
+def crop_and_pad(frame, box, margin_percent):
+    """Crop box with margin and take square crop from frame."""
+    x1, y1, x2, y2 = map(int, box)
+    w, h = x2 - x1, y2 - y1
+
+    # Add margin
+    margin_x, margin_y = int(w * margin_percent / 100), int(h * margin_percent / 100)
+    x1, y1 = max(0, x1 - margin_x), max(0, y1 - margin_y)
+    x2, y2 = min(frame.shape[1], x2 + margin_x), min(frame.shape[0], y2 + margin_y)
+
+    # Take square crop from frame
+    size = max(y2 - y1, x2 - x1)
+    center_y, center_x = (y1 + y2) // 2, (x1 + x2) // 2
+    half_size = size // 2
+    square_crop = frame[
+        max(0, center_y - half_size) : min(frame.shape[0], center_y + half_size),
+        max(0, center_x - half_size) : min(frame.shape[1], center_x + half_size),
+    ]
+
+    return cv2.resize(square_crop, (224, 224), interpolation=cv2.INTER_LINEAR)
+
+
+def run(
+    weights: str = "yolo11n.pt",
+    device: str = "",
+    source: str = "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
+    output_path: Optional[str] = None,
+    crop_margin_percentage: int = 10,
+    num_video_sequence_samples: int = 8,
+    skip_frame: int = 2,
+    video_cls_overlap_ratio: float = 0.25,
+    fp16: bool = False,
+    video_classifier_model: str = "microsoft/xclip-base-patch32",
+    labels: List[str] = None,
+) -> None:
+    """
+    Run action recognition on a video source using YOLO for object detection and a video classifier.
+
+    Args:
+        weights (str): Path to the YOLO model weights. Defaults to "yolo11n.pt".
+        device (str): Device to run the model on. Use 'cuda' for NVIDIA GPU, 'mps' for Apple Silicon, or 'cpu'. Defaults to auto-detection.
+        source (str): Path to mp4 video file or YouTube URL. Defaults to a sample YouTube video.
+        output_path (Optional[str], optional): Path to save the output video. Defaults to None.
+        crop_margin_percentage (int, optional): Percentage of margin to add around detected objects. Defaults to 10.
+        num_video_sequence_samples (int, optional): Number of video frames to use for classification. Defaults to 8.
+        skip_frame (int, optional): Number of frames to skip between detections. Defaults to 2.
+        video_cls_overlap_ratio (float, optional): Overlap ratio between video sequences. Defaults to 0.25.
+        fp16 (bool, optional): Whether to use half-precision floating point. Defaults to False.
+        video_classifier_model (str, optional): Name or path of the video classifier model. Defaults to "microsoft/xclip-base-patch32".
+        labels (List[str], optional): List of labels for zero-shot classification. Defaults to predefined list.
+
+    Returns:
+        None
+    """
+    if labels is None:
+        labels = [
+            "walking",
+            "running",
+            "brushing teeth",
+            "looking into phone",
+            "weight lifting",
+            "cooking",
+            "sitting",
+        ]
+    # Initialize models and device
+    device = select_device(device)
+    yolo_model = YOLO(weights).to(device)
+    if video_classifier_model in TorchVisionVideoClassifier.available_model_names():
+        print("'fp16' is not supported for TorchVisionVideoClassifier. Setting fp16 to False.")
+        print(
+            "'labels' is not used for TorchVisionVideoClassifier. Ignoring the provided labels and using Kinetics-400 labels."
+        )
+        video_classifier = TorchVisionVideoClassifier(video_classifier_model, device=device)
+    else:
+        video_classifier = HuggingFaceVideoClassifier(
+            labels, model_name=video_classifier_model, device=device, fp16=fp16
+        )
+
+    # Initialize video capture
+    if source.startswith("http") and urlparse(source).hostname in {"www.youtube.com", "youtube.com", "youtu.be"}:
+        source = get_best_youtube_url(source)
+    elif not source.endswith(".mp4"):
+        raise ValueError("Invalid source. Supported sources are YouTube URLs and MP4 files.")
+    cap = cv2.VideoCapture(source)
+
+    # Get video properties
+    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    fps = cap.get(cv2.CAP_PROP_FPS)
+
+    # Initialize VideoWriter
+    if output_path is not None:
+        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+        out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
+
+    # Initialize track history
+    track_history = defaultdict(list)
+    frame_counter = 0
+
+    track_ids_to_infer = []
+    crops_to_infer = []
+    pred_labels = []
+    pred_confs = []
+
+    while cap.isOpened():
+        success, frame = cap.read()
+        if not success:
+            break
+
+        frame_counter += 1
+
+        # Run YOLO tracking
+        results = yolo_model.track(frame, persist=True, classes=[0])  # Track only person class
+
+        if results[0].boxes.id is not None:
+            boxes = results[0].boxes.xyxy.cpu().numpy()
+            track_ids = results[0].boxes.id.cpu().numpy()
+
+            # Visualize prediction
+            annotator = Annotator(frame, line_width=3, font_size=10, pil=False)
+
+            if frame_counter % skip_frame == 0:
+                crops_to_infer = []
+                track_ids_to_infer = []
+
+            for box, track_id in zip(boxes, track_ids):
+                if frame_counter % skip_frame == 0:
+                    crop = crop_and_pad(frame, box, crop_margin_percentage)
+                    track_history[track_id].append(crop)
+
+                if len(track_history[track_id]) > num_video_sequence_samples:
+                    track_history[track_id].pop(0)
+
+                if len(track_history[track_id]) == num_video_sequence_samples and frame_counter % skip_frame == 0:
+                    start_time = time.time()
+                    crops = video_classifier.preprocess_crops_for_video_cls(track_history[track_id])
+                    end_time = time.time()
+                    preprocess_time = end_time - start_time
+                    print(f"video cls preprocess time: {preprocess_time:.4f} seconds")
+                    crops_to_infer.append(crops)
+                    track_ids_to_infer.append(track_id)
+
+            if crops_to_infer and (
+                not pred_labels
+                or frame_counter % int(num_video_sequence_samples * skip_frame * (1 - video_cls_overlap_ratio)) == 0
+            ):
+                crops_batch = torch.cat(crops_to_infer, dim=0)
+
+                start_inference_time = time.time()
+                output_batch = video_classifier(crops_batch)
+                end_inference_time = time.time()
+                inference_time = end_inference_time - start_inference_time
+                print(f"video cls inference time: {inference_time:.4f} seconds")
+
+                pred_labels, pred_confs = video_classifier.postprocess(output_batch)
+
+            if track_ids_to_infer and crops_to_infer:
+                for box, track_id, pred_label, pred_conf in zip(boxes, track_ids_to_infer, pred_labels, pred_confs):
+                    top2_preds = sorted(zip(pred_label, pred_conf), key=lambda x: x[1], reverse=True)
+                    label_text = " | ".join([f"{label} ({conf:.2f})" for label, conf in top2_preds])
+                    annotator.box_label(box, label_text, color=(0, 0, 255))
+
+        # Write the annotated frame to the output video
+        if output_path is not None:
+            out.write(frame)
+
+        # Display the annotated frame
+        cv2.imshow("YOLOv8 Tracking with S3D Classification", frame)
+
+        if cv2.waitKey(1) & 0xFF == ord("q"):
+            break
+
+    cap.release()
+    if output_path is not None:
+        out.release()
+    cv2.destroyAllWindows()
+
+
+def parse_opt():
+    """Parse command line arguments."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--weights", type=str, default="yolo11n.pt", help="ultralytics detector model path")
+    parser.add_argument("--device", default="", help='cuda device, i.e. 0 or 0,1,2,3 or cpu/mps, "" for auto-detection')
+    parser.add_argument(
+        "--source",
+        type=str,
+        default="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
+        help="video file path or youtube URL",
+    )
+    parser.add_argument("--output-path", type=str, default="output_video.mp4", help="output video file path")
+    parser.add_argument(
+        "--crop-margin-percentage", type=int, default=10, help="percentage of margin to add around detected objects"
+    )
+    parser.add_argument(
+        "--num-video-sequence-samples", type=int, default=8, help="number of video frames to use for classification"
+    )
+    parser.add_argument("--skip-frame", type=int, default=2, help="number of frames to skip between detections")
+    parser.add_argument(
+        "--video-cls-overlap-ratio", type=float, default=0.25, help="overlap ratio between video sequences"
+    )
+    parser.add_argument("--fp16", action="store_true", help="use FP16 for inference")
+    parser.add_argument(
+        "--video-classifier-model", type=str, default="microsoft/xclip-base-patch32", help="video classifier model name"
+    )
+    parser.add_argument(
+        "--labels",
+        nargs="+",
+        type=str,
+        default=["dancing", "singing a song"],
+        help="labels for zero-shot video classification",
+    )
+    return parser.parse_args()
+
+
+def main(opt):
+    """Main function."""
+    run(**vars(opt))
+
+
+if __name__ == "__main__":
+    opt = parse_opt()
+    main(opt)

+ 116 - 0
examples/YOLOv8-Action-Recognition/readme.md

@@ -0,0 +1,116 @@
+# Zero-shot Action Recognition with YOLOv8 (Inference on Video)
+
+- Action recognition is a technique used to identify and classify actions performed by individuals in a video. This process enables more advanced analyses when multiple actions are considered. The actions can be detected and classified in real time.
+- The system can be customized to recognize specific actions based on the user's preferences and requirements.
+
+## Table of Contents
+
+- [Step 1: Install the Required Libraries](#step-1-install-the-required-libraries)
+- [Step 2: Run the Action Recognition Using Ultralytics YOLOv8](#step-2-run-the-action-recognition-using-ultralytics-yolov8)
+- [Usage Options](#usage-options)
+- [FAQ](#faq)
+
+## Step 1: Install the Required Libraries
+
+Clone the repository, install dependencies and `cd` to this local directory for commands in Step 2.
+
+```bash
+# Clone ultralytics repo
+git clone https://github.com/ultralytics/ultralytics
+
+# cd to local directory
+cd examples/YOLOv8-Action-Recognition
+
+# Install dependencies
+pip install -U -r requirements.txt
+```
+
+## Step 2: Run the Action Recognition Using Ultralytics YOLOv8
+
+Here are the basic commands for running the inference:
+
+### Note
+
+The action recognition model will automatically detect and track people in the video, and classify their actions based on the specified labels. The results will be displayed in real-time on the video output. You can customize the action labels by modifying the `--labels` argument when running the script.
+
+```bash
+# Quick start
+python action_recognition.py
+
+# Basic usage
+python action_recognition.py --source "https://www.youtube.com/watch?v=dQw4w9WgXcQ" --labels "dancing" "singing a song"
+
+# Use local video file
+python action_recognition.py --source path/to/video.mp4
+
+# Better detector performance
+python action_recognition.py --weights yolov8m.pt
+
+# Run on CPU
+python action_recognition.py --device cpu
+
+# Use a different video classifier model
+python action_recognition.py --video-classifier-model "s3d"
+
+# Use FP16 for inference (only for HuggingFace models)
+python action_recognition.py --fp16
+
+# Export output as mp4
+python action_recognition.py --output-path output.mp4
+
+# Combine multiple options
+python action_recognition.py --source "https://www.youtube.com/watch?v=dQw4w9WgXcQ" --device 0 --video-classifier-model "microsoft/xclip-base-patch32" --labels "dancing" "singing a song" --fp16
+```
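+
+You can also call the same pipeline from Python instead of the command line. The sketch below is illustrative only: it assumes you run it from this directory so that `action_recognition.py` is importable, and that `path/to/video.mp4` points to a real file.
+
+```python
+from action_recognition import run
+
+run(
+    weights="yolo11n.pt",                                   # detector weights
+    source="path/to/video.mp4",                             # local MP4 or YouTube URL
+    labels=["dancing", "singing a song"],                   # zero-shot action labels
+    video_classifier_model="microsoft/xclip-base-patch32",  # Hugging Face classifier
+    output_path="output.mp4",                               # optional annotated export
+)
+```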
+
+## Usage Options
+
+- `--weights`: Path to the YOLO model weights (default: "yolo11n.pt")
+- `--device`: CUDA device, i.e. 0 or 0,1,2,3, or cpu/mps (default: auto-detect)
+- `--source`: Video file path or YouTube URL (default: "[rickroll](https://www.youtube.com/watch?v=dQw4w9WgXcQ)")
+- `--output-path`: Output video file path
+- `--crop-margin-percentage`: Percentage of margin to add around detected objects (default: 10)
+- `--num-video-sequence-samples`: Number of video frames to use for classification (default: 8)
+- `--skip-frame`: Number of frames to skip between detections (default: 2)
+- `--video-cls-overlap-ratio`: Overlap ratio between video sequences (default: 0.25)
+- `--fp16`: Use FP16 for inference (only for HuggingFace models)
+- `--video-classifier-model`: Video classifier model name or path (default: "microsoft/xclip-base-patch32")
+- `--labels`: Labels for zero-shot video classification (default: \["dancing" "singing a song"\])
+
+## FAQ
+
+**1. What Does Action Recognition Involve?**
+
+Action recognition is a computational method used to identify and classify actions or activities performed by individuals in recorded video or real-time streams. This technique is widely used in video analysis, surveillance, and human-computer interaction, enabling the detection and understanding of human behaviors based on their motion patterns and context.
+
+**2. Are Custom Action Labels Supported by the Action Recognition System?**
+
+Yes, custom action labels are supported by the action recognition system. The `action_recognition.py` script allows users to specify their own custom labels for zero-shot video classification. This can be done using the `--labels` argument when running the script. For example:
+
+```bash
+python action_recognition.py --source https://www.youtube.com/watch?v=dQw4w9WgXcQ --labels "dancing" "singing" "jumping"
+```
+
+You can adjust these labels to match the specific actions you want to recognize in your video. The system will then attempt to classify the detected actions based on these custom labels.
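+
+Under the hood, the script wraps the zero-shot model in a small `HuggingFaceVideoClassifier` class. The sketch below is a hedged illustration of how that class scores a short sequence of person crops against your labels; the dummy `person_crops` list stands in for the real crops produced by the tracker:
+
+```python
+import numpy as np
+
+from action_recognition import HuggingFaceVideoClassifier
+
+classifier = HuggingFaceVideoClassifier(
+    labels=["dancing", "singing", "jumping"],
+    model_name="microsoft/xclip-base-patch32",
+)
+
+# 8 dummy HxWx3 uint8 frames standing in for crops of one tracked person
+person_crops = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]
+
+sequence = classifier.preprocess_crops_for_video_cls(person_crops)  # (1, T, C, H, W)
+logits = classifier(sequence)                                       # logits per video
+top_labels, top_confs = classifier.postprocess(logits)              # top-2 labels and scores
+print(top_labels, top_confs)
+```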
+
+Additionally, you can choose between different video classification models:
+
+1. For Hugging Face models, you can use any compatible video classification model. The default is set to:
+
+   - "microsoft/xclip-base-patch32"
+
+2. For TorchVision models (no support for zero-shot labels), you can select from the following options:
+
+   - "s3d"
+   - "r3d_18"
+   - "swin3d_t"
+   - "swin3d_b"
+   - "mvit_v1_b"
+   - "mvit_v2_s"
+
+**3. Why Combine Action Recognition with YOLOv8?**
+
+YOLOv8 specializes in the detection and tracking of objects in video streams. Action recognition complements this by enabling the identification and classification of actions performed by individuals, making it a valuable application of YOLOv8.
+
+**4. Can I Employ Other YOLO Versions?**
+
+Certainly, you have the flexibility to specify different YOLO model weights using the `--weights` option.

+ 4 - 0
examples/YOLOv8-Action-Recognition/requirements.txt

@@ -0,0 +1,4 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+ultralytics
+transformers

+ 28 - 0
examples/YOLOv8-CPP-Inference/CMakeLists.txt

@@ -0,0 +1,28 @@
+cmake_minimum_required(VERSION 3.5)
+
+project(Yolov8CPPInference VERSION 0.1)
+
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+# CUDA
+set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda")
+find_package(CUDA 11 REQUIRED)
+
+set(CMAKE_CUDA_STANDARD 11)
+set(CMAKE_CUDA_STANDARD_REQUIRED ON)
+# !CUDA
+
+# OpenCV
+find_package(OpenCV REQUIRED)
+include_directories(${OpenCV_INCLUDE_DIRS})
+# !OpenCV
+
+set(PROJECT_SOURCES
+    main.cpp
+
+    inference.h
+    inference.cpp
+)
+
+add_executable(Yolov8CPPInference ${PROJECT_SOURCES})
+target_link_libraries(Yolov8CPPInference ${OpenCV_LIBS})

+ 50 - 0
examples/YOLOv8-CPP-Inference/README.md

@@ -0,0 +1,50 @@
+# YOLOv8/YOLOv5 Inference C++
+
+This example demonstrates how to perform inference using YOLOv8 and YOLOv5 models in C++ with the OpenCV DNN API.
+
+## Usage
+
+```bash
+git clone https://github.com/ultralytics/ultralytics
+cd ultralytics
+pip install .
+cd examples/YOLOv8-CPP-Inference
+
+# Add a **yolov8\_.onnx** and/or **yolov5\_.onnx** model(s) to the ultralytics folder.
+# Edit the **main.cpp** to change the **projectBasePath** to match your user.
+
+# Note that by default the CMake file will try to import the CUDA library to be used with OpenCV's DNN module (cuDNN) for GPU inference.
+# If your OpenCV build does not use CUDA/cuDNN you can remove that import call and run the example on CPU.
+
+mkdir build
+cd build
+cmake ..
+make
+./Yolov8CPPInference
+```
+
+## Exporting YOLOv8 and YOLOv5 Models
+
+To export YOLOv8 models:
+
+```bash
+yolo export model=yolov8s.pt imgsz=480,640 format=onnx opset=12
+```
+
+To export YOLOv5 models:
+
+```bash
+python3 export.py --weights yolov5s.pt --img 480 640 --include onnx --opset 12
+```
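+
+Before building the C++ project you can optionally sanity-check the exported ONNX file from Python. This is a hedged sketch (it assumes `onnxruntime` is installed; the tensor names shown are examples only):
+
+```python
+import onnxruntime as ort
+
+session = ort.InferenceSession("yolov8s.onnx", providers=["CPUExecutionProvider"])
+print([(i.name, i.shape) for i in session.get_inputs()])   # e.g. [('images', [1, 3, 480, 640])]
+print([(o.name, o.shape) for o in session.get_outputs()])  # e.g. [('output0', [1, 84, 6300])]
+```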
+
+yolov8s.onnx:
+
+![image](https://user-images.githubusercontent.com/40023722/217356132-a4cecf2e-2729-4acb-b80a-6559022d7707.png)
+
+yolov5s.onnx:
+
+![image](https://user-images.githubusercontent.com/40023722/217357005-07464492-d1da-42e3-98a7-fc753f87d5e6.png)
+
+This repository utilizes OpenCV DNN API to run ONNX exported models of YOLOv5 and YOLOv8. In theory, it should work for YOLOv6 and YOLOv7 as well, but they have not been tested. Note that the example networks are exported with rectangular (640x480) resolutions, but any exported resolution will work. You may want to use the letterbox approach for square images, depending on your use case.
+
+The **main** branch version uses Qt as a GUI wrapper. The primary focus here is the **Inference** class file, which demonstrates how to transpose YOLOv8 models to work as YOLOv5 models.
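+
+For reference, the transpose mentioned above is easy to see outside of C++: YOLOv8 detection heads emit `(batchSize, 84, 8400)` while YOLOv5 emits `(batchSize, 25200, 85)`, and the `Inference` class detects the YOLOv8 layout by checking whether the last dimension is larger than the second, then transposes it so both models share one decoding loop. A minimal NumPy illustration of that idea (not part of this example's sources):
+
+```python
+import numpy as np
+
+output = np.random.rand(1, 84, 8400).astype(np.float32)  # dummy YOLOv8-style head output
+
+rows, dims = output.shape[1], output.shape[2]
+if dims > rows:  # YOLOv8 layout: (batch, 4 + num_classes, num_boxes)
+    output = output.transpose(0, 2, 1)  # -> (batch, num_boxes, 4 + num_classes), YOLOv5-like
+
+for det in output[0]:
+    cx, cy, w, h = det[:4]  # box center/size in network coordinates
+    class_scores = det[4:]  # per-class scores (YOLOv8 has no separate objectness score)
+```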

+ 185 - 0
examples/YOLOv8-CPP-Inference/inference.cpp

@@ -0,0 +1,185 @@
+#include "inference.h"
+
+Inference::Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape, const std::string &classesTxtFile, const bool &runWithCuda)
+{
+    modelPath = onnxModelPath;
+    modelShape = modelInputShape;
+    classesPath = classesTxtFile;
+    cudaEnabled = runWithCuda;
+
+    loadOnnxNetwork();
+    // loadClassesFromFile(); The classes are hard-coded for this example
+}
+
+std::vector<Detection> Inference::runInference(const cv::Mat &input)
+{
+    cv::Mat modelInput = input;
+    if (letterBoxForSquare && modelShape.width == modelShape.height)
+        modelInput = formatToSquare(modelInput);
+
+    cv::Mat blob;
+    cv::dnn::blobFromImage(modelInput, blob, 1.0/255.0, modelShape, cv::Scalar(), true, false);
+    net.setInput(blob);
+
+    std::vector<cv::Mat> outputs;
+    net.forward(outputs, net.getUnconnectedOutLayersNames());
+
+    int rows = outputs[0].size[1];
+    int dimensions = outputs[0].size[2];
+
+    bool yolov8 = false;
+    // yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c])
+    // yolov8 has an output of shape (batchSize, 84,  8400) (Num classes + box[x,y,w,h])
+    if (dimensions > rows) // Check if the shape[2] is more than shape[1] (yolov8)
+    {
+        yolov8 = true;
+        rows = outputs[0].size[2];
+        dimensions = outputs[0].size[1];
+
+        outputs[0] = outputs[0].reshape(1, dimensions);
+        cv::transpose(outputs[0], outputs[0]);
+    }
+    float *data = (float *)outputs[0].data;
+
+    float x_factor = modelInput.cols / modelShape.width;
+    float y_factor = modelInput.rows / modelShape.height;
+
+    std::vector<int> class_ids;
+    std::vector<float> confidences;
+    std::vector<cv::Rect> boxes;
+
+    for (int i = 0; i < rows; ++i)
+    {
+        if (yolov8)
+        {
+            float *classes_scores = data+4;
+
+            cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);
+            cv::Point class_id;
+            double maxClassScore;
+
+            minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);
+
+            if (maxClassScore > modelScoreThreshold)
+            {
+                confidences.push_back(maxClassScore);
+                class_ids.push_back(class_id.x);
+
+                float x = data[0];
+                float y = data[1];
+                float w = data[2];
+                float h = data[3];
+
+                int left = int((x - 0.5 * w) * x_factor);
+                int top = int((y - 0.5 * h) * y_factor);
+
+                int width = int(w * x_factor);
+                int height = int(h * y_factor);
+
+                boxes.push_back(cv::Rect(left, top, width, height));
+            }
+        }
+        else // yolov5
+        {
+            float confidence = data[4];
+
+            if (confidence >= modelConfidenceThreshold)
+            {
+                float *classes_scores = data+5;
+
+                cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);
+                cv::Point class_id;
+                double max_class_score;
+
+                minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
+
+                if (max_class_score > modelScoreThreshold)
+                {
+                    confidences.push_back(confidence);
+                    class_ids.push_back(class_id.x);
+
+                    float x = data[0];
+                    float y = data[1];
+                    float w = data[2];
+                    float h = data[3];
+
+                    int left = int((x - 0.5 * w) * x_factor);
+                    int top = int((y - 0.5 * h) * y_factor);
+
+                    int width = int(w * x_factor);
+                    int height = int(h * y_factor);
+
+                    boxes.push_back(cv::Rect(left, top, width, height));
+                }
+            }
+        }
+
+        data += dimensions;
+    }
+
+    std::vector<int> nms_result;
+    cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result);
+
+    std::vector<Detection> detections{};
+    for (unsigned long i = 0; i < nms_result.size(); ++i)
+    {
+        int idx = nms_result[i];
+
+        Detection result;
+        result.class_id = class_ids[idx];
+        result.confidence = confidences[idx];
+
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_int_distribution<int> dis(100, 255);
+        result.color = cv::Scalar(dis(gen),
+                                  dis(gen),
+                                  dis(gen));
+
+        result.className = classes[result.class_id];
+        result.box = boxes[idx];
+
+        detections.push_back(result);
+    }
+
+    return detections;
+}
+
+void Inference::loadClassesFromFile()
+{
+    std::ifstream inputFile(classesPath);
+    if (inputFile.is_open())
+    {
+        std::string classLine;
+        while (std::getline(inputFile, classLine))
+            classes.push_back(classLine);
+        inputFile.close();
+    }
+}
+
+void Inference::loadOnnxNetwork()
+{
+    net = cv::dnn::readNetFromONNX(modelPath);
+    if (cudaEnabled)
+    {
+        std::cout << "\nRunning on CUDA" << std::endl;
+        net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
+        net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);
+    }
+    else
+    {
+        std::cout << "\nRunning on CPU" << std::endl;
+        net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
+        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
+    }
+}
+
+cv::Mat Inference::formatToSquare(const cv::Mat &source)
+{
+    int col = source.cols;
+    int row = source.rows;
+    int _max = MAX(col, row);
+    cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3);
+    source.copyTo(result(cv::Rect(0, 0, col, row)));
+    return result;
+}

+ 52 - 0
examples/YOLOv8-CPP-Inference/inference.h

@@ -0,0 +1,52 @@
+#ifndef INFERENCE_H
+#define INFERENCE_H
+
+// Cpp native
+#include <fstream>
+#include <vector>
+#include <string>
+#include <random>
+
+// OpenCV / DNN / Inference
+#include <opencv2/imgproc.hpp>
+#include <opencv2/opencv.hpp>
+#include <opencv2/dnn.hpp>
+
+struct Detection
+{
+    int class_id{0};
+    std::string className{};
+    float confidence{0.0};
+    cv::Scalar color{};
+    cv::Rect box{};
+};
+
+class Inference
+{
+public:
+    Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape = {640, 640}, const std::string &classesTxtFile = "", const bool &runWithCuda = true);
+    std::vector<Detection> runInference(const cv::Mat &input);
+
+private:
+    void loadClassesFromFile();
+    void loadOnnxNetwork();
+    cv::Mat formatToSquare(const cv::Mat &source);
+
+    std::string modelPath{};
+    std::string classesPath{};
+    bool cudaEnabled{};
+
+    std::vector<std::string> classes{"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"};
+
+    cv::Size2f modelShape{};
+
+    float modelConfidenceThreshold {0.25};
+    float modelScoreThreshold      {0.45};
+    float modelNMSThreshold        {0.50};
+
+    bool letterBoxForSquare = true;
+
+    cv::dnn::Net net;
+};
+
+#endif // INFERENCE_H

+ 70 - 0
examples/YOLOv8-CPP-Inference/main.cpp

@@ -0,0 +1,70 @@
+#include <iostream>
+#include <vector>
+#include <getopt.h>
+
+#include <opencv2/opencv.hpp>
+
+#include "inference.h"
+
+using namespace std;
+using namespace cv;
+
+int main(int argc, char **argv)
+{
+    std::string projectBasePath = "/home/user/ultralytics"; // Set your ultralytics base path
+
+    bool runOnGPU = true;
+
+    //
+    // Pass in either:
+    //
+    // "yolov8s.onnx" or "yolov5s.onnx"
+    //
+    // To run Inference with yolov8/yolov5 (ONNX)
+    //
+
+    // Note that in this example the classes are hard-coded and 'classes.txt' is a placeholder.
+    Inference inf(projectBasePath + "/yolov8s.onnx", cv::Size(640, 640), "classes.txt", runOnGPU);
+
+    std::vector<std::string> imageNames;
+    imageNames.push_back(projectBasePath + "/ultralytics/assets/bus.jpg");
+    imageNames.push_back(projectBasePath + "/ultralytics/assets/zidane.jpg");
+
+    for (int i = 0; i < imageNames.size(); ++i)
+    {
+        cv::Mat frame = cv::imread(imageNames[i]);
+
+        // Inference starts here...
+        std::vector<Detection> output = inf.runInference(frame);
+
+        int detections = output.size();
+        std::cout << "Number of detections:" << detections << std::endl;
+
+        for (int i = 0; i < detections; ++i)
+        {
+            Detection detection = output[i];
+
+            cv::Rect box = detection.box;
+            cv::Scalar color = detection.color;
+
+            // Detection box
+            cv::rectangle(frame, box, color, 2);
+
+            // Detection box text
+            std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);
+            cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
+            cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
+
+            cv::rectangle(frame, textBox, color, cv::FILLED);
+            cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
+        }
+        // Inference ends here...
+
+        // This is only for preview purposes
+        float scale = 0.8;
+        cv::resize(frame, frame, cv::Size(frame.cols*scale, frame.rows*scale));
+        cv::imshow("Inference", frame);
+
+        cv::waitKey(-1);
+    }
+}

+ 47 - 0
examples/YOLOv8-LibTorch-CPP-Inference/CMakeLists.txt

@@ -0,0 +1,47 @@
+cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
+
+project(yolov8_libtorch_example)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+
+# -------------- OpenCV --------------
+set(OpenCV_DIR "/path/to/opencv/lib/cmake/opencv4")
+find_package(OpenCV REQUIRED)
+
+message(STATUS "OpenCV library status:")
+message(STATUS "    config: ${OpenCV_DIR}")
+message(STATUS "    version: ${OpenCV_VERSION}")
+message(STATUS "    libraries: ${OpenCV_LIBS}")
+message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")
+
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+# -------------- libtorch --------------
+list(APPEND CMAKE_PREFIX_PATH "/path/to/libtorch")
+set(Torch_DIR "/path/to/libtorch/share/cmake/Torch")
+
+find_package(Torch REQUIRED)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
+message("${TORCH_LIBRARIES}")
+message("${TORCH_INCLUDE_DIRS}")
+
+# The following code block is suggested to be used on Windows.
+# According to https://github.com/pytorch/pytorch/issues/25457,
+# the DLLs need to be copied to avoid memory errors.
+# if (MSVC)
+#   file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll")
+#   add_custom_command(TARGET yolov8_libtorch_example
+#                      POST_BUILD
+#                      COMMAND ${CMAKE_COMMAND} -E copy_if_different
+#                      ${TORCH_DLLS}
+#                      $<TARGET_FILE_DIR:yolov8_libtorch_example>)
+# endif (MSVC)
+
+include_directories(${TORCH_INCLUDE_DIRS})
+
+add_executable(yolov8_libtorch_inference "${CMAKE_CURRENT_SOURCE_DIR}/main.cc")
+target_link_libraries(yolov8_libtorch_inference ${TORCH_LIBRARIES} ${OpenCV_LIBS})
+set_property(TARGET yolov8_libtorch_inference PROPERTY CXX_STANDARD 17)

+ 35 - 0
examples/YOLOv8-LibTorch-CPP-Inference/README.md

@@ -0,0 +1,35 @@
+# YOLOv8 LibTorch Inference C++
+
+This example demonstrates how to perform inference using YOLOv8 models in C++ with the LibTorch API.
+
+## Dependencies
+
+| Dependency   | Version  |
+| ------------ | -------- |
+| OpenCV       | >=4.0.0  |
+| C++ Standard | >=17     |
+| CMake        | >=3.18   |
+| LibTorch     | >=1.12.1 |
+
+## Usage
+
+```bash
+git clone https://github.com/ultralytics/ultralytics
+cd ultralytics
+pip install .
+cd examples/YOLOv8-LibTorch-CPP-Inference
+
+mkdir build
+cd build
+cmake ..
+make
+./yolov8_libtorch_inference
+```
+
+## Exporting YOLOv8
+
+To export YOLOv8 models:
+
+```bash
+yolo export model=yolov8s.pt imgsz=640 format=torchscript
+```
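+
+Before wiring the exported file into the C++ code, you can load it once from Python as a quick sanity check. This is a hedged sketch; the file name `yolov8s.torchscript` is assumed from the export command above, and the output layout may vary with export settings:
+
+```python
+import torch
+
+model = torch.jit.load("yolov8s.torchscript", map_location="cpu")
+model.eval()
+
+dummy = torch.zeros(1, 3, 640, 640)  # NCHW float input matching imgsz=640
+with torch.inference_mode():
+    out = model(dummy)
+print(out.shape if isinstance(out, torch.Tensor) else [type(o) for o in out])
+```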

+ 260 - 0
examples/YOLOv8-LibTorch-CPP-Inference/main.cc

@@ -0,0 +1,260 @@
+#include <iostream>
+
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <torch/torch.h>
+#include <torch/script.h>
+
+using torch::indexing::Slice;
+using torch::indexing::None;
+
+
+float generate_scale(cv::Mat& image, const std::vector<int>& target_size) {
+    int origin_w = image.cols;
+    int origin_h = image.rows;
+
+    int target_h = target_size[0];
+    int target_w = target_size[1];
+
+    float ratio_h = static_cast<float>(target_h) / static_cast<float>(origin_h);
+    float ratio_w = static_cast<float>(target_w) / static_cast<float>(origin_w);
+    float resize_scale = std::min(ratio_h, ratio_w);
+    return resize_scale;
+}
+
+
+float letterbox(cv::Mat &input_image, cv::Mat &output_image, const std::vector<int> &target_size) {
+    if (input_image.cols == target_size[1] && input_image.rows == target_size[0]) {
+        if (input_image.data == output_image.data) {
+            return 1.;
+        } else {
+            output_image = input_image.clone();
+            return 1.;
+        }
+    }
+
+    float resize_scale = generate_scale(input_image, target_size);
+    int new_shape_w = std::round(input_image.cols * resize_scale);
+    int new_shape_h = std::round(input_image.rows * resize_scale);
+    float padw = (target_size[1] - new_shape_w) / 2.;
+    float padh = (target_size[0] - new_shape_h) / 2.;
+
+    int top = std::round(padh - 0.1);
+    int bottom = std::round(padh + 0.1);
+    int left = std::round(padw - 0.1);
+    int right = std::round(padw + 0.1);
+
+    cv::resize(input_image, output_image,
+               cv::Size(new_shape_w, new_shape_h),
+               0, 0, cv::INTER_AREA);
+
+    cv::copyMakeBorder(output_image, output_image, top, bottom, left, right,
+                       cv::BORDER_CONSTANT, cv::Scalar(114.));
+    return resize_scale;
+}
+
+
+torch::Tensor xyxy2xywh(const torch::Tensor& x) {
+    auto y = torch::empty_like(x);
+    y.index_put_({"...", 0}, (x.index({"...", 0}) + x.index({"...", 2})).div(2));
+    y.index_put_({"...", 1}, (x.index({"...", 1}) + x.index({"...", 3})).div(2));
+    y.index_put_({"...", 2}, x.index({"...", 2}) - x.index({"...", 0}));
+    y.index_put_({"...", 3}, x.index({"...", 3}) - x.index({"...", 1}));
+    return y;
+}
+
+
+torch::Tensor xywh2xyxy(const torch::Tensor& x) {
+    auto y = torch::empty_like(x);
+    auto dw = x.index({"...", 2}).div(2);
+    auto dh = x.index({"...", 3}).div(2);
+    y.index_put_({"...", 0}, x.index({"...", 0}) - dw);
+    y.index_put_({"...", 1}, x.index({"...", 1}) - dh);
+    y.index_put_({"...", 2}, x.index({"...", 0}) + dw);
+    y.index_put_({"...", 3}, x.index({"...", 1}) + dh);
+    return y;
+}
+
+
+// Reference: https://github.com/pytorch/vision/blob/main/torchvision/csrc/ops/cpu/nms_kernel.cpp
+torch::Tensor nms(const torch::Tensor& bboxes, const torch::Tensor& scores, float iou_threshold) {
+    if (bboxes.numel() == 0)
+        return torch::empty({0}, bboxes.options().dtype(torch::kLong));
+
+    auto x1_t = bboxes.select(1, 0).contiguous();
+    auto y1_t = bboxes.select(1, 1).contiguous();
+    auto x2_t = bboxes.select(1, 2).contiguous();
+    auto y2_t = bboxes.select(1, 3).contiguous();
+
+    torch::Tensor areas_t = (x2_t - x1_t) * (y2_t - y1_t);
+
+    auto order_t = std::get<1>(
+        scores.sort(/*stable=*/true, /*dim=*/0, /* descending=*/true));
+
+    auto ndets = bboxes.size(0);
+    torch::Tensor suppressed_t = torch::zeros({ndets}, bboxes.options().dtype(torch::kByte));
+    torch::Tensor keep_t = torch::zeros({ndets}, bboxes.options().dtype(torch::kLong));
+
+    auto suppressed = suppressed_t.data_ptr<uint8_t>();
+    auto keep = keep_t.data_ptr<int64_t>();
+    auto order = order_t.data_ptr<int64_t>();
+    auto x1 = x1_t.data_ptr<float>();
+    auto y1 = y1_t.data_ptr<float>();
+    auto x2 = x2_t.data_ptr<float>();
+    auto y2 = y2_t.data_ptr<float>();
+    auto areas = areas_t.data_ptr<float>();
+
+    int64_t num_to_keep = 0;
+
+    for (int64_t _i = 0; _i < ndets; _i++) {
+        auto i = order[_i];
+        if (suppressed[i] == 1)
+            continue;
+        keep[num_to_keep++] = i;
+        auto ix1 = x1[i];
+        auto iy1 = y1[i];
+        auto ix2 = x2[i];
+        auto iy2 = y2[i];
+        auto iarea = areas[i];
+
+        for (int64_t _j = _i + 1; _j < ndets; _j++) {
+            auto j = order[_j];
+            if (suppressed[j] == 1)
+                continue;
+            auto xx1 = std::max(ix1, x1[j]);
+            auto yy1 = std::max(iy1, y1[j]);
+            auto xx2 = std::min(ix2, x2[j]);
+            auto yy2 = std::min(iy2, y2[j]);
+
+            auto w = std::max(static_cast<float>(0), xx2 - xx1);
+            auto h = std::max(static_cast<float>(0), yy2 - yy1);
+            auto inter = w * h;
+            auto ovr = inter / (iarea + areas[j] - inter);
+            if (ovr > iou_threshold)
+                suppressed[j] = 1;
+        }
+    }
+    return keep_t.narrow(0, 0, num_to_keep);
+}
+
+
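+// Filter predictions by confidence, convert boxes to xyxy, and apply class-aware NMS (class-offset trick).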
+torch::Tensor non_max_suppression(torch::Tensor& prediction, float conf_thres = 0.25, float iou_thres = 0.45, int max_det = 300) {
+    auto bs = prediction.size(0);
+    auto nc = prediction.size(1) - 4;
+    auto nm = prediction.size(1) - nc - 4;
+    auto mi = 4 + nc;
+    auto xc = prediction.index({Slice(), Slice(4, mi)}).amax(1) > conf_thres;
+
+    prediction = prediction.transpose(-1, -2);
+    prediction.index_put_({"...", Slice({None, 4})}, xywh2xyxy(prediction.index({"...", Slice(None, 4)})));
+
+    std::vector<torch::Tensor> output;
+    for (int i = 0; i < bs; i++) {
+        output.push_back(torch::zeros({0, 6 + nm}, prediction.device()));
+    }
+
+    for (int xi = 0; xi < prediction.size(0); xi++) {
+        auto x = prediction[xi];
+        x = x.index({xc[xi]});
+        auto x_split = x.split({4, nc, nm}, 1);
+        auto box = x_split[0], cls = x_split[1], mask = x_split[2];
+        auto [conf, j] = cls.max(1, true);
+        x = torch::cat({box, conf, j.toType(torch::kFloat), mask}, 1);
+        x = x.index({conf.view(-1) > conf_thres});
+        int n = x.size(0);
+        if (!n) { continue; }
+
+        // NMS
+        auto c = x.index({Slice(), Slice{5, 6}}) * 7680;
+        auto boxes = x.index({Slice(), Slice(None, 4)}) + c;
+        auto scores = x.index({Slice(), 4});
+        auto i = nms(boxes, scores, iou_thres);
+        i = i.index({Slice(None, max_det)});
+        output[xi] = x.index({i});
+    }
+
+    return torch::stack(output);
+}
+
+
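+// Clamp box coordinates to the image boundaries (shape = {height, width}).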
+torch::Tensor clip_boxes(torch::Tensor& boxes, const std::vector<int>& shape) {
+    boxes.index_put_({"...", 0}, boxes.index({"...", 0}).clamp(0, shape[1]));
+    boxes.index_put_({"...", 1}, boxes.index({"...", 1}).clamp(0, shape[0]));
+    boxes.index_put_({"...", 2}, boxes.index({"...", 2}).clamp(0, shape[1]));
+    boxes.index_put_({"...", 3}, boxes.index({"...", 3}).clamp(0, shape[0]));
+    return boxes;
+}
+
+
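+// Rescale boxes from the letterboxed image size (img1_shape) back to the original image size (img0_shape).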
+torch::Tensor scale_boxes(const std::vector<int>& img1_shape, torch::Tensor& boxes, const std::vector<int>& img0_shape) {
+    auto gain = (std::min)((float)img1_shape[0] / img0_shape[0], (float)img1_shape[1] / img0_shape[1]);
+    auto pad0 = std::round((float)(img1_shape[1] - img0_shape[1] * gain) / 2. - 0.1);
+    auto pad1 = std::round((float)(img1_shape[0] - img0_shape[0] * gain) / 2. - 0.1);
+
+    boxes.index_put_({"...", 0}, boxes.index({"...", 0}) - pad0);
+    boxes.index_put_({"...", 2}, boxes.index({"...", 2}) - pad0);
+    boxes.index_put_({"...", 1}, boxes.index({"...", 1}) - pad1);
+    boxes.index_put_({"...", 3}, boxes.index({"...", 3}) - pad1);
+    boxes.index_put_({"...", Slice(None, 4)}, boxes.index({"...", Slice(None, 4)}).div(gain));
+    return boxes;
+}
+
+
+int main() {
+    // Device
+    torch::Device device(torch::cuda::is_available() ? torch::kCUDA : torch::kCPU);
+
+    // Note that in this example the classes are hard-coded
+    std::vector<std::string> classes {"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant",
+                                      "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra",
+                                      "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
+                                      "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife",
+                                      "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
+                                      "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
+                                      "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"};
+
+    try {
+        // Load the model (e.g. yolov8s.torchscript)
+        std::string model_path = "/path/to/yolov8s.torchscript";
+        torch::jit::script::Module yolo_model;
+        yolo_model = torch::jit::load(model_path);
+        yolo_model.eval();
+        yolo_model.to(device, torch::kFloat32);
+
+        // Load image and preprocess
+        cv::Mat image = cv::imread("/path/to/bus.jpg");
+        cv::Mat input_image;
+        letterbox(image, input_image, {640, 640});
+        cv::cvtColor(input_image, input_image, cv::COLOR_BGR2RGB);
+
+        torch::Tensor image_tensor = torch::from_blob(input_image.data, {input_image.rows, input_image.cols, 3}, torch::kByte).to(device);
+        image_tensor = image_tensor.toType(torch::kFloat32).div(255);
+        image_tensor = image_tensor.permute({2, 0, 1});
+        image_tensor = image_tensor.unsqueeze(0);
+        std::vector<torch::jit::IValue> inputs {image_tensor};
+
+        // Inference
+        torch::Tensor output = yolo_model.forward(inputs).toTensor().cpu();
+
+        // NMS
+        auto keep = non_max_suppression(output)[0];
+        auto boxes = keep.index({Slice(), Slice(None, 4)});
+        keep.index_put_({Slice(), Slice(None, 4)}, scale_boxes({input_image.rows, input_image.cols}, boxes, {image.rows, image.cols}));
+
+        // Show the results
+        for (int i = 0; i < keep.size(0); i++) {
+            int x1 = keep[i][0].item().toFloat();
+            int y1 = keep[i][1].item().toFloat();
+            int x2 = keep[i][2].item().toFloat();
+            int y2 = keep[i][3].item().toFloat();
+            float conf = keep[i][4].item().toFloat();
+            int cls = keep[i][5].item().toInt();
+            std::cout << "Rect: [" << x1 << "," << y1 << "," << x2 << "," << y2 << "]  Conf: " << conf << "  Class: " << classes[cls] << std::endl;
+        }
+    } catch (const c10::Error& e) {
+        std::cout << e.msg() << std::endl;
+    }
+
+    return 0;
+}

+ 99 - 0
examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt

@@ -0,0 +1,99 @@
+cmake_minimum_required(VERSION 3.5)
+
+set(PROJECT_NAME Yolov8OnnxRuntimeCPPInference)
+project(${PROJECT_NAME} VERSION 0.0.1 LANGUAGES CXX)
+
+
+# -------------- Support C++17 for using filesystem  ------------------#
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS ON)
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+
+# -------------- OpenCV  ------------------#
+find_package(OpenCV REQUIRED)
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+
+# -------------- Compile CUDA for FP16 inference if needed  ------------------#
+option(USE_CUDA "Enable CUDA support" ON)
+if (NOT APPLE AND USE_CUDA)
+    find_package(CUDA REQUIRED)
+    include_directories(${CUDA_INCLUDE_DIRS})
+    add_definitions(-DUSE_CUDA)
+else ()
+    set(USE_CUDA OFF)
+endif ()
+
+# -------------- ONNXRUNTIME  ------------------#
+
+# Set ONNXRUNTIME_VERSION
+set(ONNXRUNTIME_VERSION 1.15.1)
+
+if (WIN32)
+    if (USE_CUDA)
+        set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-win-x64-gpu-${ONNXRUNTIME_VERSION}")
+    else ()
+        set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-win-x64-${ONNXRUNTIME_VERSION}")
+    endif ()
+elseif (LINUX)
+    if (USE_CUDA)
+        set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}")
+    else ()
+        set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}")
+    endif ()
+elseif (APPLE)
+    set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-osx-arm64-${ONNXRUNTIME_VERSION}")
+    # Apple X64 binary
+    # set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-osx-x64-${ONNXRUNTIME_VERSION}")
+    # Apple Universal binary
+    # set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-osx-universal2-${ONNXRUNTIME_VERSION}")
+else ()
+    message(SEND_ERROR "Variable ONNXRUNTIME_ROOT is not set properly. Please check if your cmake project \
+    is not compiled with `-D WIN32=TRUE`, `-D LINUX=TRUE`, or `-D APPLE=TRUE`!")
+endif ()
+
+include_directories(${PROJECT_NAME} ${ONNXRUNTIME_ROOT}/include)
+
+set(PROJECT_SOURCES
+        main.cpp
+        inference.h
+        inference.cpp
+)
+
+add_executable(${PROJECT_NAME} ${PROJECT_SOURCES})
+
+if (WIN32)
+    target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${ONNXRUNTIME_ROOT}/lib/onnxruntime.lib)
+    if (USE_CUDA)
+        target_link_libraries(${PROJECT_NAME} ${CUDA_LIBRARIES})
+    endif ()
+elseif (LINUX)
+    target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${ONNXRUNTIME_ROOT}/lib/libonnxruntime.so)
+    if (USE_CUDA)
+        target_link_libraries(${PROJECT_NAME} ${CUDA_LIBRARIES})
+    endif ()
+elseif (APPLE)
+    target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${ONNXRUNTIME_ROOT}/lib/libonnxruntime.dylib)
+endif ()
+
+# On Windows, copy onnxruntime.dll to the same folder as the executable file
+if (WIN32)
+    add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different
+            "${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
+            $<TARGET_FILE_DIR:${PROJECT_NAME}>)
+endif ()
+
+# Download https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco.yaml
+# and put it in the same folder as the executable file
+configure_file(coco.yaml ${CMAKE_CURRENT_BINARY_DIR}/coco.yaml COPYONLY)
+
+# Copy the yolov8n.onnx file to the same folder as the executable file
+configure_file(yolov8n.onnx ${CMAKE_CURRENT_BINARY_DIR}/yolov8n.onnx COPYONLY)
+
+# Create a folder named images in the same folder as the executable file
+add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/images
+)

+ 120 - 0
examples/YOLOv8-ONNXRuntime-CPP/README.md

@@ -0,0 +1,120 @@
+# YOLOv8 OnnxRuntime C++
+
+<img alt="C++" src="https://img.shields.io/badge/C++-17-blue.svg?style=flat&logo=c%2B%2B"> <img alt="Onnx-runtime" src="https://img.shields.io/badge/OnnxRuntime-717272.svg?logo=Onnx&logoColor=white">
+
+This example demonstrates how to perform inference using YOLOv8 in C++ with ONNX Runtime and OpenCV's API.
+
+## Benefits ✨
+
+- Friendly for deployment in the industrial sector.
+- Faster than OpenCV's DNN inference on both CPU and GPU.
+- Supports FP32 and FP16 CUDA acceleration.
+
+## Note ☕
+
+1. Thanks to Ultralytics' latest release, a `Transpose` op has been added to the YOLOv8 model, which gives YOLOv8 and YOLOv5 the same output shape. Therefore, you can run inference with YOLOv5/v7/v8 models via this project, as sketched below.
+
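+As a rough, self-contained illustration (the `[cx, cy, w, h, class scores...]` row layout is assumed for a standard COCO detection export; see `inference.cpp` in this example for the real post-processing), one prediction row can be decoded like this:
+
+```c++
+// Hypothetical sketch: decode a single prediction row shared by YOLOv5/v7/v8-style
+// outputs, laid out as [cx, cy, w, h, class_0 ... class_{nc-1}] (3 classes for brevity).
+#include <algorithm>
+#include <array>
+#include <iostream>
+
+int main() {
+    std::array<float, 7> row{320.f, 240.f, 100.f, 80.f, 0.05f, 0.90f, 0.10f};
+    const int num_classes = 3;
+    const float* scores = row.data() + 4;
+    const float* best = std::max_element(scores, scores + num_classes);
+    int class_id = static_cast<int>(best - scores);
+    float conf = *best;
+    if (conf > 0.25f) {  // confidence threshold
+        float x1 = row[0] - row[2] / 2, y1 = row[1] - row[3] / 2;
+        std::cout << "class " << class_id << " conf " << conf << " box ["
+                  << x1 << ", " << y1 << ", " << x1 + row[2] << ", " << y1 + row[3] << "]\n";
+    }
+    return 0;
+}
+```
+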
+## Exporting YOLOv8 Models 📦
+
+To export YOLOv8 models, use the following Python script:
+
+```python
+from ultralytics import YOLO
+
+# Load a YOLOv8 model
+model = YOLO("yolov8n.pt")
+
+# Export the model
+model.export(format="onnx", opset=12, simplify=True, dynamic=False, imgsz=640)
+```
+
+Alternatively, you can export the model with the following command in the terminal:
+
+```bash
+yolo export model=yolov8n.pt opset=12 simplify=True dynamic=False format=onnx imgsz=640,640
+```
+
+## Exporting YOLOv8 FP16 Models 📦
+
+```python
+import onnx
+from onnxconverter_common import float16
+
+model = onnx.load(R"YOUR_ONNX_PATH")
+model_fp16 = float16.convert_float_to_float16(model)
+onnx.save(model_fp16, R"YOUR_FP16_ONNX_PATH")
+```
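+
+To run such an FP16 model with this example, select the corresponding half-precision model type when creating the session (a sketch based on `DL_INIT_PARAM` from `inference.h`; the path is a placeholder, and FP16 inference requires building with CUDA):
+
+```c++
+// Sketch: configure the session for an FP16 detection model (requires USE_CUDA).
+DL_INIT_PARAM params;
+params.modelPath = "yolov8n_fp16.onnx";  // hypothetical path to your converted model
+params.modelType = YOLO_DETECT_V8_HALF;  // FP16 counterpart of YOLO_DETECT_V8
+params.imgSize = { 640, 640 };
+params.cudaEnable = true;                // the FP16 path is only compiled with USE_CUDA
+```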
+
+## Download COCO.yaml file 📂
+
+In order to run the example, you also need to download `coco.yaml`. You can download the file manually from [here](https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco.yaml) and place it next to the executable.
+
+## Dependencies ⚙️
+
+| Dependency                       | Version       |
+| -------------------------------- | ------------- |
+| Onnxruntime(linux,windows,macos) | >=1.14.1      |
+| OpenCV                           | >=4.0.0       |
+| C++ Standard                     | >=17          |
+| Cmake                            | >=3.5         |
+| Cuda (Optional)                  | >=11.4 \<12.0 |
+| cuDNN (Cuda required)            | =8            |
+
+Note: The dependency on C++17 is due to the usage of the C++17 filesystem feature.
+
+Note (2): Due to ONNX Runtime, we need to use CUDA 11 and cuDNN 8. Keep in mind that this requirement might change in the future.
+
+## Build 🛠️
+
+1. Clone the repository to your local machine.
+
+2. Navigate to the root directory of the repository.
+
+3. Create a build directory and navigate to it:
+
+   ```console
+   mkdir build && cd build
+   ```
+
+4. Run CMake to generate the build files:
+
+   ```console
+   cmake ..
+   ```
+
+   **Notice**:
+
+   If you encounter an error indicating that the `ONNXRUNTIME_ROOT` variable is not set correctly, you can resolve this by building the project using the appropriate command tailored to your system.
+
+   ```console
+   # compiled in a win32 system
+   cmake -D WIN32=TRUE ..
+   # compiled in a linux system
+   cmake -D LINUX=TRUE ..
+   # compiled in an apple system
+   cmake -D APPLE=TRUE ..
+   ```
+
+5. Build the project:
+
+   ```console
+   make
+   ```
+
+6. The built executable should now be located in the `build` directory.
+
+## Usage 🚀
+
+```c++
+// Change the parameters as you like
+// Pay attention to your device and the ONNX model type (FP32 or FP16)
+DL_INIT_PARAM params;
+params.rectConfidenceThreshold = 0.1;
+params.iouThreshold = 0.5;
+params.modelPath = "yolov8n.onnx";
+params.imgSize = { 640, 640 };
+params.cudaEnable = true;
+params.modelType = YOLO_DETECT_V8;
+yoloDetector->CreateSession(params);
+Detector(yoloDetector);
+```
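+
+For a more complete end-to-end flow, the sketch below mirrors `DetectTest()` and `ReadCocoYaml()` from `main.cpp` in this example (the model path, thresholds, and `images` folder are placeholders you should adapt):
+
+```c++
+// Sketch: create the detector, load class names from coco.yaml, and run on ./images.
+YOLO_V8* yoloDetector = new YOLO_V8;
+ReadCocoYaml(yoloDetector);           // fills yoloDetector->classes from coco.yaml
+
+DL_INIT_PARAM params;
+params.rectConfidenceThreshold = 0.1;
+params.iouThreshold = 0.5;
+params.modelPath = "yolov8n.onnx";
+params.imgSize = { 640, 640 };
+#ifdef USE_CUDA
+params.cudaEnable = true;             // GPU FP32 inference
+#else
+params.cudaEnable = false;            // CPU inference
+#endif
+params.modelType = YOLO_DETECT_V8;
+
+yoloDetector->CreateSession(params);
+Detector(yoloDetector);               // draws and displays detections for images in ./images
+```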

+ 375 - 0
examples/YOLOv8-ONNXRuntime-CPP/inference.cpp

@@ -0,0 +1,375 @@
+#include "inference.h"
+#include <regex>
+
+#define benchmark
+#define min(a,b)            (((a) < (b)) ? (a) : (b))
+YOLO_V8::YOLO_V8() {
+
+}
+
+
+YOLO_V8::~YOLO_V8() {
+    delete session;
+}
+
+#ifdef USE_CUDA
+namespace Ort
+{
+    template<>
+    struct TypeToTensorType<half> { static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16; };
+}
+#endif
+
+
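+// Convert an HWC 8-bit image into a CHW float blob normalized to [0, 1].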
+template<typename T>
+char* BlobFromImage(cv::Mat& iImg, T& iBlob) {
+    int channels = iImg.channels();
+    int imgHeight = iImg.rows;
+    int imgWidth = iImg.cols;
+
+    for (int c = 0; c < channels; c++)
+    {
+        for (int h = 0; h < imgHeight; h++)
+        {
+            for (int w = 0; w < imgWidth; w++)
+            {
+                iBlob[c * imgWidth * imgHeight + h * imgWidth + w] = typename std::remove_pointer<T>::type(
+                    (iImg.at<cv::Vec3b>(h, w)[c]) / 255.0f);
+            }
+        }
+    }
+    return RET_OK;
+}
+
+
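+// Convert the input to RGB and resize it according to the model type (letterbox for detect/pose, center-crop for classify).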
+char* YOLO_V8::PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg)
+{
+    if (iImg.channels() == 3)
+    {
+        oImg = iImg.clone();
+        cv::cvtColor(oImg, oImg, cv::COLOR_BGR2RGB);
+    }
+    else
+    {
+        cv::cvtColor(iImg, oImg, cv::COLOR_GRAY2RGB);
+    }
+
+    switch (modelType)
+    {
+    case YOLO_DETECT_V8:
+    case YOLO_POSE:
+    case YOLO_DETECT_V8_HALF:
+    case YOLO_POSE_V8_HALF://LetterBox
+    {
+        if (iImg.cols >= iImg.rows)
+        {
+            resizeScales = iImg.cols / (float)iImgSize.at(0);
+            cv::resize(oImg, oImg, cv::Size(iImgSize.at(0), int(iImg.rows / resizeScales)));
+        }
+        else
+        {
+            resizeScales = iImg.rows / (float)iImgSize.at(0);
+            cv::resize(oImg, oImg, cv::Size(int(iImg.cols / resizeScales), iImgSize.at(1)));
+        }
+        cv::Mat tempImg = cv::Mat::zeros(iImgSize.at(0), iImgSize.at(1), CV_8UC3);
+        oImg.copyTo(tempImg(cv::Rect(0, 0, oImg.cols, oImg.rows)));
+        oImg = tempImg;
+        break;
+    }
+    case YOLO_CLS://CenterCrop
+    {
+        int h = iImg.rows;
+        int w = iImg.cols;
+        int m = min(h, w);
+        int top = (h - m) / 2;
+        int left = (w - m) / 2;
+        cv::resize(oImg(cv::Rect(left, top, m, m)), oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));
+        break;
+    }
+    }
+    return RET_OK;
+}
+
+
+char* YOLO_V8::CreateSession(DL_INIT_PARAM& iParams) {
+    char* Ret = RET_OK;
+    std::regex pattern("[\u4e00-\u9fa5]");
+    bool result = std::regex_search(iParams.modelPath, pattern);
+    if (result)
+    {
+        Ret = "[YOLO_V8]:Your model path is error.Change your model path without chinese characters.";
+        std::cout << Ret << std::endl;
+        return Ret;
+    }
+    try
+    {
+        rectConfidenceThreshold = iParams.rectConfidenceThreshold;
+        iouThreshold = iParams.iouThreshold;
+        imgSize = iParams.imgSize;
+        modelType = iParams.modelType;
+        env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "Yolo");
+        Ort::SessionOptions sessionOption;
+        if (iParams.cudaEnable)
+        {
+            cudaEnable = iParams.cudaEnable;
+            OrtCUDAProviderOptions cudaOption;
+            cudaOption.device_id = 0;
+            sessionOption.AppendExecutionProvider_CUDA(cudaOption);
+        }
+        sessionOption.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
+        sessionOption.SetIntraOpNumThreads(iParams.intraOpNumThreads);
+        sessionOption.SetLogSeverityLevel(iParams.logSeverityLevel);
+
+#ifdef _WIN32
+        int ModelPathSize = MultiByteToWideChar(CP_UTF8, 0, iParams.modelPath.c_str(), static_cast<int>(iParams.modelPath.length()), nullptr, 0);
+        wchar_t* wide_cstr = new wchar_t[ModelPathSize + 1];
+        MultiByteToWideChar(CP_UTF8, 0, iParams.modelPath.c_str(), static_cast<int>(iParams.modelPath.length()), wide_cstr, ModelPathSize);
+        wide_cstr[ModelPathSize] = L'\0';
+        const wchar_t* modelPath = wide_cstr;
+#else
+        const char* modelPath = iParams.modelPath.c_str();
+#endif // _WIN32
+
+        session = new Ort::Session(env, modelPath, sessionOption);
+        Ort::AllocatorWithDefaultOptions allocator;
+        size_t inputNodesNum = session->GetInputCount();
+        for (size_t i = 0; i < inputNodesNum; i++)
+        {
+            Ort::AllocatedStringPtr input_node_name = session->GetInputNameAllocated(i, allocator);
+            char* temp_buf = new char[50];
+            strcpy(temp_buf, input_node_name.get());
+            inputNodeNames.push_back(temp_buf);
+        }
+        size_t OutputNodesNum = session->GetOutputCount();
+        for (size_t i = 0; i < OutputNodesNum; i++)
+        {
+            Ort::AllocatedStringPtr output_node_name = session->GetOutputNameAllocated(i, allocator);
+            char* temp_buf = new char[10];
+            strcpy(temp_buf, output_node_name.get());
+            outputNodeNames.push_back(temp_buf);
+        }
+        options = Ort::RunOptions{ nullptr };
+        WarmUpSession();
+        return RET_OK;
+    }
+    catch (const std::exception& e)
+    {
+        const char* str1 = "[YOLO_V8]:";
+        const char* str2 = e.what();
+        std::string result = std::string(str1) + std::string(str2);
+        char* merged = new char[result.length() + 1];
+        std::strcpy(merged, result.c_str());
+        std::cout << merged << std::endl;
+        delete[] merged;
+        return "[YOLO_V8]:Create session failed.";
+    }
+
+}
+
+
+char* YOLO_V8::RunSession(cv::Mat& iImg, std::vector<DL_RESULT>& oResult) {
+#ifdef benchmark
+    clock_t starttime_1 = clock();
+#endif // benchmark
+
+    char* Ret = RET_OK;
+    cv::Mat processedImg;
+    PreProcess(iImg, imgSize, processedImg);
+    if (modelType < 4)
+    {
+        float* blob = new float[processedImg.total() * 3];
+        BlobFromImage(processedImg, blob);
+        std::vector<int64_t> inputNodeDims = { 1, 3, imgSize.at(0), imgSize.at(1) };
+        TensorProcess(starttime_1, iImg, blob, inputNodeDims, oResult);
+    }
+    else
+    {
+#ifdef USE_CUDA
+        half* blob = new half[processedImg.total() * 3];
+        BlobFromImage(processedImg, blob);
+        std::vector<int64_t> inputNodeDims = { 1,3,imgSize.at(0),imgSize.at(1) };
+        TensorProcess(starttime_1, iImg, blob, inputNodeDims, oResult);
+#endif
+    }
+
+    return Ret;
+}
+
+
+template<typename N>
+char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims,
+    std::vector<DL_RESULT>& oResult) {
+    Ort::Value inputTensor = Ort::Value::CreateTensor<typename std::remove_pointer<N>::type>(
+        Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),
+        inputNodeDims.data(), inputNodeDims.size());
+#ifdef benchmark
+    clock_t starttime_2 = clock();
+#endif // benchmark
+    auto outputTensor = session->Run(options, inputNodeNames.data(), &inputTensor, 1, outputNodeNames.data(),
+        outputNodeNames.size());
+#ifdef benchmark
+    clock_t starttime_3 = clock();
+#endif // benchmark
+
+    Ort::TypeInfo typeInfo = outputTensor.front().GetTypeInfo();
+    auto tensor_info = typeInfo.GetTensorTypeAndShapeInfo();
+    std::vector<int64_t> outputNodeDims = tensor_info.GetShape();
+    auto output = outputTensor.front().GetTensorMutableData<typename std::remove_pointer<N>::type>();
+    delete[] blob;
+    switch (modelType)
+    {
+    case YOLO_DETECT_V8:
+    case YOLO_DETECT_V8_HALF:
+    {
+        int signalResultNum = outputNodeDims[1];//84
+        int strideNum = outputNodeDims[2];//8400
+        std::vector<int> class_ids;
+        std::vector<float> confidences;
+        std::vector<cv::Rect> boxes;
+        cv::Mat rawData;
+        if (modelType == YOLO_DETECT_V8)
+        {
+            // FP32
+            rawData = cv::Mat(signalResultNum, strideNum, CV_32F, output);
+        }
+        else
+        {
+            // FP16
+            rawData = cv::Mat(signalResultNum, strideNum, CV_16F, output);
+            rawData.convertTo(rawData, CV_32F);
+        }
+        // Note:
+        // Ultralytics adds a transpose operator to the output of the YOLOv8 model, which makes YOLOv8/v5/v7 share the same output shape.
+        // https://github.com/ultralytics/assets/releases/download/v8.3.0/yolov8n.pt
+        rawData = rawData.t();
+
+        float* data = (float*)rawData.data;
+
+        for (int i = 0; i < strideNum; ++i)
+        {
+            float* classesScores = data + 4;
+            cv::Mat scores(1, this->classes.size(), CV_32FC1, classesScores);
+            cv::Point class_id;
+            double maxClassScore;
+            cv::minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);
+            if (maxClassScore > rectConfidenceThreshold)
+            {
+                confidences.push_back(maxClassScore);
+                class_ids.push_back(class_id.x);
+                float x = data[0];
+                float y = data[1];
+                float w = data[2];
+                float h = data[3];
+
+                int left = int((x - 0.5 * w) * resizeScales);
+                int top = int((y - 0.5 * h) * resizeScales);
+
+                int width = int(w * resizeScales);
+                int height = int(h * resizeScales);
+
+                boxes.push_back(cv::Rect(left, top, width, height));
+            }
+            data += signalResultNum;
+        }
+        std::vector<int> nmsResult;
+        cv::dnn::NMSBoxes(boxes, confidences, rectConfidenceThreshold, iouThreshold, nmsResult);
+        for (int i = 0; i < nmsResult.size(); ++i)
+        {
+            int idx = nmsResult[i];
+            DL_RESULT result;
+            result.classId = class_ids[idx];
+            result.confidence = confidences[idx];
+            result.box = boxes[idx];
+            oResult.push_back(result);
+        }
+
+#ifdef benchmark
+        clock_t starttime_4 = clock();
+        double pre_process_time = (double)(starttime_2 - starttime_1) / CLOCKS_PER_SEC * 1000;
+        double process_time = (double)(starttime_3 - starttime_2) / CLOCKS_PER_SEC * 1000;
+        double post_process_time = (double)(starttime_4 - starttime_3) / CLOCKS_PER_SEC * 1000;
+        if (cudaEnable)
+        {
+            std::cout << "[YOLO_V8(CUDA)]: " << pre_process_time << "ms pre-process, " << process_time << "ms inference, " << post_process_time << "ms post-process." << std::endl;
+        }
+        else
+        {
+            std::cout << "[YOLO_V8(CPU)]: " << pre_process_time << "ms pre-process, " << process_time << "ms inference, " << post_process_time << "ms post-process." << std::endl;
+        }
+#endif // benchmark
+
+        break;
+    }
+    case YOLO_CLS:
+    case YOLO_CLS_HALF:
+    {
+        cv::Mat rawData;
+        if (modelType == YOLO_CLS) {
+            // FP32
+            rawData = cv::Mat(1, this->classes.size(), CV_32F, output);
+        } else {
+            // FP16
+            rawData = cv::Mat(1, this->classes.size(), CV_16F, output);
+            rawData.convertTo(rawData, CV_32F);
+        }
+        float *data = (float *) rawData.data;
+
+        DL_RESULT result;
+        for (int i = 0; i < this->classes.size(); i++)
+        {
+            result.classId = i;
+            result.confidence = data[i];
+            oResult.push_back(result);
+        }
+        break;
+    }
+    default:
+        std::cout << "[YOLO_V8]: " << "Not support model type." << std::endl;
+    }
+    return RET_OK;
+
+}
+
+
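+// Run a dummy inference to warm up the session (mainly useful when CUDA is enabled).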
+char* YOLO_V8::WarmUpSession() {
+    clock_t starttime_1 = clock();
+    cv::Mat iImg = cv::Mat(cv::Size(imgSize.at(0), imgSize.at(1)), CV_8UC3);
+    cv::Mat processedImg;
+    PreProcess(iImg, imgSize, processedImg);
+    if (modelType < 4)
+    {
+        float* blob = new float[iImg.total() * 3];
+        BlobFromImage(processedImg, blob);
+        std::vector<int64_t> YOLO_input_node_dims = { 1, 3, imgSize.at(0), imgSize.at(1) };
+        Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
+            Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),
+            YOLO_input_node_dims.data(), YOLO_input_node_dims.size());
+        auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(),
+            outputNodeNames.size());
+        delete[] blob;
+        clock_t starttime_4 = clock();
+        double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
+        if (cudaEnable)
+        {
+            std::cout << "[YOLO_V8(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
+        }
+    }
+    else
+    {
+#ifdef USE_CUDA
+        half* blob = new half[iImg.total() * 3];
+        BlobFromImage(processedImg, blob);
+        std::vector<int64_t> YOLO_input_node_dims = { 1,3,imgSize.at(0),imgSize.at(1) };
+        Ort::Value input_tensor = Ort::Value::CreateTensor<half>(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1), YOLO_input_node_dims.data(), YOLO_input_node_dims.size());
+        auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(), outputNodeNames.size());
+        delete[] blob;
+        clock_t starttime_4 = clock();
+        double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
+        if (cudaEnable)
+        {
+            std::cout << "[YOLO_V8(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
+        }
+#endif
+    }
+    return RET_OK;
+}

+ 94 - 0
examples/YOLOv8-ONNXRuntime-CPP/inference.h

@@ -0,0 +1,94 @@
+#pragma once
+
+#define    RET_OK nullptr
+
+#ifdef _WIN32
+#include <Windows.h>
+#include <direct.h>
+#include <io.h>
+#endif
+
+#include <string>
+#include <vector>
+#include <cstdio>
+#include <opencv2/opencv.hpp>
+#include "onnxruntime_cxx_api.h"
+
+#ifdef USE_CUDA
+#include <cuda_fp16.h>
+#endif
+
+
+enum MODEL_TYPE
+{
+    //FLOAT32 MODEL
+    YOLO_DETECT_V8 = 1,
+    YOLO_POSE = 2,
+    YOLO_CLS = 3,
+
+    //FLOAT16 MODEL
+    YOLO_DETECT_V8_HALF = 4,
+    YOLO_POSE_V8_HALF = 5,
+    YOLO_CLS_HALF = 6
+};
+
+
+typedef struct _DL_INIT_PARAM
+{
+    std::string modelPath;
+    MODEL_TYPE modelType = YOLO_DETECT_V8;
+    std::vector<int> imgSize = { 640, 640 };
+    float rectConfidenceThreshold = 0.6;
+    float iouThreshold = 0.5;
+    int keyPointsNum = 2; // Note: number of keypoints for pose
+    bool cudaEnable = false;
+    int logSeverityLevel = 3;
+    int intraOpNumThreads = 1;
+} DL_INIT_PARAM;
+
+
+typedef struct _DL_RESULT
+{
+    int classId;
+    float confidence;
+    cv::Rect box;
+    std::vector<cv::Point2f> keyPoints;
+} DL_RESULT;
+
+
+class YOLO_V8
+{
+public:
+    YOLO_V8();
+
+    ~YOLO_V8();
+
+public:
+    char* CreateSession(DL_INIT_PARAM& iParams);
+
+    char* RunSession(cv::Mat& iImg, std::vector<DL_RESULT>& oResult);
+
+    char* WarmUpSession();
+
+    template<typename N>
+    char* TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims,
+        std::vector<DL_RESULT>& oResult);
+
+    char* PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg);
+
+    std::vector<std::string> classes{};
+
+private:
+    Ort::Env env;
+    Ort::Session* session;
+    bool cudaEnable;
+    Ort::RunOptions options;
+    std::vector<const char*> inputNodeNames;
+    std::vector<const char*> outputNodeNames;
+
+    MODEL_TYPE modelType;
+    std::vector<int> imgSize;
+    float rectConfidenceThreshold;
+    float iouThreshold;
+    float resizeScales;//letterbox scale
+};

+ 193 - 0
examples/YOLOv8-ONNXRuntime-CPP/main.cpp

@@ -0,0 +1,193 @@
+#include <iostream>
+#include <iomanip>
+#include "inference.h"
+#include <filesystem>
+#include <fstream>
+#include <random>
+
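+// Run detection on every image in ./images, then draw and display the results.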
+void Detector(YOLO_V8*& p) {
+    std::filesystem::path current_path = std::filesystem::current_path();
+    std::filesystem::path imgs_path = current_path / "images";
+    for (auto& i : std::filesystem::directory_iterator(imgs_path))
+    {
+        if (i.path().extension() == ".jpg" || i.path().extension() == ".png" || i.path().extension() == ".jpeg")
+        {
+            std::string img_path = i.path().string();
+            cv::Mat img = cv::imread(img_path);
+            std::vector<DL_RESULT> res;
+            p->RunSession(img, res);
+
+            for (auto& re : res)
+            {
+                cv::RNG rng(cv::getTickCount());
+                cv::Scalar color(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
+
+                cv::rectangle(img, re.box, color, 3);
+
+                float confidence = floor(100 * re.confidence) / 100;
+                std::cout << std::fixed << std::setprecision(2);
+                std::string label = p->classes[re.classId] + " " +
+                    std::to_string(confidence).substr(0, std::to_string(confidence).size() - 4);
+
+                cv::rectangle(
+                    img,
+                    cv::Point(re.box.x, re.box.y - 25),
+                    cv::Point(re.box.x + label.length() * 15, re.box.y),
+                    color,
+                    cv::FILLED
+                );
+
+                cv::putText(
+                    img,
+                    label,
+                    cv::Point(re.box.x, re.box.y - 5),
+                    cv::FONT_HERSHEY_SIMPLEX,
+                    0.75,
+                    cv::Scalar(0, 0, 0),
+                    2
+                );
+
+
+            }
+            std::cout << "Press any key to exit" << std::endl;
+            cv::imshow("Result of Detection", img);
+            cv::waitKey(0);
+            cv::destroyAllWindows();
+        }
+    }
+}
+
+
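+// Run classification on images in the current directory and overlay the class confidences.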
+void Classifier(YOLO_V8*& p)
+{
+    std::filesystem::path current_path = std::filesystem::current_path();
+    std::filesystem::path imgs_path = current_path;// / "images"
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<int> dis(0, 255);
+    for (auto& i : std::filesystem::directory_iterator(imgs_path))
+    {
+        if (i.path().extension() == ".jpg" || i.path().extension() == ".png")
+        {
+            std::string img_path = i.path().string();
+            //std::cout << img_path << std::endl;
+            cv::Mat img = cv::imread(img_path);
+            std::vector<DL_RESULT> res;
+            char* ret = p->RunSession(img, res);
+
+            float positionY = 50;
+            for (int i = 0; i < res.size(); i++)
+            {
+                int r = dis(gen);
+                int g = dis(gen);
+                int b = dis(gen);
+                cv::putText(img, std::to_string(i) + ":", cv::Point(10, positionY), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(b, g, r), 2);
+                cv::putText(img, std::to_string(res.at(i).confidence), cv::Point(70, positionY), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(b, g, r), 2);
+                positionY += 50;
+            }
+
+            cv::imshow("TEST_CLS", img);
+            cv::waitKey(0);
+            cv::destroyAllWindows();
+            //cv::imwrite("E:\\output\\" + std::to_string(k) + ".png", img);
+        }
+
+    }
+}
+
+
+
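+// Parse the class names from the names: section of coco.yaml.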
+int ReadCocoYaml(YOLO_V8*& p) {
+    // Open the YAML file
+    std::ifstream file("coco.yaml");
+    if (!file.is_open())
+    {
+        std::cerr << "Failed to open file" << std::endl;
+        return 1;
+    }
+
+    // Read the file line by line
+    std::string line;
+    std::vector<std::string> lines;
+    while (std::getline(file, line))
+    {
+        lines.push_back(line);
+    }
+
+    // Find the start and end of the names section
+    std::size_t start = 0;
+    std::size_t end = 0;
+    for (std::size_t i = 0; i < lines.size(); i++)
+    {
+        if (lines[i].find("names:") != std::string::npos)
+        {
+            start = i + 1;
+        }
+        else if (start > 0 && lines[i].find(':') == std::string::npos)
+        {
+            end = i;
+            break;
+        }
+    }
+
+    // Extract the names
+    std::vector<std::string> names;
+    for (std::size_t i = start; i < end; i++)
+    {
+        std::stringstream ss(lines[i]);
+        std::string name;
+        std::getline(ss, name, ':'); // Extract the number before the delimiter
+        std::getline(ss, name); // Extract the string after the delimiter
+        names.push_back(name);
+    }
+
+    p->classes = names;
+    return 0;
+}
+
+
+void DetectTest()
+{
+    YOLO_V8* yoloDetector = new YOLO_V8;
+    ReadCocoYaml(yoloDetector);
+    DL_INIT_PARAM params;
+    params.rectConfidenceThreshold = 0.1;
+    params.iouThreshold = 0.5;
+    params.modelPath = "yolov8n.onnx";
+    params.imgSize = { 640, 640 };
+#ifdef USE_CUDA
+    params.cudaEnable = true;
+
+    // GPU FP32 inference
+    params.modelType = YOLO_DETECT_V8;
+    // GPU FP16 inference
+    // Note: requires an FP16 ONNX model
+    //params.modelType = YOLO_DETECT_V8_HALF;
+
+#else
+    // CPU inference
+    params.modelType = YOLO_DETECT_V8;
+    params.cudaEnable = false;
+
+#endif
+    yoloDetector->CreateSession(params);
+    Detector(yoloDetector);
+}
+
+
+void ClsTest()
+{
+    YOLO_V8* yoloDetector = new YOLO_V8;
+    std::string model_path = "cls.onnx";
+    ReadCocoYaml(yoloDetector);
+    DL_INIT_PARAM params{ model_path, YOLO_CLS, {224, 224} };
+    yoloDetector->CreateSession(params);
+    Classifier(yoloDetector);
+}
+
+
+int main()
+{
+    //DetectTest();
+    ClsTest();
+}

+ 24 - 0
examples/YOLOv8-ONNXRuntime-Rust/Cargo.toml

@@ -0,0 +1,24 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+[package]
+name = "yolov8-rs"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+clap = { version = "4.2.4", features = ["derive"] }
+image = { version = "0.25.2"}
+imageproc = { version = "0.25.0"}
+ndarray = { version = "0.16" }
+ort = { version = "2.0.0-rc.5", features = ["cuda", "tensorrt", "load-dynamic", "copy-dylibs", "half"]}
+rusttype = { version = "0.9.3" }
+anyhow = { version = "1.0.75" }
+regex = { version = "1.5.4" }
+rand = { version = "0.8.5" }
+chrono = { version = "0.4.30" }
+half = { version = "2.3.1" }
+dirs = { version = "5.0.1" }
+ureq = { version = "2.9.1" }
+ab_glyph = "0.2.29"

+ 212 - 0
examples/YOLOv8-ONNXRuntime-Rust/README.md

@@ -0,0 +1,212 @@
+# YOLOv8-ONNXRuntime-Rust for All the Key YOLO Tasks
+
+This repository provides a Rust demo for performing YOLOv8 tasks like `Classification`, `Segmentation`, `Detection`, `Pose Detection` and `OBB` using ONNXRuntime.
+
+## Recently Updated
+
+- Add YOLOv8-OBB demo
+- Update ONNXRuntime to 1.19.x
+
+The newly updated YOLOv8 example code is located in [this repository](https://github.com/jamjamjon/usls/tree/main/examples/yolo).
+
+## Features
+
+- Support `Classification`, `Segmentation`, `Detection`, `Pose (Keypoints) Detection`, and `OBB` tasks.
+- Support `FP16` & `FP32` ONNX models.
+- Support `CPU`, `CUDA` and `TensorRT` execution providers to accelerate computation.
+- Support dynamic input shapes (`batch`, `width`, `height`).
+
+## Installation
+
+### 1. Install Rust
+
+Please follow the official Rust installation guide: https://www.rust-lang.org/tools/install
+
+### 2. ONNXRuntime Linking
+
+- #### For detailed setup instructions, refer to the [ORT documentation](https://ort.pyke.io/setup/linking).
+
+- #### For Linux or macOS Users:
+  - Download the ONNX Runtime package from the [Releases page](https://github.com/microsoft/onnxruntime/releases).
+  - Set up the library path by exporting the `ORT_DYLIB_PATH` environment variable:
+    ```shell
+    export ORT_DYLIB_PATH=/path/to/onnxruntime/lib/libonnxruntime.so.1.19.0
+    ```
+
+### 3. \[Optional\] Install CUDA & CuDNN & TensorRT
+
+- CUDA execution provider requires CUDA v11.6+.
+- TensorRT execution provider requires CUDA v11.4+ and TensorRT v8.4+.
+
+## Get Started
+
+### 1. Export the YOLOv8 ONNX Models
+
+```bash
+pip install -U ultralytics
+
+# export onnx model with dynamic shapes
+yolo export model=yolov8m.pt format=onnx  simplify dynamic
+yolo export model=yolov8m-cls.pt format=onnx  simplify dynamic
+yolo export model=yolov8m-pose.pt format=onnx  simplify dynamic
+yolo export model=yolov8m-seg.pt format=onnx  simplify dynamic
+
+
+# export onnx model with constant shapes
+yolo export model=yolov8m.pt format=onnx  simplify
+yolo export model=yolov8m-cls.pt format=onnx  simplify
+yolo export model=yolov8m-pose.pt format=onnx  simplify
+yolo export model=yolov8m-seg.pt format=onnx  simplify
+```
+
+### 2. Run Inference
+
+It will perform inference with the ONNX model on the source image.
+
+```bash
+cargo run --release -- --model <MODEL> --source <SOURCE>
+```
+
+Set `--cuda` to use CUDA execution provider to speed up inference.
+
+```bash
+cargo run --release -- --cuda --model <MODEL> --source <SOURCE>
+```
+
+Set `--trt` to use TensorRT execution provider, and you can set `--fp16` at the same time to use TensorRT FP16 engine.
+
+```bash
+cargo run --release -- --trt --fp16 --model <MODEL> --source <SOURCE>
+```
+
+Set `--device_id` to select which device to run on. If you have only one GPU and set `device_id` to 1, the program will not panic; `ort` will automatically fall back to the `CPU` EP.
+
+```bash
+cargo run --release -- --cuda --device_id 0 --model <MODEL> --source <SOURCE>
+```
+
+Set `--batch` to do multi-batch-size inference.
+
+If you're using `--trt`, you can also set `--batch-min` and `--batch-max` to explicitly specify the min/max/opt batch sizes for dynamic batch input (https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html#explicit-shape-range-for-dynamic-shape-input). (Note that the ONNX model should be exported with dynamic shapes.)
+
+```bash
+cargo run --release -- --cuda --batch 2 --model <MODEL> --source <SOURCE>
+```
+
+Set `--height` and `--width` to do dynamic image size inference. (Note that the ONNX model should be exported with dynamic shapes.)
+
+```bash
+cargo run --release -- --cuda --width 480 --height 640 --model <MODEL> --source <SOURCE>
+```
+
+Set `--profile` to check the time consumed in each stage. (Note that the model usually needs 1-3 dry runs to warm up; make sure to run enough iterations to evaluate the result.)
+
+```bash
+cargo run --release -- --trt --fp16 --profile --model <MODEL> --source <SOURCE>
+```
+
+Results: (yolov8m.onnx, batch=1, 3 runs, trt, fp16, RTX 3060Ti)
+
+```bash
+==> 0
+[Model Preprocess]: 12.75788ms
+[ORT H2D]: 237.118µs
+[ORT Inference]: 507.895469ms
+[ORT D2H]: 191.655µs
+[Model Inference]: 508.34589ms
+[Model Postprocess]: 1.061122ms
+==> 1
+[Model Preprocess]: 13.658655ms
+[ORT H2D]: 209.975µs
+[ORT Inference]: 5.12372ms
+[ORT D2H]: 182.389µs
+[Model Inference]: 5.530022ms
+[Model Postprocess]: 1.04851ms
+==> 2
+[Model Preprocess]: 12.475332ms
+[ORT H2D]: 246.127µs
+[ORT Inference]: 5.048432ms
+[ORT D2H]: 187.117µs
+[Model Inference]: 5.493119ms
+[Model Postprocess]: 1.040906ms
+```
+
+Other available options:
+
+`--conf`: confidence threshold \[default: 0.3\]
+
+`--iou`: iou threshold in NMS \[default: 0.45\]
+
+`--kconf`: confidence threshold of keypoint \[default: 0.55\]
+
+`--plot`: plot inference result with random RGB color and save
+
+You can check out all CLI arguments by running:
+
+```bash
+git clone https://github.com/ultralytics/ultralytics
+cd ultralytics/examples/YOLOv8-ONNXRuntime-Rust
+cargo run --release -- --help
+```
+
+## Examples
+
+![Ultralytics YOLO Tasks](https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png)
+
+### Classification
+
+Running a dynamic-shape ONNX model on the `CPU` with image size `--height 224 --width 224`, saving the plotted image in the `runs` directory.
+
+```bash
+cargo run --release -- --model ../assets/weights/yolov8m-cls-dyn.onnx --source ../assets/images/dog.jpg --height 224 --width 224 --plot --profile
+```
+
+You will see output like:
+
+```bash
+Summary:
+> Task: Classify (Ultralytics 8.0.217)
+> EP: Cpu
+> Dtype: Float32
+> Batch: 1 (Dynamic), Height: 224 (Dynamic), Width: 224 (Dynamic)
+> nc: 1000 nk: 0, nm: 0, conf: 0.3, kconf: 0.55, iou: 0.45
+
+[Model Preprocess]: 16.363477ms
+[ORT H2D]: 50.722µs
+[ORT Inference]: 16.295808ms
+[ORT D2H]: 8.37µs
+[Model Inference]: 16.367046ms
+[Model Postprocess]: 3.527µs
+[
+    YOLOResult {
+        Probs(top5): Some([(208, 0.6950566), (209, 0.13823675), (178, 0.04849795), (215, 0.019029364), (212, 0.016506357)]),
+        Bboxes: None,
+        Keypoints: None,
+        Masks: None,
+    },
+]
+```
+
+### Object Detection
+
+Using the `CUDA` EP and dynamic image size `--height 640 --width 480`:
+
+```bash
+cargo run --release -- --cuda --model ../assets/weights/yolov8m-dynamic.onnx --source ../assets/images/bus.jpg --plot --height 640 --width 480
+```
+
+### Pose Detection
+
+Using the `TensorRT` EP:
+
+```bash
+cargo run --release -- --trt --model ../assets/weights/yolov8m-pose.onnx --source ../assets/images/bus.jpg --plot
+```
+
+### Instance Segmentation
+
+Using the `TensorRT` EP with an FP16 model (`--fp16`):
+
+```bash
+cargo run --release --  --trt --fp16 --model ../assets/weights/yolov8m-seg.onnx --source ../assets/images/0172.jpg --plot
+```

+ 87 - 0
examples/YOLOv8-ONNXRuntime-Rust/src/cli.rs

@@ -0,0 +1,87 @@
+use clap::Parser;
+
+use crate::YOLOTask;
+
+#[derive(Parser, Clone)]
+#[command(author, version, about, long_about = None)]
+pub struct Args {
+    /// ONNX model path
+    #[arg(long, required = true)]
+    pub model: String,
+
+    /// input path
+    #[arg(long, required = true)]
+    pub source: String,
+
+    /// device id
+    #[arg(long, default_value_t = 0)]
+    pub device_id: i32,
+
+    /// using TensorRT EP
+    #[arg(long)]
+    pub trt: bool,
+
+    /// using CUDA EP
+    #[arg(long)]
+    pub cuda: bool,
+
+    /// input batch size
+    #[arg(long, default_value_t = 1)]
+    pub batch: u32,
+
+    /// trt input min_batch size
+    #[arg(long, default_value_t = 1)]
+    pub batch_min: u32,
+
+    /// trt input max_batch size
+    #[arg(long, default_value_t = 32)]
+    pub batch_max: u32,
+
+    /// using TensorRT --fp16
+    #[arg(long)]
+    pub fp16: bool,
+
+    /// specify YOLO task
+    #[arg(long, value_enum)]
+    pub task: Option<YOLOTask>,
+
+    /// num_classes
+    #[arg(long)]
+    pub nc: Option<u32>,
+
+    /// num_keypoints
+    #[arg(long)]
+    pub nk: Option<u32>,
+
+    /// num_masks
+    #[arg(long)]
+    pub nm: Option<u32>,
+
+    /// input image width
+    #[arg(long)]
+    pub width: Option<u32>,
+
+    /// input image height
+    #[arg(long)]
+    pub height: Option<u32>,
+
+    /// confidence threshold
+    #[arg(long, required = false, default_value_t = 0.3)]
+    pub conf: f32,
+
+    /// iou threshold in NMS
+    #[arg(long, required = false, default_value_t = 0.45)]
+    pub iou: f32,
+
+    /// confidence threshold of keypoint
+    #[arg(long, required = false, default_value_t = 0.55)]
+    pub kconf: f32,
+
+    /// plot inference result and save
+    #[arg(long)]
+    pub plot: bool,
+
+    /// check time consumed in each stage
+    #[arg(long)]
+    pub profile: bool,
+}

+ 160 - 0
examples/YOLOv8-ONNXRuntime-Rust/src/lib.rs

@@ -0,0 +1,160 @@
+#![allow(clippy::type_complexity)]
+
+use std::io::{Read, Write};
+
+pub mod cli;
+pub mod model;
+pub mod ort_backend;
+pub mod yolo_result;
+pub use crate::cli::Args;
+pub use crate::model::YOLOv8;
+pub use crate::ort_backend::{Batch, OrtBackend, OrtConfig, OrtEP, YOLOTask};
+pub use crate::yolo_result::{Bbox, Embedding, Point2, YOLOResult};
+
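+// Greedy NMS: keep the highest-confidence box and drop any box whose IoU with a kept box exceeds the threshold.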
+pub fn non_max_suppression(
+    xs: &mut Vec<(Bbox, Option<Vec<Point2>>, Option<Vec<f32>>)>,
+    iou_threshold: f32,
+) {
+    xs.sort_by(|b1, b2| b2.0.confidence().partial_cmp(&b1.0.confidence()).unwrap());
+
+    let mut current_index = 0;
+    for index in 0..xs.len() {
+        let mut drop = false;
+        for prev_index in 0..current_index {
+            let iou = xs[prev_index].0.iou(&xs[index].0);
+            if iou > iou_threshold {
+                drop = true;
+                break;
+            }
+        }
+        if !drop {
+            xs.swap(current_index, index);
+            current_index += 1;
+        }
+    }
+    xs.truncate(current_index);
+}
+
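+// Generate a timestamp string (UTC+8) joined with the given delimiter.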
+pub fn gen_time_string(delimiter: &str) -> String {
+    let offset = chrono::FixedOffset::east_opt(8 * 60 * 60).unwrap(); // Beijing
+    let t_now = chrono::Utc::now().with_timezone(&offset);
+    let fmt = format!(
+        "%Y{}%m{}%d{}%H{}%M{}%S{}%f",
+        delimiter, delimiter, delimiter, delimiter, delimiter, delimiter
+    );
+    t_now.format(&fmt).to_string()
+}
+
+pub const SKELETON: [(usize, usize); 16] = [
+    (0, 1),
+    (0, 2),
+    (1, 3),
+    (2, 4),
+    (5, 6),
+    (5, 11),
+    (6, 12),
+    (11, 12),
+    (5, 7),
+    (6, 8),
+    (7, 9),
+    (8, 10),
+    (11, 13),
+    (12, 14),
+    (13, 15),
+    (14, 16),
+];
+
+pub fn check_font(font: &str) -> rusttype::Font<'static> {
+    // check then load font
+
+    // ultralytics font path
+    let font_path_config = match dirs::config_dir() {
+        Some(mut d) => {
+            d.push("Ultralytics");
+            d.push(font);
+            d
+        }
+        None => panic!("Unsupported operating system. Now support Linux, MacOS, Windows."),
+    };
+
+    // current font path
+    let font_path_current = std::path::PathBuf::from(font);
+
+    // check font
+    let font_path = if font_path_config.exists() {
+        font_path_config
+    } else if font_path_current.exists() {
+        font_path_current
+    } else {
+        println!("Downloading font...");
+        let source_url = "https://ultralytics.com/assets/Arial.ttf";
+        let resp = ureq::get(source_url)
+            .timeout(std::time::Duration::from_secs(500))
+            .call()
+            .unwrap_or_else(|err| panic!("> Failed to download font: {source_url}: {err:?}"));
+
+        // read to buffer
+        let mut buffer = vec![];
+        let total_size = resp
+            .header("Content-Length")
+            .and_then(|s| s.parse::<u64>().ok())
+            .unwrap();
+        let _reader = resp
+            .into_reader()
+            .take(total_size)
+            .read_to_end(&mut buffer)
+            .unwrap();
+
+        // save
+        let _path = std::fs::File::create(font).unwrap();
+        let mut writer = std::io::BufWriter::new(_path);
+        writer.write_all(&buffer).unwrap();
+        println!("Font saved at: {:?}", font_path_current.display());
+        font_path_current
+    };
+
+    // load font
+    let buffer = std::fs::read(font_path).unwrap();
+    rusttype::Font::try_from_vec(buffer).unwrap()
+}
+
+use ab_glyph::FontArc;
+pub fn load_font() -> FontArc {
+    use std::path::Path;
+    let font_path = Path::new("./font/Arial.ttf");
+    match font_path.try_exists() {
+        Ok(true) => {
+            let buffer = std::fs::read(font_path).unwrap();
+            FontArc::try_from_vec(buffer).unwrap()
+        }
+        Ok(false) => {
+            std::fs::create_dir_all("./font").unwrap();
+            println!("Downloading font...");
+            let source_url = "https://ultralytics.com/assets/Arial.ttf";
+            let resp = ureq::get(source_url)
+                .timeout(std::time::Duration::from_secs(500))
+                .call()
+                .unwrap_or_else(|err| panic!("> Failed to download font: {source_url}: {err:?}"));
+
+            // read to buffer
+            let mut buffer = vec![];
+            let total_size = resp
+                .header("Content-Length")
+                .and_then(|s| s.parse::<u64>().ok())
+                .unwrap();
+            let _reader = resp
+                .into_reader()
+                .take(total_size)
+                .read_to_end(&mut buffer)
+                .unwrap();
+            // save
+            let mut fd = std::fs::File::create(font_path).unwrap();
+            fd.write_all(&buffer).unwrap();
+            println!("Font saved at: {:?}", font_path.display());
+            FontArc::try_from_vec(buffer).unwrap()
+        }
+        Err(e) => {
+            panic!("Failed to load font {}", e);
+        }
+    }
+}

+ 28 - 0
examples/YOLOv8-ONNXRuntime-Rust/src/main.rs

@@ -0,0 +1,28 @@
+use clap::Parser;
+
+use yolov8_rs::{Args, YOLOv8};
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let args = Args::parse();
+
+    // 1. load image
+    let x = image::ImageReader::open(&args.source)?
+        .with_guessed_format()?
+        .decode()?;
+
+    // 2. model support dynamic batch inference, so input should be a Vec
+    let xs = vec![x];
+
+    // You can test `--batch 2` with this
+    // let xs = vec![x.clone(), x];
+
+    // 3. build yolov8 model
+    let mut model = YOLOv8::new(args)?;
+    model.summary(); // model info
+
+    // 4. run
+    let ys = model.run(&xs)?;
+    println!("{:?}", ys);
+
+    Ok(())
+}

+ 651 - 0
examples/YOLOv8-ONNXRuntime-Rust/src/model.rs

@@ -0,0 +1,651 @@
+#![allow(clippy::type_complexity)]
+
+use ab_glyph::FontArc;
+use anyhow::Result;
+use image::{DynamicImage, GenericImageView, ImageBuffer};
+use ndarray::{s, Array, Axis, IxDyn};
+use rand::{thread_rng, Rng};
+use std::path::PathBuf;
+
+use crate::{
+    gen_time_string, load_font, non_max_suppression, Args, Batch, Bbox, Embedding, OrtBackend,
+    OrtConfig, OrtEP, Point2, YOLOResult, YOLOTask, SKELETON,
+};
+
+pub struct YOLOv8 {
+    // YOLOv8 model for all yolo-tasks
+    engine: OrtBackend,
+    nc: u32,
+    nk: u32,
+    nm: u32,
+    height: u32,
+    width: u32,
+    batch: u32,
+    task: YOLOTask,
+    conf: f32,
+    kconf: f32,
+    iou: f32,
+    names: Vec<String>,
+    color_palette: Vec<(u8, u8, u8)>,
+    profile: bool,
+    plot: bool,
+}
+
+impl YOLOv8 {
+    pub fn new(config: Args) -> Result<Self> {
+        // execution provider
+        let ep = if config.trt {
+            OrtEP::Trt(config.device_id)
+        } else if config.cuda {
+            OrtEP::CUDA(config.device_id)
+        } else {
+            OrtEP::CPU
+        };
+
+        // batch
+        let batch = Batch {
+            opt: config.batch,
+            min: config.batch_min,
+            max: config.batch_max,
+        };
+
+        // build ort engine
+        let ort_args = OrtConfig {
+            ep,
+            batch,
+            f: config.model,
+            task: config.task,
+            trt_fp16: config.fp16,
+            image_size: (config.height, config.width),
+        };
+        let engine = OrtBackend::build(ort_args)?;
+
+        //  get batch, height, width, tasks, nc, nk, nm
+        let (batch, height, width, task) = (
+            engine.batch(),
+            engine.height(),
+            engine.width(),
+            engine.task(),
+        );
+        let nc = engine.nc().or(config.nc).unwrap_or_else(|| {
+            panic!("Failed to get num_classes, make it explicit with `--nc`");
+        });
+        let (nk, nm) = match task {
+            YOLOTask::Pose => {
+                let nk = engine.nk().or(config.nk).unwrap_or_else(|| {
+                    panic!("Failed to get num_keypoints, make it explicit with `--nk`");
+                });
+                (nk, 0)
+            }
+            YOLOTask::Segment => {
+                let nm = engine.nm().or(config.nm).unwrap_or_else(|| {
+                    panic!("Failed to get num_masks, make it explicit with `--nm`");
+                });
+                (0, nm)
+            }
+            _ => (0, 0),
+        };
+
+        // class names
+        let names = engine.names().unwrap_or(vec!["Unknown".to_string()]);
+
+        // color palette
+        let mut rng = thread_rng();
+        let color_palette: Vec<_> = names
+            .iter()
+            .map(|_| {
+                (
+                    rng.gen_range(0..=255),
+                    rng.gen_range(0..=255),
+                    rng.gen_range(0..=255),
+                )
+            })
+            .collect();
+
+        Ok(Self {
+            engine,
+            names,
+            conf: config.conf,
+            kconf: config.kconf,
+            iou: config.iou,
+            color_palette,
+            profile: config.profile,
+            plot: config.plot,
+            nc,
+            nk,
+            nm,
+            height,
+            width,
+            batch,
+            task,
+        })
+    }
+
+    pub fn scale_wh(&self, w0: f32, h0: f32, w1: f32, h1: f32) -> (f32, f32, f32) {
+        let r = (w1 / w0).min(h1 / h0);
+        (r, (w0 * r).round(), (h0 * r).round())
+    }
+
+    pub fn preprocess(&mut self, xs: &Vec<DynamicImage>) -> Result<Array<f32, IxDyn>> {
+        let mut ys =
+            Array::ones((xs.len(), 3, self.height() as usize, self.width() as usize)).into_dyn();
+        ys.fill(144.0 / 255.0);
+        for (idx, x) in xs.iter().enumerate() {
+            let img = match self.task() {
+                YOLOTask::Classify => x.resize_exact(
+                    self.width(),
+                    self.height(),
+                    image::imageops::FilterType::Triangle,
+                ),
+                _ => {
+                    let (w0, h0) = x.dimensions();
+                    let w0 = w0 as f32;
+                    let h0 = h0 as f32;
+                    let (_, w_new, h_new) =
+                        self.scale_wh(w0, h0, self.width() as f32, self.height() as f32); // f32 round
+                    x.resize_exact(
+                        w_new as u32,
+                        h_new as u32,
+                        if let YOLOTask::Segment = self.task() {
+                            image::imageops::FilterType::CatmullRom
+                        } else {
+                            image::imageops::FilterType::Triangle
+                        },
+                    )
+                }
+            };
+
+            for (x, y, rgb) in img.pixels() {
+                let x = x as usize;
+                let y = y as usize;
+                let [r, g, b, _] = rgb.0;
+                ys[[idx, 0, y, x]] = (r as f32) / 255.0;
+                ys[[idx, 1, y, x]] = (g as f32) / 255.0;
+                ys[[idx, 2, y, x]] = (b as f32) / 255.0;
+            }
+        }
+
+        Ok(ys)
+    }
+
+    pub fn run(&mut self, xs: &Vec<DynamicImage>) -> Result<Vec<YOLOResult>> {
+        // pre-process
+        let t_pre = std::time::Instant::now();
+        let xs_ = self.preprocess(xs)?;
+        if self.profile {
+            println!("[Model Preprocess]: {:?}", t_pre.elapsed());
+        }
+
+        // run
+        let t_run = std::time::Instant::now();
+        let ys = self.engine.run(xs_, self.profile)?;
+        if self.profile {
+            println!("[Model Inference]: {:?}", t_run.elapsed());
+        }
+
+        // post-process
+        let t_post = std::time::Instant::now();
+        let ys = self.postprocess(ys, xs)?;
+        if self.profile {
+            println!("[Model Postprocess]: {:?}", t_post.elapsed());
+        }
+
+        // plot and save
+        if self.plot {
+            self.plot_and_save(&ys, xs, Some(&SKELETON));
+        }
+        Ok(ys)
+    }
+
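+    /// Decodes raw model outputs into `YOLOResult`s: classification embeddings for Classify,
+    /// otherwise confidence-filtered, NMS-pruned bboxes with optional keypoints and masks.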
+    pub fn postprocess(
+        &self,
+        xs: Vec<Array<f32, IxDyn>>,
+        xs0: &[DynamicImage],
+    ) -> Result<Vec<YOLOResult>> {
+        if let YOLOTask::Classify = self.task() {
+            let mut ys = Vec::new();
+            let preds = &xs[0];
+            for batch in preds.axis_iter(Axis(0)) {
+                ys.push(YOLOResult::new(
+                    Some(Embedding::new(batch.into_owned())),
+                    None,
+                    None,
+                    None,
+                ));
+            }
+            Ok(ys)
+        } else {
+            const CXYWH_OFFSET: usize = 4; // cxcywh
+            const KPT_STEP: usize = 3; // xyconf
+            let preds = &xs[0];
+            let protos = {
+                if xs.len() > 1 {
+                    Some(&xs[1])
+                } else {
+                    None
+                }
+            };
+            let mut ys = Vec::new();
+            for (idx, anchor) in preds.axis_iter(Axis(0)).enumerate() {
+                // [bs, 4 + nc + nm, anchors]
+                // input image
+                let width_original = xs0[idx].width() as f32;
+                let height_original = xs0[idx].height() as f32;
+                let ratio = (self.width() as f32 / width_original)
+                    .min(self.height() as f32 / height_original);
+
+                // save each result
+                let mut data: Vec<(Bbox, Option<Vec<Point2>>, Option<Vec<f32>>)> = Vec::new();
+                for pred in anchor.axis_iter(Axis(1)) {
+                    // split preds for different tasks
+                    let bbox = pred.slice(s![0..CXYWH_OFFSET]);
+                    let clss = pred.slice(s![CXYWH_OFFSET..CXYWH_OFFSET + self.nc() as usize]);
+                    let kpts = {
+                        if let YOLOTask::Pose = self.task() {
+                            Some(pred.slice(s![pred.len() - KPT_STEP * self.nk() as usize..]))
+                        } else {
+                            None
+                        }
+                    };
+                    let coefs = {
+                        if let YOLOTask::Segment = self.task() {
+                            Some(pred.slice(s![pred.len() - self.nm() as usize..]).to_vec())
+                        } else {
+                            None
+                        }
+                    };
+
+                    // confidence and id
+                    let (id, &confidence) = clss
+                        .into_iter()
+                        .enumerate()
+                        .reduce(|max, x| if x.1 > max.1 { x } else { max })
+                        .unwrap(); // definitely will not panic!
+
+                    // confidence filter
+                    if confidence < self.conf {
+                        continue;
+                    }
+
+                    // bbox re-scale
+                    let cx = bbox[0] / ratio;
+                    let cy = bbox[1] / ratio;
+                    let w = bbox[2] / ratio;
+                    let h = bbox[3] / ratio;
+                    let x = cx - w / 2.;
+                    let y = cy - h / 2.;
+                    let y_bbox = Bbox::new(
+                        x.max(0.0f32).min(width_original),
+                        y.max(0.0f32).min(height_original),
+                        w,
+                        h,
+                        id,
+                        confidence,
+                    );
+
+                    // kpts
+                    let y_kpts = {
+                        if let Some(kpts) = kpts {
+                            let mut kpts_ = Vec::new();
+                            // rescale
+                            for i in 0..self.nk() as usize {
+                                let kx = kpts[KPT_STEP * i] / ratio;
+                                let ky = kpts[KPT_STEP * i + 1] / ratio;
+                                let kconf = kpts[KPT_STEP * i + 2];
+                                if kconf < self.kconf {
+                                    kpts_.push(Point2::default());
+                                } else {
+                                    kpts_.push(Point2::new_with_conf(
+                                        kx.max(0.0f32).min(width_original),
+                                        ky.max(0.0f32).min(height_original),
+                                        kconf,
+                                    ));
+                                }
+                            }
+                            Some(kpts_)
+                        } else {
+                            None
+                        }
+                    };
+
+                    // data merged
+                    data.push((y_bbox, y_kpts, coefs));
+                }
+
+                // nms
+                non_max_suppression(&mut data, self.iou);
+
+                // decode
+                let mut y_bboxes: Vec<Bbox> = Vec::new();
+                let mut y_kpts: Vec<Vec<Point2>> = Vec::new();
+                let mut y_masks: Vec<Vec<u8>> = Vec::new();
+                for elem in data.into_iter() {
+                    if let Some(kpts) = elem.1 {
+                        y_kpts.push(kpts)
+                    }
+
+                    // decode masks
+                    if let Some(coefs) = elem.2 {
+                        let proto = protos.unwrap().slice(s![idx, .., .., ..]);
+                        let (nm, nh, nw) = proto.dim();
+
+                        // coefs * proto -> mask
+                        let coefs = Array::from_shape_vec((1, nm), coefs)?; // (n, nm)
+
+                        let proto = proto.to_owned();
+                        let proto = proto.to_shape((nm, nh * nw))?; // (nm, nh*nw)
+                        let mask = coefs.dot(&proto); // (1, nh*nw)
+                        let mask = mask.to_shape((nh, nw, 1))?;
+
+                        // build image from ndarray
+                        let mask_im: ImageBuffer<image::Luma<_>, Vec<f32>> =
+                            match ImageBuffer::from_raw(
+                                nw as u32,
+                                nh as u32,
+                                mask.to_owned().into_raw_vec_and_offset().0,
+                            ) {
+                                Some(image) => image,
+                                None => panic!("cannot create image from ndarray"),
+                            };
+                        let mut mask_im = image::DynamicImage::from(mask_im); // -> dyn
+
+                        // rescale masks
+                        let (_, w_mask, h_mask) =
+                            self.scale_wh(width_original, height_original, nw as f32, nh as f32);
+                        let mask_cropped = mask_im.crop(0, 0, w_mask as u32, h_mask as u32);
+                        let mask_original = mask_cropped.resize_exact(
+                            // resize_to_fill
+                            width_original as u32,
+                            height_original as u32,
+                            match self.task() {
+                                YOLOTask::Segment => image::imageops::FilterType::CatmullRom,
+                                _ => image::imageops::FilterType::Triangle,
+                            },
+                        );
+
+                        // crop-mask with bbox
+                        let mut mask_original_cropped = mask_original.into_luma8();
+                        for y in 0..height_original as usize {
+                            for x in 0..width_original as usize {
+                                if x < elem.0.xmin() as usize
+                                    || x > elem.0.xmax() as usize
+                                    || y < elem.0.ymin() as usize
+                                    || y > elem.0.ymax() as usize
+                                {
+                                    mask_original_cropped.put_pixel(
+                                        x as u32,
+                                        y as u32,
+                                        image::Luma([0u8]),
+                                    );
+                                }
+                            }
+                        }
+                        y_masks.push(mask_original_cropped.into_raw());
+                    }
+                    y_bboxes.push(elem.0);
+                }
+
+                // save each result
+                let y = YOLOResult {
+                    probs: None,
+                    bboxes: if !y_bboxes.is_empty() {
+                        Some(y_bboxes)
+                    } else {
+                        None
+                    },
+                    keypoints: if !y_kpts.is_empty() {
+                        Some(y_kpts)
+                    } else {
+                        None
+                    },
+                    masks: if !y_masks.is_empty() {
+                        Some(y_masks)
+                    } else {
+                        None
+                    },
+                };
+                ys.push(y);
+            }
+
+            Ok(ys)
+        }
+    }
+
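+    /// Draws probs, bboxes, keypoints, skeletons and masks onto the input images
+    /// and saves each annotated image under `runs/`.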
+    pub fn plot_and_save(
+        &self,
+        ys: &[YOLOResult],
+        xs0: &[DynamicImage],
+        skeletons: Option<&[(usize, usize)]>,
+    ) {
+        // check font then load
+        let font: FontArc = load_font();
+        for (_idb, (img0, y)) in xs0.iter().zip(ys.iter()).enumerate() {
+            let mut img = img0.to_rgb8();
+
+            // draw for classifier
+            if let Some(probs) = y.probs() {
+                for (i, k) in probs.topk(5).iter().enumerate() {
+                    let legend = format!("{} {:.2}%", self.names[k.0], k.1);
+                    let scale = 32;
+                    let legend_size = img.width().max(img.height()) / scale;
+                    let x = img.width() / 20;
+                    let y = img.height() / 20 + i as u32 * legend_size;
+
+                    imageproc::drawing::draw_text_mut(
+                        &mut img,
+                        image::Rgb([0, 255, 0]),
+                        x as i32,
+                        y as i32,
+                        legend_size as f32,
+                        &font,
+                        &legend,
+                    );
+                }
+            }
+
+            // draw bboxes & keypoints
+            if let Some(bboxes) = y.bboxes() {
+                for (_idx, bbox) in bboxes.iter().enumerate() {
+                    // rect
+                    imageproc::drawing::draw_hollow_rect_mut(
+                        &mut img,
+                        imageproc::rect::Rect::at(bbox.xmin() as i32, bbox.ymin() as i32)
+                            .of_size(bbox.width() as u32, bbox.height() as u32),
+                        image::Rgb(self.color_palette[bbox.id()].into()),
+                    );
+
+                    // text
+                    let legend = format!("{} {:.2}%", self.names[bbox.id()], bbox.confidence());
+                    let scale = 40;
+                    let legend_size = img.width().max(img.height()) / scale;
+                    imageproc::drawing::draw_text_mut(
+                        &mut img,
+                        image::Rgb(self.color_palette[bbox.id()].into()),
+                        bbox.xmin() as i32,
+                        (bbox.ymin() - legend_size as f32) as i32,
+                        legend_size as f32,
+                        &font,
+                        &legend,
+                    );
+                }
+            }
+
+            // draw kpts
+            if let Some(keypoints) = y.keypoints() {
+                for kpts in keypoints.iter() {
+                    for kpt in kpts.iter() {
+                        // filter
+                        if kpt.confidence() < self.kconf {
+                            continue;
+                        }
+
+                        // draw point
+                        imageproc::drawing::draw_filled_circle_mut(
+                            &mut img,
+                            (kpt.x() as i32, kpt.y() as i32),
+                            2,
+                            image::Rgb([0, 255, 0]),
+                        );
+                    }
+
+                    // draw skeleton if has
+                    if let Some(skeletons) = skeletons {
+                        for &(idx1, idx2) in skeletons.iter() {
+                            let kpt1 = &kpts[idx1];
+                            let kpt2 = &kpts[idx2];
+                            if kpt1.confidence() < self.kconf || kpt2.confidence() < self.kconf {
+                                continue;
+                            }
+                            imageproc::drawing::draw_line_segment_mut(
+                                &mut img,
+                                (kpt1.x(), kpt1.y()),
+                                (kpt2.x(), kpt2.y()),
+                                image::Rgb([233, 14, 57]),
+                            );
+                        }
+                    }
+                }
+            }
+
+            // draw mask
+            if let Some(masks) = y.masks() {
+                for (mask, _bbox) in masks.iter().zip(y.bboxes().unwrap().iter()) {
+                    let mask_nd: ImageBuffer<image::Luma<_>, Vec<u8>> =
+                        match ImageBuffer::from_vec(img.width(), img.height(), mask.to_vec()) {
+                            Some(image) => image,
+                            None => panic!("cannot create image from ndarray"),
+                        };
+
+                    for _x in 0..img.width() {
+                        for _y in 0..img.height() {
+                            let mask_p = imageproc::drawing::Canvas::get_pixel(&mask_nd, _x, _y);
+                            if mask_p.0[0] > 0 {
+                                let mut img_p = imageproc::drawing::Canvas::get_pixel(&img, _x, _y);
+                                // img_p.0[2] = self.color_palette[bbox.id()].2 / 2;
+                                // img_p.0[1] = self.color_palette[bbox.id()].1 / 2;
+                                // img_p.0[0] = self.color_palette[bbox.id()].0 / 2;
+                                img_p.0[2] /= 2;
+                                img_p.0[1] = 255 - (255 - img_p.0[2]) / 2;
+                                img_p.0[0] /= 2;
+                                imageproc::drawing::Canvas::draw_pixel(&mut img, _x, _y, img_p)
+                            }
+                        }
+                    }
+                }
+            }
+
+            // mkdir and save
+            let mut runs = PathBuf::from("runs");
+            if !runs.exists() {
+                std::fs::create_dir_all(&runs).unwrap();
+            }
+            runs.push(gen_time_string("-"));
+            let saveout = format!("{}.jpg", runs.to_str().unwrap());
+            let _ = img.save(saveout);
+        }
+    }
+
+    pub fn summary(&self) {
+        println!(
+            "\nSummary:\n\
+            > Task: {:?}{}\n\
+            > EP: {:?} {}\n\
+            > Dtype: {:?}\n\
+            > Batch: {} ({}), Height: {} ({}), Width: {} ({})\n\
+            > nc: {} nk: {}, nm: {}, conf: {}, kconf: {}, iou: {}\n\
+            ",
+            self.task(),
+            match self.engine.author().zip(self.engine.version()) {
+                Some((author, ver)) => format!(" ({} {})", author, ver),
+                None => String::from(""),
+            },
+            self.engine.ep(),
+            if let OrtEP::CPU = self.engine.ep() {
+                ""
+            } else {
+                "(May still fall back to CPU)"
+            },
+            self.engine.dtype(),
+            self.batch(),
+            if self.engine.is_batch_dynamic() {
+                "Dynamic"
+            } else {
+                "Const"
+            },
+            self.height(),
+            if self.engine.is_height_dynamic() {
+                "Dynamic"
+            } else {
+                "Const"
+            },
+            self.width(),
+            if self.engine.is_width_dynamic() {
+                "Dynamic"
+            } else {
+                "Const"
+            },
+            self.nc(),
+            self.nk(),
+            self.nm(),
+            self.conf,
+            self.kconf,
+            self.iou,
+        );
+    }
+
+    pub fn engine(&self) -> &OrtBackend {
+        &self.engine
+    }
+
+    pub fn conf(&self) -> f32 {
+        self.conf
+    }
+
+    pub fn set_conf(&mut self, val: f32) {
+        self.conf = val;
+    }
+
+    pub fn conf_mut(&mut self) -> &mut f32 {
+        &mut self.conf
+    }
+
+    pub fn kconf(&self) -> f32 {
+        self.kconf
+    }
+
+    pub fn iou(&self) -> f32 {
+        self.iou
+    }
+
+    pub fn task(&self) -> &YOLOTask {
+        &self.task
+    }
+
+    pub fn batch(&self) -> u32 {
+        self.batch
+    }
+
+    pub fn width(&self) -> u32 {
+        self.width
+    }
+
+    pub fn height(&self) -> u32 {
+        self.height
+    }
+
+    pub fn nc(&self) -> u32 {
+        self.nc
+    }
+
+    pub fn nk(&self) -> u32 {
+        self.nk
+    }
+
+    pub fn nm(&self) -> u32 {
+        self.nm
+    }
+
+    pub fn names(&self) -> &Vec<String> {
+        &self.names
+    }
+}

+ 553 - 0
examples/YOLOv8-ONNXRuntime-Rust/src/ort_backend.rs

@@ -0,0 +1,553 @@
+use anyhow::Result;
+use clap::ValueEnum;
+use half::f16;
+use ndarray::{Array, CowArray, IxDyn};
+use ort::{
+    CPUExecutionProvider, CUDAExecutionProvider, ExecutionProvider, ExecutionProviderDispatch,
+    TensorRTExecutionProvider,
+};
+use ort::{Session, SessionBuilder};
+use ort::{TensorElementType, ValueType};
+use regex::Regex;
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
+pub enum YOLOTask {
+    // YOLO tasks
+    Classify,
+    Detect,
+    Pose,
+    Segment,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub enum OrtEP {
+    // ONNXRuntime execution provider
+    CPU,
+    CUDA(i32),
+    Trt(i32),
+}
+
+#[derive(Debug)]
+pub struct Batch {
+    pub opt: u32,
+    pub min: u32,
+    pub max: u32,
+}
+
+impl Default for Batch {
+    fn default() -> Self {
+        Self {
+            opt: 1,
+            min: 1,
+            max: 1,
+        }
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct OrtInputs {
+    // ONNX model inputs attrs
+    pub shapes: Vec<Vec<i64>>,
+    //pub dtypes: Vec<TensorElementDataType>,
+    pub dtypes: Vec<TensorElementType>,
+    pub names: Vec<String>,
+    pub sizes: Vec<Vec<u32>>,
+}
+
+impl OrtInputs {
+    pub fn new(session: &Session) -> Self {
+        let mut shapes = Vec::new();
+        let mut dtypes = Vec::new();
+        let mut names = Vec::new();
+        for i in session.inputs.iter() {
+            /*             let shape: Vec<i32> = i
+                .dimensions()
+                .map(|x| if let Some(x) = x { x as i32 } else { -1i32 })
+                .collect();
+            shapes.push(shape); */
+            if let ort::ValueType::Tensor { ty, dimensions } = &i.input_type {
+                dtypes.push(ty.clone());
+                let shape = dimensions.clone();
+                shapes.push(shape);
+            } else {
+                panic!("Unsupported input data format, {} - {}", file!(), line!());
+            }
+            //dtypes.push(i.input_type);
+            names.push(i.name.clone());
+        }
+        Self {
+            shapes,
+            dtypes,
+            names,
+            ..Default::default()
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct OrtConfig {
+    // ORT config
+    pub f: String,
+    pub task: Option<YOLOTask>,
+    pub ep: OrtEP,
+    pub trt_fp16: bool,
+    pub batch: Batch,
+    pub image_size: (Option<u32>, Option<u32>),
+}
+
+#[derive(Debug)]
+pub struct OrtBackend {
+    // ORT engine
+    session: Session,
+    task: YOLOTask,
+    ep: OrtEP,
+    batch: Batch,
+    inputs: OrtInputs,
+}
+
+impl OrtBackend {
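+    /// Builds the ONNX Runtime session: resolves batch size and input height/width,
+    /// selects the execution provider (CPU / CUDA / TensorRT) and infers the YOLO task
+    /// from model metadata when it is not given explicitly.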
+    pub fn build(args: OrtConfig) -> Result<Self> {
+        // build env & session
+        // in version 2.x environment is removed
+        /*         let env = ort::EnvironmentBuilder
+        ::with_name("YOLOv8")
+        .build()?
+        .into_arc(); */
+        let sessionbuilder = SessionBuilder::new()?;
+        let session = sessionbuilder.commit_from_file(&args.f)?;
+        //let session = SessionBuilder::new(&env)?.with_model_from_file(&args.f)?;
+
+        // get inputs
+        let mut inputs = OrtInputs::new(&session);
+
+        // batch size
+        let mut batch = args.batch;
+        let batch = if inputs.shapes[0][0] == -1 {
+            batch
+        } else {
+            assert_eq!(
+                inputs.shapes[0][0] as u32, batch.opt,
+                "Expected batch size: {}, got {}. Try using `--batch {}`.",
+                inputs.shapes[0][0] as u32, batch.opt, inputs.shapes[0][0] as u32
+            );
+            batch.opt = inputs.shapes[0][0] as u32;
+            batch
+        };
+
+        // input size: height and width
+        let height = if inputs.shapes[0][2] == -1 {
+            match args.image_size.0 {
+                Some(height) => height,
+                None => panic!("Failed to get model height. Make it explicit with `--height`"),
+            }
+        } else {
+            inputs.shapes[0][2] as u32
+        };
+        let width = if inputs.shapes[0][3] == -1 {
+            match args.image_size.1 {
+                Some(width) => width,
+                None => panic!("Failed to get model width. Make it explicit with `--width`"),
+            }
+        } else {
+            inputs.shapes[0][3] as u32
+        };
+        inputs.sizes.push(vec![height, width]);
+
+        // build provider
+        let (ep, provider) = match args.ep {
+            OrtEP::CUDA(device_id) => Self::set_ep_cuda(device_id),
+            OrtEP::Trt(device_id) => Self::set_ep_trt(device_id, args.trt_fp16, &batch, &inputs),
+            _ => (
+                OrtEP::CPU,
+                ExecutionProviderDispatch::from(CPUExecutionProvider::default()),
+            ),
+        };
+
+        // build session again with the new provider
+        let session = SessionBuilder::new()?
+            // .with_optimization_level(ort::GraphOptimizationLevel::Level3)?
+            .with_execution_providers([provider])?
+            .commit_from_file(args.f)?;
+
+        // task: using given one or guessing
+        let task = match args.task {
+            Some(task) => task,
+            None => match session.metadata() {
+                Err(_) => panic!("No metadata found. Try making the task explicit with `--task`"),
+                Ok(metadata) => match metadata.custom("task") {
+                    Err(_) => panic!("Cannot read custom metadata. Try making the task explicit with `--task`"),
+                    Ok(value) => match value {
+                        None => panic!("No `task` value found in metadata. Make it explicit with `--task`"),
+                        Some(task) => match task.as_str() {
+                            "classify" => YOLOTask::Classify,
+                            "detect" => YOLOTask::Detect,
+                            "pose" => YOLOTask::Pose,
+                            "segment" => YOLOTask::Segment,
+                            x => todo!("{:?} is not supported for now!", x),
+                        },
+                    },
+                },
+            },
+        };
+
+        Ok(Self {
+            session,
+            task,
+            ep,
+            batch,
+            inputs,
+        })
+    }
+
+    pub fn fetch_inputs_from_session(
+        session: &Session,
+    ) -> (Vec<Vec<i64>>, Vec<TensorElementType>, Vec<String>) {
+        // get inputs attrs from ONNX model
+        let mut shapes = Vec::new();
+        let mut dtypes = Vec::new();
+        let mut names = Vec::new();
+        for i in session.inputs.iter() {
+            if let ort::ValueType::Tensor { ty, dimensions } = &i.input_type {
+                dtypes.push(ty.clone());
+                let shape = dimensions.clone();
+                shapes.push(shape);
+            } else {
+                panic!("Unsupported input data format, {} - {}", file!(), line!());
+            }
+            names.push(i.name.clone());
+        }
+        (shapes, dtypes, names)
+    }
+
+    pub fn set_ep_cuda(device_id: i32) -> (OrtEP, ExecutionProviderDispatch) {
+        let cuda_provider = CUDAExecutionProvider::default().with_device_id(device_id);
+        if let Ok(true) = cuda_provider.is_available() {
+            (
+                OrtEP::CUDA(device_id),
+                ExecutionProviderDispatch::from(cuda_provider), //PlantForm::CUDA(cuda_provider)
+            )
+        } else {
+            println!("> CUDA is not available! Using CPU.");
+            (
+                OrtEP::CPU,
+                ExecutionProviderDispatch::from(CPUExecutionProvider::default()), //PlantForm::CPU(CPUExecutionProvider::default())
+            )
+        }
+    }
+
+    pub fn set_ep_trt(
+        device_id: i32,
+        fp16: bool,
+        batch: &Batch,
+        inputs: &OrtInputs,
+    ) -> (OrtEP, ExecutionProviderDispatch) {
+        // set TensorRT
+        let trt_provider = TensorRTExecutionProvider::default().with_device_id(device_id);
+
+        //trt_provider.
+        if let Ok(true) = trt_provider.is_available() {
+            let (height, width) = (inputs.sizes[0][0], inputs.sizes[0][1]);
+            if inputs.dtypes[0] == TensorElementType::Float16 && !fp16 {
+                panic!(
+                    "Dtype mismatch! Expected: Float32, got: {:?}. You should use `--fp16`",
+                    inputs.dtypes[0]
+                );
+            }
+            // dynamic shape: input_tensor_1:dim_1xdim_2x...,input_tensor_2:dim_3xdim_4x...,...
+            let mut opt_string = String::new();
+            let mut min_string = String::new();
+            let mut max_string = String::new();
+            for name in inputs.names.iter() {
+                let s_opt = format!("{}:{}x3x{}x{},", name, batch.opt, height, width);
+                let s_min = format!("{}:{}x3x{}x{},", name, batch.min, height, width);
+                let s_max = format!("{}:{}x3x{}x{},", name, batch.max, height, width);
+                opt_string.push_str(s_opt.as_str());
+                min_string.push_str(s_min.as_str());
+                max_string.push_str(s_max.as_str());
+            }
+            let _ = opt_string.pop();
+            let _ = min_string.pop();
+            let _ = max_string.pop();
+
+            let trt_provider = trt_provider
+                .with_profile_opt_shapes(opt_string)
+                .with_profile_min_shapes(min_string)
+                .with_profile_max_shapes(max_string)
+                .with_fp16(fp16)
+                .with_timing_cache(true);
+            (
+                OrtEP::Trt(device_id),
+                ExecutionProviderDispatch::from(trt_provider),
+            )
+        } else {
+            println!("> TensorRT is not available! Falling back to CUDA...");
+            Self::set_ep_cuda(device_id)
+        }
+    }
+
+    pub fn fetch_from_metadata(&self, key: &str) -> Option<String> {
+        // fetch value from onnx model file by key
+        match self.session.metadata() {
+            Err(_) => None,
+            Ok(metadata) => match metadata.custom(key) {
+                Err(_) => None,
+                Ok(value) => value,
+            },
+        }
+    }
+
+    pub fn run(&self, xs: Array<f32, IxDyn>, profile: bool) -> Result<Vec<Array<f32, IxDyn>>> {
+        // ORT inference
+        match self.dtype() {
+            TensorElementType::Float16 => self.run_fp16(xs, profile),
+            TensorElementType::Float32 => self.run_fp32(xs, profile),
+            _ => todo!(),
+        }
+    }
+
+    pub fn run_fp16(&self, xs: Array<f32, IxDyn>, profile: bool) -> Result<Vec<Array<f32, IxDyn>>> {
+        // f32->f16
+        let t = std::time::Instant::now();
+        let xs = xs.mapv(f16::from_f32);
+        if profile {
+            println!("[ORT f32->f16]: {:?}", t.elapsed());
+        }
+
+        // h2d
+        let t = std::time::Instant::now();
+        let xs = CowArray::from(xs);
+        if profile {
+            println!("[ORT H2D]: {:?}", t.elapsed());
+        }
+
+        // run
+        let t = std::time::Instant::now();
+        let ys = self.session.run(ort::inputs![xs.view()]?)?;
+        if profile {
+            println!("[ORT Inference]: {:?}", t.elapsed());
+        }
+
+        // d2h
+        Ok(ys
+            .iter()
+            .map(|(_k, v)| {
+                // d2h
+                let t = std::time::Instant::now();
+                let v = v.try_extract_tensor().unwrap();
+                //let v = v.try_extract::<_>().unwrap().view().clone().into_owned();
+                if profile {
+                    println!("[ORT D2H]: {:?}", t.elapsed());
+                }
+
+                // f16->f32
+                let t_ = std::time::Instant::now();
+                let v = v.mapv(f16::to_f32);
+                if profile {
+                    println!("[ORT f16->f32]: {:?}", t_.elapsed());
+                }
+                v
+            })
+            .collect::<Vec<Array<_, _>>>())
+    }
+
+    pub fn run_fp32(&self, xs: Array<f32, IxDyn>, profile: bool) -> Result<Vec<Array<f32, IxDyn>>> {
+        // h2d
+        let t = std::time::Instant::now();
+        let xs = CowArray::from(xs);
+        if profile {
+            println!("[ORT H2D]: {:?}", t.elapsed());
+        }
+
+        // run
+        let t = std::time::Instant::now();
+        let ys = self.session.run(ort::inputs![xs.view()]?)?;
+        if profile {
+            println!("[ORT Inference]: {:?}", t.elapsed());
+        }
+
+        // d2h
+        Ok(ys
+            .iter()
+            .map(|(_k, v)| {
+                let t = std::time::Instant::now();
+                let v = v.try_extract_tensor::<f32>().unwrap().into_owned();
+                //let x = x.try_extract::<_>().unwrap().view().clone().into_owned();
+                if profile {
+                    println!("[ORT D2H]: {:?}", t.elapsed());
+                }
+                v
+            })
+            .collect::<Vec<Array<_, _>>>())
+    }
+
+    pub fn output_shapes(&self) -> Vec<Vec<i64>> {
+        let mut shapes = Vec::new();
+        for output in &self.session.outputs {
+            if let ValueType::Tensor { ty: _, dimensions } = &output.output_type {
+                let shape = dimensions.clone();
+                shapes.push(shape);
+            } else {
+                panic!("Unsupported output data format, {} - {}", file!(), line!());
+            }
+        }
+        shapes
+    }
+
+    pub fn output_dtypes(&self) -> Vec<TensorElementType> {
+        let mut dtypes = Vec::new();
+        for output in &self.session.outputs {
+            if let ValueType::Tensor { ty, dimensions: _ } = &output.output_type {
+                dtypes.push(ty.clone());
+            } else {
+                panic!("Unsupported output data format, {} - {}", file!(), line!());
+            }
+        }
+        dtypes
+    }
+
+    pub fn input_shapes(&self) -> &Vec<Vec<i64>> {
+        &self.inputs.shapes
+    }
+
+    pub fn input_names(&self) -> &Vec<String> {
+        &self.inputs.names
+    }
+
+    pub fn input_dtypes(&self) -> &Vec<TensorElementType> {
+        &self.inputs.dtypes
+    }
+
+    pub fn dtype(&self) -> TensorElementType {
+        self.input_dtypes()[0]
+    }
+
+    pub fn height(&self) -> u32 {
+        self.inputs.sizes[0][0]
+    }
+
+    pub fn width(&self) -> u32 {
+        self.inputs.sizes[0][1]
+    }
+
+    pub fn is_height_dynamic(&self) -> bool {
+        self.input_shapes()[0][2] == -1
+    }
+
+    pub fn is_width_dynamic(&self) -> bool {
+        self.input_shapes()[0][3] == -1
+    }
+
+    pub fn batch(&self) -> u32 {
+        self.batch.opt
+    }
+
+    pub fn is_batch_dynamic(&self) -> bool {
+        self.input_shapes()[0][0] == -1
+    }
+
+    pub fn ep(&self) -> &OrtEP {
+        &self.ep
+    }
+
+    pub fn task(&self) -> YOLOTask {
+        self.task.clone()
+    }
+
+    pub fn names(&self) -> Option<Vec<String>> {
+        // class names, metadata parsing
+        // String format: `{0: 'person', 1: 'bicycle', 2: 'sports ball', ..., 27: "yellow_lady's_slipper"}`
+        match self.fetch_from_metadata("names") {
+            Some(names) => {
+                let re = Regex::new(r#"(['"])([-()\w '"]+)(['"])"#).unwrap();
+                let mut names_ = vec![];
+                for (_, [_, name, _]) in re.captures_iter(&names).map(|x| x.extract()) {
+                    names_.push(name.to_string());
+                }
+                Some(names_)
+            }
+            None => None,
+        }
+    }
+
+    pub fn nk(&self) -> Option<u32> {
+        // num_keypoints, parsed from the `kpt_shape` metadata string in the ONNX model, e.g. `[17, 3]`
+        match self.fetch_from_metadata("kpt_shape") {
+            None => None,
+            Some(kpt_string) => {
+                let re = Regex::new(r"([0-9]+), ([0-9]+)").unwrap();
+                let caps = re.captures(&kpt_string).unwrap();
+                Some(caps.get(1).unwrap().as_str().parse::<u32>().unwrap())
+            }
+        }
+    }
+
+    pub fn nc(&self) -> Option<u32> {
+        // num_classes
+        match self.names() {
+            // by names
+            Some(names) => Some(names.len() as u32),
+            None => match self.task() {
+                // by task calculation
+                YOLOTask::Classify => Some(self.output_shapes()[0][1] as u32),
+                YOLOTask::Detect => {
+                    if self.output_shapes()[0][1] == -1 {
+                        None
+                    } else {
+                        // cxcywh + classes
+                        Some(self.output_shapes()[0][1] as u32 - 4)
+                    }
+                }
+                YOLOTask::Pose => {
+                    match self.nk() {
+                        None => None,
+                        Some(nk) => {
+                            if self.output_shapes()[0][1] == -1 {
+                                None
+                            } else {
+                                // cxcywh + classes + 3*nk keypoints
+                                Some(self.output_shapes()[0][1] as u32 - 4 - 3 * nk)
+                            }
+                        }
+                    }
+                }
+                YOLOTask::Segment => {
+                    if self.output_shapes()[0][1] == -1 {
+                        None
+                    } else {
+                        // cxcywh + classes + nm mask coefficients
+                        Some((self.output_shapes()[0][1] - self.output_shapes()[1][1]) as u32 - 4)
+                    }
+                }
+            },
+        }
+    }
+
+    pub fn nm(&self) -> Option<u32> {
+        // num_masks
+        match self.task() {
+            YOLOTask::Segment => Some(self.output_shapes()[1][1] as u32),
+            _ => None,
+        }
+    }
+
+    pub fn na(&self) -> Option<u32> {
+        // num_anchors
+        match self.task() {
+            YOLOTask::Segment | YOLOTask::Detect | YOLOTask::Pose => {
+                if self.output_shapes()[0][2] == -1 {
+                    None
+                } else {
+                    Some(self.output_shapes()[0][2] as u32)
+                }
+            }
+            _ => None,
+        }
+    }
+
+    pub fn author(&self) -> Option<String> {
+        self.fetch_from_metadata("author")
+    }
+
+    pub fn version(&self) -> Option<String> {
+        self.fetch_from_metadata("version")
+    }
+}

+ 235 - 0
examples/YOLOv8-ONNXRuntime-Rust/src/yolo_result.rs

@@ -0,0 +1,235 @@
+use ndarray::{Array, Axis, IxDyn};
+
+#[derive(Clone, PartialEq, Default)]
+pub struct YOLOResult {
+    // YOLO tasks results of an image
+    pub probs: Option<Embedding>,
+    pub bboxes: Option<Vec<Bbox>>,
+    pub keypoints: Option<Vec<Vec<Point2>>>,
+    pub masks: Option<Vec<Vec<u8>>>,
+}
+
+impl std::fmt::Debug for YOLOResult {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("YOLOResult")
+            .field(
+                "Probs(top5)",
+                &format_args!("{:?}", self.probs().map(|probs| probs.topk(5))),
+            )
+            .field("Bboxes", &self.bboxes)
+            .field("Keypoints", &self.keypoints)
+            .field(
+                "Masks",
+                &format_args!("{:?}", self.masks().map(|masks| masks.len())),
+            )
+            .finish()
+    }
+}
+
+impl YOLOResult {
+    pub fn new(
+        probs: Option<Embedding>,
+        bboxes: Option<Vec<Bbox>>,
+        keypoints: Option<Vec<Vec<Point2>>>,
+        masks: Option<Vec<Vec<u8>>>,
+    ) -> Self {
+        Self {
+            probs,
+            bboxes,
+            keypoints,
+            masks,
+        }
+    }
+
+    pub fn probs(&self) -> Option<&Embedding> {
+        self.probs.as_ref()
+    }
+
+    pub fn keypoints(&self) -> Option<&Vec<Vec<Point2>>> {
+        self.keypoints.as_ref()
+    }
+
+    pub fn masks(&self) -> Option<&Vec<Vec<u8>>> {
+        self.masks.as_ref()
+    }
+
+    pub fn bboxes(&self) -> Option<&Vec<Bbox>> {
+        self.bboxes.as_ref()
+    }
+
+    pub fn bboxes_mut(&mut self) -> Option<&mut Vec<Bbox>> {
+        self.bboxes.as_mut()
+    }
+}
+
+#[derive(Debug, PartialEq, Clone, Default)]
+pub struct Point2 {
+    // A point2d with x, y, conf
+    x: f32,
+    y: f32,
+    confidence: f32,
+}
+
+impl Point2 {
+    pub fn new_with_conf(x: f32, y: f32, confidence: f32) -> Self {
+        Self { x, y, confidence }
+    }
+
+    pub fn new(x: f32, y: f32) -> Self {
+        Self {
+            x,
+            y,
+            ..Default::default()
+        }
+    }
+
+    pub fn x(&self) -> f32 {
+        self.x
+    }
+
+    pub fn y(&self) -> f32 {
+        self.y
+    }
+
+    pub fn confidence(&self) -> f32 {
+        self.confidence
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Default)]
+pub struct Embedding {
+    // A float32 n-dimensional tensor
+    data: Array<f32, IxDyn>,
+}
+
+impl Embedding {
+    pub fn new(data: Array<f32, IxDyn>) -> Self {
+        Self { data }
+    }
+
+    pub fn data(&self) -> &Array<f32, IxDyn> {
+        &self.data
+    }
+
+    pub fn topk(&self, k: usize) -> Vec<(usize, f32)> {
+        let mut probs = self
+            .data
+            .iter()
+            .enumerate()
+            .map(|(a, b)| (a, *b))
+            .collect::<Vec<_>>();
+        probs.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
+        let mut topk = Vec::new();
+        for &(id, confidence) in probs.iter().take(k) {
+            topk.push((id, confidence));
+        }
+        topk
+    }
+
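+    /// L2-normalizes the embedding along its first axis.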
+    pub fn norm(&self) -> Array<f32, IxDyn> {
+        let std_ = self.data.mapv(|x| x * x).sum_axis(Axis(0)).mapv(f32::sqrt);
+        self.data.clone() / std_
+    }
+
+    pub fn top1(&self) -> (usize, f32) {
+        self.topk(1)[0]
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Default)]
+pub struct Bbox {
+    // a bounding box around an object
+    xmin: f32,
+    ymin: f32,
+    width: f32,
+    height: f32,
+    id: usize,
+    confidence: f32,
+}
+
+impl Bbox {
+    pub fn new_from_xywh(xmin: f32, ymin: f32, width: f32, height: f32) -> Self {
+        Self {
+            xmin,
+            ymin,
+            width,
+            height,
+            ..Default::default()
+        }
+    }
+
+    pub fn new(xmin: f32, ymin: f32, width: f32, height: f32, id: usize, confidence: f32) -> Self {
+        Self {
+            xmin,
+            ymin,
+            width,
+            height,
+            id,
+            confidence,
+        }
+    }
+
+    pub fn width(&self) -> f32 {
+        self.width
+    }
+
+    pub fn height(&self) -> f32 {
+        self.height
+    }
+
+    pub fn xmin(&self) -> f32 {
+        self.xmin
+    }
+
+    pub fn ymin(&self) -> f32 {
+        self.ymin
+    }
+
+    pub fn xmax(&self) -> f32 {
+        self.xmin + self.width
+    }
+
+    pub fn ymax(&self) -> f32 {
+        self.ymin + self.height
+    }
+
+    pub fn tl(&self) -> Point2 {
+        Point2::new(self.xmin, self.ymin)
+    }
+
+    pub fn br(&self) -> Point2 {
+        Point2::new(self.xmax(), self.ymax())
+    }
+
+    pub fn cxcy(&self) -> Point2 {
+        Point2::new(self.xmin + self.width / 2., self.ymin + self.height / 2.)
+    }
+
+    pub fn id(&self) -> usize {
+        self.id
+    }
+
+    pub fn confidence(&self) -> f32 {
+        self.confidence
+    }
+
+    pub fn area(&self) -> f32 {
+        self.width * self.height
+    }
+
+    pub fn intersection_area(&self, another: &Bbox) -> f32 {
+        let l = self.xmin.max(another.xmin);
+        let r = (self.xmin + self.width).min(another.xmin + another.width);
+        let t = self.ymin.max(another.ymin);
+        let b = (self.ymin + self.height).min(another.ymin + another.height);
+        (r - l + 1.).max(0.) * (b - t + 1.).max(0.)
+    }
+
+    pub fn union(&self, another: &Bbox) -> f32 {
+        self.area() + another.area() - self.intersection_area(another)
+    }
+
+    pub fn iou(&self, another: &Bbox) -> f32 {
+        self.intersection_area(another) / self.union(another)
+    }
+}

+ 43 - 0
examples/YOLOv8-ONNXRuntime/README.md

@@ -0,0 +1,43 @@
+# YOLOv8 - ONNX Runtime
+
+This project implements YOLOv8 using ONNX Runtime.
+
+## Installation
+
+To run this project, you need to install the required dependencies. The following instructions will guide you through the installation process.
+
+### Installing Required Dependencies
+
+You can install the required dependencies by running the following command:
+
+```bash
+pip install -r requirements.txt
+```
+
+### Installing `onnxruntime-gpu`
+
+If you have an NVIDIA GPU and want to leverage GPU acceleration, you can install the `onnxruntime-gpu` package using the following command:
+
+```bash
+pip install onnxruntime-gpu
+```
+
+Note: Make sure you have the appropriate GPU drivers installed on your system.
+
+### Installing `onnxruntime` (CPU version)
+
+If you don't have an NVIDIA GPU or prefer to run on the CPU, you can install the `onnxruntime` package using the following command:
+
+```bash
+pip install onnxruntime
+```
+
+## Usage
+
+After successfully installing the required packages, you can run the YOLOv8 implementation using the following command:
+
+```bash
+python main.py --model yolov8n.onnx --img image.jpg --conf-thres 0.5 --iou-thres 0.5
+```
+
+Make sure to replace `yolov8n.onnx` with the path to your YOLOv8 ONNX model file and `image.jpg` with the path to your input image, and adjust the confidence threshold (`--conf-thres`) and IoU threshold (`--iou-thres`) values as needed.
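+
+To call the example programmatically instead, a minimal sketch using the `YOLOv8` class defined in this example's `main.py` (the file paths and thresholds below are placeholders) could look like this:
+
+```python
+import cv2
+
+from main import YOLOv8  # class defined in this example's main.py
+
+# Build the detector and run inference; main() returns the annotated image.
+detector = YOLOv8("yolov8n.onnx", "image.jpg", confidence_thres=0.5, iou_thres=0.5)
+output_image = detector.main()
+
+cv2.imwrite("output.jpg", output_image)
+```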

+ 229 - 0
examples/YOLOv8-ONNXRuntime/main.py

@@ -0,0 +1,229 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+
+import cv2
+import numpy as np
+import onnxruntime as ort
+import torch
+
+from ultralytics.utils import ASSETS, yaml_load
+from ultralytics.utils.checks import check_requirements, check_yaml
+
+
+class YOLOv8:
+    """YOLOv8 object detection model class for handling inference and visualization."""
+
+    def __init__(self, onnx_model, input_image, confidence_thres, iou_thres):
+        """
+        Initializes an instance of the YOLOv8 class.
+
+        Args:
+            onnx_model: Path to the ONNX model.
+            input_image: Path to the input image.
+            confidence_thres: Confidence threshold for filtering detections.
+            iou_thres: IoU (Intersection over Union) threshold for non-maximum suppression.
+        """
+        self.onnx_model = onnx_model
+        self.input_image = input_image
+        self.confidence_thres = confidence_thres
+        self.iou_thres = iou_thres
+
+        # Load the class names from the COCO dataset
+        self.classes = yaml_load(check_yaml("coco8.yaml"))["names"]
+
+        # Generate a color palette for the classes
+        self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))
+
+    def draw_detections(self, img, box, score, class_id):
+        """
+        Draws bounding boxes and labels on the input image based on the detected objects.
+
+        Args:
+            img: The input image to draw detections on.
+            box: Detected bounding box.
+            score: Corresponding detection score.
+            class_id: Class ID for the detected object.
+
+        Returns:
+            None
+        """
+        # Extract the coordinates of the bounding box
+        x1, y1, w, h = box
+
+        # Retrieve the color for the class ID
+        color = self.color_palette[class_id]
+
+        # Draw the bounding box on the image
+        cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
+
+        # Create the label text with class name and score
+        label = f"{self.classes[class_id]}: {score:.2f}"
+
+        # Calculate the dimensions of the label text
+        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
+
+        # Calculate the position of the label text
+        label_x = x1
+        label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
+
+        # Draw a filled rectangle as the background for the label text
+        cv2.rectangle(
+            img, (label_x, label_y - label_height), (label_x + label_width, label_y + label_height), color, cv2.FILLED
+        )
+
+        # Draw the label text on the image
+        cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
+
+    def preprocess(self):
+        """
+        Preprocesses the input image before performing inference.
+
+        Returns:
+            image_data: Preprocessed image data ready for inference.
+        """
+        # Read the input image using OpenCV
+        self.img = cv2.imread(self.input_image)
+
+        # Get the height and width of the input image
+        self.img_height, self.img_width = self.img.shape[:2]
+
+        # Convert the image color space from BGR to RGB
+        img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
+
+        # Resize the image to match the input shape
+        img = cv2.resize(img, (self.input_width, self.input_height))
+
+        # Normalize the image data by dividing it by 255.0
+        image_data = np.array(img) / 255.0
+
+        # Transpose the image to have the channel dimension as the first dimension
+        image_data = np.transpose(image_data, (2, 0, 1))  # Channel first
+
+        # Expand the dimensions of the image data to match the expected input shape
+        image_data = np.expand_dims(image_data, axis=0).astype(np.float32)
+
+        # Return the preprocessed image data
+        return image_data
+
+    def postprocess(self, input_image, output):
+        """
+        Performs post-processing on the model's output to extract bounding boxes, scores, and class IDs.
+
+        Args:
+            input_image (numpy.ndarray): The input image.
+            output (numpy.ndarray): The output of the model.
+
+        Returns:
+            numpy.ndarray: The input image with detections drawn on it.
+        """
+        # Transpose and squeeze the output to match the expected shape
+        outputs = np.transpose(np.squeeze(output[0]))
+
+        # Get the number of rows in the outputs array
+        rows = outputs.shape[0]
+
+        # Lists to store the bounding boxes, scores, and class IDs of the detections
+        boxes = []
+        scores = []
+        class_ids = []
+
+        # Calculate the scaling factors for the bounding box coordinates
+        x_factor = self.img_width / self.input_width
+        y_factor = self.img_height / self.input_height
+
+        # Iterate over each row in the outputs array
+        for i in range(rows):
+            # Extract the class scores from the current row
+            classes_scores = outputs[i][4:]
+
+            # Find the maximum score among the class scores
+            max_score = np.amax(classes_scores)
+
+            # If the maximum score is above the confidence threshold
+            if max_score >= self.confidence_thres:
+                # Get the class ID with the highest score
+                class_id = np.argmax(classes_scores)
+
+                # Extract the bounding box coordinates from the current row
+                x, y, w, h = outputs[i][0], outputs[i][1], outputs[i][2], outputs[i][3]
+
+                # Calculate the scaled coordinates of the bounding box
+                left = int((x - w / 2) * x_factor)
+                top = int((y - h / 2) * y_factor)
+                width = int(w * x_factor)
+                height = int(h * y_factor)
+
+                # Add the class ID, score, and box coordinates to the respective lists
+                class_ids.append(class_id)
+                scores.append(max_score)
+                boxes.append([left, top, width, height])
+
+        # Apply non-maximum suppression to filter out overlapping bounding boxes
+        indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)
+
+        # Iterate over the selected indices after non-maximum suppression
+        for i in indices:
+            # Get the box, score, and class ID corresponding to the index
+            box = boxes[i]
+            score = scores[i]
+            class_id = class_ids[i]
+
+            # Draw the detection on the input image
+            self.draw_detections(input_image, box, score, class_id)
+
+        # Return the modified input image
+        return input_image
+
+    def main(self):
+        """
+        Performs inference using an ONNX model and returns the output image with drawn detections.
+
+        Returns:
+            output_img: The output image with drawn detections.
+        """
+        # Create an inference session using the ONNX model and specify execution providers
+        session = ort.InferenceSession(self.onnx_model, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
+
+        # Get the model inputs
+        model_inputs = session.get_inputs()
+
+        # Store the shape of the input for later use
+        input_shape = model_inputs[0].shape
+        self.input_width = input_shape[2]
+        self.input_height = input_shape[3]
+
+        # Preprocess the image data
+        img_data = self.preprocess()
+
+        # Run inference using the preprocessed image data
+        outputs = session.run(None, {model_inputs[0].name: img_data})
+
+        # Perform post-processing on the outputs to obtain output image.
+        return self.postprocess(self.img, outputs)  # output image
+
+
+if __name__ == "__main__":
+    # Create an argument parser to handle command-line arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model", type=str, default="yolov8n.onnx", help="Input your ONNX model.")
+    parser.add_argument("--img", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image.")
+    parser.add_argument("--conf-thres", type=float, default=0.5, help="Confidence threshold")
+    parser.add_argument("--iou-thres", type=float, default=0.5, help="NMS IoU threshold")
+    args = parser.parse_args()
+
+    # Check the requirements and select the appropriate backend (CPU or GPU)
+    check_requirements("onnxruntime-gpu" if torch.cuda.is_available() else "onnxruntime")
+
+    # Create an instance of the YOLOv8 class with the specified arguments
+    detection = YOLOv8(args.model, args.img, args.conf_thres, args.iou_thres)
+
+    # Perform object detection and obtain the output image
+    output_image = detection.main()
+
+    # Display the output image in a window
+    cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
+    cv2.imshow("Output", output_image)
+
+    # Wait for a key press to exit
+    cv2.waitKey(0)

+ 19 - 0
examples/YOLOv8-OpenCV-ONNX-Python/README.md

@@ -0,0 +1,19 @@
+# YOLOv8 - OpenCV
+
+Implementation of YOLOv8 inference with OpenCV using the ONNX format.
+
+Simply clone and run:
+
+```bash
+pip install -r requirements.txt
+python main.py --model yolov8n.onnx --img image.jpg
+```
+
+If you are starting from scratch, export the ONNX model first:
+
+```bash
+pip install ultralytics
+yolo export model=yolov8n.pt imgsz=640 format=onnx opset=12
+```
+
+_\*Make sure to include "opset=12"_
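+
+If you prefer exporting from Python instead of the CLI, a minimal sketch using the `ultralytics` package (assuming `yolov8n.pt` is available locally or downloadable) is:
+
+```python
+from ultralytics import YOLO
+
+# Export the PyTorch weights to ONNX with the opset this example expects.
+model = YOLO("yolov8n.pt")
+model.export(format="onnx", imgsz=640, opset=12)
+```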

+ 130 - 0
examples/YOLOv8-OpenCV-ONNX-Python/main.py

@@ -0,0 +1,130 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+
+import cv2.dnn
+import numpy as np
+
+from ultralytics.utils import ASSETS, yaml_load
+from ultralytics.utils.checks import check_yaml
+
+CLASSES = yaml_load(check_yaml("coco8.yaml"))["names"]
+colors = np.random.uniform(0, 255, size=(len(CLASSES), 3))
+
+
+def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
+    """
+    Draws bounding boxes on the input image based on the provided arguments.
+
+    Args:
+        img (numpy.ndarray): The input image to draw the bounding box on.
+        class_id (int): Class ID of the detected object.
+        confidence (float): Confidence score of the detected object.
+        x (int): X-coordinate of the top-left corner of the bounding box.
+        y (int): Y-coordinate of the top-left corner of the bounding box.
+        x_plus_w (int): X-coordinate of the bottom-right corner of the bounding box.
+        y_plus_h (int): Y-coordinate of the bottom-right corner of the bounding box.
+    """
+    label = f"{CLASSES[class_id]} ({confidence:.2f})"
+    color = colors[class_id]
+    cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
+    cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
+
+def main(onnx_model, input_image):
+    """
+    Main function to load ONNX model, perform inference, draw bounding boxes, and display the output image.
+
+    Args:
+        onnx_model (str): Path to the ONNX model.
+        input_image (str): Path to the input image.
+
+    Returns:
+        list: List of dictionaries containing detection information such as class_id, class_name, confidence, etc.
+    """
+    # Load the ONNX model
+    model: cv2.dnn.Net = cv2.dnn.readNetFromONNX(onnx_model)
+
+    # Read the input image
+    original_image: np.ndarray = cv2.imread(input_image)
+    [height, width, _] = original_image.shape
+
+    # Prepare a square image for inference
+    length = max((height, width))
+    image = np.zeros((length, length, 3), np.uint8)
+    image[0:height, 0:width] = original_image
+
+    # Calculate scale factor
+    scale = length / 640
+
+    # Preprocess the image and prepare blob for model
+    blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(640, 640), swapRB=True)
+    model.setInput(blob)
+
+    # Perform inference
+    outputs = model.forward()
+
+    # Prepare output array
+    outputs = np.array([cv2.transpose(outputs[0])])
+    rows = outputs.shape[1]
+
+    boxes = []
+    scores = []
+    class_ids = []
+
+    # Iterate through output to collect bounding boxes, confidence scores, and class IDs
+    for i in range(rows):
+        classes_scores = outputs[0][i][4:]
+        (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores)
+        if maxScore >= 0.25:
+            box = [
+                outputs[0][i][0] - (0.5 * outputs[0][i][2]),
+                outputs[0][i][1] - (0.5 * outputs[0][i][3]),
+                outputs[0][i][2],
+                outputs[0][i][3],
+            ]
+            boxes.append(box)
+            scores.append(maxScore)
+            class_ids.append(maxClassIndex)
+
+    # Apply NMS (Non-maximum suppression)
+    result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45, 0.5)
+
+    detections = []
+
+    # Iterate through NMS results to draw bounding boxes and labels
+    for i in range(len(result_boxes)):
+        index = result_boxes[i]
+        box = boxes[index]
+        detection = {
+            "class_id": class_ids[index],
+            "class_name": CLASSES[class_ids[index]],
+            "confidence": scores[index],
+            "box": box,
+            "scale": scale,
+        }
+        detections.append(detection)
+        draw_bounding_box(
+            original_image,
+            class_ids[index],
+            scores[index],
+            round(box[0] * scale),
+            round(box[1] * scale),
+            round((box[0] + box[2]) * scale),
+            round((box[1] + box[3]) * scale),
+        )
+
+    # Display the image with bounding boxes
+    cv2.imshow("image", original_image)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()
+
+    return detections
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model", default="yolov8n.onnx", help="Input your ONNX model.")
+    parser.add_argument("--img", default=str(ASSETS / "bus.jpg"), help="Path to input image.")
+    args = parser.parse_args()
+    main(args.model, args.img)

+ 21 - 0
examples/YOLOv8-OpenVINO-CPP-Inference/CMakeLists.txt

@@ -0,0 +1,21 @@
+cmake_minimum_required(VERSION 3.12)
+project(yolov8_openvino_example)
+
+set(CMAKE_CXX_STANDARD 14)
+
+find_package(OpenCV REQUIRED)
+
+include_directories(
+	${OpenCV_INCLUDE_DIRS}
+	/path/to/intel/openvino/runtime/include
+)
+
+add_executable(detect 
+	main.cc
+	inference.cc
+)
+
+target_link_libraries(detect
+	${OpenCV_LIBS}
+	/path/to/intel/openvino/runtime/lib/intel64/libopenvino.so
+)

+ 69 - 0
examples/YOLOv8-OpenVINO-CPP-Inference/README.md

@@ -0,0 +1,69 @@
+# YOLOv8 OpenVINO Inference in C++ 🦾
+
+Welcome to the YOLOv8 OpenVINO Inference example in C++! This guide will help you get started with leveraging the powerful YOLOv8 models using the OpenVINO and OpenCV APIs in your C++ projects. Whether you're looking to enhance performance or add flexibility to your applications, this example has you covered.
+
+## 🌟 Features
+
+- 🚀 **Model Format Support**: Compatible with `ONNX` and `OpenVINO IR` formats.
+- ⚡ **Precision Options**: Run models in `FP32`, `FP16`, and `INT8` precisions.
+- 🔄 **Dynamic Shape Loading**: Easily handle models with dynamic input shapes.
+
+## 📋 Dependencies
+
+To ensure smooth execution, please make sure you have the following dependencies installed:
+
+| Dependency | Version  |
+| ---------- | -------- |
+| OpenVINO   | >=2023.3 |
+| OpenCV     | >=4.5.0  |
+| C++        | >=14     |
+| CMake      | >=3.12.0 |
+
+## ⚙️ Build Instructions
+
+Follow these steps to build the project:
+
+1. Clone the repository:
+
+   ```bash
+   git clone https://github.com/ultralytics/ultralytics.git
+   cd ultralytics/examples/YOLOv8-OpenVINO-CPP-Inference
+   ```
+
+2. Create a build directory and compile the project:
+   ```bash
+   mkdir build
+   cd build
+   cmake ..
+   make
+   ```
+
+## 🛠️ Usage
+
+Once built, you can run inference on an image using the following command:
+
+```bash
+./detect <model_path.{onnx, xml}> <image_path.jpg>
+```
+
+## 🔄 Exporting YOLOv8 Models
+
+To use your YOLOv8 model with OpenVINO, you need to export it first. Use the command below to export the model:
+
+```bash
+yolo export model=yolov8s.pt imgsz=640 format=openvino
+```
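+
+If you prefer Python, the same export can be done through the `ultralytics` API; a minimal sketch mirroring the command above (the model name is just an example):
+
+```python
+from ultralytics import YOLO
+
+# Load the PyTorch weights and export an OpenVINO IR alongside them
+model = YOLO("yolov8s.pt")
+model.export(format="openvino", imgsz=640)
+```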
+
+## 📸 Screenshots
+
+### Running Using OpenVINO Model
+
+![Running OpenVINO Model](https://github.com/ultralytics/ultralytics/assets/76827698/2d7cf201-3def-4357-824c-12446ccf85a9)
+
+### Running Using ONNX Model
+
+![Running ONNX Model](https://github.com/ultralytics/ultralytics/assets/76827698/9b90031c-cc81-4cfb-8b34-c619e09035a7)
+
+## ❤️ Contributions
+
+We hope this example helps you integrate YOLOv8 with OpenVINO and OpenCV into your C++ projects effortlessly. Happy coding! 🚀

+ 175 - 0
examples/YOLOv8-OpenVINO-CPP-Inference/inference.cc

@@ -0,0 +1,175 @@
+#include "inference.h"
+
+#include <memory>
+#include <opencv2/dnn.hpp>
+#include <random>
+
+namespace yolo {
+
+// Constructor to initialize the model with default input shape
+Inference::Inference(const std::string &model_path, const float &model_confidence_threshold, const float &model_NMS_threshold) {
+	model_input_shape_ = cv::Size(640, 640); // Set the default size for models with dynamic shapes to prevent errors.
+	model_confidence_threshold_ = model_confidence_threshold;
+	model_NMS_threshold_ = model_NMS_threshold;
+	InitializeModel(model_path);
+}
+
+// Constructor to initialize the model with specified input shape
+Inference::Inference(const std::string &model_path, const cv::Size model_input_shape, const float &model_confidence_threshold, const float &model_NMS_threshold) {
+	model_input_shape_ = model_input_shape;
+	model_confidence_threshold_ = model_confidence_threshold;
+	model_NMS_threshold_ = model_NMS_threshold;
+	InitializeModel(model_path);
+}
+
+void Inference::InitializeModel(const std::string &model_path) {
+	ov::Core core; // OpenVINO core object
+	std::shared_ptr<ov::Model> model = core.read_model(model_path); // Read the model from file
+
+	// If the model has dynamic shapes, reshape it to the specified input shape
+	if (model->is_dynamic()) {
+		model->reshape({1, 3, static_cast<long int>(model_input_shape_.height), static_cast<long int>(model_input_shape_.width)});
+	}
+
+	// Preprocessing setup for the model
+	ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
+	ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::BGR);
+	ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255});
+	ppp.input().model().set_layout("NCHW");
+	ppp.output().tensor().set_element_type(ov::element::f32);
+	model = ppp.build(); // Build the preprocessed model
+
+	// Compile the model for inference
+	compiled_model_ = core.compile_model(model, "AUTO");
+	inference_request_ = compiled_model_.create_infer_request(); // Create inference request
+
+	short width, height;
+
+	// Get input shape from the model
+	const std::vector<ov::Output<ov::Node>> inputs = model->inputs();
+	const ov::Shape input_shape = inputs[0].get_shape();
+	height = input_shape[1];
+	width = input_shape[2];
+	model_input_shape_ = cv::Size2f(width, height);
+
+	// Get output shape from the model
+	const std::vector<ov::Output<ov::Node>> outputs = model->outputs();
+	const ov::Shape output_shape = outputs[0].get_shape();
+	height = output_shape[1];
+	width = output_shape[2];
+	model_output_shape_ = cv::Size(width, height);
+}
+
+// Method to run inference on an input frame
+void Inference::RunInference(cv::Mat &frame) {
+	Preprocessing(frame); // Preprocess the input frame
+	inference_request_.infer(); // Run inference
+	PostProcessing(frame); // Postprocess the inference results
+}
+
+// Method to preprocess the input frame
+void Inference::Preprocessing(const cv::Mat &frame) {
+	cv::Mat resized_frame;
+	cv::resize(frame, resized_frame, model_input_shape_, 0, 0, cv::INTER_AREA); // Resize the frame to match the model input shape
+
+	// Calculate scaling factor
+	scale_factor_.x = static_cast<float>(frame.cols / model_input_shape_.width);
+	scale_factor_.y = static_cast<float>(frame.rows / model_input_shape_.height);
+
+	float *input_data = (float *)resized_frame.data; // Get pointer to resized frame data
+	const ov::Tensor input_tensor = ov::Tensor(compiled_model_.input().get_element_type(), compiled_model_.input().get_shape(), input_data); // Create input tensor
+	inference_request_.set_input_tensor(input_tensor); // Set input tensor for inference
+}
+
+// Method to postprocess the inference results
+void Inference::PostProcessing(cv::Mat &frame) {
+	std::vector<int> class_list;
+	std::vector<float> confidence_list;
+	std::vector<cv::Rect> box_list;
+
+	// Get the output tensor from the inference request
+	const float *detections = inference_request_.get_output_tensor().data<const float>();
+	const cv::Mat detection_outputs(model_output_shape_, CV_32F, (float *)detections); // Create OpenCV matrix from output tensor
+
+	// Iterate over detections and collect class IDs, confidence scores, and bounding boxes
+	for (int i = 0; i < detection_outputs.cols; ++i) {
+		const cv::Mat classes_scores = detection_outputs.col(i).rowRange(4, detection_outputs.rows);
+
+		cv::Point class_id;
+		double score;
+		cv::minMaxLoc(classes_scores, nullptr, &score, nullptr, &class_id); // Find the class with the highest score
+
+		// Check if the detection meets the confidence threshold
+		if (score > model_confidence_threshold_) {
+			class_list.push_back(class_id.y);
+			confidence_list.push_back(score);
+
+			const float x = detection_outputs.at<float>(0, i);
+			const float y = detection_outputs.at<float>(1, i);
+			const float w = detection_outputs.at<float>(2, i);
+			const float h = detection_outputs.at<float>(3, i);
+
+			cv::Rect box;
+			box.x = static_cast<int>(x);
+			box.y = static_cast<int>(y);
+			box.width = static_cast<int>(w);
+			box.height = static_cast<int>(h);
+			box_list.push_back(box);
+		}
+	}
+
+	// Apply Non-Maximum Suppression (NMS) to filter overlapping bounding boxes
+	std::vector<int> NMS_result;
+	cv::dnn::NMSBoxes(box_list, confidence_list, model_confidence_threshold_, model_NMS_threshold_, NMS_result);
+
+	// Collect final detections after NMS
+	for (int i = 0; i < NMS_result.size(); ++i) {
+		Detection result;
+		const unsigned short id = NMS_result[i];
+
+		result.class_id = class_list[id];
+		result.confidence = confidence_list[id];
+		result.box = GetBoundingBox(box_list[id]);
+
+		DrawDetectedObject(frame, result);
+	}
+}
+
+// Method to get the bounding box in the correct scale
+cv::Rect Inference::GetBoundingBox(const cv::Rect &src) const {
+	cv::Rect box = src;
+	box.x = (box.x - box.width / 2) * scale_factor_.x;
+	box.y = (box.y - box.height / 2) * scale_factor_.y;
+	box.width *= scale_factor_.x;
+	box.height *= scale_factor_.y;
+	return box;
+}
+
+void Inference::DrawDetectedObject(cv::Mat &frame, const Detection &detection) const {
+	const cv::Rect &box = detection.box;
+	const float &confidence = detection.confidence;
+	const int &class_id = detection.class_id;
+	
+	// Generate a random color for the bounding box
+	std::random_device rd;
+	std::mt19937 gen(rd());
+	std::uniform_int_distribution<int> dis(120, 255);
+	const cv::Scalar &color = cv::Scalar(dis(gen), dis(gen), dis(gen));
+	
+	// Draw the bounding box around the detected object
+	cv::rectangle(frame, cv::Point(box.x, box.y), cv::Point(box.x + box.width, box.y + box.height), color, 3);
+	
+	// Prepare the class label and confidence text
+	std::string classString = classes_[class_id] + std::to_string(confidence).substr(0, 4);
+	
+	// Get the size of the text box
+	cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 0.75, 2, 0);
+	cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
+	
+	// Draw the text box
+	cv::rectangle(frame, textBox, color, cv::FILLED);
+	
+	// Put the class label and confidence text above the bounding box
+	cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 0.75, cv::Scalar(0, 0, 0), 2, 0);
+}
+} // namespace yolo

+ 59 - 0
examples/YOLOv8-OpenVINO-CPP-Inference/inference.h

@@ -0,0 +1,59 @@
+#ifndef YOLO_INFERENCE_H_
+#define YOLO_INFERENCE_H_
+
+#include <string>
+#include <vector>
+#include <opencv2/imgproc.hpp>
+#include <openvino/openvino.hpp>
+
+namespace yolo {
+
+struct Detection {
+	short class_id;
+	float confidence;
+	cv::Rect box;
+};
+
+class Inference {
+ public:
+	Inference() {}
+	// Constructor to initialize the model with default input shape
+	Inference(const std::string &model_path, const float &model_confidence_threshold, const float &model_NMS_threshold);
+	// Constructor to initialize the model with specified input shape
+	Inference(const std::string &model_path, const cv::Size model_input_shape, const float &model_confidence_threshold, const float &model_NMS_threshold);
+
+	void RunInference(cv::Mat &frame);
+
+ private:
+	void InitializeModel(const std::string &model_path);
+	void Preprocessing(const cv::Mat &frame);
+	void PostProcessing(cv::Mat &frame);
+	cv::Rect GetBoundingBox(const cv::Rect &src) const;
+	void DrawDetectedObject(cv::Mat &frame, const Detection &detections) const;
+
+	cv::Point2f scale_factor_;			// Scaling factor for the input frame
+	cv::Size2f model_input_shape_;	// Input shape of the model
+	cv::Size model_output_shape_;		// Output shape of the model
+
+	ov::InferRequest inference_request_;  // OpenVINO inference request
+	ov::CompiledModel compiled_model_;    // OpenVINO compiled model
+
+	float model_confidence_threshold_;  // Confidence threshold for detections
+	float model_NMS_threshold_;         // Non-Maximum Suppression threshold
+
+	std::vector<std::string> classes_ {
+		"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", 
+		"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", 
+		"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", 
+		"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", 
+		"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", 
+		"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", 
+		"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", 
+		"cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", 
+		"scissors", "teddy bear", "hair drier", "toothbrush"
+	};
+};
+
+} // namespace yolo
+
+#endif // YOLO_INFERENCE_H_

+ 41 - 0
examples/YOLOv8-OpenVINO-CPP-Inference/main.cc

@@ -0,0 +1,41 @@
+#include "inference.h"
+
+#include <iostream>
+#include <opencv2/highgui.hpp>
+
+int main(int argc, char **argv) {
+	// Check if the correct number of arguments is provided
+	if (argc != 3) {
+		std::cerr << "usage: " << argv[0] << " <model_path> <image_path>" << std::endl;
+		return 1;
+	}
+	
+	// Get the model and image paths from the command-line arguments
+	const std::string model_path = argv[1];
+	const std::string image_path = argv[2];
+	
+	// Read the input image
+	cv::Mat image = cv::imread(image_path);
+	
+	// Check if the image was successfully loaded
+	if (image.empty()) {
+		std::cerr << "ERROR: image is empty" << std::endl;
+		return 1;
+	}
+	
+	// Define the confidence and NMS thresholds
+	const float confidence_threshold = 0.5;
+	const float NMS_threshold = 0.5;
+	
+	// Initialize the YOLO inference with the specified model and parameters
+	yolo::Inference inference(model_path, cv::Size(640, 640), confidence_threshold, NMS_threshold);
+
+	// Run inference on the input image
+	inference.RunInference(image);
+	
+	// Display the image with the detections
+	cv::imshow("image", image);
+	cv::waitKey(0);
+
+	return 0;
+}

+ 128 - 0
examples/YOLOv8-Region-Counter/readme.md

@@ -0,0 +1,128 @@
+# Regions Counting Using YOLOv8 (Inference on Video)
+
+> **Region Counter** is now part of **[Ultralytics Solutions](https://docs.ultralytics.com/solutions/)**, offering improved features and regular updates!
+
+🔗 **[Explore Object Counting in Regions Here](https://docs.ultralytics.com/guides/region-counting/)**
+
+> 🔔 **Notice:**
+>
+> The GitHub example will remain available but **will no longer be actively maintained**. For the latest updates and improvements, please use the official [link](https://docs.ultralytics.com/guides/region-counting/). Thank you!
+
+Region counting is a method employed to tally the objects within a specified area, allowing for more sophisticated analyses when multiple regions are considered. These regions can be adjusted interactively with a left mouse click to suit the user's preferences and requirements, and the counting occurs in real time.
+
+<div>
+<p align="center">
+  <img src="https://github.com/RizwanMunawar/ultralytics/assets/62513924/5ab3bbd7-fd12-4849-928e-5f294d6c3fcf" width="45%" alt="YOLOv8 region counting visual 1">
+  <img src="https://github.com/RizwanMunawar/ultralytics/assets/62513924/e7c1aea7-474d-4d78-8d48-b50854ffe1ca" width="45%" alt="YOLOv8 region counting visual 2">
+</p>
+</div>
+
+## Table of Contents
+
+- [Step 1: Install the Required Libraries](#step-1-install-the-required-libraries)
+- [Step 2: Run the Region Counting Using Ultralytics YOLOv8](#step-2-run-the-region-counting-using-ultralytics-yolov8)
+- [Usage Options](#usage-options)
+- [FAQ](#faq)
+
+## Step 1: Install the Required Libraries
+
+Clone the repository, install dependencies and `cd` to this local directory for commands in Step 2.
+
+```bash
+# Clone ultralytics repo
+git clone https://github.com/ultralytics/ultralytics
+
+# cd to local directory
+cd ultralytics/examples/YOLOv8-Region-Counter
+```
+
+## Step 2: Run the Region Counting Using Ultralytics YOLOv8
+
+Here are the basic commands for running the inference:
+
+### Note
+
+After the video begins playing, you can freely move the region anywhere within the video by simply clicking and dragging using the left mouse button.
+
+```bash
+# If you want to save results
+python yolov8_region_counter.py --source "path/to/video.mp4" --save-img --view-img
+
+# If you want to run model on CPU
+python yolov8_region_counter.py --source "path/to/video.mp4" --save-img --view-img --device cpu
+
+# If you want to change model file
+python yolov8_region_counter.py --source "path/to/video.mp4" --save-img --weights "path/to/model.pt"
+
+# If you want to detect specific class (first class and third class)
+python yolov8_region_counter.py --source "path/to/video.mp4" --classes 0 2 --weights "path/to/model.pt"
+
+# If you don't want to save results
+python yolov8_region_counter.py --source "path/to/video.mp4" --view-img
+```
+
+## Usage Options
+
+- `--source`: Specifies the path to the video file you want to run inference on.
+- `--device`: Specifies the processing device: `cpu` or a GPU index such as `0`.
+- `--save-img`: Flag to save the detection results as images.
+- `--weights`: Specifies a different YOLOv8 model file (e.g., `yolov8n.pt`, `yolov8s.pt`, `yolov8m.pt`, `yolov8l.pt`, `yolov8x.pt`).
+- `--classes`: Specifies the classes to detect and track (e.g., `--classes 0 2`).
+- `--line-thickness`: Specifies the bounding box thickness.
+- `--region-thickness`: Specifies the region boundary thickness.
+- `--track-thickness`: Specifies the tracking line thickness.
+
+## FAQ
+
+**1. What Does Region Counting Involve?**
+
+Region counting is a computational method utilized to ascertain the quantity of objects within a specific area in recorded video or real-time streams. This technique finds frequent application in image processing, computer vision, and pattern recognition, facilitating the analysis and segmentation of objects or features based on their spatial relationships.
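+
+Concretely, the example script counts a tracked object when its bounding-box center lies inside a region polygon. Here is a minimal sketch of that point-in-polygon check, using the same `shapely` primitives the script imports (the center coordinates below are made up):
+
+```python
+from shapely.geometry import Point, Polygon
+
+# Pentagon-shaped counting region (same points as the default polygon region)
+region = Polygon([(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)])
+
+# Hypothetical bounding-box center (cx, cy) of a tracked object
+bbox_center = (240, 180)
+
+counts = 0
+if region.contains(Point(bbox_center)):  # count the object if its center falls inside the region
+    counts += 1
+print(counts)  # 1
+```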
+
+**2. Is Flexible Region Plotting Supported by the Region Counter?**
+
+The Region Counter can create regions in various shapes, such as polygons and rectangles, and you can modify region attributes, including coordinates, colors, and other details, as demonstrated in the following code:
+
+```python
+from shapely.geometry import Polygon
+
+counting_regions = [
+    {
+        "name": "YOLOv8 Polygon Region",
+        "polygon": Polygon(
+            [(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)]
+        ),  # Polygon with five points (Pentagon)
+        "counts": 0,
+        "dragging": False,
+        "region_color": (255, 42, 4),  # BGR Value
+        "text_color": (255, 255, 255),  # Region Text Color
+    },
+    {
+        "name": "YOLOv8 Rectangle Region",
+        "polygon": Polygon([(200, 250), (440, 250), (440, 550), (200, 550)]),  # Rectangle with four points
+        "counts": 0,
+        "dragging": False,
+        "region_color": (37, 255, 225),  # BGR Value
+        "text_color": (0, 0, 0),  # Region Text Color
+    },
+]
+```
+
+**3. Why Combine Region Counting with YOLOv8?**
+
+YOLOv8 specializes in the detection and tracking of objects in video streams. Region counting complements this by enabling object counting within designated areas, making it a valuable application of YOLOv8.
+
+**4. How Can I Troubleshoot Issues?**
+
+To see what the script is detecting during inference, include the `--view-img` flag in your command:
+
+```bash
+python yolov8_region_counter.py --source "path/to/video.mp4" --view-img
+```
+
+**5. Can I Employ Other YOLO Versions?**
+
+Certainly, you have the flexibility to specify different YOLO model weights using the `--weights` option.
+
+**6. Where Can I Access Additional Information?**
+
+For a comprehensive guide on using YOLOv8 with Object Tracking, please refer to [Multi-Object Tracking with Ultralytics YOLO](https://docs.ultralytics.com/modes/track/).

+ 253 - 0
examples/YOLOv8-Region-Counter/yolov8_region_counter.py

@@ -0,0 +1,253 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+from collections import defaultdict
+from pathlib import Path
+
+import cv2
+import numpy as np
+from shapely.geometry import Polygon
+from shapely.geometry.point import Point
+
+from ultralytics import YOLO
+from ultralytics.utils.files import increment_path
+from ultralytics.utils.plotting import Annotator, colors
+
+track_history = defaultdict(list)
+
+current_region = None
+counting_regions = [
+    {
+        "name": "YOLOv8 Polygon Region",
+        "polygon": Polygon([(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)]),  # Polygon points
+        "counts": 0,
+        "dragging": False,
+        "region_color": (255, 42, 4),  # BGR Value
+        "text_color": (255, 255, 255),  # Region Text Color
+    },
+    {
+        "name": "YOLOv8 Rectangle Region",
+        "polygon": Polygon([(200, 250), (440, 250), (440, 550), (200, 550)]),  # Polygon points
+        "counts": 0,
+        "dragging": False,
+        "region_color": (37, 255, 225),  # BGR Value
+        "text_color": (0, 0, 0),  # Region Text Color
+    },
+]
+
+
+def mouse_callback(event, x, y, flags, param):
+    """
+    Handles mouse events for region manipulation.
+
+    Args:
+        event (int): The mouse event type (e.g., cv2.EVENT_LBUTTONDOWN).
+        x (int): The x-coordinate of the mouse pointer.
+        y (int): The y-coordinate of the mouse pointer.
+        flags (int): Additional flags passed by OpenCV.
+        param: Additional parameters passed to the callback (not used in this function).
+
+    Global Variables:
+        current_region (dict): A dictionary representing the current selected region.
+
+    Mouse Events:
+        - LBUTTONDOWN: Initiates dragging for the region containing the clicked point.
+        - MOUSEMOVE: Moves the selected region if dragging is active.
+        - LBUTTONUP: Ends dragging for the selected region.
+
+    Notes:
+        - This function is intended to be used as a callback for OpenCV mouse events.
+        - Requires the existence of the 'counting_regions' list and the 'Polygon' class.
+
+    Example:
+        >>> cv2.setMouseCallback(window_name, mouse_callback)
+    """
+    global current_region
+
+    # Mouse left button down event
+    if event == cv2.EVENT_LBUTTONDOWN:
+        for region in counting_regions:
+            if region["polygon"].contains(Point((x, y))):
+                current_region = region
+                current_region["dragging"] = True
+                current_region["offset_x"] = x
+                current_region["offset_y"] = y
+
+    # Mouse move event
+    elif event == cv2.EVENT_MOUSEMOVE:
+        if current_region is not None and current_region["dragging"]:
+            dx = x - current_region["offset_x"]
+            dy = y - current_region["offset_y"]
+            current_region["polygon"] = Polygon(
+                [(p[0] + dx, p[1] + dy) for p in current_region["polygon"].exterior.coords]
+            )
+            current_region["offset_x"] = x
+            current_region["offset_y"] = y
+
+    # Mouse left button up event
+    elif event == cv2.EVENT_LBUTTONUP:
+        if current_region is not None and current_region["dragging"]:
+            current_region["dragging"] = False
+
+
+def run(
+    weights="yolov8n.pt",
+    source=None,
+    device="cpu",
+    view_img=False,
+    save_img=False,
+    exist_ok=False,
+    classes=None,
+    line_thickness=2,
+    track_thickness=2,
+    region_thickness=2,
+):
+    """
+    Run Region counting on a video using YOLOv8 and ByteTrack.
+
+    Supports movable region for real time counting inside specific area.
+    Supports multiple regions counting.
+    Regions can be Polygons or rectangle in shape
+
+    Args:
+        weights (str): Model weights path.
+        source (str): Video file path.
+        device (str): processing device cpu, 0, 1
+        view_img (bool): Show results.
+        save_img (bool): Save results.
+        exist_ok (bool): Overwrite existing files.
+        classes (list): classes to detect and track
+        line_thickness (int): Bounding box thickness.
+        track_thickness (int): Tracking line thickness
+        region_thickness (int): Region thickness.
+    """
+    vid_frame_count = 0
+
+    # Check source path
+    if not Path(source).exists():
+        raise FileNotFoundError(f"Source path '{source}' does not exist.")
+
+    # Setup Model
+    model = YOLO(f"{weights}")
+    model.to("cuda") if device == "0" else model.to("cpu")
+
+    # Extract classes names
+    names = model.names
+
+    # Video setup
+    videocapture = cv2.VideoCapture(source)
+    frame_width = int(videocapture.get(3))
+    frame_height = int(videocapture.get(4))
+    fps = int(videocapture.get(5))
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+
+    # Output setup
+    save_dir = increment_path(Path("ultralytics_rc_output") / "exp", exist_ok)
+    save_dir.mkdir(parents=True, exist_ok=True)
+    video_writer = cv2.VideoWriter(str(save_dir / f"{Path(source).stem}.avi"), fourcc, fps, (frame_width, frame_height))
+
+    # Iterate over video frames
+    while videocapture.isOpened():
+        success, frame = videocapture.read()
+        if not success:
+            break
+        vid_frame_count += 1
+
+        # Extract the results
+        results = model.track(frame, persist=True, classes=classes)
+
+        if results[0].boxes.id is not None:
+            boxes = results[0].boxes.xyxy.cpu()
+            track_ids = results[0].boxes.id.int().cpu().tolist()
+            clss = results[0].boxes.cls.cpu().tolist()
+
+            annotator = Annotator(frame, line_width=line_thickness, example=str(names))
+
+            for box, track_id, cls in zip(boxes, track_ids, clss):
+                annotator.box_label(box, str(names[cls]), color=colors(cls, True))
+                bbox_center = (box[0] + box[2]) / 2, (box[1] + box[3]) / 2  # Bbox center
+
+                track = track_history[track_id]  # Tracking Lines plot
+                track.append((float(bbox_center[0]), float(bbox_center[1])))
+                if len(track) > 30:
+                    track.pop(0)
+                points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
+                cv2.polylines(frame, [points], isClosed=False, color=colors(cls, True), thickness=track_thickness)
+
+                # Check if detection inside region
+                for region in counting_regions:
+                    if region["polygon"].contains(Point((bbox_center[0], bbox_center[1]))):
+                        region["counts"] += 1
+
+        # Draw regions (Polygons/Rectangles)
+        for region in counting_regions:
+            region_label = str(region["counts"])
+            region_color = region["region_color"]
+            region_text_color = region["text_color"]
+
+            polygon_coordinates = np.array(region["polygon"].exterior.coords, dtype=np.int32)
+            centroid_x, centroid_y = int(region["polygon"].centroid.x), int(region["polygon"].centroid.y)
+
+            text_size, _ = cv2.getTextSize(
+                region_label, cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=line_thickness
+            )
+            text_x = centroid_x - text_size[0] // 2
+            text_y = centroid_y + text_size[1] // 2
+            cv2.rectangle(
+                frame,
+                (text_x - 5, text_y - text_size[1] - 5),
+                (text_x + text_size[0] + 5, text_y + 5),
+                region_color,
+                -1,
+            )
+            cv2.putText(
+                frame, region_label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, region_text_color, line_thickness
+            )
+            cv2.polylines(frame, [polygon_coordinates], isClosed=True, color=region_color, thickness=region_thickness)
+
+        if view_img:
+            if vid_frame_count == 1:
+                cv2.namedWindow("Ultralytics YOLOv8 Region Counter Movable")
+                cv2.setMouseCallback("Ultralytics YOLOv8 Region Counter Movable", mouse_callback)
+            cv2.imshow("Ultralytics YOLOv8 Region Counter Movable", frame)
+
+        if save_img:
+            video_writer.write(frame)
+
+        for region in counting_regions:  # Reinitialize count for each region
+            region["counts"] = 0
+
+        if cv2.waitKey(1) & 0xFF == ord("q"):
+            break
+
+    del vid_frame_count
+    video_writer.release()
+    videocapture.release()
+    cv2.destroyAllWindows()
+
+
+def parse_opt():
+    """Parse command line arguments."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--weights", type=str, default="yolov8n.pt", help="initial weights path")
+    parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
+    parser.add_argument("--source", type=str, required=True, help="video file path")
+    parser.add_argument("--view-img", action="store_true", help="show results")
+    parser.add_argument("--save-img", action="store_true", help="save results")
+    parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
+    parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
+    parser.add_argument("--line-thickness", type=int, default=2, help="bounding box thickness")
+    parser.add_argument("--track-thickness", type=int, default=2, help="Tracking line thickness")
+    parser.add_argument("--region-thickness", type=int, default=4, help="Region thickness")
+
+    return parser.parse_args()
+
+
+def main(options):
+    """Main function."""
+    run(**vars(options))
+
+
+if __name__ == "__main__":
+    opt = parse_opt()
+    main(opt)

+ 69 - 0
examples/YOLOv8-SAHI-Inference-Video/readme.md

@@ -0,0 +1,69 @@
+# YOLO11 with SAHI (Inference on Video)
+
+[SAHI](https://docs.ultralytics.com/guides/sahi-tiled-inference/) is designed to optimize object detection algorithms for large-scale and high-resolution imagery. It partitions images into manageable slices, performs object detection on each slice, and then stitches the results back together. This tutorial will guide you through the process of running YOLO11 inference on video files with the aid of SAHI.
+
+## Table of Contents
+
+- [Step 1: Install the Required Libraries](#step-1-install-the-required-libraries)
+- [Step 2: Run the Inference with SAHI using Ultralytics YOLO11](#step-2-run-the-inference-with-sahi-using-ultralytics-yolo11)
+- [Usage Options](#usage-options)
+- [FAQ](#faq)
+
+## Step 1: Install the Required Libraries
+
+Clone the repository, install dependencies and `cd` to this local directory for commands in Step 2.
+
+```bash
+# Clone ultralytics repo
+git clone https://github.com/ultralytics/ultralytics
+
+# Install dependencies
+pip install -U sahi ultralytics
+
+# cd to local directory
+cd ultralytics/examples/YOLOv8-SAHI-Inference-Video
+```
+
+## Step 2: Run the Inference with SAHI using Ultralytics YOLO11
+
+Here are the basic commands for running the inference:
+
+```bash
+# If you want to save results
+python yolov8_sahi.py --source "path/to/video.mp4" --save-img
+
+# If you want to change the model file
+python yolov8_sahi.py --source "path/to/video.mp4" --save-img --weights "yolo11n.pt"
+```
+
+## Usage Options
+
+- `--source`: Specifies the path to the video file you want to run inference on.
+- `--save-img`: Flag to save the detection results as images.
+- `--weights`: Specifies a different YOLO11 model file (e.g., `yolo11n.pt`, `yolo11s.pt`, `yolo11m.pt`, `yolo11l.pt`, `yolo11x.pt`).
+
+## FAQ
+
+**1. What is SAHI?**
+
+SAHI stands for Slicing Aided Hyper Inference. It is a library designed to optimize object detection algorithms for large-scale and high-resolution images. The library source code is available on [GitHub](https://github.com/obss/sahi).
+
+**2. Why use SAHI with YOLO11?**
+
+SAHI can handle large-scale images by slicing them into smaller, more manageable sizes without compromising the detection quality. This makes it a great companion to YOLO11, especially when working with high-resolution videos.
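+
+For intuition, here is a minimal single-image sketch of sliced inference, lifted from the per-frame logic in `yolov8_sahi.py` (the model and image paths are placeholders):
+
+```python
+import cv2
+from sahi import AutoDetectionModel
+from sahi.predict import get_sliced_prediction
+
+# Load a YOLO11 model through SAHI, as the video script does
+detection_model = AutoDetectionModel.from_pretrained(
+    model_type="ultralytics", model_path="yolo11n.pt", device="cpu"
+)
+
+# Slice the image, detect on each slice, and merge the results
+frame = cv2.imread("image.jpg")  # placeholder image path
+results = get_sliced_prediction(frame[..., ::-1], detection_model, slice_height=512, slice_width=512)
+
+# Each merged prediction carries a category and an axis-aligned box
+for det in results.object_prediction_list:
+    print(det.category.name, det.bbox.minx, det.bbox.miny, det.bbox.maxx, det.bbox.maxy)
+```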
+
+**3. How do I debug issues?**
+
+You can add the `--view-img` flag to your command to display results while inference runs:
+
+```bash
+python yolov8_sahi.py --source "path/to/video.mp4" --view-img
+```
+
+**4. Can I use other YOLO versions?**
+
+Yes, you can specify different YOLO model weights using the `--weights` option.
+
+**5. Where can I find more information?**
+
+For a full guide to YOLO11 with SAHI see [https://docs.ultralytics.com/guides/sahi-tiled-inference](https://docs.ultralytics.com/guides/sahi-tiled-inference/).

+ 108 - 0
examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py

@@ -0,0 +1,108 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+from pathlib import Path
+
+import cv2
+from sahi import AutoDetectionModel
+from sahi.predict import get_sliced_prediction
+from sahi.utils.ultralytics import download_yolo11n_model
+
+from ultralytics.utils.files import increment_path
+from ultralytics.utils.plotting import Annotator, colors
+
+
+class SAHIInference:
+    """Runs Ultralytics YOLO11 and SAHI for object detection on video with options to view, save, and track results."""
+
+    def __init__(self):
+        """Initializes the SAHIInference class for performing sliced inference using SAHI with YOLO11 models."""
+        self.detection_model = None
+
+    def load_model(self, weights):
+        """Loads a YOLO11 model with specified weights for object detection using SAHI."""
+        yolo11_model_path = f"models/{weights}"
+        download_yolo11n_model(yolo11_model_path)
+        self.detection_model = AutoDetectionModel.from_pretrained(
+            model_type="ultralytics", model_path=yolo11_model_path, device="cpu"
+        )
+
+    def inference(
+        self,
+        weights="yolo11n.pt",
+        source="test.mp4",
+        view_img=False,
+        save_img=False,
+        exist_ok=False,
+    ):
+        """
+        Run object detection on a video using YOLO11 and SAHI.
+
+        Args:
+            weights (str): Model weights path.
+            source (str): Video file path.
+            view_img (bool): Show results.
+            save_img (bool): Save results.
+            exist_ok (bool): Overwrite existing files.
+        """
+        # Video setup
+        cap = cv2.VideoCapture(source)
+        assert cap.isOpened(), "Error reading video file"
+        frame_width, frame_height = int(cap.get(3)), int(cap.get(4))
+
+        # Output setup
+        save_dir = increment_path(Path("ultralytics_results_with_sahi") / "exp", exist_ok)
+        save_dir.mkdir(parents=True, exist_ok=True)
+        video_writer = cv2.VideoWriter(
+            str(save_dir / f"{Path(source).stem}.avi"),
+            cv2.VideoWriter_fourcc(*"MJPG"),
+            int(cap.get(5)),
+            (frame_width, frame_height),
+        )
+
+        # Load model
+        self.load_model(weights)
+        while cap.isOpened():
+            success, frame = cap.read()
+            if not success:
+                break
+            annotator = Annotator(frame)  # Initialize annotator for plotting detection and tracking results
+            results = get_sliced_prediction(
+                frame[..., ::-1],
+                self.detection_model,
+                slice_height=512,
+                slice_width=512,
+            )
+            detection_data = [
+                (det.category.name, det.category.id, (det.bbox.minx, det.bbox.miny, det.bbox.maxx, det.bbox.maxy))
+                for det in results.object_prediction_list
+            ]
+
+            for det in detection_data:
+                annotator.box_label(det[2], label=str(det[0]), color=colors(int(det[1]), True))
+
+            if view_img:
+                cv2.imshow(Path(source).stem, frame)
+            if save_img:
+                video_writer.write(frame)
+
+            if cv2.waitKey(1) & 0xFF == ord("q"):
+                break
+        video_writer.release()
+        cap.release()
+        cv2.destroyAllWindows()
+
+    def parse_opt(self):
+        """Parse command line arguments."""
+        parser = argparse.ArgumentParser()
+        parser.add_argument("--weights", type=str, default="yolo11n.pt", help="initial weights path")
+        parser.add_argument("--source", type=str, required=True, help="video file path")
+        parser.add_argument("--view-img", action="store_true", help="show results")
+        parser.add_argument("--save-img", action="store_true", help="save results")
+        parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
+        return parser.parse_args()
+
+
+if __name__ == "__main__":
+    inference = SAHIInference()
+    inference.inference(**vars(inference.parse_opt()))

+ 63 - 0
examples/YOLOv8-Segmentation-ONNXRuntime-Python/README.md

@@ -0,0 +1,63 @@
+# YOLOv8-Segmentation-ONNXRuntime-Python Demo
+
+This repository provides a Python demo for performing segmentation with YOLOv8 using ONNX Runtime, highlighting the interoperability of YOLOv8 models without the need for the full PyTorch stack.
+
+## Features
+
+- **Framework Agnostic**: Runs segmentation inference purely on ONNX Runtime without importing PyTorch.
+- **Efficient Inference**: Supports both FP32 and FP16 precision for ONNX models, catering to different computational needs.
+- **Ease of Use**: Utilizes simple command-line arguments for model execution.
+- **Broad Compatibility**: Leverages Numpy and OpenCV for image processing, ensuring broad compatibility with various environments.
+
+## Installation
+
+Install the required packages using pip. You will need `ultralytics` for exporting the YOLOv8-seg ONNX model and for some utility functions, `onnxruntime-gpu` for GPU-accelerated inference, and `opencv-python` for image processing.
+
+```bash
+pip install ultralytics
+pip install onnxruntime-gpu  # For GPU support
+# pip install onnxruntime    # Use this instead if you don't have an NVIDIA GPU
+pip install numpy
+pip install opencv-python
+```
+
+## Getting Started
+
+### 1. Export the YOLOv8 ONNX Model
+
+Export the YOLOv8 segmentation model to ONNX format using the provided `ultralytics` package.
+
+```bash
+yolo export model=yolov8s-seg.pt imgsz=640 format=onnx opset=12 simplify
+```
+
+### 2. Run Inference
+
+Perform inference with the exported ONNX model on your images.
+
+```bash
+python main.py --model <MODEL_PATH> --source <IMAGE_PATH>
+```
+
+### Example Output
+
+After running the command, you should see segmentation results similar to this:
+
+<img src="https://user-images.githubusercontent.com/51357717/279988626-eb74823f-1563-4d58-a8e4-0494025b7c9a.jpg" alt="Segmentation Demo" width="800">
+
+## Advanced Usage
+
+For more advanced usage, including real-time video processing, please refer to the `main.py` script's command-line arguments.
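+
+You can also call the demo programmatically. Below is a minimal sketch using the `YOLOv8Seg` class defined in `main.py` (the model and image paths are placeholders):
+
+```python
+import cv2
+
+from main import YOLOv8Seg  # class defined in this example's main.py
+
+model = YOLOv8Seg("yolov8s-seg.onnx")  # placeholder ONNX model path
+img = cv2.imread("bus.jpg")  # placeholder image path
+
+boxes, segments, masks = model(img, conf_threshold=0.25, iou_threshold=0.45)
+if len(boxes) > 0:
+    model.draw_and_visualize(img, boxes, segments, vis=False, save=True)  # writes demo.jpg
+```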
+
+## Contributing
+
+We welcome contributions to improve this demo! Please submit issues and pull requests for bug reports, feature requests, or new algorithm enhancements.
+
+## License
+
+This project is licensed under the AGPL-3.0 License - see the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for details.
+
+## Acknowledgments
+
+- The YOLOv8-Segmentation-ONNXRuntime-Python demo is contributed by GitHub user [jamjamjon](https://github.com/jamjamjon).
+- Thanks to the ONNX Runtime community for providing a robust and efficient inference engine.

+ 338 - 0
examples/YOLOv8-Segmentation-ONNXRuntime-Python/main.py

@@ -0,0 +1,338 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+
+import cv2
+import numpy as np
+import onnxruntime as ort
+
+from ultralytics.utils import ASSETS, yaml_load
+from ultralytics.utils.checks import check_yaml
+from ultralytics.utils.plotting import Colors
+
+
+class YOLOv8Seg:
+    """YOLOv8 segmentation model."""
+
+    def __init__(self, onnx_model):
+        """
+        Initialization.
+
+        Args:
+            onnx_model (str): Path to the ONNX model.
+        """
+        # Build Ort session
+        self.session = ort.InferenceSession(
+            onnx_model,
+            providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
+            if ort.get_device() == "GPU"
+            else ["CPUExecutionProvider"],
+        )
+
+        # Numpy dtype: support both FP32 and FP16 onnx model
+        self.ndtype = np.half if self.session.get_inputs()[0].type == "tensor(float16)" else np.single
+
+        # Get model width and height(YOLOv8-seg only has one input)
+        self.model_height, self.model_width = [x.shape for x in self.session.get_inputs()][0][-2:]
+
+        # Load COCO class names
+        self.classes = yaml_load(check_yaml("coco8.yaml"))["names"]
+
+        # Create color palette
+        self.color_palette = Colors()
+
+    def __call__(self, im0, conf_threshold=0.4, iou_threshold=0.45, nm=32):
+        """
+        The whole pipeline: pre-process -> inference -> post-process.
+
+        Args:
+            im0 (Numpy.ndarray): original input image.
+            conf_threshold (float): confidence threshold for filtering predictions.
+            iou_threshold (float): iou threshold for NMS.
+            nm (int): the number of masks.
+
+        Returns:
+            boxes (List): list of bounding boxes.
+            segments (List): list of segments.
+            masks (np.ndarray): [N, H, W], output masks.
+        """
+        # Pre-process
+        im, ratio, (pad_w, pad_h) = self.preprocess(im0)
+
+        # Ort inference
+        preds = self.session.run(None, {self.session.get_inputs()[0].name: im})
+
+        # Post-process
+        boxes, segments, masks = self.postprocess(
+            preds,
+            im0=im0,
+            ratio=ratio,
+            pad_w=pad_w,
+            pad_h=pad_h,
+            conf_threshold=conf_threshold,
+            iou_threshold=iou_threshold,
+            nm=nm,
+        )
+        return boxes, segments, masks
+
+    def preprocess(self, img):
+        """
+        Pre-processes the input image.
+
+        Args:
+            img (Numpy.ndarray): image about to be processed.
+
+        Returns:
+            img_process (Numpy.ndarray): image preprocessed for inference.
+            ratio (tuple): width, height ratios in letterbox.
+            pad_w (float): width padding in letterbox.
+            pad_h (float): height padding in letterbox.
+        """
+        # Resize and pad input image using letterbox() (Borrowed from Ultralytics)
+        shape = img.shape[:2]  # original image shape
+        new_shape = (self.model_height, self.model_width)
+        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+        ratio = r, r
+        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+        pad_w, pad_h = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding
+        if shape[::-1] != new_unpad:  # resize
+            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+        top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
+        left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
+        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))
+
+        # Transforms: HWC to CHW -> BGR to RGB -> div(255) -> contiguous -> add axis(optional)
+        img = np.ascontiguousarray(np.einsum("HWC->CHW", img)[::-1], dtype=self.ndtype) / 255.0
+        img_process = img[None] if len(img.shape) == 3 else img
+        return img_process, ratio, (pad_w, pad_h)
+
+    def postprocess(self, preds, im0, ratio, pad_w, pad_h, conf_threshold, iou_threshold, nm=32):
+        """
+        Post-process the prediction.
+
+        Args:
+            preds (Numpy.ndarray): predictions come from ort.session.run().
+            im0 (Numpy.ndarray): [h, w, c] original input image.
+            ratio (tuple): width, height ratios in letterbox.
+            pad_w (float): width padding in letterbox.
+            pad_h (float): height padding in letterbox.
+            conf_threshold (float): conf threshold.
+            iou_threshold (float): iou threshold.
+            nm (int): the number of masks.
+
+        Returns:
+            boxes (List): list of bounding boxes.
+            segments (List): list of segments.
+            masks (np.ndarray): [N, H, W], output masks.
+        """
+        x, protos = preds[0], preds[1]  # Two outputs: predictions and protos
+
+        # Transpose dim 1: (Batch_size, xywh_conf_cls_nm, Num_anchors) -> (Batch_size, Num_anchors, xywh_conf_cls_nm)
+        x = np.einsum("bcn->bnc", x)
+
+        # Predictions filtering by conf-threshold
+        x = x[np.amax(x[..., 4:-nm], axis=-1) > conf_threshold]
+
+        # Create a new matrix which merge these(box, score, cls, nm) into one
+        # For more details about `numpy.c_()`: https://numpy.org/doc/1.26/reference/generated/numpy.c_.html
+        x = np.c_[x[..., :4], np.amax(x[..., 4:-nm], axis=-1), np.argmax(x[..., 4:-nm], axis=-1), x[..., -nm:]]
+
+        # NMS filtering
+        x = x[cv2.dnn.NMSBoxes(x[:, :4], x[:, 4], conf_threshold, iou_threshold)]
+
+        # Decode and return
+        if len(x) > 0:
+            # Bounding boxes format change: cxcywh -> xyxy
+            x[..., [0, 1]] -= x[..., [2, 3]] / 2
+            x[..., [2, 3]] += x[..., [0, 1]]
+
+            # Rescales bounding boxes from model shape(model_height, model_width) to the shape of original image
+            x[..., :4] -= [pad_w, pad_h, pad_w, pad_h]
+            x[..., :4] /= min(ratio)
+
+            # Bounding boxes boundary clamp
+            x[..., [0, 2]] = x[:, [0, 2]].clip(0, im0.shape[1])
+            x[..., [1, 3]] = x[:, [1, 3]].clip(0, im0.shape[0])
+
+            # Process masks
+            masks = self.process_mask(protos[0], x[:, 6:], x[:, :4], im0.shape)
+
+            # Masks -> Segments(contours)
+            segments = self.masks2segments(masks)
+            return x[..., :6], segments, masks  # boxes, segments, masks
+        else:
+            return [], [], []
+
+    @staticmethod
+    def masks2segments(masks):
+        """
+        Takes a list of masks(n,h,w) and returns a list of segments(n,xy), from
+        https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/ops.py.
+
+        Args:
+            masks (numpy.ndarray): the output of the model, which is a tensor of shape (batch_size, 160, 160).
+
+        Returns:
+            segments (List): list of segment masks.
+        """
+        segments = []
+        for x in masks.astype("uint8"):
+            c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]  # CHAIN_APPROX_SIMPLE
+            if c:
+                c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
+            else:
+                c = np.zeros((0, 2))  # no segments found
+            segments.append(c.astype("float32"))
+        return segments
+
+    @staticmethod
+    def crop_mask(masks, boxes):
+        """
+        Takes a mask and a bounding box, and returns a mask that is cropped to the bounding box, from
+        https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/ops.py.
+
+        Args:
+            masks (Numpy.ndarray): [n, h, w] tensor of masks.
+            boxes (Numpy.ndarray): [n, 4] tensor of bbox coordinates in relative point form.
+
+        Returns:
+            (Numpy.ndarray): The masks are being cropped to the bounding box.
+        """
+        n, h, w = masks.shape
+        x1, y1, x2, y2 = np.split(boxes[:, :, None], 4, 1)
+        r = np.arange(w, dtype=x1.dtype)[None, None, :]
+        c = np.arange(h, dtype=x1.dtype)[None, :, None]
+        return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
+
+    def process_mask(self, protos, masks_in, bboxes, im0_shape):
+        """
+        Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher
+        quality but is slower, from https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/ops.py.
+
+        Args:
+            protos (numpy.ndarray): [mask_dim, mask_h, mask_w].
+            masks_in (numpy.ndarray): [n, mask_dim], n is number of masks after nms.
+            bboxes (numpy.ndarray): bboxes re-scaled to original image shape.
+            im0_shape (tuple): the size of the input image (h,w,c).
+
+        Returns:
+            (numpy.ndarray): The upsampled masks.
+        """
+        c, mh, mw = protos.shape
+        masks = np.matmul(masks_in, protos.reshape((c, -1))).reshape((-1, mh, mw)).transpose(1, 2, 0)  # HWN
+        masks = np.ascontiguousarray(masks)
+        masks = self.scale_mask(masks, im0_shape)  # re-scale mask from P3 shape to original input image shape
+        masks = np.einsum("HWN -> NHW", masks)  # HWN -> NHW
+        masks = self.crop_mask(masks, bboxes)
+        return np.greater(masks, 0.5)
+
+    @staticmethod
+    def scale_mask(masks, im0_shape, ratio_pad=None):
+        """
+        Takes a mask, and resizes it to the original image size, from
+        https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/ops.py.
+
+        Args:
+            masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3].
+            im0_shape (tuple): the original image shape.
+            ratio_pad (tuple): the ratio of the padding to the original image.
+
+        Returns:
+            masks (np.ndarray): The masks that are being returned.
+        """
+        im1_shape = masks.shape[:2]
+        if ratio_pad is None:  # calculate from im0_shape
+            gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain  = old / new
+            pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
+        else:
+            pad = ratio_pad[1]
+
+        # Calculate tlbr of mask
+        top, left = int(round(pad[1] - 0.1)), int(round(pad[0] - 0.1))  # y, x
+        bottom, right = int(round(im1_shape[0] - pad[1] + 0.1)), int(round(im1_shape[1] - pad[0] + 0.1))
+        if len(masks.shape) < 2:
+            raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
+        masks = masks[top:bottom, left:right]
+        masks = cv2.resize(
+            masks, (im0_shape[1], im0_shape[0]), interpolation=cv2.INTER_LINEAR
+        )  # INTER_CUBIC would be better
+        if len(masks.shape) == 2:
+            masks = masks[:, :, None]
+        return masks
+
+    def draw_and_visualize(self, im, bboxes, segments, vis=False, save=True):
+        """
+        Draw and visualize results.
+
+        Args:
+            im (np.ndarray): original image, shape [h, w, c].
+            bboxes (numpy.ndarray): [n, 4], n is number of bboxes.
+            segments (List): list of segment masks.
+            vis (bool): imshow using OpenCV.
+            save (bool): save image annotated.
+
+        Returns:
+            None
+        """
+        # Draw rectangles and polygons
+        im_canvas = im.copy()
+        for (*box, conf, cls_), segment in zip(bboxes, segments):
+            # draw contour and fill mask
+            cv2.polylines(im, np.int32([segment]), True, (255, 255, 255), 2)  # white borderline
+            cv2.fillPoly(im_canvas, np.int32([segment]), self.color_palette(int(cls_), bgr=True))
+
+            # draw bbox rectangle
+            cv2.rectangle(
+                im,
+                (int(box[0]), int(box[1])),
+                (int(box[2]), int(box[3])),
+                self.color_palette(int(cls_), bgr=True),
+                1,
+                cv2.LINE_AA,
+            )
+            cv2.putText(
+                im,
+                f"{self.classes[cls_]}: {conf:.3f}",
+                (int(box[0]), int(box[1] - 9)),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                self.color_palette(int(cls_), bgr=True),
+                2,
+                cv2.LINE_AA,
+            )
+
+        # Mix image
+        im = cv2.addWeighted(im_canvas, 0.3, im, 0.7, 0)
+
+        # Show image
+        if vis:
+            cv2.imshow("demo", im)
+            cv2.waitKey(0)
+            cv2.destroyAllWindows()
+
+        # Save image
+        if save:
+            cv2.imwrite("demo.jpg", im)
+
+
+if __name__ == "__main__":
+    # Create an argument parser to handle command-line arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model", type=str, required=True, help="Path to ONNX model")
+    parser.add_argument("--source", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image")
+    parser.add_argument("--conf", type=float, default=0.25, help="Confidence threshold")
+    parser.add_argument("--iou", type=float, default=0.45, help="NMS IoU threshold")
+    args = parser.parse_args()
+
+    # Build model
+    model = YOLOv8Seg(args.model)
+
+    # Read image by OpenCV
+    img = cv2.imread(args.source)
+
+    # Inference
+    boxes, segments, _ = model(img, conf_threshold=args.conf, iou_threshold=args.iou)
+
+    # Draw bboxes and polygons
+    if len(boxes) > 0:
+        model.draw_and_visualize(img, boxes, segments, vis=False, save=True)

+ 55 - 0
examples/YOLOv8-TFLite-Python/README.md

@@ -0,0 +1,55 @@
+# YOLOv8 - TFLite Runtime
+
+This example shows how to run inference with a YOLOv8 TFLite model. It supports FP32, FP16, and INT8 models.
+
+## Installation
+
+### Installing `tflite-runtime`
+
+To load TFLite models, install the `tflite-runtime` package using:
+
+```bash
+pip install tflite-runtime
+```
+
+### Installing `tensorflow-gpu` (For NVIDIA GPU Users)
+
+Leverage GPU acceleration with NVIDIA GPUs by installing `tensorflow-gpu`:
+
+```bash
+pip install tensorflow-gpu
+```
+
+**Note:** Ensure you have compatible GPU drivers installed on your system.
+
+### Installing `tensorflow` (CPU Version)
+
+For CPU usage or non-NVIDIA GPUs, install TensorFlow with:
+
+```bash
+pip install tensorflow
+```
+
+## Usage
+
+Follow these instructions to run YOLOv8 after successful installation.
+
+Convert the YOLOv8 model to TFLite format:
+
+```bash
+yolo export model=yolov8n.pt imgsz=640 format=tflite int8
+```
+
+Locate the TFLite model in `yolov8n_saved_model`. Then, execute the following in your terminal:
+
+```bash
+python main.py --model yolov8n_full_integer_quant.tflite --img image.jpg --conf 0.25 --iou 0.45 --metadata "metadata.yaml"
+```
+
+Replace `yolov8n_full_integer_quant.tflite` with your TFLite model path, `image.jpg` with the input image path, and `metadata.yaml` with the file generated by `ultralytics` during export, adjusting the confidence (`conf`) and IoU (`iou`) thresholds as necessary.
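+
+The demo can also be driven from Python. Based on the `YOLOv8TFLite` class and its documented `detect()` method in `main.py`, a minimal sketch might look like this (all paths are placeholders):
+
+```python
+import cv2
+
+from main import YOLOv8TFLite  # class defined in this example's main.py
+
+detector = YOLOv8TFLite(
+    "yolov8n_full_integer_quant.tflite", conf=0.25, iou=0.45, metadata="metadata.yaml"
+)
+result = detector.detect("image.jpg")  # returns the image with detections drawn
+cv2.imwrite("output.jpg", result)
+```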
+
+### Output
+
+The output shows the detections along with the class label and confidence of each detected object.
+
+![image](https://github.com/wamiqraza/Attribute-recognition-and-reidentification-Market1501-dataset/blob/main/img/bus.jpg)

+ 221 - 0
examples/YOLOv8-TFLite-Python/main.py

@@ -0,0 +1,221 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import argparse
+from typing import Tuple, Union
+
+import cv2
+import numpy as np
+import yaml
+
+from ultralytics.utils import ASSETS
+
+try:
+    from tflite_runtime.interpreter import Interpreter
+except ImportError:
+    import tensorflow as tf
+
+    Interpreter = tf.lite.Interpreter
+
+
+class YOLOv8TFLite:
+    """
+    YOLOv8TFLite.
+
+    A class for performing object detection using the YOLOv8 model with TensorFlow Lite.
+
+    Attributes:
+        model (str): Path to the TensorFlow Lite model file.
+        conf (float): Confidence threshold for filtering detections.
+        iou (float): Intersection over Union threshold for non-maximum suppression.
+        metadata (Optional[str]): Path to the metadata file, if any.
+
+    Methods:
+        detect(img_path: str) -> np.ndarray:
+            Performs inference and returns the output image with drawn detections.
+    """
+
+    def __init__(self, model: str, conf: float = 0.25, iou: float = 0.45, metadata: Union[str, None] = None):
+        """
+        Initializes an instance of the YOLOv8TFLite class.
+
+        Args:
+            model (str): Path to the TFLite model.
+            conf (float, optional): Confidence threshold for filtering detections. Defaults to 0.25.
+            iou (float, optional): IoU (Intersection over Union) threshold for non-maximum suppression. Defaults to 0.45.
+            metadata (Union[str, None], optional): Path to the metadata file or None if not used. Defaults to None.
+        """
+        self.conf = conf
+        self.iou = iou
+        if metadata is None:
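+            # No metadata file provided: fall back to numeric class IDs as label names (assumes at most 1000 classes)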
+            self.classes = {i: i for i in range(1000)}
+        else:
+            with open(metadata) as f:
+                self.classes = yaml.safe_load(f)["names"]
+        np.random.seed(42)
+        self.color_palette = np.random.uniform(128, 255, size=(len(self.classes), 3))
+
+        self.model = Interpreter(model_path=model)
+        self.model.allocate_tensors()
+
+        input_details = self.model.get_input_details()[0]
+
+        self.in_width, self.in_height = input_details["shape"][1:3]
+        self.in_index = input_details["index"]
+        self.in_scale, self.in_zero_point = input_details["quantization"]
+        self.int8 = input_details["dtype"] == np.int8
+
+        output_details = self.model.get_output_details()[0]
+        self.out_index = output_details["index"]
+        self.out_scale, self.out_zero_point = output_details["quantization"]
+
+    def letterbox(self, img: np.ndarray, new_shape: Tuple = (640, 640)) -> Tuple[np.ndarray, Tuple[float, float]]:
+        """Resizes and reshapes images while maintaining aspect ratio by adding padding, suitable for YOLO models."""
+        shape = img.shape[:2]  # current shape [height, width]
+
+        # Scale ratio (new / old)
+        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+
+        # Compute padding
+        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+        dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding
+
+        if shape[::-1] != new_unpad:  # resize
+            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))
+
+        return img, (top / img.shape[0], left / img.shape[1])
+
+    def draw_detections(self, img: np.ndarray, box: np.ndarray, score: np.float32, class_id: int) -> None:
+        """
+        Draws bounding boxes and labels on the input image based on the detected objects.
+
+        Args:
+            img (np.ndarray): The input image to draw detections on.
+            box (np.ndarray): Detected bounding box in the format [x1, y1, width, height].
+            score (np.float32): Corresponding detection score.
+            class_id (int): Class ID for the detected object.
+
+        Returns:
+            None
+        """
+        x1, y1, w, h = box
+        color = self.color_palette[class_id]
+
+        cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)
+
+        label = f"{self.classes[class_id]}: {score:.2f}"
+
+        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
+
+        label_x = x1
+        label_y = y1 - 10 if y1 - 10 > label_height else y1 + 10
+
+        cv2.rectangle(
+            img,
+            (int(label_x), int(label_y - label_height)),
+            (int(label_x + label_width), int(label_y + label_height)),
+            color,
+            cv2.FILLED,
+        )
+
+        cv2.putText(img, label, (int(label_x), int(label_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
+
+    def preprocess(self, img: np.ndarray) -> Tuple[np.ndarray, Tuple[float, float]]:
+        """
+        Preprocesses the input image before performing inference.
+
+        Args:
+            img (np.ndarray): The input image to be preprocessed.
+
+        Returns:
+            Tuple[np.ndarray, Tuple[float, float]]: A tuple containing:
+                - The preprocessed image (np.ndarray).
+                - A tuple of two float values representing the padding applied (top/bottom, left/right).
+        """
+        img, pad = self.letterbox(img, (self.in_width, self.in_height))
+        img = img[..., ::-1][None]  # BGR to RGB and add batch dim -> (N, H, W, C) for TFLite
+        img = np.ascontiguousarray(img)
+        img = img.astype(np.float32)
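+        # Scale pixel values to [0, 1]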
+        return img / 255, pad
+
+    def postprocess(self, img: np.ndarray, outputs: np.ndarray, pad: Tuple[float, float]) -> np.ndarray:
+        """
+        Performs post-processing on the model's output to extract bounding boxes, scores, and class IDs.
+
+        Args:
+            img (numpy.ndarray): The input image.
+            outputs (numpy.ndarray): The output of the model.
+            pad (Tuple[float, float]): Padding used by letterbox.
+
+        Returns:
+            numpy.ndarray: The input image with detections drawn on it.
+        """
+        outputs[:, 0] -= pad[1]
+        outputs[:, 1] -= pad[0]
+        outputs[:, :4] *= max(img.shape)
+
+        outputs = outputs.transpose(0, 2, 1)
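+        # Convert box centers (xc, yc) to top-left corners (x1, y1)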
+        outputs[..., 0] -= outputs[..., 2] / 2
+        outputs[..., 1] -= outputs[..., 3] / 2
+
+        for out in outputs:
+            scores = out[:, 4:].max(-1)
+            keep = scores > self.conf
+            boxes = out[keep, :4]
+            scores = scores[keep]
+            class_ids = out[keep, 4:].argmax(-1)
+
+            indices = cv2.dnn.NMSBoxes(boxes, scores, self.conf, self.iou).flatten()
+
+            for i in indices:
+                self.draw_detections(img, boxes[i], scores[i], class_ids[i])
+
+        return img
+
+    def detect(self, img_path: str) -> np.ndarray:
+        """
+        Performs inference using a TFLite model and returns the output image with drawn detections.
+
+        Args:
+            img_path (str): The path to the input image file.
+
+        Returns:
+            np.ndarray: The output image with drawn detections.
+        """
+        img = cv2.imread(img_path)
+        x, pad = self.preprocess(img)
+        if self.int8:
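+            # Quantize the float input: q = x / scale + zero_point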
+            x = (x / self.in_scale + self.in_zero_point).astype(np.int8)
+        self.model.set_tensor(self.in_index, x)
+
+        self.model.invoke()
+
+        y = self.model.get_tensor(self.out_index)
+
+        if self.int8:
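+            # Dequantize the int8 output: y = (q - zero_point) * scale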
+            y = (y.astype(np.float32) - self.out_zero_point) * self.out_scale
+
+        return self.postprocess(img, y, pad)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model",
+        type=str,
+        default="yolov8n_saved_model/yolov8n_full_integer_quant.tflite",
+        help="Path to TFLite model.",
+    )
+    parser.add_argument("--img", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image")
+    parser.add_argument("--conf", type=float, default=0.25, help="Confidence threshold")
+    parser.add_argument("--iou", type=float, default=0.45, help="NMS IoU threshold")
+    parser.add_argument("--metadata", type=str, default="yolov8n_saved_model/metadata.yaml", help="Metadata yaml")
+    args = parser.parse_args()
+
+    detector = YOLOv8TFLite(args.model, args.conf, args.iou, args.metadata)
+    result = detector.detect(args.img)
+
+    cv2.imshow("Output", result)
+    cv2.waitKey(0)

+ 186 - 0
examples/heatmaps.ipynb

@@ -0,0 +1,186 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "PN1cAxdvd61e"
+   },
+   "source": [
+    "<div align=\"center\">\n",
+    "\n",
+    "  <a href=\"https://ultralytics.com/yolo\" target=\"_blank\">\n",
+    "    <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
+    "\n",
+    "  [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
+    "\n",
+    "  <a href=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml\"><img src=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg\" alt=\"Ultralytics CI\"></a>\n",
+    "  <a href=\"https://console.paperspace.com/github/ultralytics/ultralytics\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"/></a>\n",
+    "  <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/heatmaps.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
+    "  <a href=\"https://www.kaggle.com/models/ultralytics/yolo11\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+    "  <a href=\"https://ultralytics.com/discord\"><img alt=\"Discord\" src=\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\"></a>\n",
+    "\n",
+    "Welcome to the Ultralytics YOLO11 🚀 notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLO11</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n",
+    "\n",
+    "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
+    "\n",
+    "We hope that the resources in this notebook will help you get the most out of YOLO11. Please browse the YOLO11 <a href=\"https://docs.ultralytics.com/guides/heatmaps\">Heatmap Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
+    "\n",
+    "</div>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "o68Sg1oOeZm2"
+   },
+   "source": [
+    "# Setup\n",
+    "\n",
+    "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+    "\n",
+    "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "9dSwz_uOReMI",
+    "outputId": "99866c77-e210-41e1-d581-8508371ce634"
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Ultralytics 8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n",
+      "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install ultralytics\n",
+    "import ultralytics\n",
+    "\n",
+    "ultralytics.checks()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "m7VkxQ2aeg7k"
+   },
+   "source": [
+    "# Introduction to Heatmaps\n",
+    "\n",
+    "A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) transforms complex data into a vibrant, color-coded matrix. This visual tool employs a spectrum of colors to represent varying data values, where warmer hues indicate higher intensities and cooler tones signify lower values. Heatmaps excel in visualizing intricate data patterns, correlations, and anomalies, offering an accessible and engaging approach to data interpretation across diverse domains.\n",
+    "\n",
+    "## Real World Applications\n",
+    "\n",
+    "|                                                                 Transportation                                                                  |                                                                 Retail                                                                  |\n",
+    "|:-----------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------:|\n",
+    "| ![Ultralytics YOLO11 Transportation Heatmap](https://github.com/RizwanMunawar/ultralytics/assets/62513924/288d7053-622b-4452-b4e4-1f41aeb764aa) | ![Ultralytics YOLO11 Retail Heatmap](https://github.com/RizwanMunawar/ultralytics/assets/62513924/edef75ad-50a7-4c0a-be4a-a66cdfc12802) |\n",
+    "|                                                    Ultralytics YOLO11 Transportation Heatmap                                                    |                                                    Ultralytics YOLO11 Retail Heatmap                                                    |\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Cx-u59HQdu2o"
+   },
+   "outputs": [],
+   "source": [
+    "import cv2\n",
+    "\n",
+    "from ultralytics import solutions\n",
+    "\n",
+    "# Open video file\n",
+    "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
+    "assert cap.isOpened(), \"Error reading video file\"\n",
+    "\n",
+    "# Get video properties\n",
+    "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+    "\n",
+    "# Initialize video writer\n",
+    "video_writer = cv2.VideoWriter(\"heatmap_output.avi\", cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n",
+    "\n",
+    "# Initialize heatmap object\n",
+    "heatmap_obj = solutions.Heatmap(\n",
+    "    colormap=cv2.COLORMAP_PARULA,  # Color of the heatmap\n",
+    "    show=True,  # Display the image during processing\n",
+    "    model=\"yolo11n.pt\",  # Ultralytics YOLO11 model file\n",
+    ")\n",
+    "\n",
+    "while cap.isOpened():\n",
+    "    success, im0 = cap.read()\n",
+    "    if not success:\n",
+    "        print(\"Video frame is empty or video processing has been successfully completed.\")\n",
+    "        break\n",
+    "\n",
+    "    # Generate heatmap on the frame\n",
+    "    im0 = heatmap_obj.generate_heatmap(im0)\n",
+    "\n",
+    "    # Write the frame to the output video\n",
+    "    video_writer.write(im0)\n",
+    "\n",
+    "# Release resources\n",
+    "cap.release()\n",
+    "video_writer.release()\n",
+    "cv2.destroyAllWindows()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "QrlKg-y3fEyD"
+   },
+   "source": [
+    "# Additional Resources\n",
+    "\n",
+    "## Community Support\n",
+    "\n",
+    "For more information on using heatmaps with Ultralytics, you can explore the comprehensive [Ultralytics Heatmaps Docs](https://docs.ultralytics.com/guides/heatmaps/). This guide covers everything from basic concepts to advanced techniques, ensuring you get the most out of your heatmap visualizations.\n",
+    "\n",
+    "## Ultralytics ⚡ Resources\n",
+    "\n",
+    "At Ultralytics, we are committed to providing cutting-edge AI solutions. Here are some key resources to learn more about our company and get involved with our community:\n",
+    "\n",
+    "- [Ultralytics HUB](https://ultralytics.com/hub): Simplify your AI projects with Ultralytics HUB, our no-code tool for effortless YOLO training and deployment.\n",
+    "- [Ultralytics Licensing](https://ultralytics.com/license): Review our licensing terms to understand how you can use our software in your projects.\n",
+    "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n",
+    "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n",
+    "\n",
+    "## YOLO11 🚀 Resources\n",
+    "\n",
+    "YOLO11 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLO11:\n",
+    "\n",
+    "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLO11 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n",
+    "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLO11, including installation guides, tutorials, and detailed API references.\n",
+    "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n",
+    "\n",
+    "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLO11. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed."
+   ]
+  }
+ ],
+ "metadata": {
+  "accelerator": "GPU",
+  "colab": {
+   "gpuType": "T4",
+   "provenance": [],
+   "toc_visible": true
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 115 - 0
examples/hub.ipynb

@@ -0,0 +1,115 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "FIzICjaph_Wy"
+   },
+   "source": [
+    "<a align=\"center\" href=\"https://ultralytics.com/hub\" target=\"_blank\">\n",
+    "<img width=\"1024\", src=\"https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png\"></a>\n",
+    "\n",
+    "<div align=\"center\">\n",
+    "\n",
+    "[中文](https://docs.ultralytics.com/zh/hub/) | [한국어](https://docs.ultralytics.com/ko/hub/) | [日本語](https://docs.ultralytics.com/ja/hub/) | [Русский](https://docs.ultralytics.com/ru/hub/) | [Deutsch](https://docs.ultralytics.com/de/hub/) | [Français](https://docs.ultralytics.com/fr/hub/) | [Español](https://docs.ultralytics.com/es/hub/) | [Português](https://docs.ultralytics.com/pt/hub/) | [Türkçe](https://docs.ultralytics.com/tr/hub/) | [Tiếng Việt](https://docs.ultralytics.com/vi/hub/) | [العربية](https://docs.ultralytics.com/ar/hub/)\n",
+    "\n",
+    "  <a href=\"https://github.com/ultralytics/hub/actions/workflows/ci.yml\"><img src=\"https://github.com/ultralytics/hub/actions/workflows/ci.yml/badge.svg\" alt=\"CI CPU\"></a>\n",
+    "  <a href=\"https://colab.research.google.com/github/ultralytics/hub/blob/main/hub.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
+    "\n",
+    "  <a href=\"https://ultralytics.com/discord\"><img alt=\"Discord\" src=\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\"></a>\n",
+    "  <a href=\"https://community.ultralytics.com\"><img alt=\"Ultralytics Forums\" src=\"https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue\"></a>\n",
+    "  <a href=\"https://reddit.com/r/ultralytics\"><img alt=\"Ultralytics Reddit\" src=\"https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue\"></a>\n",
+    "\n",
+    "Welcome to the [Ultralytics](https://ultralytics.com/) HUB notebook!\n",
+    "\n",
+    "This notebook allows you to train Ultralytics [YOLO](https://github.com/ultralytics/ultralytics) 🚀 models using [HUB](https://hub.ultralytics.com/). Please browse the HUB <a href=\"https://docs.ultralytics.com/hub/\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/hub/issues/new/choose\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
+    "</div>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "eRQ2ow94MiOv"
+   },
+   "source": [
+    "# Setup\n",
+    "\n",
+    "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+    "\n",
+    "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "FyDnXd-n4c7Y",
+    "outputId": "e1d713ec-e8a6-4422-fe61-c76ec9f03df5"
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Ultralytics 8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n",
+      "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 28.8/78.2 GB disk)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install ultralytics  # install\n",
+    "from ultralytics import YOLO, checks, hub\n",
+    "\n",
+    "checks()  # checks"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "cQ9BwaAqxAm4"
+   },
+   "source": [
+    "# Start\n",
+    "\n",
+    "⚡ Login with your API key, load your YOLO 🚀 model and start training in 3 lines of code!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "XSlZaJ9Iw_iZ"
+   },
+   "outputs": [],
+   "source": [
+    "# Log in to HUB using your API key (https://hub.ultralytics.com/settings?tab=api+keys)\n",
+    "hub.login(\"YOUR_API_KEY\")\n",
+    "\n",
+    "# Load your model from HUB (replace 'YOUR_MODEL_ID' with your model ID)\n",
+    "model = YOLO(\"https://hub.ultralytics.com/models/YOUR_MODEL_ID\")\n",
+    "\n",
+    "# Train the model\n",
+    "results = model.train()"
+   ]
+  }
+ ],
+ "metadata": {
+  "accelerator": "GPU",
+  "colab": {
+   "name": "Ultralytics HUB",
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 200 - 0
examples/object_counting.ipynb

@@ -0,0 +1,200 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "PN1cAxdvd61e"
+   },
+   "source": [
+    "<div align=\"center\">\n",
+    "\n",
+    "  <a href=\"https://ultralytics.com/yolo\" target=\"_blank\">\n",
+    "    <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
+    "\n",
+    "  [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
+    "\n",
+    "  <a href=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml\"><img src=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg\" alt=\"Ultralytics CI\"></a>\n",
+    "  <a href=\"https://console.paperspace.com/github/ultralytics/ultralytics\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"/></a>\n",
+    "  <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_counting.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
+    "  <a href=\"https://www.kaggle.com/models/ultralytics/yolo11\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+    "  <a href=\"https://ultralytics.com/discord\"><img alt=\"Discord\" src=\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\"></a>\n",
+    "\n",
+    "Welcome to the Ultralytics YOLO11 🚀 notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLO11</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n",
+    "\n",
+    "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
+    "\n",
+    "We hope that the resources in this notebook will help you get the most out of YOLO11. Please browse the YOLO11 <a href=\"https://docs.ultralytics.com/guides/object-counting/\"> Object Counting Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
+    "\n",
+    "</div>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "o68Sg1oOeZm2"
+   },
+   "source": [
+    "# Setup\n",
+    "\n",
+    "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+    "\n",
+    "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "9dSwz_uOReMI",
+    "outputId": "fd3bab88-2f25-46c0-cae9-04d2beedc0c1"
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Ultralytics 8.2.18 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n",
+      "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install ultralytics\n",
+    "import ultralytics\n",
+    "\n",
+    "ultralytics.checks()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "m7VkxQ2aeg7k"
+   },
+   "source": [
+    "# Object Counting using Ultralytics YOLO11 🚀\n",
+    "\n",
+    "## What is Object Counting?\n",
+    "\n",
+    "Object counting with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) involves accurate identification and counting of specific objects in videos and camera streams. YOLO11 excels in real-time applications, providing efficient and precise object counting for various scenarios like crowd analysis and surveillance, thanks to its state-of-the-art algorithms and deep learning capabilities.\n",
+    "\n",
+    "## Advantages of Object Counting?\n",
+    "\n",
+    "- **Resource Optimization:** Object counting facilitates efficient resource management by providing accurate counts, and optimizing resource allocation in applications like inventory management.\n",
+    "- **Enhanced Security:** Object counting enhances security and surveillance by accurately tracking and counting entities, aiding in proactive threat detection.\n",
+    "- **Informed Decision-Making:** Object counting offers valuable insights for decision-making, optimizing processes in retail, traffic management, and various other domains.\n",
+    "\n",
+    "## Real World Applications\n",
+    "\n",
+    "|                                                                           Logistics                                                                           |                                                                     Aquaculture                                                                     |\n",
+    "|:-------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------:|\n",
+    "| ![Conveyor Belt Packets Counting Using Ultralytics YOLO11](https://github.com/RizwanMunawar/ultralytics/assets/62513924/70e2d106-510c-4c6c-a57a-d34a765aa757) | ![Fish Counting in Sea using Ultralytics YOLO11](https://github.com/RizwanMunawar/ultralytics/assets/62513924/c60d047b-3837-435f-8d29-bb9fc95d2191) |\n",
+    "|                                                    Conveyor Belt Packets Counting Using Ultralytics YOLO11                                                    |                                                    Fish Counting in Sea using Ultralytics YOLO11                                                    |\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Cx-u59HQdu2o"
+   },
+   "outputs": [],
+   "source": [
+    "import cv2\n",
+    "\n",
+    "from ultralytics import solutions\n",
+    "\n",
+    "# Open the video file\n",
+    "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
+    "assert cap.isOpened(), \"Error reading video file\"\n",
+    "\n",
+    "# Get video properties: width, height, and frames per second (fps)\n",
+    "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+    "\n",
+    "# Define points for a line or region of interest in the video frame\n",
+    "line_points = [(20, 400), (1080, 400)]  # Line coordinates\n",
+    "\n",
+    "# Initialize the video writer to save the output video\n",
+    "video_writer = cv2.VideoWriter(\"object_counting_output.avi\", cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n",
+    "\n",
+    "# Initialize the Object Counter with visualization options and other parameters\n",
+    "counter = solutions.ObjectCounter(\n",
+    "    show=True,  # Display the image during processing\n",
+    "    region=line_points,  # Region of interest points\n",
+    "    model=\"yolo11n.pt\",  # Ultralytics YOLO11 model file\n",
+    "    line_width=2,  # Thickness of the lines and bounding boxes\n",
+    ")\n",
+    "\n",
+    "# Process video frames in a loop\n",
+    "while cap.isOpened():\n",
+    "    success, im0 = cap.read()\n",
+    "    if not success:\n",
+    "        print(\"Video frame is empty or video processing has been successfully completed.\")\n",
+    "        break\n",
+    "\n",
+    "    # Use the Object Counter to count objects in the frame and get the annotated image\n",
+    "    im0 = counter.count(im0)\n",
+    "\n",
+    "    # Write the annotated frame to the output video\n",
+    "    video_writer.write(im0)\n",
+    "\n",
+    "# Release the video capture and writer objects\n",
+    "cap.release()\n",
+    "video_writer.release()\n",
+    "\n",
+    "# Close all OpenCV windows\n",
+    "cv2.destroyAllWindows()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "QrlKg-y3fEyD"
+   },
+   "source": [
+    "# Additional Resources\n",
+    "\n",
+    "## Community Support\n",
+    "\n",
+    "For more information on counting objects with Ultralytics, you can explore the comprehensive [Ultralytics Object Counting Docs](https://docs.ultralytics.com/guides/object-counting/). This guide covers everything from basic concepts to advanced techniques, ensuring you get the most out of counting and visualization.\n",
+    "\n",
+    "## Ultralytics ⚡ Resources\n",
+    "\n",
+    "At Ultralytics, we are committed to providing cutting-edge AI solutions. Here are some key resources to learn more about our company and get involved with our community:\n",
+    "\n",
+    "- [Ultralytics HUB](https://ultralytics.com/hub): Simplify your AI projects with Ultralytics HUB, our no-code tool for effortless YOLO training and deployment.\n",
+    "- [Ultralytics Licensing](https://ultralytics.com/license): Review our licensing terms to understand how you can use our software in your projects.\n",
+    "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n",
+    "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n",
+    "\n",
+    "## YOLO11 🚀 Resources\n",
+    "\n",
+    "YOLO11 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLO11:\n",
+    "\n",
+    "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLO11 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n",
+    "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLO11, including installation guides, tutorials, and detailed API references.\n",
+    "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n",
+    "\n",
+    "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLO11. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed."
+   ]
+  }
+ ],
+ "metadata": {
+  "accelerator": "GPU",
+  "colab": {
+   "gpuType": "T4",
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 245 - 0
examples/object_tracking.ipynb

@@ -0,0 +1,245 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "PN1cAxdvd61e"
+   },
+   "source": [
+    "<div align=\"center\">\n",
+    "\n",
+    "  <a href=\"https://ultralytics.com/yolo\" target=\"_blank\">\n",
+    "    <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
+    "\n",
+    "  [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
+    "\n",
+    "  <a href=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml\"><img src=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg\" alt=\"Ultralytics CI\"></a>\n",
+    "  <a href=\"https://console.paperspace.com/github/ultralytics/ultralytics\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"/></a>\n",
+    "  <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_tracking.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
+    "  <a href=\"https://www.kaggle.com/models/ultralytics/yolo11\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+    "  <a href=\"https://ultralytics.com/discord\"><img alt=\"Discord\" src=\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\"></a>\n",
+    "\n",
+    "Welcome to the Ultralytics YOLO11 🚀 notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLO11</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n",
+    "\n",
+    "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
+    "\n",
+    "We hope that the resources in this notebook will help you get the most out of YOLO11. Please browse the YOLO11 <a href=\"https://docs.ultralytics.com/modes/track/\"> Tracking Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
+    "\n",
+    "</div>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "o68Sg1oOeZm2"
+   },
+   "source": [
+    "# Setup\n",
+    "\n",
+    "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+    "\n",
+    "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "9dSwz_uOReMI",
+    "outputId": "ed8c2370-8fc7-4e4e-f669-d0bae4d944e9"
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Ultralytics 8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n",
+      "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%pip install ultralytics\n",
+    "import ultralytics\n",
+    "\n",
+    "ultralytics.checks()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "m7VkxQ2aeg7k"
+   },
+   "source": [
+    "# Ultralytics Object Tracking\n",
+    "\n",
+    "[Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) instance segmentation involves identifying and outlining individual objects in an image, providing a detailed understanding of spatial distribution. Unlike semantic segmentation, it uniquely labels and precisely delineates each object, crucial for tasks like object detection and medical imaging.\n",
+    "\n",
+    "There are two types of instance segmentation tracking available in the Ultralytics package:\n",
+    "\n",
+    "- **Instance Segmentation with Class Objects:** Each class object is assigned a unique color for clear visual separation.\n",
+    "\n",
+    "- **Instance Segmentation with Object Tracks:** Every track is represented by a distinct color, facilitating easy identification and tracking.\n",
+    "\n",
+    "## Samples\n",
+    "\n",
+    "|                                                          Instance Segmentation                                                          |                                                           Instance Segmentation + Object Tracking                                                            |\n",
+    "|:---------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------:|\n",
+    "| ![Ultralytics Instance Segmentation](https://github.com/RizwanMunawar/ultralytics/assets/62513924/d4ad3499-1f33-4871-8fbc-1be0b2643aa2) | ![Ultralytics Instance Segmentation with Object Tracking](https://github.com/RizwanMunawar/ultralytics/assets/62513924/2e5c38cc-fd5c-4145-9682-fa94ae2010a0) |\n",
+    "|                                                  Ultralytics Instance Segmentation 😍                                                   |                                                  Ultralytics Instance Segmentation with Object Tracking 🔥                                                   |"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "-ZF9DM6e6gz0"
+   },
+   "source": [
+    "## CLI\n",
+    "\n",
+    "Command-Line Interface (CLI) example."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "-XJqhOwo6iqT"
+   },
+   "outputs": [],
+   "source": [
+    "!yolo track source=\"/path/to/video/file.mp4\" save=True"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "XRcw0vIE6oNb"
+   },
+   "source": [
+    "## Python\n",
+    "\n",
+    "Python Instance Segmentation and Object tracking example."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Cx-u59HQdu2o"
+   },
+   "outputs": [],
+   "source": [
+    "from collections import defaultdict\n",
+    "\n",
+    "import cv2\n",
+    "\n",
+    "from ultralytics import YOLO\n",
+    "from ultralytics.utils.plotting import Annotator, colors\n",
+    "\n",
+    "# Dictionary to store tracking history with default empty lists\n",
+    "track_history = defaultdict(lambda: [])\n",
+    "\n",
+    "# Load the YOLO model with segmentation capabilities\n",
+    "model = YOLO(\"yolo11n-seg.pt\")\n",
+    "\n",
+    "# Open the video file\n",
+    "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
+    "\n",
+    "# Retrieve video properties: width, height, and frames per second\n",
+    "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+    "\n",
+    "# Initialize video writer to save the output video with the specified properties\n",
+    "out = cv2.VideoWriter(\"instance-segmentation-object-tracking.avi\", cv2.VideoWriter_fourcc(*\"MJPG\"), fps, (w, h))\n",
+    "\n",
+    "while True:\n",
+    "    # Read a frame from the video\n",
+    "    ret, im0 = cap.read()\n",
+    "    if not ret:\n",
+    "        print(\"Video frame is empty or video processing has been successfully completed.\")\n",
+    "        break\n",
+    "\n",
+    "    # Create an annotator object to draw on the frame\n",
+    "    annotator = Annotator(im0, line_width=2)\n",
+    "\n",
+    "    # Perform object tracking on the current frame\n",
+    "    results = model.track(im0, persist=True)\n",
+    "\n",
+    "    # Check if tracking IDs and masks are present in the results\n",
+    "    if results[0].boxes.id is not None and results[0].masks is not None:\n",
+    "        # Extract masks and tracking IDs\n",
+    "        masks = results[0].masks.xy\n",
+    "        track_ids = results[0].boxes.id.int().cpu().tolist()\n",
+    "\n",
+    "        # Annotate each mask with its corresponding tracking ID and color\n",
+    "        for mask, track_id in zip(masks, track_ids):\n",
+    "            annotator.seg_bbox(mask=mask, mask_color=colors(int(track_id), True), label=str(track_id))\n",
+    "\n",
+    "    # Write the annotated frame to the output video\n",
+    "    out.write(im0)\n",
+    "    # Display the annotated frame\n",
+    "    cv2.imshow(\"instance-segmentation-object-tracking\", im0)\n",
+    "\n",
+    "    # Exit the loop if 'q' is pressed\n",
+    "    if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
+    "        break\n",
+    "\n",
+    "# Release the video writer and capture objects, and close all OpenCV windows\n",
+    "out.release()\n",
+    "cap.release()\n",
+    "cv2.destroyAllWindows()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "QrlKg-y3fEyD"
+   },
+   "source": [
+    "# Additional Resources\n",
+    "\n",
+    "## Community Support\n",
+    "\n",
+    "For more information on using tracking with Ultralytics, you can explore the comprehensive [Ultralytics Tracking Docs](https://docs.ultralytics.com/modes/track/). This guide covers everything from basic concepts to advanced techniques, ensuring you get the most out of tracking and visualization.\n",
+    "\n",
+    "## Ultralytics ⚡ Resources\n",
+    "\n",
+    "At Ultralytics, we are committed to providing cutting-edge AI solutions. Here are some key resources to learn more about our company and get involved with our community:\n",
+    "\n",
+    "- [Ultralytics HUB](https://ultralytics.com/hub): Simplify your AI projects with Ultralytics HUB, our no-code tool for effortless YOLO training and deployment.\n",
+    "- [Ultralytics Licensing](https://ultralytics.com/license): Review our licensing terms to understand how you can use our software in your projects.\n",
+    "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n",
+    "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n",
+    "\n",
+    "## YOLO11 🚀 Resources\n",
+    "\n",
+    "YOLO11 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLO11:\n",
+    "\n",
+    "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLO11 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n",
+    "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLO11, including installation guides, tutorials, and detailed API references.\n",
+    "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n",
+    "\n",
+    "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLO11. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed."
+   ]
+  }
+ ],
+ "metadata": {
+  "accelerator": "GPU",
+  "colab": {
+   "gpuType": "T4",
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

+ 665 - 0
examples/tutorial.ipynb

@@ -0,0 +1,665 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "name": "YOLO11 Tutorial",
+      "provenance": [],
+      "toc_visible": true
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "accelerator": "GPU"
+  },
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "t6MPjfT5NrKQ"
+      },
+      "source": [
+        "<div align=\"center\">\n",
+        "\n",
+        "  <a href=\"https://ultralytics.com/yolo\" target=\"_blank\">\n",
+        "    <img width=\"1024\", src=\"https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png\"></a>\n",
+        "\n",
+        "  [中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
+        "\n",
+        "  <a href=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml\"><img src=\"https://github.com/ultralytics/ultralytics/actions/workflows/ci.yml/badge.svg\" alt=\"Ultralytics CI\"></a>\n",
+        "  <a href=\"https://console.paperspace.com/github/ultralytics/ultralytics\"><img src=\"https://assets.paperspace.io/img/gradient-badge.svg\" alt=\"Run on Gradient\"/></a>\n",
+        "  <a href=\"https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n",
+        "  <a href=\"https://www.kaggle.com/models/ultralytics/yolo11\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
+        "\n",
+        "  <a href=\"https://ultralytics.com/discord\"><img alt=\"Discord\" src=\"https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue\"></a>\n",
+        "  <a href=\"https://community.ultralytics.com\"><img alt=\"Ultralytics Forums\" src=\"https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue\"></a>\n",
+        "  <a href=\"https://reddit.com/r/ultralytics\"><img alt=\"Ultralytics Reddit\" src=\"https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue\"></a>\n",
+        "\n",
+        "Welcome to the Ultralytics YOLO11 🚀 notebook! <a href=\"https://github.com/ultralytics/ultralytics\">YOLO11</a> is the latest version of the YOLO (You Only Look Once) AI models developed by <a href=\"https://ultralytics.com\">Ultralytics</a>. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n",
+        "\n",
+        "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n",
+        "\n",
+        "We hope that the resources in this notebook will help you get the most out of YOLO11. Please browse the YOLO11 <a href=\"https://docs.ultralytics.com/\">Docs</a> for details, raise an issue on <a href=\"https://github.com/ultralytics/ultralytics\">GitHub</a> for support, and join our <a href=\"https://ultralytics.com/discord\">Discord</a> community for questions and discussions!\n",
+        "\n",
+        "  <a href=\"https://www.youtube.com/watch?v=ZN3nRZT7b24\" target=\"_blank\">\n",
+        "    <img src=\"https://img.youtube.com/vi/ZN3nRZT7b24/maxresdefault.jpg\" alt=\"Ultralytics Video\" width=\"720\" style=\"border-radius: 10px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);\"></a>\n",
+        "  \n",
+        "  <p style=\"font-size: 16px; font-family: Arial, sans-serif; color: #555;\">\n",
+        "    <strong>Watch: </strong> How to Train\n",
+        "  <a href=\"https://github.com/ultralytics/ultralytics\">Ultralytics</a>\n",
+        "  <a href=\"https://docs.ultralytics.com/models/yolo11/\">YOLO11</a> Model on Custom Dataset using Google Colab Notebook 🚀</p>\n",
+        "</div>"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "7mGmQbAO5pQb"
+      },
+      "source": [
+        "# Setup\n",
+        "\n",
+        "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) and check software and hardware.\n",
+        "\n",
+        "[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "wbvMlHd_QwMG",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "2e992f9f-90bb-4668-de12-fed629975285"
+      },
+      "source": [
+        "%pip install ultralytics\n",
+        "import ultralytics\n",
+        "ultralytics.checks()"
+      ],
+      "execution_count": 1,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+            "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 41.1/112.6 GB disk)\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "4JnkELT0cIJg"
+      },
+      "source": [
+        "# 1. Predict\n",
+        "\n",
+        "YOLO11 may be used directly in the Command Line Interface (CLI) with a `yolo` command for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See a full list of available `yolo` [arguments](https://docs.ultralytics.com/usage/cfg/) and other details in the [YOLO11 Predict Docs](https://docs.ultralytics.com/modes/train/).\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "zR9ZbuQCH7FX",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "outputId": "e3ebec6f-658a-4803-d80c-e07d12908767"
+      },
+      "source": [
+        "# Run inference on an image with YOLO11n\n",
+        "!yolo predict model=yolo11n.pt source='https://ultralytics.com/images/zidane.jpg'"
+      ],
+      "execution_count": 2,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Downloading https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt to 'yolo11n.pt'...\n",
+            "100% 5.35M/5.35M [00:00<00:00, 72.7MB/s]\n",
+            "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+            "YOLO11n summary (fused): 238 layers, 2,616,248 parameters, 0 gradients, 6.5 GFLOPs\n",
+            "\n",
+            "Downloading https://ultralytics.com/images/zidane.jpg to 'zidane.jpg'...\n",
+            "100% 49.2k/49.2k [00:00<00:00, 5.37MB/s]\n",
+            "image 1/1 /content/zidane.jpg: 384x640 2 persons, 1 tie, 63.4ms\n",
+            "Speed: 14.5ms preprocess, 63.4ms inference, 820.9ms postprocess per image at shape (1, 3, 384, 640)\n",
+            "Results saved to \u001b[1mruns/detect/predict\u001b[0m\n",
+            "💡 Learn more at https://docs.ultralytics.com/modes/predict\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "hkAzDWJ7cWTr"
+      },
+      "source": [
+        "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n",
+        "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/212889447-69e5bdf1-5800-4e29-835e-2ed2336dede2.jpg\" width=\"600\">"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "0eq1SMWl6Sfn"
+      },
+      "source": [
+        "# 2. Val\n",
+        "Validate a model's accuracy on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset's `val` or `test` splits. The latest YOLO11 [models](https://github.com/ultralytics/ultralytics#models) are downloaded automatically the first time they are used. See [YOLO11 Val Docs](https://docs.ultralytics.com/modes/val/) for more information."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "WQPtK1QYVaD_"
+      },
+      "source": [
+        "# Download COCO val\n",
+        "import torch\n",
+        "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')  # download (780M - 5000 images)\n",
+        "!unzip -q tmp.zip -d datasets && rm tmp.zip  # unzip"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "X58w8JLpMnjH",
+        "outputId": "af2a5deb-029b-466d-96a4-bd3e406987fa",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        }
+      },
+      "source": [
+        "# Validate YOLO11n on COCO8 val\n",
+        "!yolo val model=yolo11n.pt data=coco8.yaml"
+      ],
+      "execution_count": 3,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+            "YOLO11n summary (fused): 238 layers, 2,616,248 parameters, 0 gradients, 6.5 GFLOPs\n",
+            "\n",
+            "Dataset 'coco8.yaml' images not found ⚠️, missing path '/content/datasets/coco8/images/val'\n",
+            "Downloading https://ultralytics.com/assets/coco8.zip to '/content/datasets/coco8.zip'...\n",
+            "100% 433k/433k [00:00<00:00, 15.8MB/s]\n",
+            "Unzipping /content/datasets/coco8.zip to /content/datasets/coco8...: 100% 25/25 [00:00<00:00, 1188.35file/s]\n",
+            "Dataset download success ✅ (1.4s), saved to \u001b[1m/content/datasets\u001b[0m\n",
+            "\n",
+            "Downloading https://ultralytics.com/assets/Arial.ttf to '/root/.config/Ultralytics/Arial.ttf'...\n",
+            "100% 755k/755k [00:00<00:00, 17.7MB/s]\n",
+            "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco8/labels/val... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<00:00, 142.04it/s]\n",
+            "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco8/labels/val.cache\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 1/1 [00:04<00:00,  4.75s/it]\n",
+            "                   all          4         17       0.57       0.85      0.847      0.632\n",
+            "                person          3         10      0.557        0.6      0.585      0.272\n",
+            "                   dog          1          1      0.548          1      0.995      0.697\n",
+            "                 horse          1          2      0.531          1      0.995      0.674\n",
+            "              elephant          1          2      0.371        0.5      0.516      0.256\n",
+            "              umbrella          1          1      0.569          1      0.995      0.995\n",
+            "          potted plant          1          1      0.847          1      0.995      0.895\n",
+            "Speed: 1.0ms preprocess, 73.8ms inference, 0.0ms loss, 561.4ms postprocess per image\n",
+            "Results saved to \u001b[1mruns/detect/val\u001b[0m\n",
+            "💡 Learn more at https://docs.ultralytics.com/modes/val\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "ZY2VXXXu74w5"
+      },
+      "source": [
+        "# 3. Train\n",
+        "\n",
+        "<p align=\"\"><a href=\"https://ultralytics.com/hub\"><img width=\"1000\" src=\"https://github.com/ultralytics/assets/raw/main/yolov8/banner-integrations.png\"/></a></p>\n",
+        "\n",
+        "Train YOLO11 on [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/) and [Pose](https://docs.ultralytics.com/tasks/pose/) datasets. See [YOLO11 Train Docs](https://docs.ultralytics.com/modes/train/) for more information."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "#@title Select YOLO11 🚀 logger {run: 'auto'}\n",
+        "logger = 'Comet' #@param ['Comet', 'TensorBoard']\n",
+        "\n",
+        "if logger == 'Comet':\n",
+        "  %pip install -q comet_ml\n",
+        "  import comet_ml; comet_ml.init()\n",
+        "elif logger == 'TensorBoard':\n",
+        "  %load_ext tensorboard\n",
+        "  %tensorboard --logdir ."
+      ],
+      "metadata": {
+        "id": "ktegpM42AooT"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "1NcFxRcFdJ_O",
+        "outputId": "952f35f7-666f-4121-fbdf-2b3a33b28081",
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        }
+      },
+      "source": [
+        "# Train YOLO11n on COCO8 for 3 epochs\n",
+        "!yolo train model=yolo11n.pt data=coco8.yaml epochs=3 imgsz=640"
+      ],
+      "execution_count": 7,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+            "\u001b[34m\u001b[1mengine/trainer: \u001b[0mtask=detect, mode=train, model=yolo11n.pt, data=coco8.yaml, epochs=3, time=None, patience=100, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=train3, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=True, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, bgr=0.0, mosaic=1.0, mixup=0.0, copy_paste=0.0, copy_paste_mode=flip, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train3\n",
+            "\n",
+            "                   from  n    params  module                                       arguments                     \n",
+            "  0                  -1  1       464  ultralytics.nn.modules.conv.Conv             [3, 16, 3, 2]                 \n",
+            "  1                  -1  1      4672  ultralytics.nn.modules.conv.Conv             [16, 32, 3, 2]                \n",
+            "  2                  -1  1      6640  ultralytics.nn.modules.block.C3k2            [32, 64, 1, False, 0.25]      \n",
+            "  3                  -1  1     36992  ultralytics.nn.modules.conv.Conv             [64, 64, 3, 2]                \n",
+            "  4                  -1  1     26080  ultralytics.nn.modules.block.C3k2            [64, 128, 1, False, 0.25]     \n",
+            "  5                  -1  1    147712  ultralytics.nn.modules.conv.Conv             [128, 128, 3, 2]              \n",
+            "  6                  -1  1     87040  ultralytics.nn.modules.block.C3k2            [128, 128, 1, True]           \n",
+            "  7                  -1  1    295424  ultralytics.nn.modules.conv.Conv             [128, 256, 3, 2]              \n",
+            "  8                  -1  1    346112  ultralytics.nn.modules.block.C3k2            [256, 256, 1, True]           \n",
+            "  9                  -1  1    164608  ultralytics.nn.modules.block.SPPF            [256, 256, 5]                 \n",
+            " 10                  -1  1    249728  ultralytics.nn.modules.block.C2PSA           [256, 256, 1]                 \n",
+            " 11                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']          \n",
+            " 12             [-1, 6]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
+            " 13                  -1  1    111296  ultralytics.nn.modules.block.C3k2            [384, 128, 1, False]          \n",
+            " 14                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']          \n",
+            " 15             [-1, 4]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
+            " 16                  -1  1     32096  ultralytics.nn.modules.block.C3k2            [256, 64, 1, False]           \n",
+            " 17                  -1  1     36992  ultralytics.nn.modules.conv.Conv             [64, 64, 3, 2]                \n",
+            " 18            [-1, 13]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
+            " 19                  -1  1     86720  ultralytics.nn.modules.block.C3k2            [192, 128, 1, False]          \n",
+            " 20                  -1  1    147712  ultralytics.nn.modules.conv.Conv             [128, 128, 3, 2]              \n",
+            " 21            [-1, 10]  1         0  ultralytics.nn.modules.conv.Concat           [1]                           \n",
+            " 22                  -1  1    378880  ultralytics.nn.modules.block.C3k2            [384, 256, 1, True]           \n",
+            " 23        [16, 19, 22]  1    464912  ultralytics.nn.modules.head.Detect           [80, [64, 128, 256]]          \n",
+            "YOLO11n summary: 319 layers, 2,624,080 parameters, 2,624,064 gradients, 6.6 GFLOPs\n",
+            "\n",
+            "Transferred 499/499 items from pretrained weights\n",
+            "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/detect/train', view at http://localhost:6006/\n",
+            "Freezing layer 'model.23.dfl.conv.weight'\n",
+            "\u001b[34m\u001b[1mAMP: \u001b[0mrunning Automatic Mixed Precision (AMP) checks with YOLO11n...\n",
+            "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n",
+            "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco8/labels/train.cache... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<?, ?it/s]\n",
+            "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01, num_output_channels=3, method='weighted_average'), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n",
+            "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco8/labels/val.cache... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<?, ?it/s]\n",
+            "Plotting labels to runs/detect/train/labels.jpg... \n",
+            "\u001b[34m\u001b[1moptimizer:\u001b[0m 'optimizer=auto' found, ignoring 'lr0=0.01' and 'momentum=0.937' and determining best 'optimizer', 'lr0' and 'momentum' automatically... \n",
+            "\u001b[34m\u001b[1moptimizer:\u001b[0m AdamW(lr=0.000119, momentum=0.9) with parameter groups 81 weight(decay=0.0), 88 weight(decay=0.0005), 87 bias(decay=0.0)\n",
+            "\u001b[34m\u001b[1mTensorBoard: \u001b[0mmodel graph visualization added ✅\n",
+            "Image sizes 640 train, 640 val\n",
+            "Using 2 dataloader workers\n",
+            "Logging results to \u001b[1mruns/detect/train\u001b[0m\n",
+            "Starting training for 3 epochs...\n",
+            "\n",
+            "      Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size\n",
+            "        1/3     0.719G      1.004      3.249      1.367         30        640: 100% 1/1 [00:00<00:00,  1.16it/s]\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 1/1 [00:00<00:00,  5.07it/s]\n",
+            "                   all          4         17       0.58       0.85      0.849      0.631\n",
+            "\n",
+            "      Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size\n",
+            "        2/3     0.715G       1.31      4.043      1.603         35        640: 100% 1/1 [00:00<00:00,  6.88it/s]\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 1/1 [00:00<00:00,  9.08it/s]\n",
+            "                   all          4         17      0.581       0.85      0.851       0.63\n",
+            "\n",
+            "      Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size\n",
+            "        3/3     0.692G      1.134      3.174      1.599         18        640: 100% 1/1 [00:00<00:00,  6.75it/s]\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 1/1 [00:00<00:00, 11.60it/s]\n",
+            "                   all          4         17      0.582       0.85      0.855      0.632\n",
+            "\n",
+            "3 epochs completed in 0.003 hours.\n",
+            "Optimizer stripped from runs/detect/train/weights/last.pt, 5.5MB\n",
+            "Optimizer stripped from runs/detect/train/weights/best.pt, 5.5MB\n",
+            "\n",
+            "Validating runs/detect/train/weights/best.pt...\n",
+            "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
+            "YOLO11n summary (fused): 238 layers, 2,616,248 parameters, 0 gradients, 6.5 GFLOPs\n",
+            "                 Class     Images  Instances      Box(P          R      mAP50  mAP50-95): 100% 1/1 [00:00<00:00, 23.42it/s]\n",
+            "                   all          4         17      0.579       0.85      0.855      0.615\n",
+            "                person          3         10      0.579        0.6      0.623      0.268\n",
+            "                   dog          1          1      0.549          1      0.995      0.697\n",
+            "                 horse          1          2      0.553          1      0.995      0.675\n",
+            "              elephant          1          2      0.364        0.5      0.528      0.261\n",
+            "              umbrella          1          1      0.571          1      0.995      0.895\n",
+            "          potted plant          1          1      0.857          1      0.995      0.895\n",
+            "Speed: 0.2ms preprocess, 4.3ms inference, 0.0ms loss, 1.2ms postprocess per image\n",
+            "Results saved to \u001b[1mruns/detect/train\u001b[0m\n",
+            "💡 Learn more at https://docs.ultralytics.com/modes/train\n"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "# 4. Export\n",
+        "\n",
+        "Export a YOLO11 model to any supported format below with the `format` argument, i.e. `format=onnx`. See [YOLO11 Export Docs](https://docs.ultralytics.com/modes/export/) for more information.\n",
+        "\n",
+        "- 💡 ProTip: Export to [ONNX](https://docs.ultralytics.com/integrations/onnx/) or [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) for up to 3x CPU speedup.  \n",
+        "- 💡 ProTip: Export to [TensorRT](https://docs.ultralytics.com/integrations/tensorrt/) for up to 5x GPU speedup.\n",
+        "\n",
+        "| Format                                                                   | `format` Argument | Model                     | Metadata | Arguments                                                            |\n",
+        "|--------------------------------------------------------------------------|-------------------|---------------------------|----------|----------------------------------------------------------------------|\n",
+        "| [PyTorch](https://pytorch.org/)                                          | -                 | `yolo11n.pt`              | ✅        | -                                                                    |\n",
+        "| [TorchScript](https://docs.ultralytics.com/integrations/torchscript)     | `torchscript`     | `yolo11n.torchscript`     | ✅        | `imgsz`, `optimize`, `batch`                                         |\n",
+        "| [ONNX](https://docs.ultralytics.com/integrations/onnx)                   | `onnx`            | `yolo11n.onnx`            | ✅        | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch`             |\n",
+        "| [OpenVINO](https://docs.ultralytics.com/integrations/openvino)           | `openvino`        | `yolo11n_openvino_model/` | ✅        | `imgsz`, `half`, `dynamic`, `int8`, `batch`                          |\n",
+        "| [TensorRT](https://docs.ultralytics.com/integrations/tensorrt)           | `engine`          | `yolo11n.engine`          | ✅        | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` |\n",
+        "| [CoreML](https://docs.ultralytics.com/integrations/coreml)               | `coreml`          | `yolo11n.mlpackage`       | ✅        | `imgsz`, `half`, `int8`, `nms`, `batch`                              |\n",
+        "| [TF SavedModel](https://docs.ultralytics.com/integrations/tf-savedmodel) | `saved_model`     | `yolo11n_saved_model/`    | ✅        | `imgsz`, `keras`, `int8`, `batch`                                    |\n",
+        "| [TF GraphDef](https://docs.ultralytics.com/integrations/tf-graphdef)     | `pb`              | `yolo11n.pb`              | ❌        | `imgsz`, `batch`                                                     |\n",
+        "| [TF Lite](https://docs.ultralytics.com/integrations/tflite)              | `tflite`          | `yolo11n.tflite`          | ✅        | `imgsz`, `half`, `int8`, `batch`                                     |\n",
+        "| [TF Edge TPU](https://docs.ultralytics.com/integrations/edge-tpu)        | `edgetpu`         | `yolo11n_edgetpu.tflite`  | ✅        | `imgsz`                                                              |\n",
+        "| [TF.js](https://docs.ultralytics.com/integrations/tfjs)                  | `tfjs`            | `yolo11n_web_model/`      | ✅        | `imgsz`, `half`, `int8`, `batch`                                     |\n",
+        "| [PaddlePaddle](https://docs.ultralytics.com/integrations/paddlepaddle)   | `paddle`          | `yolo11n_paddle_model/`   | ✅        | `imgsz`, `batch`                                                     |\n",
+        "| [MNN](https://docs.ultralytics.com/integrations/mnn)                     | `mnn`             | `yolo11n.mnn`             | ✅        | `imgsz`, `batch`, `int8`, `half`                                     |\n",
+        "| [NCNN](https://docs.ultralytics.com/integrations/ncnn)                   | `ncnn`            | `yolo11n_ncnn_model/`     | ✅        | `imgsz`, `half`, `batch`                                             |\n",
+        "| [IMX500](https://docs.ultralytics.com/integrations/sony-imx500)          | `imx`             | `yolov8n_imx_model/`      | ✅        | `imgsz`, `int8`                                                      |"
+      ],
+      "metadata": {
+        "id": "nPZZeNrLCQG6"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "!yolo export model=yolo11n.pt format=torchscript"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "CYIjW4igCjqD",
+        "outputId": "5357fa04-6749-4508-effe-8d4078533539"
+      },
+      "execution_count": 5,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CPU (Intel Xeon 2.20GHz)\n",
+            "YOLO11n summary (fused): 238 layers, 2,616,248 parameters, 0 gradients, 6.5 GFLOPs\n",
+            "\n",
+            "\u001b[34m\u001b[1mPyTorch:\u001b[0m starting from 'yolo11n.pt' with input shape (1, 3, 640, 640) BCHW and output shape(s) (1, 84, 8400) (5.4 MB)\n",
+            "\n",
+            "\u001b[34m\u001b[1mTorchScript:\u001b[0m starting export with torch 2.4.1+cu121...\n",
+            "\u001b[34m\u001b[1mTorchScript:\u001b[0m export success ✅ 2.4s, saved as 'yolo11n.torchscript' (10.5 MB)\n",
+            "\n",
+            "Export complete (4.2s)\n",
+            "Results saved to \u001b[1m/content\u001b[0m\n",
+            "Predict:         yolo predict task=detect model=yolo11n.torchscript imgsz=640  \n",
+            "Validate:        yolo val task=detect model=yolo11n.torchscript imgsz=640 data=coco.yaml  \n",
+            "Visualize:       https://netron.app\n",
+            "💡 Learn more at https://docs.ultralytics.com/modes/export\n"
+          ]
+        }
+      ]
+    },
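+    {
+      "cell_type": "code",
+      "source": [
+        "# Minimal sketch: run inference with the exported TorchScript model.\n",
+        "# Assumes the export cell above has produced 'yolo11n.torchscript' in the working directory.\n",
+        "from ultralytics import YOLO\n",
+        "\n",
+        "ts_model = YOLO('yolo11n.torchscript')  # load the exported model\n",
+        "ts_model('https://ultralytics.com/images/bus.jpg')  # predict on an image"
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },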
+    {
+      "cell_type": "markdown",
+      "source": [
+        "# 5. Python Usage\n",
+        "\n",
+        "YOLO11 was reimagined using Python-first principles for the most seamless Python YOLO experience yet. YOLO11 models can be loaded from a trained checkpoint or created from scratch. Then methods are used to train, val, predict, and export the model. See detailed Python usage examples in the [YOLO11 Python Docs](https://docs.ultralytics.com/usage/python/)."
+      ],
+      "metadata": {
+        "id": "kUMOQ0OeDBJG"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "from ultralytics import YOLO\n",
+        "\n",
+        "# Load a model\n",
+        "model = YOLO('yolo11n.yaml')  # build a new model from scratch\n",
+        "model = YOLO('yolo11n.pt')  # load a pretrained model (recommended for training)\n",
+        "\n",
+        "# Use the model\n",
+        "results = model.train(data='coco8.yaml', epochs=3)  # train the model\n",
+        "results = model.val()  # evaluate model performance on the validation set\n",
+        "results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image\n",
+        "results = model.export(format='onnx')  # export the model to ONNX format"
+      ],
+      "metadata": {
+        "id": "bpF9-vS_DAaf"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
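+    {
+      "cell_type": "code",
+      "source": [
+        "# Minimal sketch: inspect prediction results returned by the Python API.\n",
+        "# Assumes `model` is the pretrained model loaded in the cell above; prediction is\n",
+        "# re-run here because `results` above was last reassigned by the export call.\n",
+        "results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image\n",
+        "for r in results:\n",
+        "    print(r.boxes.xyxy)  # bounding boxes in xyxy format\n",
+        "    print(r.boxes.conf)  # confidence scores\n",
+        "    print(r.boxes.cls)  # class indices"
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },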
+    {
+      "cell_type": "markdown",
+      "source": [
+        "# 6. Tasks\n",
+        "\n",
+        "YOLO11 can train, val, predict and export models for the most common tasks in vision AI: [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/), [Pose](https://docs.ultralytics.com/tasks/pose/) and [OBB](https://docs.ultralytics.com/tasks/obb/). See [YOLO11 Tasks Docs](https://docs.ultralytics.com/tasks/) for more information.\n",
+        "\n",
+        "<br><img width=\"1024\" src=\"https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png\">\n"
+      ],
+      "metadata": {
+        "id": "Phm9ccmOKye5"
+      }
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "## 1. Detection\n",
+        "\n",
+        "YOLO11 _detection_ models have no suffix and are the default YOLO11 models, i.e. `yolo11n.pt`, and are pretrained on COCO. See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for full details.\n"
+      ],
+      "metadata": {
+        "id": "yq26lwpYK1lq"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Load YOLO11n, train it on COCO8 for 3 epochs and predict an image with it\n",
+        "from ultralytics import YOLO\n",
+        "\n",
+        "model = YOLO('yolo11n.pt')  # load a pretrained YOLO detection model\n",
+        "model.train(data='coco8.yaml', epochs=3)  # train the model\n",
+        "model('https://ultralytics.com/images/bus.jpg')  # predict on an image"
+      ],
+      "metadata": {
+        "id": "8Go5qqS9LbC5"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "## 2. Segmentation\n",
+        "\n",
+        "YOLO11 _segmentation_ models use the `-seg` suffix, i.e. `yolo11n-seg.pt`, and are pretrained on COCO. See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for full details.\n"
+      ],
+      "metadata": {
+        "id": "7ZW58jUzK66B"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Load YOLO11n-seg, train it on COCO8-seg for 3 epochs and predict an image with it\n",
+        "from ultralytics import YOLO\n",
+        "\n",
+        "model = YOLO('yolo11n-seg.pt')  # load a pretrained YOLO segmentation model\n",
+        "model.train(data='coco8-seg.yaml', epochs=3)  # train the model\n",
+        "model('https://ultralytics.com/images/bus.jpg')  # predict on an image"
+      ],
+      "metadata": {
+        "id": "WFPJIQl_L5HT"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "## 3. Classification\n",
+        "\n",
+        "YOLO11 _classification_ models use the `-cls` suffix, i.e. `yolo11n-cls.pt`, and are pretrained on ImageNet. See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for full details.\n"
+      ],
+      "metadata": {
+        "id": "ax3p94VNK9zR"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Load YOLO11n-cls, train it on mnist160 for 3 epochs and predict an image with it\n",
+        "from ultralytics import YOLO\n",
+        "\n",
+        "model = YOLO('yolo11n-cls.pt')  # load a pretrained YOLO classification model\n",
+        "model.train(data='mnist160', epochs=3)  # train the model\n",
+        "model('https://ultralytics.com/images/bus.jpg')  # predict on an image"
+      ],
+      "metadata": {
+        "id": "5q9Zu6zlL5rS"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "## 4. Pose\n",
+        "\n",
+        "YOLO11 _pose_ models use the `-pose` suffix, i.e. `yolo11n-pose.pt`, and are pretrained on COCO Keypoints. See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for full details."
+      ],
+      "metadata": {
+        "id": "SpIaFLiO11TG"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Load YOLO11n-pose, train it on COCO8-pose for 3 epochs and predict an image with it\n",
+        "from ultralytics import YOLO\n",
+        "\n",
+        "model = YOLO('yolo11n-pose.pt')  # load a pretrained YOLO pose model\n",
+        "model.train(data='coco8-pose.yaml', epochs=3)  # train the model\n",
+        "model('https://ultralytics.com/images/bus.jpg')  # predict on an image"
+      ],
+      "metadata": {
+        "id": "si4aKFNg19vX"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "## 5. Oriented Bounding Boxes (OBB)\n",
+        "\n",
+        "YOLO11 _OBB_ models use the `-obb` suffix, i.e. `yolo11n-obb.pt`, and are pretrained on the DOTA dataset. See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for full details."
+      ],
+      "metadata": {
+        "id": "cf5j_T9-B5F0"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Load YOLO11n-obb, train it on DOTA8 for 3 epochs and predict an image with it\n",
+        "from ultralytics import YOLO\n",
+        "\n",
+        "model = YOLO('yolo11n-obb.pt')  # load a pretrained YOLO OBB model\n",
+        "model.train(data='dota8.yaml', epochs=3)  # train the model\n",
+        "model('https://ultralytics.com/images/boats.jpg')  # predict on an image"
+      ],
+      "metadata": {
+        "id": "IJNKClOOB5YS"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
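+    {
+      "cell_type": "code",
+      "source": [
+        "# Minimal sketch: read oriented-box results, assuming `model` is the YOLO11n-obb\n",
+        "# model trained in the cell above. OBB predictions are exposed via `r.obb` rather\n",
+        "# than `r.boxes`.\n",
+        "results = model('https://ultralytics.com/images/boats.jpg')\n",
+        "for r in results:\n",
+        "    print(r.obb.xywhr)  # boxes as center-x, center-y, width, height, rotation\n",
+        "    print(r.obb.conf)  # confidence scores\n",
+        "    print(r.obb.cls)  # class indices"
+      ],
+      "metadata": {},
+      "execution_count": null,
+      "outputs": []
+    },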
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "IEijrePND_2I"
+      },
+      "source": [
+        "# Appendix\n",
+        "\n",
+        "Additional content below."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Pip install from source\n",
+        "!pip install git+https://github.com/ultralytics/ultralytics@main"
+      ],
+      "metadata": {
+        "id": "pIdE6i8C3LYp"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Git clone and run tests on main branch\n",
+        "!git clone https://github.com/ultralytics/ultralytics -b main\n",
+        "%pip install -qe ultralytics"
+      ],
+      "metadata": {
+        "id": "uRKlwxSJdhd1"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Run tests (Git clone only)\n",
+        "!pytest ultralytics/tests"
+      ],
+      "metadata": {
+        "id": "GtPlh7mcCGZX"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Validate multiple models\n",
+        "for x in 'nsmlx':\n",
+        "  !yolo val model=yolo11{x}.pt data=coco.yaml"
+      ],
+      "metadata": {
+        "id": "Wdc6t_bfzDDk"
+      },
+      "execution_count": null,
+      "outputs": []
+    }
+  ]
+}

+ 769 - 0
mkdocs.yml

@@ -0,0 +1,769 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+# Configuration file for building the Ultralytics YOLO documentation site using MkDocs.
+# Provides settings to control site metadata, customize the appearance using the
+# Material theme, define the navigation structure, and enable various plugins.
+
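+# Illustrative local preview workflow (assumes MkDocs and the theme/plugins declared
+# below are installed in the active Python environment):
+#   mkdocs serve   # live-reloading preview, served at http://127.0.0.1:8000 by default
+#   mkdocs build   # writes the static site to the site_dir configured below
+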
+# Site metadata
+site_name: Ultralytics YOLO Docs
+site_description: Explore Ultralytics YOLO, a cutting-edge real-time object detection and image segmentation model for various applications and hardware platforms.
+site_url: https://docs.ultralytics.com
+site_author: Ultralytics
+repo_url: https://github.com/ultralytics/ultralytics
+edit_uri: https://github.com/ultralytics/ultralytics/tree/main/docs/en/
+repo_name: ultralytics/ultralytics
+remote_name: https://github.com/ultralytics/docs
+docs_dir: "docs/en/" # where to find the markdown files
+site_dir: "site/" # where to publish to
+use_directory_urls: true # don't display 'index.html' in slugs
+
+# Theme customization
+theme:
+  name: material
+  language: en
+  custom_dir: docs/overrides/
+  logo: https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Reverse.svg
+  favicon: https://raw.githubusercontent.com/ultralytics/assets/refs/heads/main/logo/favicon-yolo.png
+  icon:
+    repo: fontawesome/brands/github
+  # font:  # disabled for faster page load times
+  #  text: Helvetica
+  #  code: Roboto Mono
+  palette:
+    - media: "(prefers-color-scheme)"
+      toggle:
+        icon: material/brightness-auto
+        name: Switch to light mode
+    - media: "(prefers-color-scheme: dark)"
+      scheme: slate
+      primary: black
+      accent: indigo
+      toggle:
+        icon: material/brightness-4
+        name: Switch to system preference
+    - media: "(prefers-color-scheme: light)"
+      scheme: default
+      primary: indigo
+      accent: indigo
+      toggle:
+        icon: material/brightness-7
+        name: Switch to dark mode
+  features:
+    - content.action.edit
+    - content.code.annotate
+    - content.code.copy
+    - content.tooltips
+    - search.highlight
+    - search.share
+    - search.suggest
+    - toc.follow
+    - navigation.top
+    - navigation.tabs
+    - navigation.tabs.sticky
+    - navigation.prune
+    - navigation.footer
+    - navigation.tracking
+    - navigation.instant
+    - navigation.instant.progress
+    - navigation.indexes
+    - navigation.sections # navigation.expand or navigation.sections
+    - content.tabs.link # all code tabs change simultaneously
+
+# Customization
+copyright: <a href="https://www.ultralytics.com/" target="_blank">© 2025 Ultralytics Inc.</a> All rights reserved.
+extra: # version:
+  homepage: https://www.ultralytics.com/
+  #   provider: mike  #  version drop-down menu
+  robots: robots.txt
+  analytics:
+    provider: google
+    property: G-2M5EHKC0BH
+  social:
+    - icon: fontawesome/brands/github
+      link: https://github.com/ultralytics
+    - icon: fontawesome/brands/linkedin
+      link: https://www.linkedin.com/company/ultralytics/
+    - icon: fontawesome/brands/x-twitter
+      link: https://twitter.com/ultralytics
+    - icon: fontawesome/brands/youtube
+      link: https://youtube.com/ultralytics?sub_confirmation=1
+    - icon: fontawesome/brands/docker
+      link: https://hub.docker.com/r/ultralytics/ultralytics/
+    - icon: fontawesome/brands/python
+      link: https://pypi.org/project/ultralytics/
+    - icon: fontawesome/brands/discord
+      link: https://discord.com/invite/ultralytics
+    - icon: fontawesome/brands/reddit
+      link: https://reddit.com/r/ultralytics
+
+extra_css:
+  - stylesheets/style.css
+
+extra_javascript:
+  - javascript/extra.js
+  - javascript/giscus.js
+
+markdown_extensions:
+  - admonition
+  - md_in_html
+  - tables
+  - attr_list
+  - def_list
+  - pymdownx.critic
+  - pymdownx.caret
+  - pymdownx.keys
+  - pymdownx.mark
+  - pymdownx.tilde
+  - pymdownx.details
+  - pymdownx.superfences
+  - pymdownx.inlinehilite
+  - pymdownx.highlight:
+      anchor_linenums: true
+  - pymdownx.snippets:
+      base_path: ./
+  - pymdownx.emoji:
+      emoji_index: !!python/name:material.extensions.emoji.twemoji
+      emoji_generator: !!python/name:material.extensions.emoji.to_svg
+  - pymdownx.tabbed:
+      alternate_style: true
+
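+# Illustrative example of the content-tab syntax enabled by pymdownx.tabbed
+# (alternate_style) above, as it would be written in a docs Markdown page:
+#   === "CLI"
+#       yolo predict model=yolo11n.pt source=bus.jpg
+#   === "Python"
+#       from ultralytics import YOLO
+#       model = YOLO("yolo11n.pt")
+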
+# Validation settings https://www.mkdocs.org/user-guide/configuration/#validation
+validation:
+  nav:
+    omitted_files: info
+    not_found: warn
+    absolute_links: info
+  links:
+    absolute_links: relative_to_docs
+    anchors: warn
+    unrecognized_links: warn
+
+# Primary navigation ---------------------------------------------------------------------------------------------------
+nav:
+  - Home:
+      - Home: index.md
+      - Quickstart: quickstart.md
+      - Modes:
+          - modes/index.md
+          - Train: modes/train.md
+          - Val: modes/val.md
+          - Predict: modes/predict.md
+          - Export: modes/export.md
+          - Track: modes/track.md
+          - Benchmark: modes/benchmark.md
+      - Tasks:
+          - tasks/index.md
+          - Detect: tasks/detect.md
+          - Segment: tasks/segment.md
+          - Classify: tasks/classify.md
+          - Pose: tasks/pose.md
+          - OBB: tasks/obb.md
+      - Models:
+          - models/index.md
+      - Datasets:
+          - datasets/index.md
+      - Solutions:
+          - solutions/index.md
+      - Guides:
+          - guides/index.md
+      - YOLO11 🚀 NEW: models/yolo11.md # for promotion of new pages
+      - Languages:
+          - 🇬🇧&nbsp English: https://ultralytics.com/docs/
+          - 🇨🇳&nbsp 简体中文: https://docs.ultralytics.com/zh/
+          - 🇰🇷&nbsp 한국어: https://docs.ultralytics.com/ko/
+          - 🇯🇵&nbsp 日本語: https://docs.ultralytics.com/ja/
+          - 🇷🇺&nbsp Русский: https://docs.ultralytics.com/ru/
+          - 🇩🇪&nbsp Deutsch: https://docs.ultralytics.com/de/
+          - 🇫🇷&nbsp Français: https://docs.ultralytics.com/fr/
+          - 🇪🇸&nbsp Español: https://docs.ultralytics.com/es/
+          - 🇵🇹&nbsp Português: https://docs.ultralytics.com/pt/
+          - 🇮🇹&nbsp Italiano: https://docs.ultralytics.com/it/
+          - 🇹🇷&nbsp Türkçe: https://docs.ultralytics.com/tr/
+          - 🇻🇳&nbsp Tiếng Việt: https://docs.ultralytics.com/vi/
+          - 🇸🇦&nbsp العربية: https://docs.ultralytics.com/ar/
+  - Quickstart:
+      - quickstart.md
+      - Usage:
+          - CLI: usage/cli.md
+          - Python: usage/python.md
+          - Callbacks: usage/callbacks.md
+          - Configuration: usage/cfg.md
+          - Simple Utilities: usage/simple-utilities.md
+          - Advanced Customization: usage/engine.md
+      - Modes:
+          - modes/index.md
+          - Train: modes/train.md
+          - Val: modes/val.md
+          - Predict: modes/predict.md
+          - Export: modes/export.md
+          - Track: modes/track.md
+          - Benchmark: modes/benchmark.md
+      - Tasks:
+          - tasks/index.md
+          - Detect: tasks/detect.md
+          - Segment: tasks/segment.md
+          - Classify: tasks/classify.md
+          - Pose: tasks/pose.md
+          - OBB: tasks/obb.md
+      - Models:
+          - models/index.md
+      - Datasets:
+          - datasets/index.md
+      - Solutions:
+          - solutions/index.md
+      - Guides:
+          - guides/index.md
+  - Modes:
+      - modes/index.md
+      - Train: modes/train.md
+      - Val: modes/val.md
+      - Predict: modes/predict.md
+      - Export: modes/export.md
+      - Track: modes/track.md
+      - Benchmark: modes/benchmark.md
+      - Tasks:
+          - tasks/index.md
+          - Detect: tasks/detect.md
+          - Segment: tasks/segment.md
+          - Classify: tasks/classify.md
+          - Pose: tasks/pose.md
+          - OBB: tasks/obb.md
+  - Tasks:
+      - tasks/index.md
+      - Detect: tasks/detect.md
+      - Segment: tasks/segment.md
+      - Classify: tasks/classify.md
+      - Pose: tasks/pose.md
+      - OBB: tasks/obb.md
+      - Modes:
+          - modes/index.md
+          - Train: modes/train.md
+          - Val: modes/val.md
+          - Predict: modes/predict.md
+          - Export: modes/export.md
+          - Track: modes/track.md
+          - Benchmark: modes/benchmark.md
+  - Models:
+      - models/index.md
+      - YOLOv3: models/yolov3.md
+      - YOLOv4: models/yolov4.md
+      - YOLOv5: models/yolov5.md
+      - YOLOv6: models/yolov6.md
+      - YOLOv7: models/yolov7.md
+      - YOLOv8: models/yolov8.md
+      - YOLOv9: models/yolov9.md
+      - YOLOv10: models/yolov10.md
+      - YOLO11 🚀 NEW: models/yolo11.md
+      - SAM (Segment Anything Model): models/sam.md
+      - SAM 2 (Segment Anything Model 2): models/sam-2.md
+      - MobileSAM (Mobile Segment Anything Model): models/mobile-sam.md
+      - FastSAM (Fast Segment Anything Model): models/fast-sam.md
+      - YOLO-NAS (Neural Architecture Search): models/yolo-nas.md
+      - RT-DETR (Realtime Detection Transformer): models/rtdetr.md
+      - YOLO-World (Real-Time Open-Vocabulary Object Detection): models/yolo-world.md
+  - Datasets:
+      - datasets/index.md
+      - Detection:
+          - datasets/detect/index.md
+          - Argoverse: datasets/detect/argoverse.md
+          - COCO: datasets/detect/coco.md
+          - LVIS: datasets/detect/lvis.md
+          - COCO8: datasets/detect/coco8.md
+          - GlobalWheat2020: datasets/detect/globalwheat2020.md
+          - Objects365: datasets/detect/objects365.md
+          - OpenImagesV7: datasets/detect/open-images-v7.md
+          - SKU-110K: datasets/detect/sku-110k.md
+          - VisDrone: datasets/detect/visdrone.md
+          - VOC: datasets/detect/voc.md
+          - xView: datasets/detect/xview.md
+          - RF100: datasets/detect/roboflow-100.md
+          - Brain-tumor: datasets/detect/brain-tumor.md
+          - African-wildlife: datasets/detect/african-wildlife.md
+          - Signature: datasets/detect/signature.md
+          - Medical-pills: datasets/detect/medical-pills.md
+      - Segmentation:
+          - datasets/segment/index.md
+          - COCO: datasets/segment/coco.md
+          - COCO8-seg: datasets/segment/coco8-seg.md
+          - Crack-seg: datasets/segment/crack-seg.md
+          - Carparts-seg: datasets/segment/carparts-seg.md
+          - Package-seg: datasets/segment/package-seg.md
+      - Pose:
+          - datasets/pose/index.md
+          - COCO: datasets/pose/coco.md
+          - COCO8-pose: datasets/pose/coco8-pose.md
+          - Tiger-pose: datasets/pose/tiger-pose.md
+          - Hand-keypoints: datasets/pose/hand-keypoints.md
+          - Dog-pose: datasets/pose/dog-pose.md
+      - Classification:
+          - datasets/classify/index.md
+          - Caltech 101: datasets/classify/caltech101.md
+          - Caltech 256: datasets/classify/caltech256.md
+          - CIFAR-10: datasets/classify/cifar10.md
+          - CIFAR-100: datasets/classify/cifar100.md
+          - Fashion-MNIST: datasets/classify/fashion-mnist.md
+          - ImageNet: datasets/classify/imagenet.md
+          - ImageNet-10: datasets/classify/imagenet10.md
+          - Imagenette: datasets/classify/imagenette.md
+          - Imagewoof: datasets/classify/imagewoof.md
+          - MNIST: datasets/classify/mnist.md
+      - Oriented Bounding Boxes (OBB):
+          - datasets/obb/index.md
+          - DOTAv2: datasets/obb/dota-v2.md
+          - DOTA8: datasets/obb/dota8.md
+      - Multi-Object Tracking:
+          - datasets/track/index.md
+  - Solutions 🚀 NEW:
+      - solutions/index.md
+      - Object Counting: guides/object-counting.md
+      - Object Cropping: guides/object-cropping.md
+      - Object Blurring: guides/object-blurring.md
+      - Workouts Monitoring: guides/workouts-monitoring.md
+      - Objects Counting in Regions: guides/region-counting.md
+      - Security Alarm System: guides/security-alarm-system.md
+      - Heatmaps: guides/heatmaps.md
+      - Instance Segmentation with Object Tracking: guides/instance-segmentation-and-tracking.md
+      - VisionEye Mapping: guides/vision-eye.md
+      - Speed Estimation: guides/speed-estimation.md
+      - Distance Calculation: guides/distance-calculation.md
+      - Queue Management: guides/queue-management.md
+      - Parking Management: guides/parking-management.md
+      - Analytics: guides/analytics.md
+      - Live Inference: guides/streamlit-live-inference.md
+      - Track Objects in Zone 🚀 NEW: guides/trackzone.md
+  - Guides:
+      - guides/index.md
+      - YOLO Common Issues: guides/yolo-common-issues.md
+      - YOLO Performance Metrics: guides/yolo-performance-metrics.md
+      - YOLO Thread-Safe Inference: guides/yolo-thread-safe-inference.md
+      - Model Deployment Options: guides/model-deployment-options.md
+      - K-Fold Cross Validation: guides/kfold-cross-validation.md
+      - Hyperparameter Tuning: guides/hyperparameter-tuning.md
+      - SAHI Tiled Inference: guides/sahi-tiled-inference.md
+      - AzureML Quickstart: guides/azureml-quickstart.md
+      - Conda Quickstart: guides/conda-quickstart.md
+      - Docker Quickstart: guides/docker-quickstart.md
+      - Raspberry Pi: guides/raspberry-pi.md
+      - NVIDIA Jetson: guides/nvidia-jetson.md
+      - DeepStream on NVIDIA Jetson: guides/deepstream-nvidia-jetson.md
+      - Triton Inference Server: guides/triton-inference-server.md
+      - Isolating Segmentation Objects: guides/isolating-segmentation-objects.md
+      - Edge TPU on Raspberry Pi: guides/coral-edge-tpu-on-raspberry-pi.md
+      - Viewing Inference Images in a Terminal: guides/view-results-in-terminal.md
+      - OpenVINO Latency vs Throughput modes: guides/optimizing-openvino-latency-vs-throughput-modes.md
+      - ROS Quickstart: guides/ros-quickstart.md
+      - Steps of a Computer Vision Project: guides/steps-of-a-cv-project.md
+      - Defining A Computer Vision Project's Goals: guides/defining-project-goals.md
+      - Data Collection and Annotation: guides/data-collection-and-annotation.md
+      - Preprocessing Annotated Data: guides/preprocessing_annotated_data.md
+      - Tips for Model Training: guides/model-training-tips.md
+      - Insights on Model Evaluation and Fine-Tuning: guides/model-evaluation-insights.md
+      - A Guide on Model Testing: guides/model-testing.md
+      - Best Practices for Model Deployment: guides/model-deployment-practices.md
+      - Maintaining Your Computer Vision Model: guides/model-monitoring-and-maintenance.md
+      - Explorer:
+          - datasets/explorer/index.md
+          - Explorer API: datasets/explorer/api.md
+          - Explorer Dashboard Demo: datasets/explorer/dashboard.md
+          - VOC Exploration Example: datasets/explorer/explorer.md
+      - YOLOv5:
+          - yolov5/index.md
+          - Quickstart: yolov5/quickstart_tutorial.md
+          - Environments:
+              - Amazon Web Services (AWS): yolov5/environments/aws_quickstart_tutorial.md
+              - Google Cloud (GCP): yolov5/environments/google_cloud_quickstart_tutorial.md
+              - AzureML: yolov5/environments/azureml_quickstart_tutorial.md
+              - Docker Image: yolov5/environments/docker_image_quickstart_tutorial.md
+          - Tutorials:
+              - Train Custom Data: yolov5/tutorials/train_custom_data.md
+              - Tips for Best Training Results: yolov5/tutorials/tips_for_best_training_results.md
+              - Multi-GPU Training: yolov5/tutorials/multi_gpu_training.md
+              - PyTorch Hub: yolov5/tutorials/pytorch_hub_model_loading.md
+              - TFLite, ONNX, CoreML, TensorRT Export: yolov5/tutorials/model_export.md
+              - Test-Time Augmentation (TTA): yolov5/tutorials/test_time_augmentation.md
+              - Model Ensembling: yolov5/tutorials/model_ensembling.md
+              - Pruning/Sparsity Tutorial: yolov5/tutorials/model_pruning_and_sparsity.md
+              - Hyperparameter evolution: yolov5/tutorials/hyperparameter_evolution.md
+              - Transfer learning with frozen layers: yolov5/tutorials/transfer_learning_with_frozen_layers.md
+              - Architecture Summary: yolov5/tutorials/architecture_description.md
+              - Roboflow Datasets: yolov5/tutorials/roboflow_datasets_integration.md
+              - Neural Magic's DeepSparse: yolov5/tutorials/neural_magic_pruning_quantization.md
+              - Comet Logging: yolov5/tutorials/comet_logging_integration.md
+              - ClearML Logging: yolov5/tutorials/clearml_logging_integration.md
+  - Integrations:
+      - integrations/index.md
+      - Amazon SageMaker: integrations/amazon-sagemaker.md
+      - ClearML: integrations/clearml.md
+      - Comet ML: integrations/comet.md
+      - CoreML: integrations/coreml.md
+      - DVC: integrations/dvc.md
+      - Google Colab: integrations/google-colab.md
+      - Gradio: integrations/gradio.md
+      - IBM Watsonx: integrations/ibm-watsonx.md
+      - JupyterLab: integrations/jupyterlab.md
+      - Kaggle: integrations/kaggle.md
+      - MLflow: integrations/mlflow.md
+      - Neural Magic: integrations/neural-magic.md
+      - ONNX: integrations/onnx.md
+      - OpenVINO: integrations/openvino.md
+      - PaddlePaddle: integrations/paddlepaddle.md
+      - MNN: integrations/mnn.md
+      - NCNN: integrations/ncnn.md
+      - Paperspace Gradient: integrations/paperspace.md
+      - Ray Tune: integrations/ray-tune.md
+      - Roboflow: integrations/roboflow.md
+      - TF GraphDef: integrations/tf-graphdef.md
+      - TF SavedModel: integrations/tf-savedmodel.md
+      - TF.js: integrations/tfjs.md
+      - TFLite: integrations/tflite.md
+      - TFLite Edge TPU: integrations/edge-tpu.md
+      - TensorBoard: integrations/tensorboard.md
+      - TensorRT: integrations/tensorrt.md
+      - TorchScript: integrations/torchscript.md
+      - VS Code: integrations/vscode.md
+      - Weights & Biases: integrations/weights-biases.md
+      - Albumentations: integrations/albumentations.md
+      - SONY IMX500: integrations/sony-imx500.md
+  - HUB:
+      - hub/index.md
+      - Web:
+          - hub/index.md
+          - Quickstart: hub/quickstart.md
+          - Datasets: hub/datasets.md
+          - Projects: hub/projects.md
+          - Models: hub/models.md
+          - Pro: hub/pro.md
+          - Cloud Training: hub/cloud-training.md
+          - Inference API: hub/inference-api.md
+          - Teams: hub/teams.md
+          - Integrations: hub/integrations.md
+      - App:
+          - hub/app/index.md
+          - iOS: hub/app/ios.md
+          - Android: hub/app/android.md
+      - Python SDK:
+          - hub/sdk/index.md
+          - Quickstart: hub/sdk/quickstart.md
+          - Model: hub/sdk/model.md
+          - Dataset: hub/sdk/dataset.md
+          - Project: hub/sdk/project.md
+          - Reference:
+              - base:
+                  - api_client: hub/sdk/reference/base/api_client.md
+                  - auth: hub/sdk/reference/base/auth.md
+                  - crud_client: hub/sdk/reference/base/crud_client.md
+                  - paginated_list: hub/sdk/reference/base/paginated_list.md
+                  - server_clients: hub/sdk/reference/base/server_clients.md
+              - helpers:
+                  - error_handler: hub/sdk/reference/helpers/error_handler.md
+                  - exceptions: hub/sdk/reference/helpers/exceptions.md
+                  - logger: hub/sdk/reference/helpers/logger.md
+                  - utils: hub/sdk/reference/helpers/utils.md
+              - hub_client: hub/sdk/reference/hub_client.md
+              - modules:
+                  - datasets: hub/sdk/reference/modules/datasets.md
+                  - models: hub/sdk/reference/modules/models.md
+                  - projects: hub/sdk/reference/modules/projects.md
+                  - teams: hub/sdk/reference/modules/teams.md
+                  - users: hub/sdk/reference/modules/users.md
+      - REST API:
+          - hub/api/index.md
+
+  - Reference:
+      - cfg:
+          - __init__: reference/cfg/__init__.md
+      - data:
+          - annotator: reference/data/annotator.md
+          - augment: reference/data/augment.md
+          - base: reference/data/base.md
+          - build: reference/data/build.md
+          - converter: reference/data/converter.md
+          - dataset: reference/data/dataset.md
+          - loaders: reference/data/loaders.md
+          - split_dota: reference/data/split_dota.md
+          - utils: reference/data/utils.md
+      - engine:
+          - exporter: reference/engine/exporter.md
+          - model: reference/engine/model.md
+          - predictor: reference/engine/predictor.md
+          - results: reference/engine/results.md
+          - trainer: reference/engine/trainer.md
+          - tuner: reference/engine/tuner.md
+          - validator: reference/engine/validator.md
+      - hub:
+          - __init__: reference/hub/__init__.md
+          - auth: reference/hub/auth.md
+          - google:
+              - __init__: reference/hub/google/__init__.md
+          - session: reference/hub/session.md
+          - utils: reference/hub/utils.md
+      - models:
+          - fastsam:
+              - model: reference/models/fastsam/model.md
+              - predict: reference/models/fastsam/predict.md
+              - utils: reference/models/fastsam/utils.md
+              - val: reference/models/fastsam/val.md
+          - nas:
+              - model: reference/models/nas/model.md
+              - predict: reference/models/nas/predict.md
+              - val: reference/models/nas/val.md
+          - rtdetr:
+              - model: reference/models/rtdetr/model.md
+              - predict: reference/models/rtdetr/predict.md
+              - train: reference/models/rtdetr/train.md
+              - val: reference/models/rtdetr/val.md
+          - sam:
+              - amg: reference/models/sam/amg.md
+              - build: reference/models/sam/build.md
+              - model: reference/models/sam/model.md
+              - modules:
+                  - blocks: reference/models/sam/modules/blocks.md
+                  - decoders: reference/models/sam/modules/decoders.md
+                  - encoders: reference/models/sam/modules/encoders.md
+                  - memory_attention: reference/models/sam/modules/memory_attention.md
+                  - sam: reference/models/sam/modules/sam.md
+                  - tiny_encoder: reference/models/sam/modules/tiny_encoder.md
+                  - transformer: reference/models/sam/modules/transformer.md
+                  - utils: reference/models/sam/modules/utils.md
+              - predict: reference/models/sam/predict.md
+          - utils:
+              - loss: reference/models/utils/loss.md
+              - ops: reference/models/utils/ops.md
+          - yolo:
+              - classify:
+                  - predict: reference/models/yolo/classify/predict.md
+                  - train: reference/models/yolo/classify/train.md
+                  - val: reference/models/yolo/classify/val.md
+              - detect:
+                  - predict: reference/models/yolo/detect/predict.md
+                  - train: reference/models/yolo/detect/train.md
+                  - val: reference/models/yolo/detect/val.md
+              - model: reference/models/yolo/model.md
+              - obb:
+                  - predict: reference/models/yolo/obb/predict.md
+                  - train: reference/models/yolo/obb/train.md
+                  - val: reference/models/yolo/obb/val.md
+              - pose:
+                  - predict: reference/models/yolo/pose/predict.md
+                  - train: reference/models/yolo/pose/train.md
+                  - val: reference/models/yolo/pose/val.md
+              - segment:
+                  - predict: reference/models/yolo/segment/predict.md
+                  - train: reference/models/yolo/segment/train.md
+                  - val: reference/models/yolo/segment/val.md
+              - world:
+                  - train: reference/models/yolo/world/train.md
+                  - train_world: reference/models/yolo/world/train_world.md
+      - nn:
+          - autobackend: reference/nn/autobackend.md
+          - modules:
+              - activation: reference/nn/modules/activation.md
+              - block: reference/nn/modules/block.md
+              - conv: reference/nn/modules/conv.md
+              - head: reference/nn/modules/head.md
+              - transformer: reference/nn/modules/transformer.md
+              - utils: reference/nn/modules/utils.md
+          - tasks: reference/nn/tasks.md
+      - solutions:
+          - ai_gym: reference/solutions/ai_gym.md
+          - analytics: reference/solutions/analytics.md
+          - distance_calculation: reference/solutions/distance_calculation.md
+          - heatmap: reference/solutions/heatmap.md
+          - object_counter: reference/solutions/object_counter.md
+          - parking_management: reference/solutions/parking_management.md
+          - queue_management: reference/solutions/queue_management.md
+          - region_counter: reference/solutions/region_counter.md
+          - security_alarm: reference/solutions/security_alarm.md
+          - solutions: reference/solutions/solutions.md
+          - speed_estimation: reference/solutions/speed_estimation.md
+          - streamlit_inference: reference/solutions/streamlit_inference.md
+          - trackzone: reference/solutions/trackzone.md
+      - trackers:
+          - basetrack: reference/trackers/basetrack.md
+          - bot_sort: reference/trackers/bot_sort.md
+          - byte_tracker: reference/trackers/byte_tracker.md
+          - track: reference/trackers/track.md
+          - utils:
+              - gmc: reference/trackers/utils/gmc.md
+              - kalman_filter: reference/trackers/utils/kalman_filter.md
+              - matching: reference/trackers/utils/matching.md
+      - utils:
+          - __init__: reference/utils/__init__.md
+          - autobatch: reference/utils/autobatch.md
+          - benchmarks: reference/utils/benchmarks.md
+          - callbacks:
+              - base: reference/utils/callbacks/base.md
+              - clearml: reference/utils/callbacks/clearml.md
+              - comet: reference/utils/callbacks/comet.md
+              - dvc: reference/utils/callbacks/dvc.md
+              - hub: reference/utils/callbacks/hub.md
+              - mlflow: reference/utils/callbacks/mlflow.md
+              - neptune: reference/utils/callbacks/neptune.md
+              - raytune: reference/utils/callbacks/raytune.md
+              - tensorboard: reference/utils/callbacks/tensorboard.md
+              - wb: reference/utils/callbacks/wb.md
+          - checks: reference/utils/checks.md
+          - dist: reference/utils/dist.md
+          - downloads: reference/utils/downloads.md
+          - errors: reference/utils/errors.md
+          - files: reference/utils/files.md
+          - instance: reference/utils/instance.md
+          - loss: reference/utils/loss.md
+          - metrics: reference/utils/metrics.md
+          - ops: reference/utils/ops.md
+          - patches: reference/utils/patches.md
+          - plotting: reference/utils/plotting.md
+          - tal: reference/utils/tal.md
+          - torch_utils: reference/utils/torch_utils.md
+          - triton: reference/utils/triton.md
+          - tuner: reference/utils/tuner.md
+
+  - Help:
+      - Help: help/index.md
+      - Frequently Asked Questions (FAQ): help/FAQ.md
+      - Contributing Guide: help/contributing.md
+      - Continuous Integration (CI) Guide: help/CI.md
+      - Contributor License Agreement (CLA): help/CLA.md
+      - Minimum Reproducible Example (MRE) Guide: help/minimum-reproducible-example.md
+      - Code of Conduct: help/code-of-conduct.md
+      - Environmental, Health and Safety (EHS) Policy: help/environmental-health-safety.md
+      - Security Policy: help/security.md
+      - Privacy Policy: help/privacy.md
+
+# Plugins including 301 redirects navigation ---------------------------------------------------------------------------
+plugins:
+  - macros
+  # - search:
+  #     lang: en
+  - mkdocstrings:
+      enabled: true
+      default_handler: python
+      handlers:
+        python:
+          options:
+            docstring_options:
+              ignore_init_summary: true
+            merge_init_into_class: true
+            docstring_style: google
+            show_root_heading: true
+            show_source: true
+            separate_signature: true
+            line_length: 80
+            show_signature_annotations: true
+            show_symbol_type_heading: true # insiders
+            show_symbol_type_toc: true # insiders
+            show_inheritance_diagram: true # insiders
+  - ultralytics:
+      add_desc: False
+      add_image: True
+      add_authors: True
+      add_json_ld: True
+      add_share_buttons: True
+      add_css: False
+      default_image: https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png
+  - redirects:
+      redirect_maps:
+        hi/index.md: index.md
+        nl/index.md: index.md
+        callbacks.md: usage/callbacks.md
+        cfg.md: usage/cfg.md
+        cli.md: usage/cli.md
+        config.md: usage/cfg.md
+        engine.md: usage/engine.md
+        environments/AWS-Quickstart.md: yolov5/environments/aws_quickstart_tutorial.md
+        environments/Docker-Quickstart.md: yolov5/environments/docker_image_quickstart_tutorial.md
+        environments/GCP-Quickstart.md: yolov5/environments/google_cloud_quickstart_tutorial.md
+        FAQ/augmentation.md: yolov5/tutorials/tips_for_best_training_results.md
+        package-framework.md: index.md
+        package-framework/mock_detector.md: index.md
+        predict.md: modes/predict.md
+        python.md: usage/python.md
+        quick-start.md: quickstart.md
+        app.md: hub/app/index.md
+        sdk.md: index.md
+        hub/inference_api.md: hub/inference-api.md
+        usage/hyperparameter_tuning.md: integrations/ray-tune.md
+        models/sam2.md: models/sam-2.md
+        reference/base_pred.md: reference/engine/predictor.md
+        reference/base_trainer.md: reference/engine/trainer.md
+        reference/exporter.md: reference/engine/exporter.md
+        reference/model.md: reference/engine/model.md
+        reference/nn.md: reference/nn/modules/head.md
+        reference/ops.md: reference/utils/ops.md
+        reference/results.md: reference/engine/results.md
+        reference/base_val.md: index.md
+        reference/index.md: reference/cfg/__init__.md
+        tasks/classification.md: tasks/classify.md
+        tasks/detection.md: tasks/detect.md
+        tasks/segmentation.md: tasks/segment.md
+        tasks/keypoints.md: tasks/pose.md
+        tasks/tracking.md: modes/track.md
+        SECURITY.md: help/security.md
+        help/minimum_reproducible_example.md: help/minimum-reproducible-example.md
+        help/code_of_conduct.md: help/code-of-conduct.md
+        tutorials/architecture-summary.md: yolov5/tutorials/architecture_description.md
+        tutorials/clearml-logging.md: yolov5/tutorials/clearml_logging_integration.md
+        tutorials/comet-logging.md: yolov5/tutorials/comet_logging_integration.md
+        tutorials/hyperparameter-evolution.md: yolov5/tutorials/hyperparameter_evolution.md
+        tutorials/model-ensembling.md: yolov5/tutorials/model_ensembling.md
+        tutorials/multi-gpu-training.md: yolov5/tutorials/multi_gpu_training.md
+        tutorials/nvidia-jetson.md: guides/nvidia-jetson.md
+        tutorials/pruning-sparsity.md: yolov5/tutorials/model_pruning_and_sparsity.md
+        tutorials/pytorch-hub.md: yolov5/tutorials/pytorch_hub_model_loading.md
+        tutorials/roboflow.md: yolov5/tutorials/roboflow_datasets_integration.md
+        tutorials/test-time-augmentation.md: yolov5/tutorials/test_time_augmentation.md
+        tutorials/torchscript-onnx-coreml-export.md: yolov5/tutorials/model_export.md
+        tutorials/train-custom-datasets.md: yolov5/tutorials/train_custom_data.md
+        tutorials/training-tips-best-results.md: yolov5/tutorials/tips_for_best_training_results.md
+        tutorials/transfer-learning-froze-layers.md: yolov5/tutorials/transfer_learning_with_frozen_layers.md
+        tutorials/weights-and-biasis-logging.md: yolov5/tutorials/comet_logging_integration.md
+        yolov5/pytorch_hub.md: yolov5/tutorials/pytorch_hub_model_loading.md
+        yolov5/hyp_evolution.md: yolov5/tutorials/hyperparameter_evolution.md
+        yolov5/pruning_sparsity.md: yolov5/tutorials/model_pruning_and_sparsity.md
+        yolov5/roboflow.md: yolov5/tutorials/roboflow_datasets_integration.md
+        yolov5/comet.md: yolov5/tutorials/comet_logging_integration.md
+        yolov5/clearml.md: yolov5/tutorials/clearml_logging_integration.md
+        yolov5/tta.md: yolov5/tutorials/test_time_augmentation.md
+        yolov5/multi_gpu_training.md: yolov5/tutorials/multi_gpu_training.md
+        yolov5/ensemble.md: yolov5/tutorials/model_ensembling.md
+        yolov5/jetson_nano.md: guides/nvidia-jetson.md
+        yolov5/transfer_learn_frozen.md: yolov5/tutorials/transfer_learning_with_frozen_layers.md
+        yolov5/neural_magic.md: yolov5/tutorials/neural_magic_pruning_quantization.md
+        yolov5/train_custom_data.md: yolov5/tutorials/train_custom_data.md
+        yolov5/architecture.md: yolov5/tutorials/architecture_description.md
+        yolov5/export.md: yolov5/tutorials/model_export.md
+        yolov5/yolov5_quickstart_tutorial.md: yolov5/quickstart_tutorial.md
+        yolov5/tips_for_best_training_results.md: yolov5/tutorials/tips_for_best_training_results.md
+        yolov5/tutorials/yolov5_neural_magic_tutorial.md: yolov5/tutorials/neural_magic_pruning_quantization.md
+        yolov5/tutorials/model_ensembling_tutorial.md: yolov5/tutorials/model_ensembling.md
+        yolov5/tutorials/pytorch_hub_tutorial.md: yolov5/tutorials/pytorch_hub_model_loading.md
+        yolov5/tutorials/yolov5_architecture_tutorial.md: yolov5/tutorials/architecture_description.md
+        yolov5/tutorials/multi_gpu_training_tutorial.md: yolov5/tutorials/multi_gpu_training.md
+        yolov5/tutorials/yolov5_pytorch_hub_tutorial.md: yolov5/tutorials/pytorch_hub_model_loading.md
+        yolov5/tutorials/model_export_tutorial.md: yolov5/tutorials/model_export.md
+        yolov5/tutorials/jetson_nano_tutorial.md: guides/nvidia-jetson.md
+        yolov5/tutorials/yolov5_model_ensembling_tutorial.md: yolov5/tutorials/model_ensembling.md
+        yolov5/tutorials/roboflow_integration.md: yolov5/tutorials/roboflow_datasets_integration.md
+        yolov5/tutorials/pruning_and_sparsity_tutorial.md: yolov5/tutorials/model_pruning_and_sparsity.md
+        yolov5/tutorials/yolov5_transfer_learning_with_frozen_layers_tutorial.md: yolov5/tutorials/transfer_learning_with_frozen_layers.md
+        yolov5/tutorials/transfer_learning_with_frozen_layers_tutorial.md: yolov5/tutorials/transfer_learning_with_frozen_layers.md
+        yolov5/tutorials/yolov5_model_export_tutorial.md: yolov5/tutorials/model_export.md
+        yolov5/tutorials/neural_magic_tutorial.md: yolov5/tutorials/neural_magic_pruning_quantization.md
+        yolov5/tutorials/yolov5_clearml_integration_tutorial.md: yolov5/tutorials/clearml_logging_integration.md
+        yolov5/tutorials/yolov5_train_custom_data.md: yolov5/tutorials/train_custom_data.md
+        yolov5/tutorials/comet_integration_tutorial.md: yolov5/tutorials/comet_logging_integration.md
+        yolov5/tutorials/yolov5_pruning_and_sparsity_tutorial.md: yolov5/tutorials/model_pruning_and_sparsity.md
+        yolov5/tutorials/yolov5_jetson_nano_tutorial.md: guides/nvidia-jetson.md
+        yolov5/tutorials/running_on_jetson_nano.md: guides/nvidia-jetson.md
+        yolov5/tutorials/yolov5_roboflow_integration.md: yolov5/tutorials/roboflow_datasets_integration.md
+        yolov5/tutorials/hyperparameter_evolution_tutorial.md: yolov5/tutorials/hyperparameter_evolution.md
+        yolov5/tutorials/yolov5_hyperparameter_evolution_tutorial.md: yolov5/tutorials/hyperparameter_evolution.md
+        yolov5/tutorials/clearml_integration_tutorial.md: yolov5/tutorials/clearml_logging_integration.md
+        yolov5/tutorials/test_time_augmentation_tutorial.md: yolov5/tutorials/test_time_augmentation.md
+        yolov5/tutorials/yolov5_test_time_augmentation_tutorial.md: yolov5/tutorials/test_time_augmentation.md
+        yolov5/environments/yolov5_amazon_web_services_quickstart_tutorial.md: yolov5/environments/aws_quickstart_tutorial.md
+        yolov5/environments/yolov5_google_cloud_platform_quickstart_tutorial.md: yolov5/environments/google_cloud_quickstart_tutorial.md
+        yolov5/environments/yolov5_docker_image_quickstart_tutorial.md: yolov5/environments/docker_image_quickstart_tutorial.md
+        reference/data/explorer/explorer.md: datasets/explorer/index.md
+        reference/data/explorer/gui/dash.md: datasets/explorer/index.md
+        reference/data/explorer/utils.md: datasets/explorer/index.md

+ 186 - 0
pyproject.toml

@@ -0,0 +1,186 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+# Overview:
+# This pyproject.toml file manages the build, packaging, and distribution of the Ultralytics library.
+# It defines essential project metadata, dependencies, and settings used to develop and deploy the library.
+
+# Key Sections:
+# - [build-system]: Specifies the build requirements and backend (e.g., setuptools, wheel).
+# - [project]: Includes details like name, version, description, authors, dependencies and more.
+# - [project.optional-dependencies]: Provides additional, optional packages for extended features.
+# - [tool.*]: Configures settings for various tools (pytest, yapf, etc.) used in the project.
+
+# Installation:
+# The Ultralytics library can be installed using the command: 'pip install ultralytics'
+# For development purposes, you can install the package in editable mode with: 'pip install -e .'
+# This approach allows for real-time code modifications without the need for re-installation.
+
+# Documentation:
+# For comprehensive documentation and usage instructions, visit: https://docs.ultralytics.com
+
+[build-system]
+requires = ["setuptools>=70.0.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+# Project settings -----------------------------------------------------------------------------------------------------
+[project]
+name = "ultralytics"
+dynamic = ["version"]
+description = "Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification."
+readme = "README.md"
+requires-python = ">=3.8"
+license = { "text" = "AGPL-3.0" }
+keywords = ["machine-learning", "deep-learning", "computer-vision", "ML", "DL", "AI", "YOLO", "YOLOv3", "YOLOv5", "YOLOv8", "YOLOv9", "YOLOv10", "YOLO11", "HUB", "Ultralytics"]
+authors = [
+    { name = "Glenn Jocher", email = "glenn.jocher@ultralytics.com" },
+    { name = "Jing Qiu", email = "jing.qiu@ultralytics.com" },
+]
+maintainers = [
+    { name = "Ultralytics", email = "hello@ultralytics.com" },
+]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Education",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Software Development",
+    "Topic :: Scientific/Engineering",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Topic :: Scientific/Engineering :: Image Recognition",
+    "Operating System :: POSIX :: Linux",
+    "Operating System :: MacOS",
+    "Operating System :: Microsoft :: Windows",
+]
+
+# Required dependencies ------------------------------------------------------------------------------------------------
+dependencies = [
+    "numpy>=1.23.0",
+    "numpy<2.0.0; sys_platform == 'darwin'", # macOS OpenVINO errors https://github.com/ultralytics/ultralytics/pull/17221
+    "matplotlib>=3.3.0",
+    "opencv-python>=4.6.0",
+    "pillow>=7.1.2",
+    "pyyaml>=5.3.1",
+    "requests>=2.23.0",
+    "scipy>=1.4.1",
+    "torch>=1.8.0",
+    "torch>=1.8.0,!=2.4.0; sys_platform == 'win32'", # Windows CPU errors w/ 2.4.0 https://github.com/ultralytics/ultralytics/issues/15049
+    "torchvision>=0.9.0",
+    "tqdm>=4.64.0", # progress bars
+    "psutil", # system utilization
+    "py-cpuinfo", # display CPU info
+    "pandas>=1.1.4",
+    "seaborn>=0.11.0", # plotting
+    "ultralytics-thop>=2.0.0", # FLOPs computation https://github.com/ultralytics/thop
+]
+
+# Optional dependencies ------------------------------------------------------------------------------------------------
+[project.optional-dependencies]
+dev = [
+    "ipython",
+    "pytest",
+    "pytest-cov",
+    "coverage[toml]",
+    "mkdocs>=1.6.0",
+    "mkdocs-material>=9.5.9",
+    "mkdocstrings[python]",
+    "mkdocs-redirects", # 301 redirects
+    "mkdocs-ultralytics-plugin>=0.1.8", # for meta descriptions and images, dates and authors
+    "mkdocs-macros-plugin>=1.0.5"  # duplicating content (i.e. export tables) in multiple places
+]
+export = [
+    "onnx>=1.12.0", # ONNX export
+    "coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'", # CoreML supported on macOS and Linux
+    "scikit-learn>=1.3.2; platform_system != 'Windows' and python_version <= '3.11'", # CoreML k-means quantization
+    "openvino>=2024.0.0", # OpenVINO export
+    "tensorflow>=2.0.0", # TF bug https://github.com/ultralytics/ultralytics/issues/5161
+    "tensorflowjs>=3.9.0", # TF.js export, automatically installs tensorflow
+    "tensorstore>=0.1.63; platform_machine == 'aarch64' and python_version >= '3.9'", # for TF Raspberry Pi exports
+    "keras", # not installed automatically by tensorflow>=2.16
+    "flatbuffers>=23.5.26,<100; platform_machine == 'aarch64'", # update old 'flatbuffers' included inside tensorflow package
+    "numpy==1.23.5; platform_machine == 'aarch64'", # fix error: `np.bool` was a deprecated alias for the builtin `bool` when using TensorRT models on NVIDIA Jetson
+    "h5py!=3.11.0; platform_machine == 'aarch64'", # fix h5py build issues due to missing aarch64 wheels in 3.11 release
+]
+solutions = [
+    "shapely>=2.0.0",    # shapely for point and polygon data matching
+    "streamlit",    # for live inference on web browser i.e `yolo streamlit-predict`
+]
+logging = [
+    "comet", # https://docs.ultralytics.com/integrations/comet/
+    "tensorboard>=2.13.0",
+    "dvclive>=2.12.0",
+]
+extra = [
+    "hub-sdk>=0.0.12", # Ultralytics HUB
+    "ipython", # interactive notebook
+    "albumentations>=1.4.6", # training augmentations
+    "pycocotools>=2.0.7", # COCO mAP
+]
+
+[project.urls]
+"Homepage" = "https://ultralytics.com"
+"Source" = "https://github.com/ultralytics/ultralytics"
+"Documentation" = "https://docs.ultralytics.com"
+"Bug Reports" = "https://github.com/ultralytics/ultralytics/issues"
+"Changelog" = "https://github.com/ultralytics/ultralytics/releases"
+
+[project.scripts]
+yolo = "ultralytics.cfg:entrypoint"
+ultralytics = "ultralytics.cfg:entrypoint"
+
+# Tools settings -------------------------------------------------------------------------------------------------------
+[tool.setuptools]  # configuration specific to the `setuptools` build backend.
+packages = { find = { where = ["."], include = ["ultralytics", "ultralytics.*"] } }
+package-data = { "ultralytics" = ["**/*.yaml", "../tests/*.py"], "ultralytics.assets" = ["*.jpg"] }
+
+[tool.setuptools.dynamic]
+version = { attr = "ultralytics.__version__" }
+
+[tool.pytest.ini_options]
+addopts = "--doctest-modules --durations=30 --color=yes"
+markers = [
+    "slow: skip slow tests unless --slow is set",
+]
+norecursedirs = [".git", "dist", "build"]
+
+[tool.coverage.run]
+source = ["ultralytics/"]
+data_file = "tests/.coverage"
+omit = ["ultralytics/utils/callbacks/*"]
+
+[tool.isort]
+line_length = 120
+multi_line_output = 0
+
+[tool.yapf]
+based_on_style = "pep8"
+spaces_before_comment = 2
+column_limit = 120
+coalesce_brackets = true
+spaces_around_power_operator = true
+space_between_ending_comma_and_closing_bracket = true
+split_before_closing_bracket = false
+split_before_first_argument = false
+
+[tool.ruff]
+line-length = 120
+
+[tool.ruff.format]
+docstring-code-format = true
+
+[tool.docformatter]
+wrap-summaries = 120
+wrap-descriptions = 120
+pre-summary-newline = true
+close-quotes-on-newline = true
+in-place = true
+
+[tool.codespell]
+ignore-words-list = "crate,nd,ned,strack,dota,ane,segway,fo,gool,winn,commend,bloc,nam,afterall"
+skip = '*.pt,*.pth,*.torchscript,*.onnx,*.tflite,*.pb,*.bin,*.param,*.mlmodel,*.engine,*.npy,*.data*,*.csv,*pnnx*,*venv*,*translat*,__pycache__*,*.ico,*.jpg,*.png,*.mp4,*.mov,/runs,/.git,./docs/??/*.md,./docs/mkdocs_??.yml'
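
The `[project.scripts]` table above wires both the `yolo` and `ultralytics` console commands to `ultralytics.cfg:entrypoint`, so the CLI and the Python API run the same code path. A minimal usage sketch of the Python side (illustrative only; it assumes the package is installed as described in the header comments, and that `yolo11n.pt` and a local sample image such as `bus.jpg` are reachable):

from ultralytics import YOLO  # same package the console scripts dispatch into

model = YOLO("yolo11n.pt")                     # weights are fetched on first use
results = model.predict("bus.jpg", imgsz=640)  # roughly: yolo predict model=yolo11n.pt source=bus.jpg imgsz=640
print(results[0].boxes.xyxy)                   # predicted boxes in xyxy pixel coordinates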

+ 20 - 0
requirements.txt

@@ -0,0 +1,20 @@
+torch==2.2.2 
+torchvision==0.17.2
+flash_attn-2.7.3+cu11torch2.2cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
+timm==1.0.14
+albumentations==2.0.4
+onnx==1.14.0
+onnxruntime==1.15.1
+pycocotools==2.0.7
+PyYAML==6.0.1
+scipy==1.13.0
+onnxslim==0.1.31
+onnxruntime-gpu==1.18.0
+gradio==4.44.1
+opencv-python==4.9.0.80
+psutil==5.9.8
+py-cpuinfo==9.0.0
+huggingface-hub==0.23.2
+safetensors==0.4.3
+numpy==1.26.4
+supervision==0.22.0

+ 22 - 0
tests/__init__.py

@@ -0,0 +1,22 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks
+
+# Constants used in tests
+MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt"  # test spaces in path
+CFG = "yolo11n.yaml"
+SOURCE = ASSETS / "bus.jpg"
+SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
+TMP = (ROOT / "../tests/tmp").resolve()  # temp directory for test files
+CUDA_IS_AVAILABLE = checks.cuda_is_available()
+CUDA_DEVICE_COUNT = checks.cuda_device_count()
+
+__all__ = (
+    "MODEL",
+    "CFG",
+    "SOURCE",
+    "SOURCES_LIST",
+    "TMP",
+    "CUDA_IS_AVAILABLE",
+    "CUDA_DEVICE_COUNT",
+)

+ 83 - 0
tests/conftest.py

@@ -0,0 +1,83 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import shutil
+from pathlib import Path
+
+from tests import TMP
+
+
+def pytest_addoption(parser):
+    """
+    Add custom command-line options to pytest.
+
+    Args:
+        parser (pytest.config.Parser): The pytest parser object for adding custom command-line options.
+
+    Returns:
+        (None)
+    """
+    parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")
+
+
+def pytest_collection_modifyitems(config, items):
+    """
+    Modify the list of test items to exclude tests marked as slow if the --slow option is not specified.
+
+    Args:
+        config (pytest.config.Config): The pytest configuration object that provides access to command-line options.
+        items (list): The list of collected pytest item objects to be modified based on the presence of --slow option.
+
+    Returns:
+        (None) The function modifies the 'items' list in place, and does not return a value.
+    """
+    if not config.getoption("--slow"):
+        # Remove the item entirely from the list of test items if it's marked as 'slow'
+        items[:] = [item for item in items if "slow" not in item.keywords]
+
+
+def pytest_sessionstart(session):
+    """
+    Initialize session configurations for pytest.
+
+    This function is automatically called by pytest after the 'Session' object has been created but before performing
+    test collection. It sets the initial seeds and prepares the temporary directory for the test session.
+
+    Args:
+        session (pytest.Session): The pytest session object.
+
+    Returns:
+        (None)
+    """
+    from ultralytics.utils.torch_utils import init_seeds
+
+    init_seeds()
+    shutil.rmtree(TMP, ignore_errors=True)  # delete any existing tests/tmp directory
+    TMP.mkdir(parents=True, exist_ok=True)  # create a new empty directory
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    """
+    Cleanup operations after pytest session.
+
+    This function is automatically called by pytest at the end of the entire test session. It removes certain files
+    and directories used during testing.
+
+    Args:
+        terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object used for terminal output.
+        exitstatus (int): The exit status of the test run.
+        config (pytest.config.Config): The pytest config object.
+
+    Returns:
+        (None)
+    """
+    from ultralytics.utils import WEIGHTS_DIR
+
+    # Remove files
+    models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
+    for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
+        Path(file).unlink(missing_ok=True)
+
+    # Remove directories
+    models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
+    for directory in [WEIGHTS_DIR / "path with spaces", TMP.parents[1] / ".pytest_cache", TMP] + models:
+        shutil.rmtree(directory, ignore_errors=True)
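
The `--slow` option registered in `pytest_addoption` works together with the `slow` marker declared in `[tool.pytest.ini_options]` above: unless pytest is invoked with `--slow`, `pytest_collection_modifyitems` drops every collected item carrying that marker. A minimal sketch of a hypothetical slow test (not part of this suite) illustrating the mechanism:

import time

import pytest


@pytest.mark.slow
def test_expensive_benchmark():
    """Hypothetical stand-in for a long-running export or benchmark; collected only with `pytest --slow`."""
    time.sleep(2)  # placeholder for expensive work
    assert True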

+ 122 - 0
tests/test_cli.py

@@ -0,0 +1,122 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import subprocess
+
+import pytest
+from PIL import Image
+
+from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
+from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
+from ultralytics.utils.torch_utils import TORCH_1_9
+
+# Constants
+TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
+MODELS = [WEIGHTS_DIR / TASK2MODEL[task] for task in TASKS]
+
+
+def run(cmd):
+    """Execute a shell command using subprocess."""
+    subprocess.run(cmd.split(), check=True)
+
+
+def test_special_modes():
+    """Test various special command-line modes for YOLO functionality."""
+    run("yolo help")
+    run("yolo checks")
+    run("yolo version")
+    run("yolo settings reset")
+    run("yolo cfg")
+
+
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+def test_train(task, model, data):
+    """Test YOLO training for different tasks, models, and datasets."""
+    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 cache=disk")
+
+
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+def test_val(task, model, data):
+    """Test YOLO validation process for specified task, model, and data using a shell command."""
+    run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")
+
+
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+def test_predict(task, model, data):
+    """Test YOLO prediction on provided sample assets for specified task and model."""
+    run(f"yolo predict model={model} source={ASSETS} imgsz=32 save save_crop save_txt")
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_export(model):
+    """Test exporting a YOLO model to TorchScript format."""
+    run(f"yolo export model={model} format=torchscript imgsz=32")
+
+
+def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
+    """Test the RTDETR functionality within Ultralytics for detection tasks using specified model and data."""
+    # Warning: must use imgsz=640 (note: also adds commas, spaces and fraction=0.25 args to test single-image training)
+    run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
+    run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+    if TORCH_1_9:
+        weights = WEIGHTS_DIR / "rtdetr-l.pt"
+        run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+
+
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
+def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8-seg.yaml"):
+    """Test FastSAM model for segmenting objects in images using various prompts within Ultralytics."""
+    source = ASSETS / "bus.jpg"
+
+    run(f"yolo segment val {task} model={model} data={data} imgsz=32")
+    run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")
+
+    from ultralytics import FastSAM
+    from ultralytics.models.sam import Predictor
+
+    # Create a FastSAM model
+    sam_model = FastSAM(model)  # or FastSAM-x.pt
+
+    # Run inference on an image
+    for s in (source, Image.open(source)):
+        everything_results = sam_model(s, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)
+
+        # Remove small regions
+        new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
+
+        # Run inference with bboxes and points and texts prompt at the same time
+        sam_model(source, bboxes=[439, 437, 524, 709], points=[[200, 200]], labels=[1], texts="a photo of a dog")
+
+
+def test_mobilesam():
+    """Test MobileSAM segmentation with point prompts using Ultralytics."""
+    from ultralytics import SAM
+
+    # Load the model
+    model = SAM(WEIGHTS_DIR / "mobile_sam.pt")
+
+    # Source
+    source = ASSETS / "zidane.jpg"
+
+    # Predict a segment based on a 1D point prompt and 1D labels.
+    model.predict(source, points=[900, 370], labels=[1])
+
+    # Predict a segment based on 3D points and 2D labels (multiple points per object).
+    model.predict(source, points=[[[900, 370], [1000, 100]]], labels=[[1, 1]])
+
+    # Predict a segment based on a box prompt
+    model.predict(source, bboxes=[439, 437, 524, 709], save=True)
+
+    # Predict all
+    # model(source)
+
+
+# Slow Tests -----------------------------------------------------------------------------------------------------------
+@pytest.mark.slow
+@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason="DDP is not available")
+def test_train_gpu(task, model, data):
+    """Test YOLO training on GPU(s) for various tasks and models."""
+    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0")  # single GPU
+    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0,1")  # multi GPU

+ 155 - 0
tests/test_cuda.py

@@ -0,0 +1,155 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from itertools import product
+from pathlib import Path
+
+import pytest
+import torch
+
+from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
+from ultralytics import YOLO
+from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+from ultralytics.utils import ASSETS, WEIGHTS_DIR
+from ultralytics.utils.checks import check_amp
+
+
+def test_checks():
+    """Validate CUDA settings against torch CUDA functions."""
+    assert torch.cuda.is_available() == CUDA_IS_AVAILABLE
+    assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_amp():
+    """Test AMP training checks."""
+    model = YOLO("yolo11n.pt").model.cuda()
+    assert check_amp(model)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        # Note: tests reduced below pending compute availability expansion as GPU CI runner utilization is high
+        # for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
+        for task, dynamic, int8, half, batch in product(TASKS, [True], [True], [False], [2])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_engine_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO model export to TensorRT format for various configurations and run inference."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="engine",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        data=TASK2DATA[task],
+        workspace=1,  # reduce workspace GB for less resource utilization during testing
+        simplify=True,  # use 'onnxslim'
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    Path(file).unlink()  # cleanup
+    Path(file).with_suffix(".cache").unlink() if int8 else None  # cleanup INT8 cache
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_train():
+    """Test model training on a minimal dataset using available CUDA devices."""
+    device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
+    YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_predict_multiple_devices():
+    """Validate model prediction consistency across CPU and CUDA devices."""
+    model = YOLO("yolo11n.pt")
+    model = model.cpu()
+    assert str(model.device) == "cpu"
+    _ = model(SOURCE)  # CPU inference
+    assert str(model.device) == "cpu"
+
+    model = model.to("cuda:0")
+    assert str(model.device) == "cuda:0"
+    _ = model(SOURCE)  # CUDA inference
+    assert str(model.device) == "cuda:0"
+
+    model = model.cpu()
+    assert str(model.device) == "cpu"
+    _ = model(SOURCE)  # CPU inference
+    assert str(model.device) == "cpu"
+
+    model = model.cuda()
+    assert str(model.device) == "cuda:0"
+    _ = model(SOURCE)  # CUDA inference
+    assert str(model.device) == "cuda:0"
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_autobatch():
+    """Check optimal batch size for YOLO model training using autobatch utility."""
+    from ultralytics.utils.autobatch import check_train_batch_size
+
+    check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_utils_benchmarks():
+    """Profile YOLO models for performance benchmarks."""
+    from ultralytics.utils.benchmarks import ProfileModels
+
+    # Pre-export a dynamic engine model to use dynamic inference
+    YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
+    ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
+def test_predict_sam():
+    """Test SAM model predictions using different prompts, including bounding boxes and point annotations."""
+    from ultralytics import SAM
+    from ultralytics.models.sam import Predictor as SAMPredictor
+
+    # Load a model
+    model = SAM(WEIGHTS_DIR / "sam2.1_b.pt")
+
+    # Display model information (optional)
+    model.info()
+
+    # Run inference
+    model(SOURCE, device=0)
+
+    # Run inference with bboxes prompt
+    model(SOURCE, bboxes=[439, 437, 524, 709], device=0)
+
+    # Run inference with no labels
+    model(ASSETS / "zidane.jpg", points=[900, 370], device=0)
+
+    # Run inference with 1D points and 1D labels
+    model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=0)
+
+    # Run inference with 2D points and 1D labels
+    model(ASSETS / "zidane.jpg", points=[[900, 370]], labels=[1], device=0)
+
+    # Run inference with multiple 2D points and 1D labels
+    model(ASSETS / "zidane.jpg", points=[[400, 370], [900, 370]], labels=[1, 1], device=0)
+
+    # Run inference with 3D points and 2D labels (multiple points per object)
+    model(ASSETS / "zidane.jpg", points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=0)
+
+    # Create SAMPredictor
+    overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
+    predictor = SAMPredictor(overrides=overrides)
+
+    # Set image
+    predictor.set_image(ASSETS / "zidane.jpg")  # set with image file
+    # predictor(bboxes=[439, 437, 524, 709])
+    # predictor(points=[900, 370], labels=[1])
+
+    # Reset image
+    predictor.reset_image()

+ 131 - 0
tests/test_engine.py

@@ -0,0 +1,131 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import sys
+from unittest import mock
+
+from tests import MODEL
+from ultralytics import YOLO
+from ultralytics.cfg import get_cfg
+from ultralytics.engine.exporter import Exporter
+from ultralytics.models.yolo import classify, detect, segment
+from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
+
+
+def test_func(*args):  # noqa
+    """Test function callback for evaluating YOLO model performance metrics."""
+    print("callback test passed")
+
+
+def test_export():
+    """Tests the model exporting function by adding a callback and asserting its execution."""
+    exporter = Exporter()
+    exporter.add_callback("on_export_start", test_func)
+    assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
+    f = exporter(model=YOLO("yolo11n.yaml").model)
+    YOLO(f)(ASSETS)  # exported model inference
+
+
+def test_detect():
+    """Test YOLO object detection training, validation, and prediction functionality."""
+    overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    cfg = get_cfg(DEFAULT_CFG)
+    cfg.data = "coco8.yaml"
+    cfg.imgsz = 32
+
+    # Trainer
+    trainer = detect.DetectionTrainer(overrides=overrides)
+    trainer.add_callback("on_train_start", test_func)
+    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+    trainer.train()
+
+    # Validator
+    val = detect.DetectionValidator(args=cfg)
+    val.add_callback("on_val_start", test_func)
+    assert test_func in val.callbacks["on_val_start"], "callback test failed"
+    val(model=trainer.best)  # validate best.pt
+
+    # Predictor
+    pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
+    pred.add_callback("on_predict_start", test_func)
+    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+    # Confirm there is no issue with sys.argv being empty.
+    with mock.patch.object(sys, "argv", []):
+        result = pred(source=ASSETS, model=MODEL)
+        assert len(result), "predictor test failed"
+
+    overrides["resume"] = trainer.last
+    trainer = detect.DetectionTrainer(overrides=overrides)
+    try:
+        trainer.train()
+    except Exception as e:
+        print(f"Expected exception caught: {e}")
+        return
+
+    Exception("Resume test failed!")
+
+
+def test_segment():
+    """Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
+    overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    cfg = get_cfg(DEFAULT_CFG)
+    cfg.data = "coco8-seg.yaml"
+    cfg.imgsz = 32
+    # YOLO(CFG_SEG).train(**overrides)  # works
+
+    # Trainer
+    trainer = segment.SegmentationTrainer(overrides=overrides)
+    trainer.add_callback("on_train_start", test_func)
+    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+    trainer.train()
+
+    # Validator
+    val = segment.SegmentationValidator(args=cfg)
+    val.add_callback("on_val_start", test_func)
+    assert test_func in val.callbacks["on_val_start"], "callback test failed"
+    val(model=trainer.best)  # validate best.pt
+
+    # Predictor
+    pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
+    pred.add_callback("on_predict_start", test_func)
+    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+    result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
+    assert len(result), "predictor test failed"
+
+    # Test resume
+    overrides["resume"] = trainer.last
+    trainer = segment.SegmentationTrainer(overrides=overrides)
+    try:
+        trainer.train()
+    except Exception as e:
+        print(f"Expected exception caught: {e}")
+        return
+
+    Exception("Resume test failed!")
+
+
+def test_classify():
+    """Test image classification including training, validation, and prediction phases."""
+    overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
+    cfg = get_cfg(DEFAULT_CFG)
+    cfg.data = "imagenet10"
+    cfg.imgsz = 32
+    # YOLO(CFG_SEG).train(**overrides)  # works
+
+    # Trainer
+    trainer = classify.ClassificationTrainer(overrides=overrides)
+    trainer.add_callback("on_train_start", test_func)
+    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
+    trainer.train()
+
+    # Validator
+    val = classify.ClassificationValidator(args=cfg)
+    val.add_callback("on_val_start", test_func)
+    assert test_func in val.callbacks["on_val_start"], "callback test failed"
+    val(model=trainer.best)
+
+    # Predictor
+    pred = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
+    pred.add_callback("on_predict_start", test_func)
+    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
+    result = pred(source=ASSETS, model=trainer.best)
+    assert len(result), "predictor test failed"

+ 216 - 0
tests/test_exports.py

@@ -0,0 +1,216 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import shutil
+import uuid
+from itertools import product
+from pathlib import Path
+
+import pytest
+
+from tests import MODEL, SOURCE
+from ultralytics import YOLO
+from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
+from ultralytics.utils import (
+    IS_RASPBERRYPI,
+    LINUX,
+    MACOS,
+    WINDOWS,
+    checks,
+)
+from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
+
+
+def test_export_torchscript():
+    """Test YOLO model exporting to TorchScript format for compatibility and correctness."""
+    file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+def test_export_onnx():
+    """Test YOLO model export to ONNX format with dynamic axes."""
+    file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+def test_export_openvino():
+    """Test YOLO exports to OpenVINO format for model inference compatibility."""
+    file = YOLO(MODEL).export(format="openvino", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [True, False], [True, False], [True, False], [1, 2])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_openvino_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO model exports to OpenVINO under various configuration matrix conditions."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="openvino",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        data=TASK2DATA[task],
+    )
+    if WINDOWS:
+        # Use unique filenames due to Windows file permissions bug possibly due to latent threaded use
+        # See https://github.com/ultralytics/ultralytics/actions/runs/8957949304/job/24601616830?pr=10423
+        file = Path(file)
+        file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    shutil.rmtree(file, ignore_errors=True)  # retry in case of potential lingering multi-threaded file usage errors
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch, simplify", product(TASKS, [True, False], [False], [False], [1, 2], [True, False])
+)
+def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify):
+    """Test YOLO exports to ONNX format with various configurations and parameters."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="onnx",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        simplify=simplify,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("task, dynamic, int8, half, batch", product(TASKS, [False], [False], [False], [1, 2]))
+def test_export_torchscript_matrix(task, dynamic, int8, half, batch):
+    """Tests YOLO model exports to TorchScript format under varied configurations."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="torchscript",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * 3, imgsz=64 if dynamic else 32)  # exported model inference at batch=3
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not MACOS, reason="CoreML inference only supported on macOS")
+@pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [False], [True, False], [True, False], [1])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_coreml_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to CoreML format with various parameter configurations."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="coreml",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
+    shutil.rmtree(file)  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
+@pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch",
+    [  # generate all combinations but exclude those where both int8 and half are True
+        (task, dynamic, int8, half, batch)
+        for task, dynamic, int8, half, batch in product(TASKS, [False], [True, False], [True, False], [1])
+        if not (int8 and half)  # exclude cases where both int8 and half are True
+    ],
+)
+def test_export_tflite_matrix(task, dynamic, int8, half, batch):
+    """Test YOLO exports to TFLite format considering various export configurations."""
+    file = YOLO(TASK2MODEL[task]).export(
+        format="tflite",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+    )
+    YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
+    Path(file).unlink()  # cleanup
+
+
+@pytest.mark.skipif(not TORCH_1_9, reason="CoreML>=7.2 not supported with PyTorch<=1.8")
+@pytest.mark.skipif(WINDOWS, reason="CoreML not supported on Windows")  # RuntimeError: BlobWriter not loaded
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="CoreML not supported on Raspberry Pi")
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
+def test_export_coreml():
+    """Test YOLO exports to CoreML format, optimized for macOS only."""
+    if MACOS:
+        file = YOLO(MODEL).export(format="coreml", imgsz=32)
+        YOLO(file)(SOURCE, imgsz=32)  # model prediction only supported on macOS for nms=False models
+    else:
+        YOLO(MODEL).export(format="coreml", nms=True, imgsz=32)
+
+
+@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
+@pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
+def test_export_tflite():
+    """Test YOLO exports to TFLite format under specific OS and Python version conditions."""
+    model = YOLO(MODEL)
+    file = model.export(format="tflite", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
+
+
+@pytest.mark.skipif(True, reason="Test disabled")
+@pytest.mark.skipif(not LINUX, reason="TF suffers from install conflicts on Windows and macOS")
+def test_export_pb():
+    """Test YOLO exports to TensorFlow's Protobuf (*.pb) format."""
+    model = YOLO(MODEL)
+    file = model.export(format="pb", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
+
+
+@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.")
+def test_export_paddle():
+    """Test YOLO exports to Paddle format, noting protobuf conflicts with ONNX."""
+    YOLO(MODEL).export(format="paddle", imgsz=32)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="MNN not supported on Raspberry Pi")
+def test_export_mnn():
+    """Test YOLO exports to MNN format (WARNING: MNN test must precede NCNN test or CI error on Windows)."""
+    file = YOLO(MODEL).export(format="mnn", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.slow
+def test_export_ncnn():
+    """Test YOLO exports to NCNN format."""
+    file = YOLO(MODEL).export(format="ncnn", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with tflite export.")
+@pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and macOS")
+def test_export_imx():
+    """Test YOLOv8n exports to IMX format."""
+    model = YOLO("yolov8n.pt")
+    file = model.export(format="imx", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)

+ 150 - 0
tests/test_integrations.py

@@ -0,0 +1,150 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import contextlib
+import os
+import subprocess
+import time
+from pathlib import Path
+
+import pytest
+
+from tests import MODEL, SOURCE, TMP
+from ultralytics import YOLO, download
+from ultralytics.utils import DATASETS_DIR, SETTINGS
+from ultralytics.utils.checks import check_requirements
+
+
+@pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
+def test_model_ray_tune():
+    """Tune YOLO model using Ray for hyperparameter optimization."""
+    YOLO("yolo11n-cls.yaml").tune(
+        use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
+    )
+
+
+@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+def test_mlflow():
+    """Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
+    SETTINGS["mlflow"] = True
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+    SETTINGS["mlflow"] = False
+
+
+@pytest.mark.skipif(True, reason="Test failing in scheduled CI https://github.com/ultralytics/ultralytics/pull/8868")
+@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
+def test_mlflow_keep_run_active():
+    """Ensure MLflow run status matches MLFLOW_KEEP_RUN_ACTIVE environment variable settings."""
+    import mlflow
+
+    SETTINGS["mlflow"] = True
+    run_name = "Test Run"
+    os.environ["MLFLOW_RUN"] = run_name
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE=True
+    os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.active_run().info.status
+    assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
+
+    run_id = mlflow.active_run().info.run_id
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE=False
+    os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.get_run(run_id=run_id).info.status
+    assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
+
+    # Test with MLFLOW_KEEP_RUN_ACTIVE not set
+    os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
+    YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+    status = mlflow.get_run(run_id=run_id).info.status
+    assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
+    SETTINGS["mlflow"] = False
+
+
+@pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
+def test_triton():
+    """
+    Test NVIDIA Triton Server functionalities with YOLO model.
+
+    See https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver.
+    """
+    check_requirements("tritonclient[all]")
+    from tritonclient.http import InferenceServerClient  # noqa
+
+    # Create variables
+    model_name = "yolo"
+    triton_repo = TMP / "triton_repo"  # Triton repo path
+    triton_model = triton_repo / model_name  # Triton model path
+
+    # Export model to ONNX
+    f = YOLO(MODEL).export(format="onnx", dynamic=True)
+
+    # Prepare Triton repo
+    (triton_model / "1").mkdir(parents=True, exist_ok=True)
+    Path(f).rename(triton_model / "1" / "model.onnx")
+    (triton_model / "config.pbtxt").touch()
+
+    # Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
+    tag = "nvcr.io/nvidia/tritonserver:23.09-py3"  # 6.4 GB
+
+    # Pull the image
+    subprocess.call(f"docker pull {tag}", shell=True)
+
+    # Run the Triton server and capture the container ID
+    container_id = (
+        subprocess.check_output(
+            f"docker run -d --rm -v {triton_repo}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
+            shell=True,
+        )
+        .decode("utf-8")
+        .strip()
+    )
+
+    # Wait for the Triton server to start
+    triton_client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)
+
+    # Wait until model is ready
+    for _ in range(10):
+        with contextlib.suppress(Exception):
+            assert triton_client.is_model_ready(model_name)
+            break
+        time.sleep(1)
+
+    # Check Triton inference
+    YOLO(f"http://localhost:8000/{model_name}", "detect")(SOURCE)  # exported model inference
+
+    # Kill and remove the container at the end of the test
+    subprocess.call(f"docker kill {container_id}", shell=True)
+
+
+@pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
+def test_pycocotools():
+    """Validate YOLO model predictions on COCO dataset using pycocotools."""
+    from ultralytics.models.yolo.detect import DetectionValidator
+    from ultralytics.models.yolo.pose import PoseValidator
+    from ultralytics.models.yolo.segment import SegmentationValidator
+
+    # Download annotations after each dataset downloads first
+    url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
+
+    args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
+    validator = DetectionValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
+    _ = validator.eval_json(validator.stats)
+
+    args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
+    validator = SegmentationValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
+    _ = validator.eval_json(validator.stats)
+
+    args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
+    validator = PoseValidator(args=args)
+    validator()
+    validator.is_coco = True
+    download(f"{url}person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
+    _ = validator.eval_json(validator.stats)

+ 615 - 0
tests/test_python.py

@@ -0,0 +1,615 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import contextlib
+import csv
+import urllib
+from copy import copy
+from pathlib import Path
+
+import cv2
+import numpy as np
+import pytest
+import torch
+import yaml
+from PIL import Image
+
+from tests import CFG, MODEL, SOURCE, SOURCES_LIST, TMP
+from ultralytics import RTDETR, YOLO
+from ultralytics.cfg import MODELS, TASK2DATA, TASKS
+from ultralytics.data.build import load_inference_source
+from ultralytics.utils import (
+    ASSETS,
+    DEFAULT_CFG,
+    DEFAULT_CFG_PATH,
+    LOGGER,
+    ONLINE,
+    ROOT,
+    WEIGHTS_DIR,
+    WINDOWS,
+    checks,
+    is_dir_writeable,
+    is_github_action_running,
+)
+from ultralytics.utils.downloads import download
+from ultralytics.utils.torch_utils import TORCH_1_9
+
+IS_TMP_WRITEABLE = is_dir_writeable(TMP)  # WARNING: must be run once tests start as TMP does not exist on tests/init
+
+
+def test_model_forward():
+    """Test the forward pass of the YOLO model."""
+    model = YOLO(CFG)
+    model(source=None, imgsz=32, augment=True)  # also test no source and augment
+
+
+def test_model_methods():
+    """Test various methods and properties of the YOLO model to ensure correct functionality."""
+    model = YOLO(MODEL)
+
+    # Model methods
+    model.info(verbose=True, detailed=True)
+    model = model.reset_weights()
+    model = model.load(MODEL)
+    model.to("cpu")
+    model.fuse()
+    model.clear_callback("on_train_start")
+    model.reset_callbacks()
+
+    # Model properties
+    _ = model.names
+    _ = model.device
+    _ = model.transforms
+    _ = model.task_map
+
+
+def test_model_profile():
+    """Test profiling of the YOLO model with `profile=True` to assess performance and resource usage."""
+    from ultralytics.nn.tasks import DetectionModel
+
+    model = DetectionModel()  # build model
+    im = torch.randn(1, 3, 64, 64)  # requires min imgsz=64
+    _ = model.predict(im, profile=True)
+
+
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_txt():
+    """Tests YOLO predictions with file, directory, and pattern sources listed in a text file."""
+    file = TMP / "sources_multi_row.txt"
+    with open(file, "w") as f:
+        for src in SOURCES_LIST:
+            f.write(f"{src}\n")
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.skipif(True, reason="disabled for testing")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_csv_multi_row():
+    """Tests YOLO predictions with sources listed in multiple rows of a CSV file."""
+    file = TMP / "sources_multi_row.csv"
+    with open(file, "w", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(["source"])
+        writer.writerows([[src] for src in SOURCES_LIST])
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.skipif(True, reason="disabled for testing")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_csv_single_row():
+    """Tests YOLO predictions with sources listed in a single row of a CSV file."""
+    file = TMP / "sources_single_row.csv"
+    with open(file, "w", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(SOURCES_LIST)
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.parametrize("model_name", MODELS)
+def test_predict_img(model_name):
+    """Test YOLO model predictions on various image input types and sources, including online images."""
+    model = YOLO(WEIGHTS_DIR / model_name)
+    im = cv2.imread(str(SOURCE))  # uint8 numpy array
+    assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1  # PIL
+    assert len(model(source=im, save=True, save_txt=True, imgsz=32)) == 1  # ndarray
+    assert len(model(torch.rand((2, 3, 32, 32)), imgsz=32)) == 2  # batch-size 2 Tensor, FP32 0.0-1.0 RGB order
+    assert len(model(source=[im, im], save=True, save_txt=True, imgsz=32)) == 2  # batch
+    assert len(list(model(source=[im, im], save=True, stream=True, imgsz=32))) == 2  # stream
+    assert len(model(torch.zeros(320, 640, 3).numpy().astype(np.uint8), imgsz=32)) == 1  # tensor to numpy
+    batch = [
+        str(SOURCE),  # filename
+        Path(SOURCE),  # Path
+        "https://github.com/ultralytics/assets/releases/download/v0.0.0/zidane.jpg" if ONLINE else SOURCE,  # URI
+        cv2.imread(str(SOURCE)),  # OpenCV
+        Image.open(SOURCE),  # PIL
+        np.zeros((320, 640, 3), dtype=np.uint8),  # numpy
+    ]
+    assert len(model(batch, imgsz=32)) == len(batch)  # multiple sources in a batch
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_predict_visualize(model):
+    """Test model prediction methods with 'visualize=True' to generate and display prediction visualizations."""
+    YOLO(WEIGHTS_DIR / model)(SOURCE, imgsz=32, visualize=True)
+
+
+def test_predict_grey_and_4ch():
+    """Test YOLO prediction on SOURCE converted to greyscale and 4-channel images with various filenames."""
+    im = Image.open(SOURCE)
+    directory = TMP / "im4"
+    directory.mkdir(parents=True, exist_ok=True)
+
+    source_greyscale = directory / "greyscale.jpg"
+    source_rgba = directory / "4ch.png"
+    source_non_utf = directory / "non_UTF_测试文件_tést_image.jpg"
+    source_spaces = directory / "image with spaces.jpg"
+
+    im.convert("L").save(source_greyscale)  # greyscale
+    im.convert("RGBA").save(source_rgba)  # 4-ch PNG with alpha
+    im.save(source_non_utf)  # non-UTF characters in filename
+    im.save(source_spaces)  # spaces in filename
+
+    # Inference
+    model = YOLO(MODEL)
+    for f in source_rgba, source_greyscale, source_non_utf, source_spaces:
+        for source in Image.open(f), cv2.imread(str(f)), f:
+            results = model(source, save=True, verbose=True, imgsz=32)
+            assert len(results) == 1  # verify that an image was run
+        f.unlink()  # cleanup
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+@pytest.mark.skipif(is_github_action_running(), reason="No auth https://github.com/JuanBindez/pytubefix/issues/166")
+def test_youtube():
+    """Test YOLO model on a YouTube video stream, handling potential network-related errors."""
+    model = YOLO(MODEL)
+    try:
+        model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
+    # Handle internet connection errors and 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
+    except (urllib.error.HTTPError, ConnectionError) as e:
+        LOGGER.warning(f"WARNING: YouTube Test Error: {e}")
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_track_stream():
+    """
+    Tests streaming tracking on a short 10-frame video using the ByteTrack tracker and different GMC methods.
+
+    Note: imgsz=160 is required for tracking so that detections reach higher confidence and produce better matches.
+    """
+    video_url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/decelera_portrait_min.mov"
+    model = YOLO(MODEL)
+    model.track(video_url, imgsz=160, tracker="bytetrack.yaml")
+    model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True)  # test frame saving also
+
+    # Test Global Motion Compensation (GMC) methods
+    for gmc in "orb", "sift", "ecc":
+        with open(ROOT / "cfg/trackers/botsort.yaml", encoding="utf-8") as f:
+            data = yaml.safe_load(f)
+        tracker = TMP / f"botsort-{gmc}.yaml"
+        data["gmc_method"] = gmc
+        with open(tracker, "w", encoding="utf-8") as f:
+            yaml.safe_dump(data, f)
+        model.track(video_url, imgsz=160, tracker=tracker)
+
+
+def test_val():
+    """Test the validation mode of the YOLO model."""
+    YOLO(MODEL).val(data="coco8.yaml", imgsz=32, save_hybrid=True)
+
+
+def test_train_scratch():
+    """Test training the YOLO model from scratch using the provided configuration."""
+    model = YOLO(CFG)
+    model.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
+    model(SOURCE)
+
+
+def test_train_pretrained():
+    """Test training of the YOLO model starting from a pre-trained checkpoint."""
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+    model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
+    model(SOURCE)
+
+
+def test_all_model_yamls():
+    """Test YOLO model creation for all available YAML configurations in the `cfg/models` directory."""
+    for m in (ROOT / "cfg" / "models").rglob("*.yaml"):
+        if "rtdetr" in m.name:
+            if TORCH_1_9:  # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
+                _ = RTDETR(m.name)(SOURCE, imgsz=640)  # must be 640
+        else:
+            YOLO(m.name)
+
+
+@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
+def test_workflow():
+    """Test the complete workflow including training, validation, prediction, and exporting."""
+    model = YOLO(MODEL)
+    model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
+    model.val(imgsz=32)
+    model.predict(SOURCE, imgsz=32)
+    model.export(format="torchscript")  # WARNING: Windows slow CI export bug
+
+
+def test_predict_callback_and_setup():
+    """Test callback functionality during YOLO prediction setup and execution."""
+
+    def on_predict_batch_end(predictor):
+        """Callback function that handles operations at the end of a prediction batch."""
+        path, im0s, _ = predictor.batch
+        im0s = im0s if isinstance(im0s, list) else [im0s]
+        bs = [predictor.dataset.bs for _ in range(len(path))]
+        predictor.results = zip(predictor.results, im0s, bs)  # results is List[batch_size]
+
+    model = YOLO(MODEL)
+    model.add_callback("on_predict_batch_end", on_predict_batch_end)
+
+    dataset = load_inference_source(source=SOURCE)
+    bs = dataset.bs  # noqa access predictor properties
+    results = model.predict(dataset, stream=True, imgsz=160)  # source already setup
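+    # The callback above zips each Results object with its original image and the dataset batch size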
+    for r, im0, bs in results:
+        print("test_callback", im0.shape)
+        print("test_callback", bs)
+        boxes = r.boxes  # Boxes object for bbox outputs
+        print(boxes)
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_results(model):
+    """Ensure YOLO model predictions can be processed and printed in various formats."""
+    results = YOLO(WEIGHTS_DIR / model)([SOURCE, SOURCE], imgsz=160)
+    for r in results:
+        r = r.cpu().numpy()
+        print(r, len(r), r.path)  # print numpy attributes
+        r = r.to(device="cpu", dtype=torch.float32)
+        r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
+        r.save_crop(save_dir=TMP / "runs/tests/crops/")
+        r.to_json(normalize=True)
+        r.to_df(decimals=3)
+        r.to_csv()
+        r.to_xml()
+        r.plot(pil=True)
+        r.plot(conf=True, boxes=True)
+        print(r, len(r), r.path)  # print after methods
+
+
+def test_labels_and_crops():
+    """Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving."""
+    imgs = [SOURCE, ASSETS / "zidane.jpg"]
+    results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
+    save_path = Path(results[0].save_dir)
+    for r in results:
+        im_name = Path(r.path).stem
+        cls_idxs = r.boxes.cls.int().tolist()
+        # Check correct detections
+        assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0])  # bus.jpg and zidane.jpg classes
+        # Check label path
+        labels = save_path / f"labels/{im_name}.txt"
+        assert labels.exists()
+        # Check detections match label count
+        assert len(r.boxes.data) == len([line for line in labels.read_text().splitlines() if line])
+        # Check crops path and files
+        crop_dirs = list((save_path / "crops").iterdir())
+        crop_files = [f for p in crop_dirs for f in p.glob("*")]
+        # Crop directories match detections
+        assert all(r.names.get(c) in {d.name for d in crop_dirs} for c in cls_idxs)
+        # Same number of crops as detections
+        assert len([f for f in crop_files if im_name in f.name]) == len(r.boxes.data)
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_data_utils():
+    """Test utility functions in ultralytics/data/utils.py, including dataset stats and auto-splitting."""
+    from ultralytics.data.utils import HUBDatasetStats, autosplit
+    from ultralytics.utils.downloads import zip_directory
+
+    # from ultralytics.utils.files import WorkingDirectory
+    # with WorkingDirectory(ROOT.parent / 'tests'):
+
+    for task in TASKS:
+        file = Path(TASK2DATA[task]).with_suffix(".zip")  # i.e. coco8.zip
+        download(f"https://github.com/ultralytics/hub/raw/main/example_datasets/{file}", unzip=False, dir=TMP)
+        stats = HUBDatasetStats(TMP / file, task=task)
+        stats.get_json(save=True)
+        stats.process_images()
+
+    autosplit(TMP / "coco8")
+    zip_directory(TMP / "coco8/images/val")  # zip
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_data_converter():
+    """Test dataset conversion functions from COCO to YOLO format and class mappings."""
+    from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
+
+    file = "instances_val2017.json"
+    download(f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{file}", dir=TMP)
+    convert_coco(labels_dir=TMP, save_dir=TMP / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
+    coco80_to_coco91_class()
+
+
+def test_data_annotator():
+    """Automatically annotate data using specified detection and segmentation models."""
+    from ultralytics.data.annotator import auto_annotate
+
+    auto_annotate(
+        ASSETS,
+        det_model=WEIGHTS_DIR / "yolo11n.pt",
+        sam_model=WEIGHTS_DIR / "mobile_sam.pt",
+        output_dir=TMP / "auto_annotate_labels",
+    )
+
+
+def test_events():
+    """Test event sending functionality."""
+    from ultralytics.hub.utils import Events
+
+    events = Events()
+    events.enabled = True
+    cfg = copy(DEFAULT_CFG)  # does not require deepcopy
+    cfg.mode = "test"
+    events(cfg)
+
+
+def test_cfg_init():
+    """Test configuration initialization utilities from the 'ultralytics.cfg' module."""
+    from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
+
+    with contextlib.suppress(SyntaxError):
+        check_dict_alignment({"a": 1}, {"b": 2})
+    copy_default_cfg()
+    (Path.cwd() / DEFAULT_CFG_PATH.name.replace(".yaml", "_copy.yaml")).unlink(missing_ok=False)
+    [smart_value(x) for x in ["none", "true", "false"]]
+
+
+def test_utils_init():
+    """Test initialization utilities in the Ultralytics library."""
+    from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_action_running
+
+    get_ubuntu_version()
+    is_github_action_running()
+    get_git_origin_url()
+    get_git_branch()
+
+
+def test_utils_checks():
+    """Test various utility checks for filenames, git status, requirements, image sizes, and versions."""
+    checks.check_yolov5u_filename("yolov5n.pt")
+    checks.git_describe(ROOT)
+    checks.check_requirements()  # check requirements.txt
+    checks.check_imgsz([600, 600], max_dim=1)
+    checks.check_imshow(warn=True)
+    checks.check_version("ultralytics", "8.0.0")
+    checks.print_args()
+
+
+@pytest.mark.skipif(WINDOWS, reason="Windows profiling is extremely slow (cause unknown)")
+def test_utils_benchmarks():
+    """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
+    from ultralytics.utils.benchmarks import ProfileModels
+
+    ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+
+
+def test_utils_torchutils():
+    """Test Torch utility functions including profiling and FLOP calculations."""
+    from ultralytics.nn.modules.conv import Conv
+    from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile, time_sync
+
+    x = torch.randn(1, 64, 20, 20)
+    m = Conv(64, 64, k=1, s=2)
+
+    profile(x, [m], n=3)
+    get_flops_with_torch_profiler(m)
+    time_sync()
+
+
+def test_utils_ops():
+    """Test utility operations functions for coordinate transformation and normalization."""
+    from ultralytics.utils.ops import (
+        ltwh2xywh,
+        ltwh2xyxy,
+        make_divisible,
+        xywh2ltwh,
+        xywh2xyxy,
+        xywhn2xyxy,
+        xywhr2xyxyxyxy,
+        xyxy2ltwh,
+        xyxy2xywh,
+        xyxy2xywhn,
+        xyxyxyxy2xywhr,
+    )
+
+    make_divisible(17, torch.tensor([8]))
+
+    boxes = torch.rand(10, 4)  # xywh
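+    # Round-trip coordinate conversions should reproduce the original boxes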
+    torch.allclose(boxes, xyxy2xywh(xywh2xyxy(boxes)))
+    torch.allclose(boxes, xyxy2xywhn(xywhn2xyxy(boxes)))
+    torch.allclose(boxes, ltwh2xywh(xywh2ltwh(boxes)))
+    torch.allclose(boxes, xyxy2ltwh(ltwh2xyxy(boxes)))
+
+    boxes = torch.rand(10, 5)  # xywhr for OBB
+    boxes[:, 4] = torch.randn(10) * 30
+    torch.allclose(boxes, xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes)), rtol=1e-3)
+
+
+def test_utils_files():
+    """Test file handling utilities including file age, date, and paths with spaces."""
+    from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path
+
+    file_age(SOURCE)
+    file_date(SOURCE)
+    get_latest_run(ROOT / "runs")
+
+    path = TMP / "path/with spaces"
+    path.mkdir(parents=True, exist_ok=True)
+    with spaces_in_path(path) as new_path:
+        print(new_path)
+
+
+@pytest.mark.slow
+def test_utils_patches_torch_save():
+    """Test torch_save backoff when _torch_save raises RuntimeError to ensure robustness."""
+    from unittest.mock import MagicMock, patch
+
+    from ultralytics.utils.patches import torch_save
+
+    mock = MagicMock(side_effect=RuntimeError)
+
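+    # Every patched save call raises RuntimeError, so torch_save must exhaust its retries before re-raising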
+    with patch("ultralytics.utils.patches._torch_save", new=mock):
+        with pytest.raises(RuntimeError):
+            torch_save(torch.zeros(1), TMP / "test.pt")
+
+    assert mock.call_count == 4, "torch_save was not attempted the expected number of times"
+
+
+def test_nn_modules_conv():
+    """Test Convolutional Neural Network modules including CBAM, Conv2, and ConvTranspose."""
+    from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus
+
+    c1, c2 = 8, 16  # input and output channels
+    x = torch.zeros(4, c1, 10, 10)  # BCHW
+
+    # Run all modules not otherwise covered in tests
+    DWConvTranspose2d(c1, c2)(x)
+    ConvTranspose(c1, c2)(x)
+    Focus(c1, c2)(x)
+    CBAM(c1)(x)
+
+    # Fuse ops
+    m = Conv2(c1, c2)
+    m.fuse_convs()
+    m(x)
+
+
+def test_nn_modules_block():
+    """Test various blocks in neural network modules including C1, C3TR, BottleneckCSP, C3Ghost, and C3x."""
+    from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x
+
+    c1, c2 = 8, 16  # input and output channels
+    x = torch.zeros(4, c1, 10, 10)  # BCHW
+
+    # Run all modules not otherwise covered in tests
+    C1(c1, c2)(x)
+    C3x(c1, c2)(x)
+    C3TR(c1, c2)(x)
+    C3Ghost(c1, c2)(x)
+    BottleneckCSP(c1, c2)(x)
+
+
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_hub():
+    """Test Ultralytics HUB functionalities (e.g. export formats, logout)."""
+    from ultralytics.hub import export_fmts_hub, logout
+    from ultralytics.hub.utils import smart_request
+
+    export_fmts_hub()
+    logout()
+    smart_request("GET", "https://github.com", progress=True)
+
+
+@pytest.fixture
+def image():
+    """Load and return an image from a predefined source using OpenCV."""
+    return cv2.imread(str(SOURCE))
+
+
+@pytest.mark.parametrize(
+    "auto_augment, erasing, force_color_jitter",
+    [
+        (None, 0.0, False),
+        ("randaugment", 0.5, True),
+        ("augmix", 0.2, False),
+        ("autoaugment", 0.0, True),
+    ],
+)
+def test_classify_transforms_train(image, auto_augment, erasing, force_color_jitter):
+    """Tests classification transforms during training with various augmentations to ensure proper functionality."""
+    from ultralytics.data.augment import classify_augmentations
+
+    transform = classify_augmentations(
+        size=224,
+        mean=(0.5, 0.5, 0.5),
+        std=(0.5, 0.5, 0.5),
+        scale=(0.08, 1.0),
+        ratio=(3.0 / 4.0, 4.0 / 3.0),
+        hflip=0.5,
+        vflip=0.5,
+        auto_augment=auto_augment,
+        hsv_h=0.015,
+        hsv_s=0.4,
+        hsv_v=0.4,
+        force_color_jitter=force_color_jitter,
+        erasing=erasing,
+    )
+
+    transformed_image = transform(Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)))
+
+    assert transformed_image.shape == (3, 224, 224)
+    assert torch.is_tensor(transformed_image)
+    assert transformed_image.dtype == torch.float32
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_model_tune():
+    """Tune YOLO model for performance improvement."""
+    YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+    YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+
+
+def test_model_embeddings():
+    """Test YOLO model embeddings."""
+    model_detect = YOLO(MODEL)
+    model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+
+    for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
+        assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
+        assert len(model_segment.embed(source=batch, imgsz=32)) == len(batch)
+
+
+@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
+def test_yolo_world():
+    """Tests YOLO world models with CLIP support, including detection and training scenarios."""
+    model = YOLO(WEIGHTS_DIR / "yolov8s-world.pt")  # no YOLO11n-world model yet
+    model.set_classes(["tree", "window"])
+    model(SOURCE, conf=0.01)
+
+    model = YOLO(WEIGHTS_DIR / "yolov8s-worldv2.pt")  # no YOLO11n-world model yet
+    # Training from a pretrained model. Eval is included at the final stage of training.
+    # Use dota8.yaml, which has fewer categories, to reduce the inference time of the CLIP model
+    model.train(
+        data="dota8.yaml",
+        epochs=1,
+        imgsz=32,
+        cache="disk",
+        close_mosaic=1,
+    )
+
+    # Test WorldTrainerFromScratch
+    from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
+
+    model = YOLO("yolov8s-worldv2.yaml")  # no YOLO11n-world model yet
+    model.train(
+        data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
+        epochs=1,
+        imgsz=32,
+        cache="disk",
+        close_mosaic=1,
+        trainer=WorldTrainerFromScratch,
+    )
+
+
+def test_yolov10():
+    """Test YOLOv10 model training, validation, and prediction steps with minimal configurations."""
+    model = YOLO("yolov10n.yaml")
+    # train/val/predict
+    model.train(data="coco8.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
+    model.val(data="coco8.yaml", imgsz=32)
+    model.predict(imgsz=32, save_txt=True, save_crop=True, augment=True)
+    model(SOURCE)

+ 94 - 0
tests/test_solutions.py

@@ -0,0 +1,94 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import cv2
+import pytest
+
+from tests import TMP
+from ultralytics import YOLO, solutions
+from ultralytics.utils import ASSETS_URL, WEIGHTS_DIR
+from ultralytics.utils.downloads import safe_download
+
+DEMO_VIDEO = "solutions_ci_demo.mp4"
+POSE_VIDEO = "solution_ci_pose_demo.mp4"
+
+
+@pytest.mark.slow
+def test_major_solutions():
+    """Test the object counting, heatmap, speed estimation, trackzone and queue management solution."""
+    safe_download(url=f"{ASSETS_URL}/{DEMO_VIDEO}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / DEMO_VIDEO))
+    assert cap.isOpened(), "Error reading video file"
+    region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]
+    counter = solutions.ObjectCounter(region=region_points, model="yolo11n.pt", show=False)  # Test object counter
+    heatmap = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False)  # Test heatmaps
+    heatmap_count = solutions.Heatmap(
+        colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False, region=region_points
+    )  # Test heatmaps with object counting
+    speed = solutions.SpeedEstimator(region=region_points, model="yolo11n.pt", show=False)  # Test speed estimation
+    queue = solutions.QueueManager(region=region_points, model="yolo11n.pt", show=False)  # Test queue manager
+    line_analytics = solutions.Analytics(analytics_type="line", model="yolo11n.pt", show=False)  # Line analytics
+    pie_analytics = solutions.Analytics(analytics_type="pie", model="yolo11n.pt", show=False)  # Pie analytics
+    bar_analytics = solutions.Analytics(analytics_type="bar", model="yolo11n.pt", show=False)  # Bar analytics
+    area_analytics = solutions.Analytics(analytics_type="area", model="yolo11n.pt", show=False)  # Area analytics
+    trackzone = solutions.TrackZone(region=region_points, model="yolo11n.pt", show=False)  # Test trackzone
+    frame_count = 0  # Required for analytics
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            break
+        frame_count += 1
+        original_im0 = im0.copy()
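+        # Pass a fresh copy to each solution so in-place annotations do not affect the others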
+        _ = counter.count(original_im0.copy())
+        _ = heatmap.generate_heatmap(original_im0.copy())
+        _ = heatmap_count.generate_heatmap(original_im0.copy())
+        _ = speed.estimate_speed(original_im0.copy())
+        _ = queue.process_queue(original_im0.copy())
+        _ = line_analytics.process_data(original_im0.copy(), frame_count)
+        _ = pie_analytics.process_data(original_im0.copy(), frame_count)
+        _ = bar_analytics.process_data(original_im0.copy(), frame_count)
+        _ = area_analytics.process_data(original_im0.copy(), frame_count)
+        _ = trackzone.trackzone(original_im0.copy())
+    cap.release()
+
+    # Test workouts monitoring
+    safe_download(url=f"{ASSETS_URL}/{POSE_VIDEO}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / POSE_VIDEO))
+    assert cap.isOpened(), "Error reading video file"
+    gym = solutions.AIGym(kpts=[5, 11, 13], show=False)
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            break
+        _ = gym.monitor(im0)
+    cap.release()
+
+
+@pytest.mark.slow
+def test_instance_segmentation():
+    """Test the instance segmentation solution."""
+    from ultralytics.utils.plotting import Annotator, colors
+
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+    names = model.names
+    cap = cv2.VideoCapture(str(TMP / DEMO_VIDEO))  # cv2.VideoCapture requires a string path
+    assert cap.isOpened(), "Error reading video file"
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            break
+        results = model.predict(im0)
+        annotator = Annotator(im0, line_width=2)
+        if results[0].masks is not None:
+            clss = results[0].boxes.cls.cpu().tolist()
+            masks = results[0].masks.xy
+            for mask, cls in zip(masks, clss):
+                color = colors(int(cls), True)
+                annotator.seg_bbox(mask=mask, mask_color=color, label=names[int(cls)])
+    cap.release()
+    cv2.destroyAllWindows()
+
+
+@pytest.mark.slow
+def test_streamlit_predict():
+    """Test streamlit predict live inference solution."""
+    solutions.Inference().inference()