
Add files via upload

Mengqi Lei 2 months ago
parent commit 42d5500130

+ 1 - 0
ultralytics/engine/__init__.py

@@ -0,0 +1 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ 1476 - 0
ultralytics/engine/exporter.py

@@ -0,0 +1,1476 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Export a YOLO PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
+
+Format                  | `format=argument`         | Model
+---                     | ---                       | ---
+PyTorch                 | -                         | yolo11n.pt
+TorchScript             | `torchscript`             | yolo11n.torchscript
+ONNX                    | `onnx`                    | yolo11n.onnx
+OpenVINO                | `openvino`                | yolo11n_openvino_model/
+TensorRT                | `engine`                  | yolo11n.engine
+CoreML                  | `coreml`                  | yolo11n.mlpackage
+TensorFlow SavedModel   | `saved_model`             | yolo11n_saved_model/
+TensorFlow GraphDef     | `pb`                      | yolo11n.pb
+TensorFlow Lite         | `tflite`                  | yolo11n.tflite
+TensorFlow Edge TPU     | `edgetpu`                 | yolo11n_edgetpu.tflite
+TensorFlow.js           | `tfjs`                    | yolo11n_web_model/
+PaddlePaddle            | `paddle`                  | yolo11n_paddle_model/
+MNN                     | `mnn`                     | yolo11n.mnn
+NCNN                    | `ncnn`                    | yolo11n_ncnn_model/
+IMX                     | `imx`                     | yolo11n_imx_model/
+
+Requirements:
+    $ pip install "ultralytics[export]"
+
+Python:
+    from ultralytics import YOLO
+    model = YOLO('yolo11n.pt')
+    results = model.export(format='onnx')
+
+CLI:
+    $ yolo mode=export model=yolo11n.pt format=onnx
+
+Inference:
+    $ yolo predict model=yolo11n.pt                 # PyTorch
+                         yolo11n.torchscript        # TorchScript
+                         yolo11n.onnx               # ONNX Runtime or OpenCV DNN with dnn=True
+                         yolo11n_openvino_model     # OpenVINO
+                         yolo11n.engine             # TensorRT
+                         yolo11n.mlpackage          # CoreML (macOS-only)
+                         yolo11n_saved_model        # TensorFlow SavedModel
+                         yolo11n.pb                 # TensorFlow GraphDef
+                         yolo11n.tflite             # TensorFlow Lite
+                         yolo11n_edgetpu.tflite     # TensorFlow Edge TPU
+                         yolo11n_paddle_model       # PaddlePaddle
+                         yolo11n.mnn                # MNN
+                         yolo11n_ncnn_model         # NCNN
+                         yolo11n_imx_model          # IMX
+
+TensorFlow.js:
+    $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
+    $ npm install
+    $ ln -s ../../yolo11n_web_model public/yolo11n_web_model
+    $ npm start
+"""
+
+import gc
+import json
+import os
+import shutil
+import subprocess
+import time
+import warnings
+from copy import deepcopy
+from datetime import datetime
+from pathlib import Path
+
+import numpy as np
+import torch
+
+from ultralytics.cfg import TASK2DATA, get_cfg
+from ultralytics.data import build_dataloader
+from ultralytics.data.dataset import YOLODataset
+from ultralytics.data.utils import check_cls_dataset, check_det_dataset
+from ultralytics.nn.autobackend import check_class_names, default_class_names
+from ultralytics.nn.modules import C2f, Classify, Detect, RTDETRDecoder
+from ultralytics.nn.tasks import DetectionModel, SegmentationModel, WorldModel
+from ultralytics.utils import (
+    ARM64,
+    DEFAULT_CFG,
+    IS_JETSON,
+    LINUX,
+    LOGGER,
+    MACOS,
+    PYTHON_VERSION,
+    ROOT,
+    WINDOWS,
+    __version__,
+    callbacks,
+    colorstr,
+    get_default_args,
+    yaml_save,
+)
+from ultralytics.utils.checks import (
+    check_imgsz,
+    check_is_path_safe,
+    check_requirements,
+    check_version,
+    is_sudo_available,
+)
+from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
+from ultralytics.utils.files import file_size, spaces_in_path
+from ultralytics.utils.ops import Profile
+from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device
+
+
+def export_formats():
+    """Ultralytics YOLO export formats."""
+    x = [
+        ["PyTorch", "-", ".pt", True, True, []],
+        ["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize"]],
+        ["ONNX", "onnx", ".onnx", True, True, ["batch", "dynamic", "half", "opset", "simplify"]],
+        ["OpenVINO", "openvino", "_openvino_model", True, False, ["batch", "dynamic", "half", "int8"]],
+        ["TensorRT", "engine", ".engine", False, True, ["batch", "dynamic", "half", "int8", "simplify"]],
+        ["CoreML", "coreml", ".mlpackage", True, False, ["batch", "half", "int8", "nms"]],
+        ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True, ["batch", "int8", "keras"]],
+        ["TensorFlow GraphDef", "pb", ".pb", True, True, ["batch"]],
+        ["TensorFlow Lite", "tflite", ".tflite", True, False, ["batch", "half", "int8"]],
+        ["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", True, False, []],
+        ["TensorFlow.js", "tfjs", "_web_model", True, False, ["batch", "half", "int8"]],
+        ["PaddlePaddle", "paddle", "_paddle_model", True, True, ["batch"]],
+        ["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
+        ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
+        ["IMX", "imx", "_imx_model", True, True, ["int8"]],
+    ]
+    return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
+
+
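+# Example (informal sketch): the format table returned by export_formats() can be viewed as a DataFrame,
+# assuming pandas is installed (pandas is not required by this module):
+#   import pandas as pd
+#   print(pd.DataFrame(export_formats()))
+
+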
+def validate_args(format, passed_args, valid_args):
+    """
+    Validates arguments based on format.
+
+    Args:
+        format (str): The export format.
+        passed_args (Namespace): The arguments used during export.
+        valid_args (list): List of valid arguments for the format.
+
+    Raises:
+        AssertionError: If an argument that's not supported by the export format is used, or if format doesn't have the supported arguments listed.
+    """
+    # Only check valid usage of these args
+    export_args = ["half", "int8", "dynamic", "keras", "nms", "batch"]
+
+    assert valid_args is not None, f"ERROR ❌️ valid arguments for '{format}' not listed."
+    custom = {"batch": 1, "data": None, "device": None}  # exporter defaults
+    default_args = get_cfg(DEFAULT_CFG, custom)
+    for arg in export_args:
+        not_default = getattr(passed_args, arg, None) != getattr(default_args, arg, None)
+        if not_default:
+            assert arg in valid_args, f"ERROR ❌️ argument '{arg}' is not supported for format='{format}'"
+
+
+def gd_outputs(gd):
+    """TensorFlow GraphDef model output node names."""
+    name_list, input_list = [], []
+    for node in gd.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
+        name_list.append(node.name)
+        input_list.extend(node.input)
+    return sorted(f"{x}:0" for x in list(set(name_list) - set(input_list)) if not x.startswith("NoOp"))
+
+
+def try_export(inner_func):
+    """YOLO export decorator, i.e. @try_export."""
+    inner_args = get_default_args(inner_func)
+
+    def outer_func(*args, **kwargs):
+        """Export a model."""
+        prefix = inner_args["prefix"]
+        try:
+            with Profile() as dt:
+                f, model = inner_func(*args, **kwargs)
+            LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as '{f}' ({file_size(f):.1f} MB)")
+            return f, model
+        except Exception as e:
+            LOGGER.error(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}")
+            raise e
+
+    return outer_func
+
+
+class Exporter:
+    """
+    A class for exporting a model.
+
+    Attributes:
+        args (SimpleNamespace): Configuration for the exporter.
+        callbacks (list, optional): List of callback functions. Defaults to None.
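+
+    Examples:
+        A minimal usage sketch (assumes `model` is an already-loaded Ultralytics PyTorch model, e.g. `YOLO('yolo11n.pt').model`):
+        >>> exporter = Exporter(overrides={"format": "onnx"})
+        >>> exporter(model=model)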
+    """
+
+    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+        """
+        Initializes the Exporter class.
+
+        Args:
+            cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG.
+            overrides (dict, optional): Configuration overrides. Defaults to None.
+            _callbacks (dict, optional): Dictionary of callback functions. Defaults to None.
+        """
+        self.args = get_cfg(cfg, overrides)
+        if self.args.format.lower() in {"coreml", "mlmodel"}:  # fix attempt for protobuf<3.20.x errors
+            os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"  # must run before TensorBoard callback
+
+        self.callbacks = _callbacks or callbacks.get_default_callbacks()
+        callbacks.add_integration_callbacks(self)
+
+    def __call__(self, model=None) -> str:
+        """Returns list of exported files/dirs after running callbacks."""
+        self.run_callbacks("on_export_start")
+        t = time.time()
+        fmt = self.args.format.lower()  # to lowercase
+        if fmt in {"tensorrt", "trt"}:  # 'engine' aliases
+            fmt = "engine"
+        if fmt in {"mlmodel", "mlpackage", "mlprogram", "apple", "ios", "coreml"}:  # 'coreml' aliases
+            fmt = "coreml"
+        fmts_dict = export_formats()
+        fmts = tuple(fmts_dict["Argument"][1:])  # available export formats
+        if fmt not in fmts:
+            import difflib
+
+            # Get the closest match if format is invalid
+            matches = difflib.get_close_matches(fmt, fmts, n=1, cutoff=0.6)  # 60% similarity required to match
+            if not matches:
+                raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
+            LOGGER.warning(f"WARNING ⚠️ Invalid export format='{fmt}', updating to format='{matches[0]}'")
+            fmt = matches[0]
+        flags = [x == fmt for x in fmts]
+        if sum(flags) != 1:
+            raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
+        (
+            jit,
+            onnx,
+            xml,
+            engine,
+            coreml,
+            saved_model,
+            pb,
+            tflite,
+            edgetpu,
+            tfjs,
+            paddle,
+            mnn,
+            ncnn,
+            imx,
+        ) = flags  # export booleans
+        is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
+
+        # Device
+        dla = None
+        if fmt == "engine" and self.args.device is None:
+            LOGGER.warning("WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0")
+            self.args.device = "0"
+        if fmt == "engine" and "dla" in str(self.args.device):  # convert int/list to str first
+            dla = self.args.device.split(":")[-1]
+            assert dla in {"0", "1"}, f"Expected self.args.device='dla:0' or 'dla:1', but got 'dla:{dla}'."
+            self.args.device = "0"  # update device to "0"
+        self.device = select_device("cpu" if self.args.device is None else self.args.device)
+
+        # Argument compatibility checks
+        fmt_keys = fmts_dict["Arguments"][flags.index(True) + 1]
+        validate_args(fmt, self.args, fmt_keys)
+        if imx and not self.args.int8:
+            LOGGER.warning("WARNING ⚠️ IMX only supports int8 export, setting int8=True.")
+            self.args.int8 = True
+        if not hasattr(model, "names"):
+            model.names = default_class_names()
+        model.names = check_class_names(model.names)
+        if self.args.half and self.args.int8:
+            LOGGER.warning("WARNING ⚠️ half=True and int8=True are mutually exclusive, setting half=False.")
+            self.args.half = False
+        if self.args.half and onnx and self.device.type == "cpu":
+            LOGGER.warning("WARNING ⚠️ half=True only compatible with GPU export, i.e. use device=0")
+            self.args.half = False
+            assert not self.args.dynamic, "half=True not compatible with dynamic=True, i.e. use only one."
+        self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2)  # check image size
+        if self.args.int8 and engine:
+            self.args.dynamic = True  # enforce dynamic to export TensorRT INT8
+        if self.args.optimize:
+            assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
+            assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
+        if self.args.int8 and tflite:
+            assert not getattr(model, "end2end", False), "TFLite INT8 export not supported for end2end models."
+        if edgetpu:
+            if not LINUX:
+                raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler")
+            elif self.args.batch != 1:  # see github.com/ultralytics/ultralytics/pull/13420
+                LOGGER.warning("WARNING ⚠️ Edge TPU export requires batch size 1, setting batch=1.")
+                self.args.batch = 1
+        if isinstance(model, WorldModel):
+            LOGGER.warning(
+                "WARNING ⚠️ YOLOWorld (original version) export is not supported to any format.\n"
+                "WARNING ⚠️ YOLOWorldv2 models (i.e. 'yolov8s-worldv2.pt') only support export to "
+                "(torchscript, onnx, openvino, engine, coreml) formats. "
+                "See https://docs.ultralytics.com/models/yolo-world for details."
+            )
+            model.clip_model = None  # openvino int8 export error: https://github.com/ultralytics/ultralytics/pull/18445
+        if self.args.int8 and not self.args.data:
+            self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")]  # assign default data
+            LOGGER.warning(
+                "WARNING ⚠️ INT8 export requires a missing 'data' arg for calibration. "
+                f"Using default 'data={self.args.data}'."
+            )
+
+        # Input
+        im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device)
+        file = Path(
+            getattr(model, "pt_path", None) or getattr(model, "yaml_file", None) or model.yaml.get("yaml_file", "")
+        )
+        if file.suffix in {".yaml", ".yml"}:
+            file = Path(file.name)
+
+        # Update model
+        model = deepcopy(model).to(self.device)
+        for p in model.parameters():
+            p.requires_grad = False
+        model.eval()
+        model.float()
+        model = model.fuse()
+
+        if imx:
+            from ultralytics.utils.torch_utils import FXModel
+
+            model = FXModel(model)
+        for m in model.modules():
+            if isinstance(m, Classify):
+                m.export = True
+            if isinstance(m, (Detect, RTDETRDecoder)):  # includes all Detect subclasses like Segment, Pose, OBB
+                m.dynamic = self.args.dynamic
+                m.export = True
+                m.format = self.args.format
+                m.max_det = self.args.max_det
+            elif isinstance(m, C2f) and not is_tf_format:
+                # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
+                m.forward = m.forward_split
+            if isinstance(m, Detect) and imx:
+                from ultralytics.utils.tal import make_anchors
+
+                m.anchors, m.strides = (
+                    x.transpose(0, 1)
+                    for x in make_anchors(
+                        torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
+                    )
+                )
+
+        y = None
+        for _ in range(2):
+            y = model(im)  # dry runs
+        if self.args.half and onnx and self.device.type != "cpu":
+            im, model = im.half(), model.half()  # to FP16
+
+        # Filter warnings
+        warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)  # suppress TracerWarning
+        warnings.filterwarnings("ignore", category=UserWarning)  # suppress shape prim::Constant missing ONNX warning
+        warnings.filterwarnings("ignore", category=DeprecationWarning)  # suppress CoreML np.bool deprecation warning
+
+        # Assign
+        self.im = im
+        self.model = model
+        self.file = file
+        self.output_shape = (
+            tuple(y.shape)
+            if isinstance(y, torch.Tensor)
+            else tuple(tuple(x.shape if isinstance(x, torch.Tensor) else []) for x in y)
+        )
+        self.pretty_name = Path(self.model.yaml.get("yaml_file", self.file)).stem.replace("yolo", "YOLO")
+        data = model.args["data"] if hasattr(model, "args") and isinstance(model.args, dict) else ""
+        description = f"Ultralytics {self.pretty_name} model {f'trained on {data}' if data else ''}"
+        self.metadata = {
+            "description": description,
+            "author": "Ultralytics",
+            "date": datetime.now().isoformat(),
+            "version": __version__,
+            "license": "AGPL-3.0 License (https://ultralytics.com/license)",
+            "docs": "https://docs.ultralytics.com",
+            "stride": int(max(model.stride)),
+            "task": model.task,
+            "batch": self.args.batch,
+            "imgsz": self.imgsz,
+            "names": model.names,
+            "args": {k: v for k, v in self.args if k in fmt_keys},
+        }  # model metadata
+        if model.task == "pose":
+            self.metadata["kpt_shape"] = model.model[-1].kpt_shape
+
+        LOGGER.info(
+            f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
+            f"output shape(s) {self.output_shape} ({file_size(file):.1f} MB)"
+        )
+
+        # Exports
+        f = [""] * len(fmts)  # exported filenames
+        if jit or ncnn:  # TorchScript
+            f[0], _ = self.export_torchscript()
+        if engine:  # TensorRT required before ONNX
+            f[1], _ = self.export_engine(dla=dla)
+        if onnx:  # ONNX
+            f[2], _ = self.export_onnx()
+        if xml:  # OpenVINO
+            f[3], _ = self.export_openvino()
+        if coreml:  # CoreML
+            f[4], _ = self.export_coreml()
+        if is_tf_format:  # TensorFlow formats
+            self.args.int8 |= edgetpu
+            f[5], keras_model = self.export_saved_model()
+            if pb or tfjs:  # pb prerequisite to tfjs
+                f[6], _ = self.export_pb(keras_model=keras_model)
+            if tflite:
+                f[7], _ = self.export_tflite(keras_model=keras_model, nms=False, agnostic_nms=self.args.agnostic_nms)
+            if edgetpu:
+                f[8], _ = self.export_edgetpu(tflite_model=Path(f[5]) / f"{self.file.stem}_full_integer_quant.tflite")
+            if tfjs:
+                f[9], _ = self.export_tfjs()
+        if paddle:  # PaddlePaddle
+            f[10], _ = self.export_paddle()
+        if mnn:  # MNN
+            f[11], _ = self.export_mnn()
+        if ncnn:  # NCNN
+            f[12], _ = self.export_ncnn()
+        if imx:
+            f[13], _ = self.export_imx()
+
+        # Finish
+        f = [str(x) for x in f if x]  # filter out '' and None
+        if any(f):
+            f = str(Path(f[-1]))
+            square = self.imgsz[0] == self.imgsz[1]
+            s = (
+                ""
+                if square
+                else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not "
+                f"work. Use export 'imgsz={max(self.imgsz)}' if val is required."
+            )
+            imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(" ", "")
+            predict_data = f"data={data}" if model.task == "segment" and fmt == "pb" else ""
+            q = "int8" if self.args.int8 else "half" if self.args.half else ""  # quantization
+            LOGGER.info(
+                f"\nExport complete ({time.time() - t:.1f}s)"
+                f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
+                f"\nPredict:         yolo predict task={model.task} model={f} imgsz={imgsz} {q} {predict_data}"
+                f"\nValidate:        yolo val task={model.task} model={f} imgsz={imgsz} data={data} {q} {s}"
+                f"\nVisualize:       https://netron.app"
+            )
+
+        self.run_callbacks("on_export_end")
+        return f  # return exported file/dir path
+
+    def get_int8_calibration_dataloader(self, prefix=""):
+        """Build and return a dataloader suitable for calibration of INT8 models."""
+        LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
+        data = (check_cls_dataset if self.model.task == "classify" else check_det_dataset)(self.args.data)
+        # TensorRT INT8 calibration should use 2x batch size
+        batch = self.args.batch * (2 if self.args.format == "engine" else 1)
+        dataset = YOLODataset(
+            data[self.args.split or "val"],
+            data=data,
+            task=self.model.task,
+            imgsz=self.imgsz[0],
+            augment=False,
+            batch_size=batch,
+        )
+        n = len(dataset)
+        if n < self.args.batch:
+            raise ValueError(
+                f"The calibration dataset ({n} images) must have at least as many images as the batch size ('batch={self.args.batch}')."
+            )
+        elif n < 300:
+            LOGGER.warning(f"{prefix} WARNING ⚠️ >300 images recommended for INT8 calibration, found {n} images.")
+        return build_dataloader(dataset, batch=batch, workers=0)  # required for batch loading
+
+    @try_export
+    def export_torchscript(self, prefix=colorstr("TorchScript:")):
+        """YOLO TorchScript model export."""
+        LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
+        f = self.file.with_suffix(".torchscript")
+
+        ts = torch.jit.trace(self.model, self.im, strict=False)
+        extra_files = {"config.txt": json.dumps(self.metadata)}  # torch._C.ExtraFilesMap()
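+        # Note (informational): the metadata embedded here can be read back at load time, e.g.
+        #   extra_files = {"config.txt": ""}
+        #   torch.jit.load(str(f), _extra_files=extra_files)  # populates extra_files in place
+        #   metadata = json.loads(extra_files["config.txt"])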
+        if self.args.optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
+            LOGGER.info(f"{prefix} optimizing for mobile...")
+            from torch.utils.mobile_optimizer import optimize_for_mobile
+
+            optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
+        else:
+            ts.save(str(f), _extra_files=extra_files)
+        return f, None
+
+    @try_export
+    def export_onnx(self, prefix=colorstr("ONNX:")):
+        """YOLO ONNX export."""
+        requirements = ["onnx>=1.12.0"]
+        if self.args.simplify:
+            requirements += ["onnxslim", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
+        check_requirements(requirements)
+        import onnx  # noqa
+
+        opset_version = self.args.opset or get_latest_opset()
+        LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__} opset {opset_version}...")
+        f = str(self.file.with_suffix(".onnx"))
+
+        output_names = ["output0", "output1"] if isinstance(self.model, SegmentationModel) else ["output0"]
+        dynamic = self.args.dynamic
+        if dynamic:
+            dynamic = {"images": {0: "batch", 2: "height", 3: "width"}}  # shape(1,3,640,640)
+            if isinstance(self.model, SegmentationModel):
+                dynamic["output0"] = {0: "batch", 2: "anchors"}  # shape(1, 116, 8400)
+                dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"}  # shape(1,32,160,160)
+            elif isinstance(self.model, DetectionModel):
+                dynamic["output0"] = {0: "batch", 2: "anchors"}  # shape(1, 84, 8400)
+
+        torch.onnx.export(
+            self.model.cpu() if dynamic else self.model,  # dynamic=True only compatible with cpu
+            self.im.cpu() if dynamic else self.im,
+            f,
+            verbose=False,
+            opset_version=opset_version,
+            do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
+            input_names=["images"],
+            output_names=output_names,
+            dynamic_axes=dynamic or None,
+        )
+
+        # Checks
+        model_onnx = onnx.load(f)  # load onnx model
+
+        # Simplify
+        if self.args.simplify:
+            try:
+                import onnxslim
+
+                LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
+                model_onnx = onnxslim.slim(model_onnx)
+
+            except Exception as e:
+                LOGGER.warning(f"{prefix} simplifier failure: {e}")
+
+        # Metadata
+        for k, v in self.metadata.items():
+            meta = model_onnx.metadata_props.add()
+            meta.key, meta.value = k, str(v)
+
+        onnx.save(model_onnx, f)
+        return f, model_onnx
+
+    @try_export
+    def export_openvino(self, prefix=colorstr("OpenVINO:")):
+        """YOLO OpenVINO export."""
+        check_requirements("openvino>=2024.5.0")
+        import openvino as ov
+
+        LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
+        assert TORCH_1_13, f"OpenVINO export requires torch>=1.13.0 but torch=={torch.__version__} is installed"
+        ov_model = ov.convert_model(
+            self.model,
+            input=None if self.args.dynamic else [self.im.shape],
+            example_input=self.im,
+        )
+
+        def serialize(ov_model, file):
+            """Set RT info, serialize and save metadata YAML."""
+            ov_model.set_rt_info("YOLO", ["model_info", "model_type"])
+            ov_model.set_rt_info(True, ["model_info", "reverse_input_channels"])
+            ov_model.set_rt_info(114, ["model_info", "pad_value"])
+            ov_model.set_rt_info([255.0], ["model_info", "scale_values"])
+            ov_model.set_rt_info(self.args.iou, ["model_info", "iou_threshold"])
+            ov_model.set_rt_info([v.replace(" ", "_") for v in self.model.names.values()], ["model_info", "labels"])
+            if self.model.task != "classify":
+                ov_model.set_rt_info("fit_to_window_letterbox", ["model_info", "resize_type"])
+
+            ov.runtime.save_model(ov_model, file, compress_to_fp16=self.args.half)
+            yaml_save(Path(file).parent / "metadata.yaml", self.metadata)  # add metadata.yaml
+
+        if self.args.int8:
+            fq = str(self.file).replace(self.file.suffix, f"_int8_openvino_model{os.sep}")
+            fq_ov = str(Path(fq) / self.file.with_suffix(".xml").name)
+            check_requirements("nncf>=2.14.0")
+            import nncf
+
+            def transform_fn(data_item) -> np.ndarray:
+                """Quantization transform function."""
+                data_item: torch.Tensor = data_item["img"] if isinstance(data_item, dict) else data_item
+                assert data_item.dtype == torch.uint8, "Input image must be uint8 for the quantization preprocessing"
+                im = data_item.numpy().astype(np.float32) / 255.0  # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
+                return np.expand_dims(im, 0) if im.ndim == 3 else im
+
+            # Generate calibration data for integer quantization
+            ignored_scope = None
+            if isinstance(self.model.model[-1], Detect):
+                # Includes all Detect subclasses like Segment, Pose, OBB, WorldDetect
+                head_module_name = ".".join(list(self.model.named_modules())[-1][0].split(".")[:2])
+                ignored_scope = nncf.IgnoredScope(  # ignore operations
+                    patterns=[
+                        f".*{head_module_name}/.*/Add",
+                        f".*{head_module_name}/.*/Sub*",
+                        f".*{head_module_name}/.*/Mul*",
+                        f".*{head_module_name}/.*/Div*",
+                        f".*{head_module_name}\\.dfl.*",
+                    ],
+                    types=["Sigmoid"],
+                )
+
+            quantized_ov_model = nncf.quantize(
+                model=ov_model,
+                calibration_dataset=nncf.Dataset(self.get_int8_calibration_dataloader(prefix), transform_fn),
+                preset=nncf.QuantizationPreset.MIXED,
+                ignored_scope=ignored_scope,
+            )
+            serialize(quantized_ov_model, fq_ov)
+            return fq, None
+
+        f = str(self.file).replace(self.file.suffix, f"_openvino_model{os.sep}")
+        f_ov = str(Path(f) / self.file.with_suffix(".xml").name)
+
+        serialize(ov_model, f_ov)
+        return f, None
+
+    @try_export
+    def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
+        """YOLO Paddle export."""
+        check_requirements(("paddlepaddle-gpu" if torch.cuda.is_available() else "paddlepaddle", "x2paddle"))
+        import x2paddle  # noqa
+        from x2paddle.convert import pytorch2paddle  # noqa
+
+        LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
+        f = str(self.file).replace(self.file.suffix, f"_paddle_model{os.sep}")
+
+        pytorch2paddle(module=self.model, save_dir=f, jit_type="trace", input_examples=[self.im])  # export
+        yaml_save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
+        return f, None
+
+    @try_export
+    def export_mnn(self, prefix=colorstr("MNN:")):
+        """YOLOv8 MNN export using MNN https://github.com/alibaba/MNN."""
+        f_onnx, _ = self.export_onnx()  # get onnx model first
+
+        check_requirements("MNN>=2.9.6")
+        import MNN  # noqa
+        from MNN.tools import mnnconvert
+
+        # Setup and checks
+        LOGGER.info(f"\n{prefix} starting export with MNN {MNN.version()}...")
+        assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
+        f = str(self.file.with_suffix(".mnn"))  # MNN model file
+        args = ["", "-f", "ONNX", "--modelFile", f_onnx, "--MNNModel", f, "--bizCode", json.dumps(self.metadata)]
+        if self.args.int8:
+            args.extend(("--weightQuantBits", "8"))
+        if self.args.half:
+            args.append("--fp16")
+        mnnconvert.convert(args)
+        # Remove the scratch file produced during conversion optimization
+        convert_scratch = Path(self.file.parent / ".__convert_external_data.bin")
+        if convert_scratch.exists():
+            convert_scratch.unlink()
+        return f, None
+
+    @try_export
+    def export_ncnn(self, prefix=colorstr("NCNN:")):
+        """YOLO NCNN export using PNNX https://github.com/pnnx/pnnx."""
+        check_requirements("ncnn")
+        import ncnn  # noqa
+
+        LOGGER.info(f"\n{prefix} starting export with NCNN {ncnn.__version__}...")
+        f = Path(str(self.file).replace(self.file.suffix, f"_ncnn_model{os.sep}"))
+        f_ts = self.file.with_suffix(".torchscript")
+
+        name = Path("pnnx.exe" if WINDOWS else "pnnx")  # PNNX filename
+        pnnx = name if name.is_file() else (ROOT / name)
+        if not pnnx.is_file():
+            LOGGER.warning(
+                f"{prefix} WARNING ⚠️ PNNX not found. Attempting to download binary file from "
+                "https://github.com/pnnx/pnnx/.\nNote PNNX Binary file must be placed in current working directory "
+                f"or in {ROOT}. See PNNX repo for full installation instructions."
+            )
+            system = "macos" if MACOS else "windows" if WINDOWS else "linux-aarch64" if ARM64 else "linux"
+            try:
+                release, assets = get_github_assets(repo="pnnx/pnnx")
+                asset = [x for x in assets if f"{system}.zip" in x][0]
+                assert isinstance(asset, str), "Unable to retrieve PNNX repo assets"  # i.e. pnnx-20240410-macos.zip
+                LOGGER.info(f"{prefix} successfully found latest PNNX asset file {asset}")
+            except Exception as e:
+                release = "20240410"
+                asset = f"pnnx-{release}-{system}.zip"
+                LOGGER.warning(f"{prefix} WARNING ⚠️ PNNX GitHub assets not found: {e}, using default {asset}")
+            unzip_dir = safe_download(f"https://github.com/pnnx/pnnx/releases/download/{release}/{asset}", delete=True)
+            if check_is_path_safe(Path.cwd(), unzip_dir):  # avoid path traversal security vulnerability
+                shutil.move(src=unzip_dir / name, dst=pnnx)  # move binary to ROOT
+                pnnx.chmod(0o777)  # set read, write, and execute permissions for everyone
+                shutil.rmtree(unzip_dir)  # delete unzip dir
+
+        ncnn_args = [
+            f"ncnnparam={f / 'model.ncnn.param'}",
+            f"ncnnbin={f / 'model.ncnn.bin'}",
+            f"ncnnpy={f / 'model_ncnn.py'}",
+        ]
+
+        pnnx_args = [
+            f"pnnxparam={f / 'model.pnnx.param'}",
+            f"pnnxbin={f / 'model.pnnx.bin'}",
+            f"pnnxpy={f / 'model_pnnx.py'}",
+            f"pnnxonnx={f / 'model.pnnx.onnx'}",
+        ]
+
+        cmd = [
+            str(pnnx),
+            str(f_ts),
+            *ncnn_args,
+            *pnnx_args,
+            f"fp16={int(self.args.half)}",
+            f"device={self.device.type}",
+            f'inputshape="{[self.args.batch, 3, *self.imgsz]}"',
+        ]
+        f.mkdir(exist_ok=True)  # make ncnn_model directory
+        LOGGER.info(f"{prefix} running '{' '.join(cmd)}'")
+        subprocess.run(cmd, check=True)
+
+        # Remove debug files
+        pnnx_files = [x.split("=")[-1] for x in pnnx_args]
+        for f_debug in ("debug.bin", "debug.param", "debug2.bin", "debug2.param", *pnnx_files):
+            Path(f_debug).unlink(missing_ok=True)
+
+        yaml_save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
+        return str(f), None
+
+    @try_export
+    def export_coreml(self, prefix=colorstr("CoreML:")):
+        """YOLO CoreML export."""
+        mlmodel = self.args.format.lower() == "mlmodel"  # legacy *.mlmodel export format requested
+        check_requirements("coremltools>=6.0,<=6.2" if mlmodel else "coremltools>=7.0")
+        import coremltools as ct  # noqa
+
+        LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
+        assert not WINDOWS, "CoreML export is not supported on Windows, please run on macOS or Linux."
+        assert self.args.batch == 1, "CoreML batch sizes > 1 are not supported. Please retry at 'batch=1'."
+        f = self.file.with_suffix(".mlmodel" if mlmodel else ".mlpackage")
+        if f.is_dir():
+            shutil.rmtree(f)
+        if self.args.nms and getattr(self.model, "end2end", False):
+            LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is not available for end2end models. Forcing 'nms=False'.")
+            self.args.nms = False
+
+        bias = [0.0, 0.0, 0.0]
+        scale = 1 / 255
+        classifier_config = None
+        if self.model.task == "classify":
+            classifier_config = ct.ClassifierConfig(list(self.model.names.values())) if self.args.nms else None
+            model = self.model
+        elif self.model.task == "detect":
+            model = IOSDetectModel(self.model, self.im) if self.args.nms else self.model
+        else:
+            if self.args.nms:
+                LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is only available for Detect models like 'yolov8n.pt'.")
+                # TODO CoreML Segment and Pose model pipelining
+            model = self.model
+
+        ts = torch.jit.trace(model.eval(), self.im, strict=False)  # TorchScript model
+        ct_model = ct.convert(
+            ts,
+            inputs=[ct.ImageType("image", shape=self.im.shape, scale=scale, bias=bias)],
+            classifier_config=classifier_config,
+            convert_to="neuralnetwork" if mlmodel else "mlprogram",
+        )
+        bits, mode = (8, "kmeans") if self.args.int8 else (16, "linear") if self.args.half else (32, None)
+        if bits < 32:
+            if "kmeans" in mode:
+                check_requirements("scikit-learn")  # scikit-learn package required for k-means quantization
+            if mlmodel:
+                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
+            elif bits == 8:  # mlprogram already quantized to FP16
+                import coremltools.optimize.coreml as cto
+
+                op_config = cto.OpPalettizerConfig(mode="kmeans", nbits=bits, weight_threshold=512)
+                config = cto.OptimizationConfig(global_config=op_config)
+                ct_model = cto.palettize_weights(ct_model, config=config)
+        if self.args.nms and self.model.task == "detect":
+            if mlmodel:
+                # coremltools<=6.2 NMS export requires Python<3.11
+                check_version(PYTHON_VERSION, "<3.11", name="Python ", hard=True)
+                weights_dir = None
+            else:
+                ct_model.save(str(f))  # save otherwise weights_dir does not exist
+                weights_dir = str(f / "Data/com.apple.CoreML/weights")
+            ct_model = self._pipeline_coreml(ct_model, weights_dir=weights_dir)
+
+        m = self.metadata  # metadata dict
+        ct_model.short_description = m.pop("description")
+        ct_model.author = m.pop("author")
+        ct_model.license = m.pop("license")
+        ct_model.version = m.pop("version")
+        ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()})
+        try:
+            ct_model.save(str(f))  # save *.mlpackage
+        except Exception as e:
+            LOGGER.warning(
+                f"{prefix} WARNING ⚠️ CoreML export to *.mlpackage failed ({e}), reverting to *.mlmodel export. "
+                f"Known coremltools Python 3.11 and Windows bugs https://github.com/apple/coremltools/issues/1928."
+            )
+            f = f.with_suffix(".mlmodel")
+            ct_model.save(str(f))
+        return f, ct_model
+
+    @try_export
+    def export_engine(self, dla=None, prefix=colorstr("TensorRT:")):
+        """YOLO TensorRT export https://developer.nvidia.com/tensorrt."""
+        assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
+        f_onnx, _ = self.export_onnx()  # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
+
+        try:
+            import tensorrt as trt  # noqa
+        except ImportError:
+            if LINUX:
+                check_requirements("tensorrt>7.0.0,!=10.1.0")
+            import tensorrt as trt  # noqa
+        check_version(trt.__version__, ">=7.0.0", hard=True)
+        check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
+
+        # Setup and checks
+        LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
+        is_trt10 = int(trt.__version__.split(".")[0]) >= 10  # is TensorRT >= 10
+        assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
+        f = self.file.with_suffix(".engine")  # TensorRT engine file
+        logger = trt.Logger(trt.Logger.INFO)
+        if self.args.verbose:
+            logger.min_severity = trt.Logger.Severity.VERBOSE
+
+        # Engine builder
+        builder = trt.Builder(logger)
+        config = builder.create_builder_config()
+        workspace = int(self.args.workspace * (1 << 30)) if self.args.workspace is not None else 0
+        if is_trt10 and workspace > 0:
+            config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
+        elif workspace > 0:  # TensorRT versions 7, 8
+            config.max_workspace_size = workspace
+        flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
+        network = builder.create_network(flag)
+        half = builder.platform_has_fast_fp16 and self.args.half
+        int8 = builder.platform_has_fast_int8 and self.args.int8
+
+        # Optionally switch to DLA if enabled
+        if dla is not None:
+            if not IS_JETSON:
+                raise ValueError("DLA is only available on NVIDIA Jetson devices")
+            LOGGER.info(f"{prefix} enabling DLA on core {dla}...")
+            if not self.args.half and not self.args.int8:
+                raise ValueError(
+                    "DLA requires either 'half=True' (FP16) or 'int8=True' (INT8) to be enabled. Please enable one of them and try again."
+                )
+            config.default_device_type = trt.DeviceType.DLA
+            config.DLA_core = int(dla)
+            config.set_flag(trt.BuilderFlag.GPU_FALLBACK)
+
+        # Read ONNX file
+        parser = trt.OnnxParser(network, logger)
+        if not parser.parse_from_file(f_onnx):
+            raise RuntimeError(f"failed to load ONNX file: {f_onnx}")
+
+        # Network inputs
+        inputs = [network.get_input(i) for i in range(network.num_inputs)]
+        outputs = [network.get_output(i) for i in range(network.num_outputs)]
+        for inp in inputs:
+            LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
+        for out in outputs:
+            LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
+
+        if self.args.dynamic:
+            shape = self.im.shape
+            if shape[0] <= 1:
+                LOGGER.warning(f"{prefix} WARNING ⚠️ 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
+            profile = builder.create_optimization_profile()
+            min_shape = (1, shape[1], 32, 32)  # minimum input shape
+            max_shape = (*shape[:2], *(int(max(1, workspace) * d) for d in shape[2:]))  # max input shape
+            for inp in inputs:
+                profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
+            config.add_optimization_profile(profile)
+
+        LOGGER.info(f"{prefix} building {'INT8' if int8 else 'FP' + ('16' if half else '32')} engine as {f}")
+        if int8:
+            config.set_flag(trt.BuilderFlag.INT8)
+            config.set_calibration_profile(profile)
+            config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED
+
+            class EngineCalibrator(trt.IInt8Calibrator):
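+                """INT8 entropy calibrator that feeds preprocessed batches from an Ultralytics dataloader to TensorRT."""
+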
+                def __init__(
+                    self,
+                    dataset,  # ultralytics.data.build.InfiniteDataLoader
+                    batch: int,
+                    cache: str = "",
+                ) -> None:
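+                    """Initialize the calibrator with a dataloader, calibration batch size, and optional cache path."""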
+                    trt.IInt8Calibrator.__init__(self)
+                    self.dataset = dataset
+                    self.data_iter = iter(dataset)
+                    self.algo = trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2
+                    self.batch = batch
+                    self.cache = Path(cache)
+
+                def get_algorithm(self) -> trt.CalibrationAlgoType:
+                    """Get the calibration algorithm to use."""
+                    return self.algo
+
+                def get_batch_size(self) -> int:
+                    """Get the batch size to use for calibration."""
+                    return self.batch or 1
+
+                def get_batch(self, names) -> list:
+                    """Get the next batch to use for calibration, as a list of device memory pointers."""
+                    try:
+                        im0s = next(self.data_iter)["img"] / 255.0
+                        im0s = im0s.to("cuda") if im0s.device.type == "cpu" else im0s
+                        return [int(im0s.data_ptr())]
+                    except StopIteration:
+                        # Return [] or None, signal to TensorRT there is no calibration data remaining
+                        return None
+
+                def read_calibration_cache(self) -> bytes:
+                    """Use existing cache instead of calibrating again, otherwise, implicitly return None."""
+                    if self.cache.exists() and self.cache.suffix == ".cache":
+                        return self.cache.read_bytes()
+
+                def write_calibration_cache(self, cache) -> None:
+                    """Write calibration cache to disk."""
+                    _ = self.cache.write_bytes(cache)
+
+            # Load dataset w/ builder (for batching) and calibrate
+            config.int8_calibrator = EngineCalibrator(
+                dataset=self.get_int8_calibration_dataloader(prefix),
+                batch=2 * self.args.batch,  # TensorRT INT8 calibration should use 2x batch size
+                cache=str(self.file.with_suffix(".cache")),
+            )
+
+        elif half:
+            config.set_flag(trt.BuilderFlag.FP16)
+
+        # Free CUDA memory
+        del self.model
+        gc.collect()
+        torch.cuda.empty_cache()
+
+        # Write file
+        build = builder.build_serialized_network if is_trt10 else builder.build_engine
+        with build(network, config) as engine, open(f, "wb") as t:
+            # Metadata
+            meta = json.dumps(self.metadata)
+            t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
+            t.write(meta.encode())
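+            # (The reader side mirrors this: read a 4-byte little-endian length, decode that many bytes as
+            # JSON metadata, then deserialize the remaining bytes as the engine.)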
+            # Model
+            t.write(engine if is_trt10 else engine.serialize())
+
+        return f, None
+
+    @try_export
+    def export_saved_model(self, prefix=colorstr("TensorFlow SavedModel:")):
+        """YOLO TensorFlow SavedModel export."""
+        cuda = torch.cuda.is_available()
+        try:
+            import tensorflow as tf  # noqa
+        except ImportError:
+            suffix = "-macos" if MACOS else "-aarch64" if ARM64 else "" if cuda else "-cpu"
+            version = ">=2.0.0"
+            check_requirements(f"tensorflow{suffix}{version}")
+            import tensorflow as tf  # noqa
+        check_requirements(
+            (
+                "keras",  # required by 'onnx2tf' package
+                "tf_keras",  # required by 'onnx2tf' package
+                "sng4onnx>=1.0.1",  # required by 'onnx2tf' package
+                "onnx_graphsurgeon>=0.3.26",  # required by 'onnx2tf' package
+                "onnx>=1.12.0",
+                "onnx2tf>1.17.5,<=1.26.3",
+                "onnxslim>=0.1.31",
+                "tflite_support<=0.4.3" if IS_JETSON else "tflite_support",  # fix ImportError 'GLIBCXX_3.4.29'
+                "flatbuffers>=23.5.26,<100",  # update old 'flatbuffers' included inside tensorflow package
+                "onnxruntime-gpu" if cuda else "onnxruntime",
+            ),
+            cmds="--extra-index-url https://pypi.ngc.nvidia.com",  # onnx_graphsurgeon only on NVIDIA
+        )
+
+        LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+        check_version(
+            tf.__version__,
+            ">=2.0.0",
+            name="tensorflow",
+            verbose=True,
+            msg="https://github.com/ultralytics/ultralytics/issues/5161",
+        )
+        import onnx2tf
+
+        f = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
+        if f.is_dir():
+            shutil.rmtree(f)  # delete output folder
+
+        # Pre-download calibration file to fix https://github.com/PINTO0309/onnx2tf/issues/545
+        onnx2tf_file = Path("calibration_image_sample_data_20x128x128x3_float32.npy")
+        if not onnx2tf_file.exists():
+            attempt_download_asset(f"{onnx2tf_file}.zip", unzip=True, delete=True)
+
+        # Export to ONNX
+        self.args.simplify = True
+        f_onnx, _ = self.export_onnx()
+
+        # Export to TF
+        np_data = None
+        if self.args.int8:
+            tmp_file = f / "tmp_tflite_int8_calibration_images.npy"  # int8 calibration images file
+            if self.args.data:
+                f.mkdir()
+                images = [batch["img"] for batch in self.get_int8_calibration_dataloader(prefix)]
+                images = torch.nn.functional.interpolate(torch.cat(images, 0).float(), size=self.imgsz).permute(
+                    0, 2, 3, 1
+                )
+                np.save(str(tmp_file), images.numpy().astype(np.float32))  # BHWC
+                np_data = [["images", tmp_file, [[[[0, 0, 0]]]], [[[[255, 255, 255]]]]]]
+
+        LOGGER.info(f"{prefix} starting TFLite export with onnx2tf {onnx2tf.__version__}...")
+        keras_model = onnx2tf.convert(
+            input_onnx_file_path=f_onnx,
+            output_folder_path=str(f),
+            not_use_onnxsim=True,
+            verbosity="error",  # note INT8-FP16 activation bug https://github.com/ultralytics/ultralytics/issues/15873
+            output_integer_quantized_tflite=self.args.int8,
+            quant_type="per-tensor",  # "per-tensor" (faster) or "per-channel" (slower but more accurate)
+            custom_input_op_name_np_data_path=np_data,
+            disable_group_convolution=True,  # for end-to-end model compatibility
+            enable_batchmatmul_unfold=True,  # for end-to-end model compatibility
+        )
+        yaml_save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
+
+        # Remove/rename TFLite models
+        if self.args.int8:
+            tmp_file.unlink(missing_ok=True)
+            for file in f.rglob("*_dynamic_range_quant.tflite"):
+                file.rename(file.with_name(file.stem.replace("_dynamic_range_quant", "_int8") + file.suffix))
+            for file in f.rglob("*_integer_quant_with_int16_act.tflite"):
+                file.unlink()  # delete extra fp16 activation TFLite files
+
+        # Add TFLite metadata
+        for file in f.rglob("*.tflite"):
+            file.unlink() if "quant_with_int16_act.tflite" in str(file) else self._add_tflite_metadata(file)
+
+        return str(f), keras_model  # or keras_model = tf.saved_model.load(f, tags=None, options=None)
+
+    @try_export
+    def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")):
+        """YOLO TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow."""
+        import tensorflow as tf  # noqa
+        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2  # noqa
+
+        LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+        f = self.file.with_suffix(".pb")
+
+        m = tf.function(lambda x: keras_model(x))  # full model
+        m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
+        frozen_func = convert_variables_to_constants_v2(m)
+        frozen_func.graph.as_graph_def()
+        tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
+        return f, None
+
+    @try_export
+    def export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:")):
+        """YOLO TensorFlow Lite export."""
+        # BUG https://github.com/ultralytics/ultralytics/issues/13436
+        import tensorflow as tf  # noqa
+
+        LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+        saved_model = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
+        if self.args.int8:
+            f = saved_model / f"{self.file.stem}_int8.tflite"  # fp32 in/out
+        elif self.args.half:
+            f = saved_model / f"{self.file.stem}_float16.tflite"  # fp32 in/out
+        else:
+            f = saved_model / f"{self.file.stem}_float32.tflite"
+        return str(f), None
+
+    @try_export
+    def export_edgetpu(self, tflite_model="", prefix=colorstr("Edge TPU:")):
+        """YOLO Edge TPU export https://coral.ai/docs/edgetpu/models-intro/."""
+        LOGGER.warning(f"{prefix} WARNING ⚠️ Edge TPU known bug https://github.com/ultralytics/ultralytics/issues/1185")
+
+        cmd = "edgetpu_compiler --version"
+        help_url = "https://coral.ai/docs/edgetpu/compiler/"
+        assert LINUX, f"export only supported on Linux. See {help_url}"
+        if subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True).returncode != 0:
+            LOGGER.info(f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}")
+            for c in (
+                "curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -",
+                'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | '
+                "sudo tee /etc/apt/sources.list.d/coral-edgetpu.list",
+                "sudo apt-get update",
+                "sudo apt-get install edgetpu-compiler",
+            ):
+                subprocess.run(c if is_sudo_available() else c.replace("sudo ", ""), shell=True, check=True)
+        ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
+
+        LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
+        f = str(tflite_model).replace(".tflite", "_edgetpu.tflite")  # Edge TPU model
+
+        cmd = (
+            "edgetpu_compiler "
+            f'--out_dir "{Path(f).parent}" '
+            "--show_operations "
+            "--search_delegate "
+            "--delegate_search_step 30 "
+            "--timeout_sec 180 "
+            f'"{tflite_model}"'
+        )
+        LOGGER.info(f"{prefix} running '{cmd}'")
+        subprocess.run(cmd, shell=True)
+        self._add_tflite_metadata(f)
+        return f, None
+
+    @try_export
+    def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
+        """YOLO TensorFlow.js export."""
+        check_requirements("tensorflowjs")
+        if ARM64:
+            # Fix error: `np.object` was a deprecated alias for the builtin `object` when exporting to TF.js on ARM64
+            check_requirements("numpy==1.23.5")
+        import tensorflow as tf
+        import tensorflowjs as tfjs  # noqa
+
+        LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
+        f = str(self.file).replace(self.file.suffix, "_web_model")  # js dir
+        f_pb = str(self.file.with_suffix(".pb"))  # *.pb path
+
+        gd = tf.Graph().as_graph_def()  # TF GraphDef
+        with open(f_pb, "rb") as file:
+            gd.ParseFromString(file.read())
+        outputs = ",".join(gd_outputs(gd))
+        LOGGER.info(f"\n{prefix} output node names: {outputs}")
+
+        quantization = "--quantize_float16" if self.args.half else "--quantize_uint8" if self.args.int8 else ""
+        with spaces_in_path(f_pb) as fpb_, spaces_in_path(f) as f_:  # exporter can not handle spaces in path
+            cmd = (
+                "tensorflowjs_converter "
+                f'--input_format=tf_frozen_model {quantization} --output_node_names={outputs} "{fpb_}" "{f_}"'
+            )
+            LOGGER.info(f"{prefix} running '{cmd}'")
+            subprocess.run(cmd, shell=True)
+
+        if " " in f:
+            LOGGER.warning(f"{prefix} WARNING ⚠️ your model may not work correctly with spaces in path '{f}'.")
+
+        # Add metadata
+        yaml_save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
+        return f, None
+
+    @try_export
+    def export_imx(self, prefix=colorstr("IMX:")):
+        """YOLO IMX export."""
+        gptq = False
+        assert LINUX, (
+            "export only supported on Linux. See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
+        )
+        if getattr(self.model, "end2end", False):
+            raise ValueError("IMX export is not supported for end2end models.")
+        if "C2f" not in self.model.__str__():
+            raise ValueError("IMX export is only supported for YOLOv8n detection models")
+        check_requirements(("model-compression-toolkit==2.1.1", "sony-custom-layers==0.2.0", "tensorflow==2.12.0"))
+        check_requirements("imx500-converter[pt]==3.14.3")  # Separate requirements for imx500-converter
+
+        import model_compression_toolkit as mct
+        import onnx
+        from sony_custom_layers.pytorch.object_detection.nms import multiclass_nms
+
+        try:
+            out = subprocess.run(
+                ["java", "--version"], check=True, capture_output=True
+            )  # Java 17 is required for imx500-converter
+            if "openjdk 17" not in str(out.stdout):
+                raise FileNotFoundError
+        except FileNotFoundError:
+            c = ["apt", "install", "-y", "openjdk-17-jdk", "openjdk-17-jre"]
+            if is_sudo_available():
+                c.insert(0, "sudo")
+            subprocess.run(c, check=True)
+
+        def representative_dataset_gen(dataloader=self.get_int8_calibration_dataloader(prefix)):
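+            """Yield calibration batches with images scaled from uint8 [0, 255] to float [0.0, 1.0]."""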
+            for batch in dataloader:
+                img = batch["img"]
+                img = img / 255.0
+                yield [img]
+
+        tpc = mct.get_target_platform_capabilities(
+            fw_name="pytorch", target_platform_name="imx500", target_platform_version="v1"
+        )
+
+        config = mct.core.CoreConfig(
+            mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
+            quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+        )
+
+        resource_utilization = mct.core.ResourceUtilization(weights_memory=3146176 * 0.76)
+
+        quant_model = (
+            mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
+                model=self.model,
+                representative_data_gen=representative_dataset_gen,
+                target_resource_utilization=resource_utilization,
+                gptq_config=mct.gptq.get_pytorch_gptq_config(n_epochs=1000, use_hessian_based_weights=False),
+                core_config=config,
+                target_platform_capabilities=tpc,
+            )[0]
+            if gptq
+            else mct.ptq.pytorch_post_training_quantization(  # Perform post training quantization
+                in_module=self.model,
+                representative_data_gen=representative_dataset_gen,
+                target_resource_utilization=resource_utilization,
+                core_config=config,
+                target_platform_capabilities=tpc,
+            )[0]
+        )
+
+        class NMSWrapper(torch.nn.Module):
+            def __init__(
+                self,
+                model: torch.nn.Module,
+                score_threshold: float = 0.001,
+                iou_threshold: float = 0.7,
+                max_detections: int = 300,
+            ):
+                """
+                Wrap a PyTorch module with the multiclass_nms layer from sony_custom_layers.
+
+                Args:
+                    model (nn.Module): Model instance.
+                    score_threshold (float): Score threshold for non-maximum suppression.
+                    iou_threshold (float): Intersection over union threshold for non-maximum suppression.
+                    max_detections (int): The maximum number of detections to return.
+                """
+                super().__init__()
+                self.model = model
+                self.score_threshold = score_threshold
+                self.iou_threshold = iou_threshold
+                self.max_detections = max_detections
+
+            def forward(self, images):
+                # model inference
+                outputs = self.model(images)
+
+                boxes = outputs[0]
+                scores = outputs[1]
+                nms = multiclass_nms(
+                    boxes=boxes,
+                    scores=scores,
+                    score_threshold=self.score_threshold,
+                    iou_threshold=self.iou_threshold,
+                    max_detections=self.max_detections,
+                )
+                return nms
+
+        quant_model = NMSWrapper(
+            model=quant_model,
+            score_threshold=self.args.conf or 0.001,
+            iou_threshold=self.args.iou,
+            max_detections=self.args.max_det,
+        ).to(self.device)
+
+        f = Path(str(self.file).replace(self.file.suffix, "_imx_model"))
+        f.mkdir(exist_ok=True)
+        onnx_model = f / Path(str(self.file).replace(self.file.suffix, "_imx.onnx"))  # quantized ONNX model path
+        mct.exporter.pytorch_export_model(
+            model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
+        )
+
+        model_onnx = onnx.load(onnx_model)  # load onnx model
+        for k, v in self.metadata.items():
+            meta = model_onnx.metadata_props.add()
+            meta.key, meta.value = k, str(v)
+
+        onnx.save(model_onnx, onnx_model)
+
+        subprocess.run(
+            ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
+            check=True,
+        )
+
+        # Needed for imx models.
+        with open(f / "labels.txt", "w") as file:
+            file.writelines([f"{name}\n" for _, name in self.model.names.items()])
+
+        return f, None
+
+    def _add_tflite_metadata(self, file):
+        """Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata."""
+        import flatbuffers
+
+        try:
+            # TFLite Support bug https://github.com/tensorflow/tflite-support/issues/954#issuecomment-2108570845
+            from tensorflow_lite_support.metadata import metadata_schema_py_generated as schema  # noqa
+            from tensorflow_lite_support.metadata.python import metadata  # noqa
+        except ImportError:  # ARM64 systems may not have the 'tensorflow_lite_support' package available
+            from tflite_support import metadata  # noqa
+            from tflite_support import metadata_schema_py_generated as schema  # noqa
+
+        # Create model info
+        model_meta = schema.ModelMetadataT()
+        model_meta.name = self.metadata["description"]
+        model_meta.version = self.metadata["version"]
+        model_meta.author = self.metadata["author"]
+        model_meta.license = self.metadata["license"]
+
+        # Label file
+        tmp_file = Path(file).parent / "temp_meta.txt"
+        with open(tmp_file, "w") as f:
+            f.write(str(self.metadata))
+
+        label_file = schema.AssociatedFileT()
+        label_file.name = tmp_file.name
+        label_file.type = schema.AssociatedFileType.TENSOR_AXIS_LABELS
+
+        # Create input info
+        input_meta = schema.TensorMetadataT()
+        input_meta.name = "image"
+        input_meta.description = "Input image to be detected."
+        input_meta.content = schema.ContentT()
+        input_meta.content.contentProperties = schema.ImagePropertiesT()
+        input_meta.content.contentProperties.colorSpace = schema.ColorSpaceType.RGB
+        input_meta.content.contentPropertiesType = schema.ContentProperties.ImageProperties
+
+        # Create output info
+        output1 = schema.TensorMetadataT()
+        output1.name = "output"
+        output1.description = "Coordinates of detected objects, class labels, and confidence score"
+        output1.associatedFiles = [label_file]
+        if self.model.task == "segment":
+            output2 = schema.TensorMetadataT()
+            output2.name = "output"
+            output2.description = "Mask protos"
+            output2.associatedFiles = [label_file]
+
+        # Create subgraph info
+        subgraph = schema.SubGraphMetadataT()
+        subgraph.inputTensorMetadata = [input_meta]
+        subgraph.outputTensorMetadata = [output1, output2] if self.model.task == "segment" else [output1]
+        model_meta.subgraphMetadata = [subgraph]
+
+        b = flatbuffers.Builder(0)
+        b.Finish(model_meta.Pack(b), metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
+        metadata_buf = b.Output()
+
+        populator = metadata.MetadataPopulator.with_model_file(str(file))
+        populator.load_metadata_buffer(metadata_buf)
+        populator.load_associated_files([str(tmp_file)])
+        populator.populate()
+        tmp_file.unlink()
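+
+        # Verification sketch (assumes the MetadataDisplayer helper from the same metadata package):
+        #   displayer = metadata.MetadataDisplayer.with_model_file(str(file))
+        #   print(displayer.get_metadata_json())  # dumps the populated model metadata as JSON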
+
+    def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr("CoreML Pipeline:")):
+        """YOLO CoreML pipeline."""
+        import coremltools as ct  # noqa
+
+        LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
+        _, _, h, w = list(self.im.shape)  # BCHW
+
+        # Output shapes
+        spec = model.get_spec()
+        out0, out1 = iter(spec.description.output)
+        if MACOS:
+            from PIL import Image
+
+            img = Image.new("RGB", (w, h))  # w=192, h=320
+            out = model.predict({"image": img})
+            out0_shape = out[out0.name].shape  # (3780, 80)
+            out1_shape = out[out1.name].shape  # (3780, 4)
+        else:  # Linux and Windows cannot run model.predict(); get sizes from PyTorch model output y
+            out0_shape = self.output_shape[2], self.output_shape[1] - 4  # (3780, 80)
+            out1_shape = self.output_shape[2], 4  # (3780, 4)
+
+        # Checks
+        names = self.metadata["names"]
+        nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
+        _, nc = out0_shape  # number of anchors, number of classes
+        assert len(names) == nc, f"{len(names)} names found for nc={nc}"  # check
+
+        # Define output shapes (missing)
+        out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)
+        out1.type.multiArrayType.shape[:] = out1_shape  # (3780, 4)
+
+        # Model from spec
+        model = ct.models.MLModel(spec, weights_dir=weights_dir)
+
+        # 3. Create NMS protobuf
+        nms_spec = ct.proto.Model_pb2.Model()
+        nms_spec.specificationVersion = 5
+        for i in range(2):
+            decoder_output = model._spec.description.output[i].SerializeToString()
+            nms_spec.description.input.add()
+            nms_spec.description.input[i].ParseFromString(decoder_output)
+            nms_spec.description.output.add()
+            nms_spec.description.output[i].ParseFromString(decoder_output)
+
+        nms_spec.description.output[0].name = "confidence"
+        nms_spec.description.output[1].name = "coordinates"
+
+        output_sizes = [nc, 4]
+        for i in range(2):
+            ma_type = nms_spec.description.output[i].type.multiArrayType
+            ma_type.shapeRange.sizeRanges.add()
+            ma_type.shapeRange.sizeRanges[0].lowerBound = 0
+            ma_type.shapeRange.sizeRanges[0].upperBound = -1
+            ma_type.shapeRange.sizeRanges.add()
+            ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
+            ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
+            del ma_type.shape[:]
+
+        nms = nms_spec.nonMaximumSuppression
+        nms.confidenceInputFeatureName = out0.name  # 1x507x80
+        nms.coordinatesInputFeatureName = out1.name  # 1x507x4
+        nms.confidenceOutputFeatureName = "confidence"
+        nms.coordinatesOutputFeatureName = "coordinates"
+        nms.iouThresholdInputFeatureName = "iouThreshold"
+        nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
+        nms.iouThreshold = 0.45
+        nms.confidenceThreshold = 0.25
+        nms.pickTop.perClass = True
+        nms.stringClassLabels.vector.extend(names.values())
+        nms_model = ct.models.MLModel(nms_spec)
+
+        # 4. Pipeline models together
+        pipeline = ct.models.pipeline.Pipeline(
+            input_features=[
+                ("image", ct.models.datatypes.Array(3, ny, nx)),
+                ("iouThreshold", ct.models.datatypes.Double()),
+                ("confidenceThreshold", ct.models.datatypes.Double()),
+            ],
+            output_features=["confidence", "coordinates"],
+        )
+        pipeline.add_model(model)
+        pipeline.add_model(nms_model)
+
+        # Correct datatypes
+        pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
+        pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
+        pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
+
+        # Update metadata
+        pipeline.spec.specificationVersion = 5
+        pipeline.spec.description.metadata.userDefined.update(
+            {"IoU threshold": str(nms.iouThreshold), "Confidence threshold": str(nms.confidenceThreshold)}
+        )
+
+        # Save the model
+        model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
+        model.input_description["image"] = "Input image"
+        model.input_description["iouThreshold"] = f"(optional) IoU threshold override (default: {nms.iouThreshold})"
+        model.input_description["confidenceThreshold"] = (
+            f"(optional) Confidence threshold override (default: {nms.confidenceThreshold})"
+        )
+        model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
+        model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
+        LOGGER.info(f"{prefix} pipeline success")
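+
+        # Prediction sketch for the assembled pipeline (macOS only; input/output names are defined above):
+        #   out = model.predict({"image": img, "iouThreshold": 0.45, "confidenceThreshold": 0.25})
+        #   out["confidence"].shape, out["coordinates"].shape  # -> (num_detections, nc), (num_detections, 4)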
+        return model
+
+    def add_callback(self, event: str, callback):
+        """Appends the given callback."""
+        self.callbacks[event].append(callback)
+
+    def run_callbacks(self, event: str):
+        """Execute all callbacks for a given event."""
+        for callback in self.callbacks.get(event, []):
+            callback(self)
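+
+    # Callback usage sketch (illustrative; "on_export_end" assumed to be a registered default event):
+    #   exporter = Exporter(overrides={"format": "onnx"})
+    #   exporter.add_callback("on_export_end", lambda exporter: print(exporter.file))
+    #   exported = exporter(model)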
+
+
+class IOSDetectModel(torch.nn.Module):
+    """Wrap an Ultralytics YOLO model for Apple iOS CoreML export."""
+
+    def __init__(self, model, im):
+        """Initialize the IOSDetectModel class with a YOLO model and example image."""
+        super().__init__()
+        _, _, h, w = im.shape  # batch, channel, height, width
+        self.model = model
+        self.nc = len(model.names)  # number of classes
+        if w == h:
+            self.normalize = 1.0 / w  # scalar
+        else:
+            self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h])  # broadcast (slower, smaller)
+
+    def forward(self, x):
+        """Normalize predictions of object detection model with input size-dependent factors."""
+        xywh, cls = self.model(x)[0].transpose(0, 1).split((4, self.nc), 1)
+        return cls, xywh * self.normalize  # confidence (3780, 80), coordinates (3780, 4)
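+
+
+# Illustrative CoreML wiring sketch (assumed usage; IOSDetectModel wraps a detect model before tracing):
+#   import coremltools as ct
+#   wrapped = IOSDetectModel(model, im)                     # outputs (cls, xywh * normalize) for iOS NMS
+#   ts = torch.jit.trace(wrapped.eval(), im, strict=False)  # TorchScript graph consumed by coremltools
+#   ct_model = ct.convert(ts, inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255)])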

+ 1175 - 0
ultralytics/engine/model.py

@@ -0,0 +1,1175 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import inspect
+from pathlib import Path
+from typing import Any, Dict, List, Union
+
+import numpy as np
+import torch
+from PIL import Image
+
+from huggingface_hub import PyTorchModelHubMixin
+
+from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir
+from ultralytics.engine.results import Results
+from ultralytics.hub import HUB_WEB_ROOT, HUBTrainingSession
+from ultralytics.nn.tasks import attempt_load_one_weight, guess_model_task, nn, yaml_model_load
+from ultralytics.utils import (
+    ARGV,
+    ASSETS,
+    DEFAULT_CFG_DICT,
+    LOGGER,
+    RANK,
+    SETTINGS,
+    callbacks,
+    checks,
+    emojis,
+    yaml_load,
+)
+
+
+class Model(
+    nn.Module,
+    PyTorchModelHubMixin,
+    repo_url="https://github.com/ultralytics/ultralytics",
+    pipeline_tag="object-detection",
+    license="agpl-3.0",
+):
+    """
+    A base class for implementing YOLO models, unifying APIs across different model types.
+
+    This class provides a common interface for various operations related to YOLO models, such as training,
+    validation, prediction, exporting, and benchmarking. It handles different types of models, including those
+    loaded from local files, Ultralytics HUB, or Triton Server.
+
+    Attributes:
+        callbacks (Dict): A dictionary of callback functions for various events during model operations.
+        predictor (BasePredictor): The predictor object used for making predictions.
+        model (nn.Module): The underlying PyTorch model.
+        trainer (BaseTrainer): The trainer object used for training the model.
+        ckpt (Dict): The checkpoint data if the model is loaded from a *.pt file.
+        cfg (str): The configuration of the model if loaded from a *.yaml file.
+        ckpt_path (str): The path to the checkpoint file.
+        overrides (Dict): A dictionary of overrides for model configuration.
+        metrics (Dict): The latest training/validation metrics.
+        session (HUBTrainingSession): The Ultralytics HUB session, if applicable.
+        task (str): The type of task the model is intended for.
+        model_name (str): The name of the model.
+
+    Methods:
+        __call__: Alias for the predict method, enabling the model instance to be callable.
+        _new: Initializes a new model based on a configuration file.
+        _load: Loads a model from a checkpoint file.
+        _check_is_pytorch_model: Ensures that the model is a PyTorch model.
+        reset_weights: Resets the model's weights to their initial state.
+        load: Loads model weights from a specified file.
+        save: Saves the current state of the model to a file.
+        info: Logs or returns information about the model.
+        fuse: Fuses Conv2d and BatchNorm2d layers for optimized inference.
+        predict: Performs object detection predictions.
+        track: Performs object tracking.
+        val: Validates the model on a dataset.
+        benchmark: Benchmarks the model on various export formats.
+        export: Exports the model to different formats.
+        train: Trains the model on a dataset.
+        tune: Performs hyperparameter tuning.
+        _apply: Applies a function to the model's tensors.
+        add_callback: Adds a callback function for an event.
+        clear_callback: Clears all callbacks for an event.
+        reset_callbacks: Resets all callbacks to their default functions.
+
+    Examples:
+        >>> from ultralytics import YOLO
+        >>> model = YOLO("yolo11n.pt")
+        >>> results = model.predict("image.jpg")
+        >>> model.train(data="coco8.yaml", epochs=3)
+        >>> metrics = model.val()
+        >>> model.export(format="onnx")
+    """
+
+    def __init__(
+        self,
+        model: Union[str, Path] = "yolo11n.pt",
+        task: str = None,
+        verbose: bool = False,
+    ) -> None:
+        """
+        Initializes a new instance of the YOLO model class.
+
+        This constructor sets up the model based on the provided model path or name. It handles various types of
+        model sources, including local files, Ultralytics HUB models, and Triton Server models. The method
+        initializes several important attributes of the model and prepares it for operations like training,
+        prediction, or export.
+
+        Args:
+            model (Union[str, Path]): Path or name of the model to load or create. Can be a local file path, a
+                model name from Ultralytics HUB, or a Triton Server model.
+            task (str | None): The task type associated with the YOLO model, specifying its application domain.
+            verbose (bool): If True, enables verbose output during the model's initialization and subsequent
+                operations.
+
+        Raises:
+            FileNotFoundError: If the specified model file does not exist or is inaccessible.
+            ValueError: If the model file or configuration is invalid or unsupported.
+            ImportError: If required dependencies for specific model types (like HUB SDK) are not installed.
+
+        Examples:
+            >>> model = Model("yolo11n.pt")
+            >>> model = Model("path/to/model.yaml", task="detect")
+            >>> model = Model("hub_model", verbose=True)
+        """
+        super().__init__()
+        self.callbacks = callbacks.get_default_callbacks()
+        self.predictor = None  # reuse predictor
+        self.model = None  # model object
+        self.trainer = None  # trainer object
+        self.ckpt = {}  # if loaded from *.pt
+        self.cfg = None  # if loaded from *.yaml
+        self.ckpt_path = None
+        self.overrides = {}  # overrides for trainer object
+        self.metrics = None  # validation/training metrics
+        self.session = None  # HUB session
+        self.task = task  # task type
+        model = str(model).strip()
+
+        # Check if Ultralytics HUB model from https://hub.ultralytics.com
+        if self.is_hub_model(model):
+            # Fetch model from HUB
+            checks.check_requirements("hub-sdk>=0.0.12")
+            session = HUBTrainingSession.create_session(model)
+            model = session.model_file
+            if session.train_args:  # training sent from HUB
+                self.session = session
+
+        # Check if Triton Server model
+        elif self.is_triton_model(model):
+            self.model_name = self.model = model
+            self.overrides["task"] = task or "detect"  # set `task=detect` if not explicitly set
+            return
+
+        # Load or create new YOLO model
+        if Path(model).suffix in {".yaml", ".yml"}:
+            self._new(model, task=task, verbose=verbose)
+        else:
+            self._load(model, task=task)
+
+        # Delete super().training for accessing self.model.training
+        del self.training
+
+    def __call__(
+        self,
+        source: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        **kwargs: Any,
+    ) -> list:
+        """
+        Alias for the predict method, enabling the model instance to be callable for predictions.
+
+        This method simplifies the process of making predictions by allowing the model instance to be called
+        directly with the required arguments.
+
+        Args:
+            source (str | Path | int | PIL.Image | np.ndarray | torch.Tensor | List | Tuple): The source of
+                the image(s) to make predictions on. Can be a file path, URL, PIL image, numpy array, PyTorch
+                tensor, or a list/tuple of these.
+            stream (bool): If True, treat the input source as a continuous stream for predictions.
+            **kwargs: Additional keyword arguments to configure the prediction process.
+
+        Returns:
+            (List[ultralytics.engine.results.Results]): A list of prediction results, each encapsulated in a
+                Results object.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model("https://ultralytics.com/images/bus.jpg")
+            >>> for r in results:
+            ...     print(f"Detected {len(r)} objects in image")
+        """
+        return self.predict(source, stream, **kwargs)
+
+    @staticmethod
+    def is_triton_model(model: str) -> bool:
+        """
+        Checks if the given model string is a Triton Server URL.
+
+        This static method determines whether the provided model string represents a valid Triton Server URL by
+        parsing its components using urllib.parse.urlsplit().
+
+        Args:
+            model (str): The model string to be checked.
+
+        Returns:
+            (bool): True if the model string is a valid Triton Server URL, False otherwise.
+
+        Examples:
+            >>> Model.is_triton_model("http://localhost:8000/v2/models/yolov8n")
+            True
+            >>> Model.is_triton_model("yolo11n.pt")
+            False
+        """
+        from urllib.parse import urlsplit
+
+        url = urlsplit(model)
+        return url.netloc and url.path and url.scheme in {"http", "grpc"}
+
+    @staticmethod
+    def is_hub_model(model: str) -> bool:
+        """
+        Check if the provided model is an Ultralytics HUB model.
+
+        This static method determines whether the given model string represents a valid Ultralytics HUB model
+        identifier.
+
+        Args:
+            model (str): The model string to check.
+
+        Returns:
+            (bool): True if the model is a valid Ultralytics HUB model, False otherwise.
+
+        Examples:
+            >>> Model.is_hub_model("https://hub.ultralytics.com/models/MODEL")
+            True
+            >>> Model.is_hub_model("yolo11n.pt")
+            False
+        """
+        return model.startswith(f"{HUB_WEB_ROOT}/models/")
+
+    def _new(self, cfg: str, task=None, model=None, verbose=False) -> None:
+        """
+        Initializes a new model and infers the task type from the model definitions.
+
+        This method creates a new model instance based on the provided configuration file. It loads the model
+        configuration, infers the task type if not specified, and initializes the model using the appropriate
+        class from the task map.
+
+        Args:
+            cfg (str): Path to the model configuration file in YAML format.
+            task (str | None): The specific task for the model. If None, it will be inferred from the config.
+            model (torch.nn.Module | None): A custom model instance. If provided, it will be used instead of creating
+                a new one.
+            verbose (bool): If True, displays model information during loading.
+
+        Raises:
+            ValueError: If the configuration file is invalid or the task cannot be inferred.
+            ImportError: If the required dependencies for the specified task are not installed.
+
+        Examples:
+            >>> model = Model()
+            >>> model._new("yolov8n.yaml", task="detect", verbose=True)
+        """
+        cfg_dict = yaml_model_load(cfg)
+        self.cfg = cfg
+        self.task = task or guess_model_task(cfg_dict)
+        self.model = (model or self._smart_load("model"))(cfg_dict, verbose=verbose and RANK == -1)  # build model
+        self.overrides["model"] = self.cfg
+        self.overrides["task"] = self.task
+
+        # Below added to allow export from YAMLs
+        self.model.args = {**DEFAULT_CFG_DICT, **self.overrides}  # combine default and model args (prefer model args)
+        self.model.task = self.task
+        self.model_name = cfg
+
+    def _load(self, weights: str, task=None) -> None:
+        """
+        Loads a model from a checkpoint file or initializes it from a weights file.
+
+        This method handles loading models from either .pt checkpoint files or other weight file formats. It sets
+        up the model, task, and related attributes based on the loaded weights.
+
+        Args:
+            weights (str): Path to the model weights file to be loaded.
+            task (str | None): The task associated with the model. If None, it will be inferred from the model.
+
+        Raises:
+            FileNotFoundError: If the specified weights file does not exist or is inaccessible.
+            ValueError: If the weights file format is unsupported or invalid.
+
+        Examples:
+            >>> model = Model()
+            >>> model._load("yolo11n.pt")
+            >>> model._load("path/to/weights.pth", task="detect")
+        """
+        if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
+            weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"])  # download and return local file
+        weights = checks.check_model_file_from_stem(weights)  # add suffix, i.e. yolov8n -> yolov8n.pt
+
+        if Path(weights).suffix == ".pt":
+            self.model, self.ckpt = attempt_load_one_weight(weights)
+            self.task = self.model.args["task"]
+            self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)
+            self.ckpt_path = self.model.pt_path
+        else:
+            weights = checks.check_file(weights)  # runs in all cases, not redundant with above call
+            self.model, self.ckpt = weights, None
+            self.task = task or guess_model_task(weights)
+            self.ckpt_path = weights
+        self.overrides["model"] = weights
+        self.overrides["task"] = self.task
+        self.model_name = weights
+
+    def _check_is_pytorch_model(self) -> None:
+        """
+        Checks if the model is a PyTorch model and raises a TypeError if it's not.
+
+        This method verifies that the model is either a PyTorch module or a .pt file. It's used to ensure that
+        certain operations that require a PyTorch model are only performed on compatible model types.
+
+        Raises:
+            TypeError: If the model is not a PyTorch module or a .pt file. The error message provides detailed
+                information about supported model formats and operations.
+
+        Examples:
+            >>> model = Model("yolo11n.pt")
+            >>> model._check_is_pytorch_model()  # No error raised
+            >>> model = Model("yolov8n.onnx")
+            >>> model._check_is_pytorch_model()  # Raises TypeError
+        """
+        pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == ".pt"
+        pt_module = isinstance(self.model, nn.Module)
+        if not (pt_module or pt_str):
+            raise TypeError(
+                f"model='{self.model}' should be a *.pt PyTorch model to run this method, but is a different format. "
+                f"PyTorch models can train, val, predict and export, i.e. 'model.train(data=...)', but exported "
+                f"formats like ONNX, TensorRT etc. only support 'predict' and 'val' modes, "
+                f"i.e. 'yolo predict model=yolov8n.onnx'.\nTo run CUDA or MPS inference please pass the device "
+                f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
+            )
+
+    def reset_weights(self) -> "Model":
+        """
+        Resets the model's weights to their initial state.
+
+        This method iterates through all modules in the model and resets their parameters if they have a
+        'reset_parameters' method. It also ensures that all parameters have 'requires_grad' set to True,
+        enabling them to be updated during training.
+
+        Returns:
+            (Model): The instance of the class with reset weights.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = Model("yolo11n.pt")
+            >>> model.reset_weights()
+        """
+        self._check_is_pytorch_model()
+        for m in self.model.modules():
+            if hasattr(m, "reset_parameters"):
+                m.reset_parameters()
+        for p in self.model.parameters():
+            p.requires_grad = True
+        return self
+
+    def load(self, weights: Union[str, Path] = "yolo11n.pt") -> "Model":
+        """
+        Loads parameters from the specified weights file into the model.
+
+        This method supports loading weights from a file or directly from a weights object. It matches parameters by
+        name and shape and transfers them to the model.
+
+        Args:
+            weights (Union[str, Path]): Path to the weights file or a weights object.
+
+        Returns:
+            (Model): The instance of the class with loaded weights.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = Model()
+            >>> model.load("yolo11n.pt")
+            >>> model.load(Path("path/to/weights.pt"))
+        """
+        self._check_is_pytorch_model()
+        if isinstance(weights, (str, Path)):
+            self.overrides["pretrained"] = weights  # remember the weights for DDP training
+            weights, self.ckpt = attempt_load_one_weight(weights)
+        self.model.load(weights)
+        return self
+
+    def save(self, filename: Union[str, Path] = "saved_model.pt") -> None:
+        """
+        Saves the current model state to a file.
+
+        This method exports the model's checkpoint (ckpt) to the specified filename. It includes metadata such as
+        the date, Ultralytics version, license information, and a link to the documentation.
+
+        Args:
+            filename (Union[str, Path]): The name of the file to save the model to.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = Model("yolo11n.pt")
+            >>> model.save("my_model.pt")
+        """
+        self._check_is_pytorch_model()
+        from copy import deepcopy
+        from datetime import datetime
+
+        from ultralytics import __version__
+
+        updates = {
+            "model": deepcopy(self.model).half() if isinstance(self.model, nn.Module) else self.model,
+            "date": datetime.now().isoformat(),
+            "version": __version__,
+            "license": "AGPL-3.0 License (https://ultralytics.com/license)",
+            "docs": "https://docs.ultralytics.com",
+        }
+        torch.save({**self.ckpt, **updates}, filename)
+
+    def info(self, detailed: bool = False, verbose: bool = True):
+        """
+        Logs or returns model information.
+
+        This method provides an overview or detailed information about the model, depending on the arguments
+        passed. It can control the verbosity of the output and return the information as a list.
+
+        Args:
+            detailed (bool): If True, shows detailed information about the model layers and parameters.
+            verbose (bool): If True, prints the information. If False, returns the information as a list.
+
+        Returns:
+            (List[str]): A list of strings containing various types of information about the model, including
+                model summary, layer details, and parameter counts. Empty if verbose is True.
+
+        Raises:
+            TypeError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = Model("yolo11n.pt")
+            >>> model.info()  # Prints model summary
+            >>> info_list = model.info(detailed=True, verbose=False)  # Returns detailed info as a list
+        """
+        self._check_is_pytorch_model()
+        return self.model.info(detailed=detailed, verbose=verbose)
+
+    def fuse(self):
+        """
+        Fuses Conv2d and BatchNorm2d layers in the model for optimized inference.
+
+        This method iterates through the model's modules and fuses consecutive Conv2d and BatchNorm2d layers
+        into a single layer. This fusion can significantly improve inference speed by reducing the number of
+        operations and memory accesses required during forward passes.
+
+        The fusion process typically involves folding the BatchNorm2d parameters (mean, variance, weight, and
+        bias) into the preceding Conv2d layer's weights and biases. This results in a single Conv2d layer that
+        performs both convolution and normalization in one step.
+
+        Raises:
+            TypeError: If the model is not a PyTorch nn.Module.
+
+        Examples:
+            >>> model = Model("yolo11n.pt")
+            >>> model.fuse()
+            >>> # Model is now fused and ready for optimized inference
+        """
+        self._check_is_pytorch_model()
+        self.model.fuse()
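+
+        # Folding math sketch (reference only; actual fusion is performed by the model's fuse(), e.g.
+        # ultralytics.utils.torch_utils.fuse_conv_and_bn), per output channel of the preceding Conv2d:
+        #   w_fused = w_conv * gamma / sqrt(running_var + eps)
+        #   b_fused = beta + (b_conv - running_mean) * gamma / sqrt(running_var + eps)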
+
+    def embed(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        **kwargs: Any,
+    ) -> list:
+        """
+        Generates image embeddings based on the provided source.
+
+        This method is a wrapper around the 'predict()' method, focusing on generating embeddings from an image
+        source. It allows customization of the embedding process through various keyword arguments.
+
+        Args:
+            source (str | Path | int | List | Tuple | np.ndarray | torch.Tensor): The source of the image for
+                generating embeddings. Can be a file path, URL, PIL image, numpy array, etc.
+            stream (bool): If True, predictions are streamed.
+            **kwargs: Additional keyword arguments for configuring the embedding process.
+
+        Returns:
+            (List[torch.Tensor]): A list containing the image embeddings.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> image = "https://ultralytics.com/images/bus.jpg"
+            >>> embeddings = model.embed(image)
+            >>> print(embeddings[0].shape)
+        """
+        if not kwargs.get("embed"):
+            kwargs["embed"] = [len(self.model.model) - 2]  # embed second-to-last layer if no indices passed
+        return self.predict(source, stream, **kwargs)
+
+    def predict(
+        self,
+        source: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        predictor=None,
+        **kwargs: Any,
+    ) -> List[Results]:
+        """
+        Performs predictions on the given image source using the YOLO model.
+
+        This method facilitates the prediction process, allowing various configurations through keyword arguments.
+        It supports predictions with custom predictors or the default predictor method. The method handles different
+        types of image sources and can operate in a streaming mode.
+
+        Args:
+            source (str | Path | int | PIL.Image | np.ndarray | torch.Tensor | List | Tuple): The source
+                of the image(s) to make predictions on. Accepts various types including file paths, URLs, PIL
+                images, numpy arrays, and torch tensors.
+            stream (bool): If True, treats the input source as a continuous stream for predictions.
+            predictor (BasePredictor | None): An instance of a custom predictor class for making predictions.
+                If None, the method uses a default predictor.
+            **kwargs: Additional keyword arguments for configuring the prediction process.
+
+        Returns:
+            (List[ultralytics.engine.results.Results]): A list of prediction results, each encapsulated in a
+                Results object.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model.predict(source="path/to/image.jpg", conf=0.25)
+            >>> for r in results:
+            ...     print(r.boxes.data)  # print detection bounding boxes
+
+        Notes:
+            - If 'source' is not provided, it defaults to the ASSETS constant with a warning.
+            - The method sets up a new predictor if not already present and updates its arguments with each call.
+            - For SAM-type models, 'prompts' can be passed as a keyword argument.
+        """
+        if source is None:
+            source = ASSETS
+            LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using 'source={source}'.")
+
+        is_cli = (ARGV[0].endswith("yolo") or ARGV[0].endswith("ultralytics")) and any(
+            x in ARGV for x in ("predict", "track", "mode=predict", "mode=track")
+        )
+
+        custom = {"conf": 0.25, "batch": 1, "save": is_cli, "mode": "predict"}  # method defaults
+        args = {**self.overrides, **custom, **kwargs}  # highest priority args on the right
+        prompts = args.pop("prompts", None)  # for SAM-type models
+
+        if not self.predictor:
+            self.predictor = (predictor or self._smart_load("predictor"))(overrides=args, _callbacks=self.callbacks)
+            self.predictor.setup_model(model=self.model, verbose=is_cli)
+        else:  # only update args if predictor is already setup
+            self.predictor.args = get_cfg(self.predictor.args, args)
+            if "project" in args or "name" in args:
+                self.predictor.save_dir = get_save_dir(self.predictor.args)
+        if prompts and hasattr(self.predictor, "set_prompts"):  # for SAM-type models
+            self.predictor.set_prompts(prompts)
+        return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)
+
+    def track(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        persist: bool = False,
+        **kwargs: Any,
+    ) -> List[Results]:
+        """
+        Conducts object tracking on the specified input source using the registered trackers.
+
+        This method performs object tracking using the model's predictors and optionally registered trackers. It handles
+        various input sources such as file paths or video streams, and supports customization through keyword arguments.
+        The method registers trackers if not already present and can persist them between calls.
+
+        Args:
+            source (str | Path | int | List | Tuple | np.ndarray | torch.Tensor): Input source for object
+                tracking. Can be a file path, URL, or video stream.
+            stream (bool): If True, treats the input source as a continuous video stream.
+            persist (bool): If True, persists trackers between different calls to this method.
+            **kwargs: Additional keyword arguments for configuring the tracking process.
+
+        Returns:
+            (List[ultralytics.engine.results.Results]): A list of tracking results, each a Results object.
+
+        Raises:
+            AttributeError: If the predictor does not have registered trackers.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model.track(source="path/to/video.mp4", show=True)
+            >>> for r in results:
+            ...     print(r.boxes.id)  # print tracking IDs
+
+        Notes:
+            - This method sets a default confidence threshold of 0.1 for ByteTrack-based tracking.
+            - The tracking mode is explicitly set in the keyword arguments.
+            - Batch size is set to 1 for tracking in videos.
+        """
+        if not hasattr(self.predictor, "trackers"):
+            from ultralytics.trackers import register_tracker
+
+            register_tracker(self, persist)
+        kwargs["conf"] = kwargs.get("conf") or 0.1  # ByteTrack-based method needs low confidence predictions as input
+        kwargs["batch"] = kwargs.get("batch") or 1  # batch-size 1 for tracking in videos
+        kwargs["mode"] = "track"
+        return self.predict(source=source, stream=stream, **kwargs)
+
+    def val(
+        self,
+        validator=None,
+        **kwargs: Any,
+    ):
+        """
+        Validates the model using a specified dataset and validation configuration.
+
+        This method facilitates the model validation process, allowing for customization through various settings. It
+        supports validation with a custom validator or the default validation approach. The method combines default
+        configurations, method-specific defaults, and user-provided arguments to configure the validation process.
+
+        Args:
+            validator (ultralytics.engine.validator.BaseValidator | None): An instance of a custom validator class for
+                validating the model.
+            **kwargs: Arbitrary keyword arguments for customizing the validation process.
+
+        Returns:
+            (ultralytics.utils.metrics.DetMetrics): Validation metrics obtained from the validation process.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model.val(data="coco8.yaml", imgsz=640)
+            >>> print(results.box.map)  # Print mAP50-95
+        """
+        custom = {"rect": True}  # method defaults
+        args = {**self.overrides, **custom, **kwargs, "mode": "val"}  # highest priority args on the right
+
+        validator = (validator or self._smart_load("validator"))(args=args, _callbacks=self.callbacks)
+        validator(model=self.model)
+        self.metrics = validator.metrics
+        return validator.metrics
+
+    def benchmark(
+        self,
+        **kwargs: Any,
+    ):
+        """
+        Benchmarks the model across various export formats to evaluate performance.
+
+        This method assesses the model's performance in different export formats, such as ONNX, TorchScript, etc.
+        It uses the 'benchmark' function from the ultralytics.utils.benchmarks module. The benchmarking is
+        configured using a combination of default configuration values, model-specific arguments, method-specific
+        defaults, and any additional user-provided keyword arguments.
+
+        Args:
+            **kwargs: Arbitrary keyword arguments to customize the benchmarking process. These are combined with
+                default configurations, model-specific arguments, and method defaults. Common options include:
+                - data (str): Path to the dataset for benchmarking.
+                - imgsz (int | List[int]): Image size for benchmarking.
+                - half (bool): Whether to use half-precision (FP16) mode.
+                - int8 (bool): Whether to use int8 precision mode.
+                - device (str): Device to run the benchmark on (e.g., 'cpu', 'cuda').
+                - verbose (bool): Whether to print detailed benchmark information.
+
+        Returns:
+            (Dict): A dictionary containing the results of the benchmarking process, including metrics for
+                different export formats.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model.benchmark(data="coco8.yaml", imgsz=640, half=True)
+            >>> print(results)
+        """
+        self._check_is_pytorch_model()
+        from ultralytics.utils.benchmarks import benchmark
+
+        custom = {"verbose": False}  # method defaults
+        args = {**DEFAULT_CFG_DICT, **self.model.args, **custom, **kwargs, "mode": "benchmark"}
+        return benchmark(
+            model=self,
+            data=kwargs.get("data"),  # if no 'data' argument passed set data=None for default datasets
+            imgsz=args["imgsz"],
+            half=args["half"],
+            int8=args["int8"],
+            device=args["device"],
+            verbose=kwargs.get("verbose"),
+        )
+
+    def export(
+        self,
+        **kwargs: Any,
+    ) -> str:
+        """
+        Exports the model to a different format suitable for deployment.
+
+        This method facilitates the export of the model to various formats (e.g., ONNX, TorchScript) for deployment
+        purposes. It uses the 'Exporter' class for the export process, combining model-specific overrides, method
+        defaults, and any additional arguments provided.
+
+        Args:
+            **kwargs: Arbitrary keyword arguments to customize the export process. These are combined with
+                the model's overrides and method defaults. Common arguments include:
+                format (str): Export format (e.g., 'onnx', 'engine', 'coreml').
+                half (bool): Export model in half-precision.
+                int8 (bool): Export model in int8 precision.
+                device (str): Device to run the export on.
+                workspace (int): Maximum memory workspace size for TensorRT engines.
+                nms (bool): Add Non-Maximum Suppression (NMS) module to model.
+                simplify (bool): Simplify ONNX model.
+
+        Returns:
+            (str): The path to the exported model file.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+            ValueError: If an unsupported export format is specified.
+            RuntimeError: If the export process fails due to errors.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> model.export(format="onnx", dynamic=True, simplify=True)
+            'path/to/exported/model.onnx'
+        """
+        self._check_is_pytorch_model()
+        from .exporter import Exporter
+
+        custom = {
+            "imgsz": self.model.args["imgsz"],
+            "batch": 1,
+            "data": None,
+            "device": None,  # reset to avoid multi-GPU errors
+            "verbose": False,
+        }  # method defaults
+        args = {**self.overrides, **custom, **kwargs, "mode": "export"}  # highest priority args on the right
+        return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model)
+
+    def train(
+        self,
+        trainer=None,
+        **kwargs: Any,
+    ):
+        """
+        Trains the model using the specified dataset and training configuration.
+
+        This method facilitates model training with a range of customizable settings. It supports training with a
+        custom trainer or the default training approach. The method handles scenarios such as resuming training
+        from a checkpoint, integrating with Ultralytics HUB, and updating model and configuration after training.
+
+        When using Ultralytics HUB, if the session has a loaded model, the method prioritizes HUB training
+        arguments and warns if local arguments are provided. It checks for pip updates and combines default
+        configurations, method-specific defaults, and user-provided arguments to configure the training process.
+
+        Args:
+            trainer (BaseTrainer | None): Custom trainer instance for model training. If None, uses default.
+            **kwargs: Arbitrary keyword arguments for training configuration. Common options include:
+                data (str): Path to dataset configuration file.
+                epochs (int): Number of training epochs.
+                batch (int): Batch size for training.
+                imgsz (int): Input image size.
+                device (str): Device to run training on (e.g., 'cuda', 'cpu').
+                workers (int): Number of worker threads for data loading.
+                optimizer (str): Optimizer to use for training.
+                lr0 (float): Initial learning rate.
+                patience (int): Epochs to wait for no observable improvement for early stopping of training.
+
+        Returns:
+            (Dict | None): Training metrics if available and training is successful; otherwise, None.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+            PermissionError: If there is a permission issue with the HUB session.
+            ModuleNotFoundError: If the HUB SDK is not installed.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model.train(data="coco8.yaml", epochs=3)
+        """
+        self._check_is_pytorch_model()
+        if hasattr(self.session, "model") and self.session.model.id:  # Ultralytics HUB session with loaded model
+            if any(kwargs):
+                LOGGER.warning("WARNING ⚠️ using HUB training arguments, ignoring local training arguments.")
+            kwargs = self.session.train_args  # overwrite kwargs
+
+        checks.check_pip_update_available()
+
+        overrides = yaml_load(checks.check_yaml(kwargs["cfg"])) if kwargs.get("cfg") else self.overrides
+        custom = {
+            # NOTE: handle the case when 'cfg' includes 'data'.
+            "data": overrides.get("data") or DEFAULT_CFG_DICT["data"] or TASK2DATA[self.task],
+            "model": self.overrides["model"],
+            "task": self.task,
+        }  # method defaults
+        args = {**overrides, **custom, **kwargs, "mode": "train"}  # highest priority args on the right
+        if args.get("resume"):
+            args["resume"] = self.ckpt_path
+
+        self.trainer = (trainer or self._smart_load("trainer"))(overrides=args, _callbacks=self.callbacks)
+        if not args.get("resume"):  # manually set model only if not resuming
+            self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)
+            self.model = self.trainer.model
+
+        self.trainer.hub_session = self.session  # attach optional HUB session
+        self.trainer.train()
+        # Update model and cfg after training
+        if RANK in {-1, 0}:
+            ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last
+            self.model, self.ckpt = attempt_load_one_weight(ckpt)
+            self.overrides = self.model.args
+            self.metrics = getattr(self.trainer.validator, "metrics", None)  # TODO: no metrics returned by DDP
+        return self.metrics
+
+    def tune(
+        self,
+        use_ray=False,
+        iterations=10,
+        *args: Any,
+        **kwargs: Any,
+    ):
+        """
+        Conducts hyperparameter tuning for the model, with an option to use Ray Tune.
+
+        This method supports two modes of hyperparameter tuning: using Ray Tune or a custom tuning method.
+        When Ray Tune is enabled, it leverages the 'run_ray_tune' function from the ultralytics.utils.tuner module.
+        Otherwise, it uses the internal 'Tuner' class for tuning. The method combines default, overridden, and
+        custom arguments to configure the tuning process.
+
+        Args:
+            use_ray (bool): If True, uses Ray Tune for hyperparameter tuning. Defaults to False.
+            iterations (int): The number of tuning iterations to perform. Defaults to 10.
+            *args: Variable length argument list for additional arguments.
+            **kwargs: Arbitrary keyword arguments. These are combined with the model's overrides and defaults.
+
+        Returns:
+            (Dict): A dictionary containing the results of the hyperparameter search.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model.tune(use_ray=True, iterations=20)
+            >>> print(results)
+        """
+        self._check_is_pytorch_model()
+        if use_ray:
+            from ultralytics.utils.tuner import run_ray_tune
+
+            return run_ray_tune(self, max_samples=iterations, *args, **kwargs)
+        else:
+            from .tuner import Tuner
+
+            custom = {}  # method defaults
+            args = {**self.overrides, **custom, **kwargs, "mode": "train"}  # highest priority args on the right
+            return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)
+
+    def _apply(self, fn) -> "Model":
+        """
+        Applies a function to model tensors that are not parameters or registered buffers.
+
+        This method extends the functionality of the parent class's _apply method by additionally resetting the
+        predictor and updating the device in the model's overrides. It's typically used for operations like
+        moving the model to a different device or changing its precision.
+
+        Args:
+            fn (Callable): A function to be applied to the model's tensors. This is typically a method like
+                to(), cpu(), cuda(), half(), or float().
+
+        Returns:
+            (Model): The model instance with the function applied and updated attributes.
+
+        Raises:
+            AssertionError: If the model is not a PyTorch model.
+
+        Examples:
+            >>> model = Model("yolo11n.pt")
+            >>> model = model._apply(lambda t: t.cuda())  # Move model to GPU
+        """
+        self._check_is_pytorch_model()
+        self = super()._apply(fn)  # noqa
+        self.predictor = None  # reset predictor as device may have changed
+        self.overrides["device"] = self.device  # was str(self.device) i.e. device(type='cuda', index=0) -> 'cuda:0'
+        return self
+
+    @property
+    def names(self) -> Dict[int, str]:
+        """
+        Retrieves the class names associated with the loaded model.
+
+        This property returns the class names if they are defined in the model. It checks the class names for validity
+        using the 'check_class_names' function from the ultralytics.nn.autobackend module. If the predictor is not
+        initialized, it sets it up before retrieving the names.
+
+        Returns:
+            (Dict[int, str]): A dict of class names associated with the model.
+
+        Raises:
+            AttributeError: If the model or predictor does not have a 'names' attribute.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> print(model.names)
+            {0: 'person', 1: 'bicycle', 2: 'car', ...}
+        """
+        from ultralytics.nn.autobackend import check_class_names
+
+        if hasattr(self.model, "names"):
+            return check_class_names(self.model.names)
+        if not self.predictor:  # export formats will not have predictor defined until predict() is called
+            self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
+            self.predictor.setup_model(model=self.model, verbose=False)
+        return self.predictor.model.names
+
+    @property
+    def device(self) -> torch.device:
+        """
+        Retrieves the device on which the model's parameters are allocated.
+
+        This property determines the device (CPU or GPU) where the model's parameters are currently stored. It is
+        applicable only to models that are instances of nn.Module.
+
+        Returns:
+            (torch.device): The device (CPU/GPU) of the model.
+
+        Raises:
+            AttributeError: If the model is not a PyTorch nn.Module instance.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> print(model.device)
+            device(type='cuda', index=0)  # if CUDA is available
+            >>> model = model.to("cpu")
+            >>> print(model.device)
+            device(type='cpu')
+        """
+        return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None
+
+    @property
+    def transforms(self):
+        """
+        Retrieves the transformations applied to the input data of the loaded model.
+
+        This property returns the transformations if they are defined in the model. The transforms
+        typically include preprocessing steps like resizing, normalization, and data augmentation
+        that are applied to input data before it is fed into the model.
+
+        Returns:
+            (object | None): The transform object of the model if available, otherwise None.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> transforms = model.transforms
+            >>> if transforms:
+            ...     print(f"Model transforms: {transforms}")
+            ... else:
+            ...     print("No transforms defined for this model.")
+        """
+        return self.model.transforms if hasattr(self.model, "transforms") else None
+
+    def add_callback(self, event: str, func) -> None:
+        """
+        Adds a callback function for a specified event.
+
+        This method allows registering custom callback functions that are triggered on specific events during
+        model operations such as training or inference. Callbacks provide a way to extend and customize the
+        behavior of the model at various stages of its lifecycle.
+
+        Args:
+            event (str): The name of the event to attach the callback to. Must be a valid event name recognized
+                by the Ultralytics framework.
+            func (Callable): The callback function to be registered. This function will be called when the
+                specified event occurs.
+
+        Raises:
+            ValueError: If the event name is not recognized or is invalid.
+
+        Examples:
+            >>> def on_train_start(trainer):
+            ...     print("Training is starting!")
+            >>> model = YOLO("yolo11n.pt")
+            >>> model.add_callback("on_train_start", on_train_start)
+            >>> model.train(data="coco8.yaml", epochs=1)
+        """
+        self.callbacks[event].append(func)
+
+    def clear_callback(self, event: str) -> None:
+        """
+        Clears all callback functions registered for a specified event.
+
+        This method removes all custom and default callback functions associated with the given event.
+        It resets the callback list for the specified event to an empty list, effectively removing all
+        registered callbacks for that event.
+
+        Args:
+            event (str): The name of the event for which to clear the callbacks. This should be a valid event name
+                recognized by the Ultralytics callback system.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> model.add_callback("on_train_start", lambda: print("Training started"))
+            >>> model.clear_callback("on_train_start")
+            >>> # All callbacks for 'on_train_start' are now removed
+
+        Notes:
+            - This method affects both custom callbacks added by the user and default callbacks
+              provided by the Ultralytics framework.
+            - After calling this method, no callbacks will be executed for the specified event
+              until new ones are added.
+            - Use with caution as it removes all callbacks, including essential ones that might
+              be required for proper functioning of certain operations.
+        """
+        self.callbacks[event] = []
+
+    def reset_callbacks(self) -> None:
+        """
+        Resets all callbacks to their default functions.
+
+        This method reinstates the default callback functions for all events, removing any custom callbacks that were
+        previously added. It iterates through all default callback events and replaces the current callbacks with the
+        default ones.
+
+        The default callbacks are defined in the 'callbacks.default_callbacks' dictionary, which contains predefined
+        functions for various events in the model's lifecycle, such as on_train_start, on_epoch_end, etc.
+
+        This method is useful when you want to revert to the original set of callbacks after making custom
+        modifications, ensuring consistent behavior across different runs or experiments.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> model.add_callback("on_train_start", custom_function)
+            >>> model.reset_callbacks()
+            >>> # All callbacks are now reset to their default functions
+        """
+        for event in callbacks.default_callbacks.keys():
+            self.callbacks[event] = [callbacks.default_callbacks[event][0]]
+
+    @staticmethod
+    def _reset_ckpt_args(args: dict) -> dict:
+        """
+        Resets specific arguments when loading a PyTorch model checkpoint.
+
+        This static method filters the input arguments dictionary to retain only a specific set of keys that are
+        considered important for model loading. It's used to ensure that only relevant arguments are preserved
+        when loading a model from a checkpoint, discarding any unnecessary or potentially conflicting settings.
+
+        Args:
+            args (dict): A dictionary containing various model arguments and settings.
+
+        Returns:
+            (dict): A new dictionary containing only the specified include keys from the input arguments.
+
+        Examples:
+            >>> original_args = {"imgsz": 640, "data": "coco.yaml", "task": "detect", "batch": 16, "epochs": 100}
+            >>> reset_args = Model._reset_ckpt_args(original_args)
+            >>> print(reset_args)
+            {'imgsz': 640, 'data': 'coco.yaml', 'task': 'detect'}
+        """
+        include = {"imgsz", "data", "task", "single_cls"}  # only remember these arguments when loading a PyTorch model
+        return {k: v for k, v in args.items() if k in include}
+
+    # def __getattr__(self, attr):
+    #    """Raises error if object has no requested attribute."""
+    #    name = self.__class__.__name__
+    #    raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
+
+    def _smart_load(self, key: str):
+        """
+        Loads the appropriate module based on the model task.
+
+        This method dynamically selects and returns the correct module (model, trainer, validator, or predictor)
+        based on the current task of the model and the provided key. It uses the task_map attribute to determine
+        the correct module to load.
+
+        Args:
+            key (str): The type of module to load. Must be one of 'model', 'trainer', 'validator', or 'predictor'.
+
+        Returns:
+            (object): The loaded module corresponding to the specified key and current task.
+
+        Raises:
+            NotImplementedError: If the specified key is not supported for the current task.
+
+        Examples:
+            >>> model = Model(task="detect")
+            >>> predictor = model._smart_load("predictor")
+            >>> trainer = model._smart_load("trainer")
+
+        Notes:
+            - This method is typically used internally by other methods of the Model class.
+            - The task_map attribute should be properly initialized with the correct mappings for each task.
+        """
+        try:
+            return self.task_map[self.task][key]
+        except Exception as e:
+            name = self.__class__.__name__
+            mode = inspect.stack()[1][3]  # get the function name.
+            raise NotImplementedError(
+                emojis(f"WARNING ⚠️ '{name}' model does not support '{mode}' mode for '{self.task}' task yet.")
+            ) from e
+
+    @property
+    def task_map(self) -> dict:
+        """
+        Provides a mapping from model tasks to corresponding classes for different modes.
+
+        This property method returns a dictionary that maps each supported task (e.g., detect, segment, classify)
+        to a nested dictionary. The nested dictionary contains mappings for different operational modes
+        (model, trainer, validator, predictor) to their respective class implementations.
+
+        The mapping allows for dynamic loading of appropriate classes based on the model's task and the
+        desired operational mode. This facilitates a flexible and extensible architecture for handling
+        various tasks and modes within the Ultralytics framework.
+
+        Returns:
+            (Dict[str, Dict[str, Any]]): A dictionary where keys are task names (str) and values are
+            nested dictionaries. Each nested dictionary has keys 'model', 'trainer', 'validator', and
+            'predictor', mapping to their respective class implementations.
+
+        Examples:
+            >>> model = Model()
+            >>> task_map = model.task_map
+            >>> detect_class_map = task_map["detect"]
+            >>> segment_class_map = task_map["segment"]
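+
+            A subclass might return a mapping shaped roughly like this (an illustrative sketch; the detect-task
+            classes are assumptions, not requirements of this base class):
+
+            >>> from ultralytics.nn.tasks import DetectionModel
+            >>> from ultralytics.models.yolo import detect
+            >>> detect_map = {
+            ...     "model": DetectionModel,
+            ...     "trainer": detect.DetectionTrainer,
+            ...     "validator": detect.DetectionValidator,
+            ...     "predictor": detect.DetectionPredictor,
+            ... }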
+
+        Note:
+            The actual implementation of this method may vary depending on the specific tasks and
+            classes supported by the Ultralytics framework. The docstring provides a general
+            description of the expected behavior and structure.
+        """
+        raise NotImplementedError("Please provide task map for your model!")
+
+    def eval(self):
+        """
+        Sets the model to evaluation mode.
+
+        This method changes the model's mode to evaluation, which affects layers like dropout and batch normalization
+        that behave differently during training and evaluation.
+
+        Returns:
+            (Model): The model instance with evaluation mode set.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> model.eval()
+        """
+        self.model.eval()
+        return self
+
+    def __getattr__(self, name):
+        """
+        Enables accessing model attributes directly through the Model class.
+
+        This method provides a way to access attributes of the underlying model directly through the Model class
+        instance. It first checks if the requested attribute is 'model', in which case it returns the model from
+        the module dictionary. Otherwise, it delegates the attribute lookup to the underlying model.
+
+        Args:
+            name (str): The name of the attribute to retrieve.
+
+        Returns:
+            (Any): The requested attribute value.
+
+        Raises:
+            AttributeError: If the requested attribute does not exist in the model.
+
+        Examples:
+            >>> model = YOLO("yolo11n.pt")
+            >>> print(model.stride)
+            >>> print(model.task)
+        """
+        return self._modules["model"] if name == "model" else getattr(self.model, name)

+ 408 - 0
ultralytics/engine/predictor.py

@@ -0,0 +1,408 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ yolo mode=predict model=yolov8n.pt source=0                               # webcam
+                                                img.jpg                         # image
+                                                vid.mp4                         # video
+                                                screen                          # screenshot
+                                                path/                           # directory
+                                                list.txt                        # list of images
+                                                list.streams                    # list of streams
+                                                'path/*.jpg'                    # glob
+                                                'https://youtu.be/LNwODJXcvt4'  # YouTube
+                                                'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP, TCP stream
+
+Usage - formats:
+    $ yolo mode=predict model=yolov8n.pt                 # PyTorch
+                              yolov8n.torchscript        # TorchScript
+                              yolov8n.onnx               # ONNX Runtime or OpenCV DNN with dnn=True
+                              yolov8n_openvino_model     # OpenVINO
+                              yolov8n.engine             # TensorRT
+                              yolov8n.mlpackage          # CoreML (macOS-only)
+                              yolov8n_saved_model        # TensorFlow SavedModel
+                              yolov8n.pb                 # TensorFlow GraphDef
+                              yolov8n.tflite             # TensorFlow Lite
+                              yolov8n_edgetpu.tflite     # TensorFlow Edge TPU
+                              yolov8n_paddle_model       # PaddlePaddle
+                              yolov8n.mnn                # MNN
+                              yolov8n_ncnn_model         # NCNN
+"""
+
+import platform
+import re
+import threading
+from pathlib import Path
+
+import cv2
+import numpy as np
+import torch
+
+from ultralytics.cfg import get_cfg, get_save_dir
+from ultralytics.data import load_inference_source
+from ultralytics.data.augment import LetterBox, classify_transforms
+from ultralytics.nn.autobackend import AutoBackend
+from ultralytics.utils import DEFAULT_CFG, LOGGER, MACOS, WINDOWS, callbacks, colorstr, ops
+from ultralytics.utils.checks import check_imgsz, check_imshow
+from ultralytics.utils.files import increment_path
+from ultralytics.utils.torch_utils import select_device, smart_inference_mode
+
+STREAM_WARNING = """
+WARNING ⚠️ inference results will accumulate in RAM unless `stream=True` is passed, causing potential out-of-memory
+errors for large sources or long-running streams and videos. See https://docs.ultralytics.com/modes/predict/ for help.
+
+Example:
+    results = model(source=..., stream=True)  # generator of Results objects
+    for r in results:
+        boxes = r.boxes  # Boxes object for bbox outputs
+        masks = r.masks  # Masks object for segment masks outputs
+        probs = r.probs  # Class probabilities for classification outputs
+"""
+
+
+class BasePredictor:
+    """
+    A base class for creating predictors.
+
+    Handles model setup, source loading, preprocessing, streaming inference, and saving of results; task-specific
+    predictors subclass it and override methods such as postprocess().
+
+    Attributes:
+        args (SimpleNamespace): Configuration for the predictor.
+        save_dir (Path): Directory to save results.
+        done_warmup (bool): Whether the predictor has finished setup.
+        model (nn.Module): Model used for prediction.
+        data (dict): Data configuration.
+        device (torch.device): Device used for prediction.
+        dataset (Dataset): Dataset used for prediction.
+        vid_writer (dict): Dictionary of {save_path: video_writer, ...} writer for saving video output.
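+
+    Examples:
+        A minimal usage sketch via a task-specific subclass (the DetectionPredictor import, weights file, and
+        'bus.jpg' source are illustrative assumptions, not requirements of this base class):
+
+        >>> from ultralytics.models.yolo.detect import DetectionPredictor
+        >>> predictor = DetectionPredictor(overrides={"model": "yolo11n.pt", "source": "bus.jpg"})
+        >>> predictor.predict_cli()  # consume the results generator without accumulating them in memory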
+    """
+
+    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+        """
+        Initializes the BasePredictor class.
+
+        Args:
+            cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG.
+            overrides (dict, optional): Configuration overrides. Defaults to None.
+        """
+        self.args = get_cfg(cfg, overrides)
+        self.save_dir = get_save_dir(self.args)
+        if self.args.conf is None:
+            self.args.conf = 0.25  # default conf=0.25
+        self.done_warmup = False
+        if self.args.show:
+            self.args.show = check_imshow(warn=True)
+
+        # Usable if setup is done
+        self.model = None
+        self.data = self.args.data  # data_dict
+        self.imgsz = None
+        self.device = None
+        self.dataset = None
+        self.vid_writer = {}  # dict of {save_path: video_writer, ...}
+        self.plotted_img = None
+        self.source_type = None
+        self.seen = 0
+        self.windows = []
+        self.batch = None
+        self.results = None
+        self.transforms = None
+        self.callbacks = _callbacks or callbacks.get_default_callbacks()
+        self.txt_path = None
+        self._lock = threading.Lock()  # for automatic thread-safe inference
+        callbacks.add_integration_callbacks(self)
+
+    def preprocess(self, im):
+        """
+        Prepares input image before inference.
+
+        Args:
+            im (torch.Tensor | List[np.ndarray]): A BCHW tensor, or a list of B images each of shape (H, W, C).
+
+        Returns:
+            (torch.Tensor): Preprocessed image tensor on the model device, cast to fp16 or fp32; non-tensor inputs
+                are additionally converted from BGR uint8 to RGB and scaled from 0-255 to 0.0-1.0.
+        """
+        not_tensor = not isinstance(im, torch.Tensor)
+        if not_tensor:
+            im = np.stack(self.pre_transform(im))
+            im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW, (n, 3, h, w)
+            im = np.ascontiguousarray(im)  # contiguous
+            im = torch.from_numpy(im)
+
+        im = im.to(self.device)
+        im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
+        if not_tensor:
+            im /= 255  # 0 - 255 to 0.0 - 1.0
+        return im
+
+    def inference(self, im, *args, **kwargs):
+        """Runs inference on a given image using the specified model and arguments."""
+        visualize = (
+            increment_path(self.save_dir / Path(self.batch[0][0]).stem, mkdir=True)
+            if self.args.visualize and (not self.source_type.tensor)
+            else False
+        )
+        return self.model(im, augment=self.args.augment, visualize=visualize, embed=self.args.embed, *args, **kwargs)
+
+    def pre_transform(self, im):
+        """
+        Pre-transform input image before inference.
+
+        Args:
+            im (List[np.ndarray]): A list of N images, each an array of shape (h, w, 3).
+
+        Returns:
+            (list): A list of transformed images.
+        """
+        same_shapes = len({x.shape for x in im}) == 1
+        letterbox = LetterBox(
+            self.imgsz,
+            auto=same_shapes and (self.model.pt or (getattr(self.model, "dynamic", False) and not self.model.imx)),
+            stride=self.model.stride,
+        )
+        return [letterbox(image=x) for x in im]
+
+    def postprocess(self, preds, img, orig_imgs):
+        """Post-processes predictions for an image and returns them."""
+        return preds
+
+    def __call__(self, source=None, model=None, stream=False, *args, **kwargs):
+        """Performs inference on an image or stream."""
+        self.stream = stream
+        if stream:
+            return self.stream_inference(source, model, *args, **kwargs)
+        else:
+            return list(self.stream_inference(source, model, *args, **kwargs))  # merge list of Result into one
+
+    def predict_cli(self, source=None, model=None):
+        """
+        Method used for Command Line Interface (CLI) prediction.
+
+        This function is designed to run predictions using the CLI. It sets up the source and model, then processes
+        the inputs in a streaming manner. This method ensures that no outputs accumulate in memory by consuming the
+        generator without storing results.
+
+        Note:
+            Do not modify this function or remove the generator. The generator ensures that no outputs are
+            accumulated in memory, which is critical for preventing memory issues during long-running predictions.
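+
+        Examples:
+            A minimal sketch (the subclass, weights file, and image path are illustrative assumptions):
+
+            >>> from ultralytics.models.yolo.detect import DetectionPredictor
+            >>> predictor = DetectionPredictor()
+            >>> predictor.predict_cli(source="bus.jpg", model="yolo11n.pt")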
+        """
+        gen = self.stream_inference(source, model)
+        for _ in gen:  # sourcery skip: remove-empty-nested-block, noqa
+            pass
+
+    def setup_source(self, source):
+        """Sets up source and inference mode."""
+        self.imgsz = check_imgsz(self.args.imgsz, stride=self.model.stride, min_dim=2)  # check image size
+        self.transforms = (
+            getattr(
+                self.model.model,
+                "transforms",
+                classify_transforms(self.imgsz[0], crop_fraction=self.args.crop_fraction),
+            )
+            if self.args.task == "classify"
+            else None
+        )
+        self.dataset = load_inference_source(
+            source=source,
+            batch=self.args.batch,
+            vid_stride=self.args.vid_stride,
+            buffer=self.args.stream_buffer,
+        )
+        self.source_type = self.dataset.source_type
+        if not getattr(self, "stream", True) and (
+            self.source_type.stream
+            or self.source_type.screenshot
+            or len(self.dataset) > 1000  # many images
+            or any(getattr(self.dataset, "video_flag", [False]))
+        ):  # videos
+            LOGGER.warning(STREAM_WARNING)
+        self.vid_writer = {}
+
+    @smart_inference_mode()
+    def stream_inference(self, source=None, model=None, *args, **kwargs):
+        """Streams real-time inference on camera feed and saves results to file."""
+        if self.args.verbose:
+            LOGGER.info("")
+
+        # Setup model
+        if not self.model:
+            self.setup_model(model)
+
+        with self._lock:  # for thread-safe inference
+            # Setup source every time predict is called
+            self.setup_source(source if source is not None else self.args.source)
+
+            # Check if save_dir/ label file exists
+            if self.args.save or self.args.save_txt:
+                (self.save_dir / "labels" if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
+
+            # Warmup model
+            if not self.done_warmup:
+                self.model.warmup(imgsz=(1 if self.model.pt or self.model.triton else self.dataset.bs, 3, *self.imgsz))
+                self.done_warmup = True
+
+            self.seen, self.windows, self.batch = 0, [], None
+            profilers = (
+                ops.Profile(device=self.device),
+                ops.Profile(device=self.device),
+                ops.Profile(device=self.device),
+            )
+            self.run_callbacks("on_predict_start")
+            for self.batch in self.dataset:
+                self.run_callbacks("on_predict_batch_start")
+                paths, im0s, s = self.batch
+
+                # Preprocess
+                with profilers[0]:
+                    im = self.preprocess(im0s)
+
+                # Inference
+                with profilers[1]:
+                    preds = self.inference(im, *args, **kwargs)
+                    if self.args.embed:
+                        yield from [preds] if isinstance(preds, torch.Tensor) else preds  # yield embedding tensors
+                        continue
+
+                # Postprocess
+                with profilers[2]:
+                    self.results = self.postprocess(preds, im, im0s)
+                self.run_callbacks("on_predict_postprocess_end")
+
+                # Visualize, save, write results
+                n = len(im0s)
+                for i in range(n):
+                    self.seen += 1
+                    self.results[i].speed = {
+                        "preprocess": profilers[0].dt * 1e3 / n,
+                        "inference": profilers[1].dt * 1e3 / n,
+                        "postprocess": profilers[2].dt * 1e3 / n,
+                    }
+                    if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
+                        s[i] += self.write_results(i, Path(paths[i]), im, s)
+
+                # Print batch results
+                if self.args.verbose:
+                    LOGGER.info("\n".join(s))
+
+                self.run_callbacks("on_predict_batch_end")
+                yield from self.results
+
+        # Release assets
+        for v in self.vid_writer.values():
+            if isinstance(v, cv2.VideoWriter):
+                v.release()
+
+        # Print final results
+        if self.args.verbose and self.seen:
+            t = tuple(x.t / self.seen * 1e3 for x in profilers)  # speeds per image
+            LOGGER.info(
+                f"Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape "
+                f"{(min(self.args.batch, self.seen), 3, *im.shape[2:])}" % t
+            )
+        if self.args.save or self.args.save_txt or self.args.save_crop:
+            nl = len(list(self.save_dir.glob("labels/*.txt")))  # number of labels
+            s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else ""
+            LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}")
+        self.run_callbacks("on_predict_end")
+
+    def setup_model(self, model, verbose=True):
+        """Initialize YOLO model with given parameters and set it to evaluation mode."""
+        self.model = AutoBackend(
+            weights=model or self.args.model,
+            device=select_device(self.args.device, verbose=verbose),
+            dnn=self.args.dnn,
+            data=self.args.data,
+            fp16=self.args.half,
+            batch=self.args.batch,
+            fuse=True,
+            verbose=verbose,
+        )
+
+        self.device = self.model.device  # update device
+        self.args.half = self.model.fp16  # update half
+        self.model.eval()
+
+    def write_results(self, i, p, im, s):
+        """Write inference results to a file or directory."""
+        string = ""  # print string
+        if len(im.shape) == 3:
+            im = im[None]  # expand for batch dim
+        if self.source_type.stream or self.source_type.from_img or self.source_type.tensor:  # batch_size >= 1
+            string += f"{i}: "
+            frame = self.dataset.count
+        else:
+            match = re.search(r"frame (\d+)/", s[i])
+            frame = int(match[1]) if match else None  # None if frame cannot be determined
+
+        self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
+        string += "{:g}x{:g} ".format(*im.shape[2:])
+        result = self.results[i]
+        result.save_dir = self.save_dir.__str__()  # used in other locations
+        string += f"{result.verbose()}{result.speed['inference']:.1f}ms"
+
+        # Add predictions to image
+        if self.args.save or self.args.show:
+            self.plotted_img = result.plot(
+                line_width=self.args.line_width,
+                boxes=self.args.show_boxes,
+                conf=self.args.show_conf,
+                labels=self.args.show_labels,
+                im_gpu=None if self.args.retina_masks else im[i],
+            )
+
+        # Save results
+        if self.args.save_txt:
+            result.save_txt(f"{self.txt_path}.txt", save_conf=self.args.save_conf)
+        if self.args.save_crop:
+            result.save_crop(save_dir=self.save_dir / "crops", file_name=self.txt_path.stem)
+        if self.args.show:
+            self.show(str(p))
+        if self.args.save:
+            self.save_predicted_images(str(self.save_dir / p.name), frame)
+
+        return string
+
+    def save_predicted_images(self, save_path="", frame=0):
+        """Save video predictions as mp4 at specified path."""
+        im = self.plotted_img
+
+        # Save videos and streams
+        if self.dataset.mode in {"stream", "video"}:
+            fps = self.dataset.fps if self.dataset.mode == "video" else 30
+            frames_path = f"{save_path.split('.', 1)[0]}_frames/"
+            if save_path not in self.vid_writer:  # new video
+                if self.args.save_frames:
+                    Path(frames_path).mkdir(parents=True, exist_ok=True)
+                suffix, fourcc = (".mp4", "avc1") if MACOS else (".avi", "WMV2") if WINDOWS else (".avi", "MJPG")
+                self.vid_writer[save_path] = cv2.VideoWriter(
+                    filename=str(Path(save_path).with_suffix(suffix)),
+                    fourcc=cv2.VideoWriter_fourcc(*fourcc),
+                    fps=fps,  # integer required, floats produce error in MP4 codec
+                    frameSize=(im.shape[1], im.shape[0]),  # (width, height)
+                )
+
+            # Save video
+            self.vid_writer[save_path].write(im)
+            if self.args.save_frames:
+                cv2.imwrite(f"{frames_path}{frame}.jpg", im)
+
+        # Save images
+        else:
+            cv2.imwrite(str(Path(save_path).with_suffix(".jpg")), im)  # save to JPG for best support
+
+    def show(self, p=""):
+        """Display an image in a window using the OpenCV imshow function."""
+        im = self.plotted_img
+        if platform.system() == "Linux" and p not in self.windows:
+            self.windows.append(p)
+            cv2.namedWindow(p, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+            cv2.resizeWindow(p, im.shape[1], im.shape[0])  # (width, height)
+        cv2.imshow(p, im)
+        cv2.waitKey(300 if self.dataset.mode == "image" else 1)  # 1 millisecond
+
+    def run_callbacks(self, event: str):
+        """Runs all registered callbacks for a specific event."""
+        for callback in self.callbacks.get(event, []):
+            callback(self)
+
+    def add_callback(self, event: str, func):
+        """Add callback."""
+        self.callbacks[event].append(func)

+ 1740 - 0
ultralytics/engine/results.py

@@ -0,0 +1,1740 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Ultralytics Results, Boxes and Masks classes for handling inference results.
+
+Usage: See https://docs.ultralytics.com/modes/predict/
+"""
+
+from copy import deepcopy
+from functools import lru_cache
+from pathlib import Path
+
+import numpy as np
+import torch
+
+from ultralytics.data.augment import LetterBox
+from ultralytics.utils import LOGGER, SimpleClass, ops
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.plotting import Annotator, colors, save_one_box
+from ultralytics.utils.torch_utils import smart_inference_mode
+
+
+class BaseTensor(SimpleClass):
+    """
+    Base tensor class with additional methods for easy manipulation and device handling.
+
+    Attributes:
+        data (torch.Tensor | np.ndarray): Prediction data such as bounding boxes, masks, or keypoints.
+        orig_shape (Tuple[int, int]): Original shape of the image, typically in the format (height, width).
+
+    Methods:
+        cpu: Return a copy of the tensor stored in CPU memory.
+        numpy: Returns a copy of the tensor as a numpy array.
+        cuda: Moves the tensor to GPU memory, returning a new instance if necessary.
+        to: Return a copy of the tensor with the specified device and dtype.
+
+    Examples:
+        >>> import torch
+        >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+        >>> orig_shape = (720, 1280)
+        >>> base_tensor = BaseTensor(data, orig_shape)
+        >>> cpu_tensor = base_tensor.cpu()
+        >>> numpy_array = base_tensor.numpy()
+        >>> gpu_tensor = base_tensor.cuda()
+    """
+
+    def __init__(self, data, orig_shape) -> None:
+        """
+        Initialize BaseTensor with prediction data and the original shape of the image.
+
+        Args:
+            data (torch.Tensor | np.ndarray): Prediction data such as bounding boxes, masks, or keypoints.
+            orig_shape (Tuple[int, int]): Original shape of the image in (height, width) format.
+
+        Examples:
+            >>> import torch
+            >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+            >>> orig_shape = (720, 1280)
+            >>> base_tensor = BaseTensor(data, orig_shape)
+        """
+        assert isinstance(data, (torch.Tensor, np.ndarray)), "data must be torch.Tensor or np.ndarray"
+        self.data = data
+        self.orig_shape = orig_shape
+
+    @property
+    def shape(self):
+        """
+        Returns the shape of the underlying data tensor.
+
+        Returns:
+            (Tuple[int, ...]): The shape of the data tensor.
+
+        Examples:
+            >>> data = torch.rand(100, 4)
+            >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+            >>> print(base_tensor.shape)
+            (100, 4)
+        """
+        return self.data.shape
+
+    def cpu(self):
+        """
+        Returns a copy of the tensor stored in CPU memory.
+
+        Returns:
+            (BaseTensor): A new BaseTensor object with the data tensor moved to CPU memory.
+
+        Examples:
+            >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]]).cuda()
+            >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+            >>> cpu_tensor = base_tensor.cpu()
+            >>> isinstance(cpu_tensor, BaseTensor)
+            True
+            >>> cpu_tensor.data.device
+            device(type='cpu')
+        """
+        return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.cpu(), self.orig_shape)
+
+    def numpy(self):
+        """
+        Returns a copy of the tensor as a numpy array.
+
+        Returns:
+            (np.ndarray): A numpy array containing the same data as the original tensor.
+
+        Examples:
+            >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+            >>> orig_shape = (720, 1280)
+            >>> base_tensor = BaseTensor(data, orig_shape)
+            >>> numpy_array = base_tensor.numpy()
+            >>> print(type(numpy_array))
+            <class 'numpy.ndarray'>
+        """
+        return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.numpy(), self.orig_shape)
+
+    def cuda(self):
+        """
+        Moves the tensor to GPU memory.
+
+        Returns:
+            (BaseTensor): A new BaseTensor instance with the data moved to GPU memory if it's not already a
+                numpy array, otherwise returns self.
+
+        Examples:
+            >>> import torch
+            >>> from ultralytics.engine.results import BaseTensor
+            >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+            >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+            >>> gpu_tensor = base_tensor.cuda()
+            >>> print(gpu_tensor.data.device)
+            cuda:0
+        """
+        return self.__class__(torch.as_tensor(self.data).cuda(), self.orig_shape)
+
+    def to(self, *args, **kwargs):
+        """
+        Return a copy of the tensor with the specified device and dtype.
+
+        Args:
+            *args (Any): Variable length argument list to be passed to torch.Tensor.to().
+            **kwargs (Any): Arbitrary keyword arguments to be passed to torch.Tensor.to().
+
+        Returns:
+            (BaseTensor): A new BaseTensor instance with the data moved to the specified device and/or dtype.
+
+        Examples:
+            >>> base_tensor = BaseTensor(torch.randn(3, 4), orig_shape=(480, 640))
+            >>> cuda_tensor = base_tensor.to("cuda")
+            >>> float16_tensor = base_tensor.to(dtype=torch.float16)
+        """
+        return self.__class__(torch.as_tensor(self.data).to(*args, **kwargs), self.orig_shape)
+
+    def __len__(self):  # override len(results)
+        """
+        Returns the length of the underlying data tensor.
+
+        Returns:
+            (int): The number of elements in the first dimension of the data tensor.
+
+        Examples:
+            >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+            >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+            >>> len(base_tensor)
+            2
+        """
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        """
+        Returns a new BaseTensor instance containing the specified indexed elements of the data tensor.
+
+        Args:
+            idx (int | List[int] | torch.Tensor): Index or indices to select from the data tensor.
+
+        Returns:
+            (BaseTensor): A new BaseTensor instance containing the indexed data.
+
+        Examples:
+            >>> data = torch.tensor([[1, 2, 3], [4, 5, 6]])
+            >>> base_tensor = BaseTensor(data, orig_shape=(720, 1280))
+            >>> result = base_tensor[0]  # Select the first row
+            >>> print(result.data)
+            tensor([1, 2, 3])
+        """
+        return self.__class__(self.data[idx], self.orig_shape)
+
+
+class Results(SimpleClass):
+    """
+    A class for storing and manipulating inference results.
+
+    This class encapsulates the functionality for handling detection, segmentation, pose estimation,
+    and classification results from YOLO models.
+
+    Attributes:
+        orig_img (numpy.ndarray): Original image as a numpy array.
+        orig_shape (Tuple[int, int]): Original image shape in (height, width) format.
+        boxes (Boxes | None): Object containing detection bounding boxes.
+        masks (Masks | None): Object containing detection masks.
+        probs (Probs | None): Object containing class probabilities for classification tasks.
+        keypoints (Keypoints | None): Object containing detected keypoints for each object.
+        obb (OBB | None): Object containing oriented bounding boxes.
+        speed (Dict[str, float | None]): Dictionary of preprocess, inference, and postprocess speeds.
+        names (Dict[int, str]): Dictionary mapping class IDs to class names.
+        path (str): Path to the image file.
+        _keys (Tuple[str, ...]): Tuple of attribute names for internal use.
+
+    Methods:
+        update: Updates object attributes with new detection results.
+        cpu: Returns a copy of the Results object with all tensors on CPU memory.
+        numpy: Returns a copy of the Results object with all tensors as numpy arrays.
+        cuda: Returns a copy of the Results object with all tensors on GPU memory.
+        to: Returns a copy of the Results object with tensors on a specified device and dtype.
+        new: Returns a new Results object with the same image, path, and names.
+        plot: Plots detection results on an input image, returning an annotated image.
+        show: Shows annotated results on screen.
+        save: Saves annotated results to file.
+        verbose: Returns a log string for each task, detailing detections and classifications.
+        save_txt: Saves detection results to a text file.
+        save_crop: Saves cropped detection images.
+        tojson: Converts detection results to JSON format.
+
+    Examples:
+        >>> results = model("path/to/image.jpg")
+        >>> for result in results:
+        ...     print(result.boxes)  # Print detection boxes
+        ...     result.show()  # Display the annotated image
+        ...     result.save(filename="result.jpg")  # Save annotated image
+    """
+
+    def __init__(
+        self, orig_img, path, names, boxes=None, masks=None, probs=None, keypoints=None, obb=None, speed=None
+    ) -> None:
+        """
+        Initialize the Results class for storing and manipulating inference results.
+
+        Args:
+            orig_img (numpy.ndarray): The original image as a numpy array.
+            path (str): The path to the image file.
+            names (Dict): A dictionary of class names.
+            boxes (torch.Tensor | None): A 2D tensor of bounding box coordinates for each detection.
+            masks (torch.Tensor | None): A 3D tensor of detection masks, where each mask is a binary image.
+            probs (torch.Tensor | None): A 1D tensor of probabilities of each class for classification task.
+            keypoints (torch.Tensor | None): A 2D tensor of keypoint coordinates for each detection.
+            obb (torch.Tensor | None): A 2D tensor of oriented bounding box coordinates for each detection.
+            speed (Dict | None): A dictionary containing preprocess, inference, and postprocess speeds (ms/image).
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> result = results[0]  # Get the first result
+            >>> boxes = result.boxes  # Get the boxes for the first result
+            >>> masks = result.masks  # Get the masks for the first result
+
+        Notes:
+            For the default pose model, keypoint indices for human body pose estimation are:
+            0: Nose, 1: Left Eye, 2: Right Eye, 3: Left Ear, 4: Right Ear
+            5: Left Shoulder, 6: Right Shoulder, 7: Left Elbow, 8: Right Elbow
+            9: Left Wrist, 10: Right Wrist, 11: Left Hip, 12: Right Hip
+            13: Left Knee, 14: Right Knee, 15: Left Ankle, 16: Right Ankle
+        """
+        self.orig_img = orig_img
+        self.orig_shape = orig_img.shape[:2]
+        self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None  # native size boxes
+        self.masks = Masks(masks, self.orig_shape) if masks is not None else None  # native size or imgsz masks
+        self.probs = Probs(probs) if probs is not None else None
+        self.keypoints = Keypoints(keypoints, self.orig_shape) if keypoints is not None else None
+        self.obb = OBB(obb, self.orig_shape) if obb is not None else None
+        self.speed = speed if speed is not None else {"preprocess": None, "inference": None, "postprocess": None}
+        self.names = names
+        self.path = path
+        self.save_dir = None
+        self._keys = "boxes", "masks", "probs", "keypoints", "obb"
+
+    def __getitem__(self, idx):
+        """
+        Return a Results object for a specific index of inference results.
+
+        Args:
+            idx (int | slice): Index or slice to retrieve from the Results object.
+
+        Returns:
+            (Results): A new Results object containing the specified subset of inference results.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")  # Perform inference
+            >>> single_result = results[0]  # Get the first result
+            >>> subset_results = results[1:4]  # Get a slice of results
+        """
+        return self._apply("__getitem__", idx)
+
+    def __len__(self):
+        """
+        Return the number of detections in the Results object.
+
+        Returns:
+            (int): The number of detections, determined by the length of the first non-empty attribute
+                (boxes, masks, probs, keypoints, or obb).
+
+        Examples:
+            >>> results = Results(orig_img, path, names, boxes=torch.rand(5, 4))
+            >>> len(results)
+            5
+        """
+        for k in self._keys:
+            v = getattr(self, k)
+            if v is not None:
+                return len(v)
+
+    def update(self, boxes=None, masks=None, probs=None, obb=None):
+        """
+        Updates the Results object with new detection data.
+
+        This method allows updating the boxes, masks, probabilities, and oriented bounding boxes (OBB) of the
+        Results object. It ensures that boxes are clipped to the original image shape.
+
+        Args:
+            boxes (torch.Tensor | None): A tensor of shape (N, 6) containing bounding box coordinates and
+                confidence scores. The format is (x1, y1, x2, y2, conf, class).
+            masks (torch.Tensor | None): A tensor of shape (N, H, W) containing segmentation masks.
+            probs (torch.Tensor | None): A tensor of shape (num_classes,) containing class probabilities.
+            obb (torch.Tensor | None): A tensor of shape (N, 5) containing oriented bounding box coordinates.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> new_boxes = torch.tensor([[100, 100, 200, 200, 0.9, 0]])
+            >>> results[0].update(boxes=new_boxes)
+        """
+        if boxes is not None:
+            self.boxes = Boxes(ops.clip_boxes(boxes, self.orig_shape), self.orig_shape)
+        if masks is not None:
+            self.masks = Masks(masks, self.orig_shape)
+        if probs is not None:
+            self.probs = probs
+        if obb is not None:
+            self.obb = OBB(obb, self.orig_shape)
+
+    def _apply(self, fn, *args, **kwargs):
+        """
+        Applies a function to all non-empty attributes and returns a new Results object with modified attributes.
+
+        This method is internally called by methods like .to(), .cuda(), .cpu(), etc.
+
+        Args:
+            fn (str): The name of the function to apply.
+            *args (Any): Variable length argument list to pass to the function.
+            **kwargs (Any): Arbitrary keyword arguments to pass to the function.
+
+        Returns:
+            (Results): A new Results object with attributes modified by the applied function.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> for result in results:
+            ...     result_cuda = result.cuda()
+            ...     result_cpu = result.cpu()
+        """
+        r = self.new()
+        for k in self._keys:
+            v = getattr(self, k)
+            if v is not None:
+                setattr(r, k, getattr(v, fn)(*args, **kwargs))
+        return r
+
+    def cpu(self):
+        """
+        Returns a copy of the Results object with all its tensors moved to CPU memory.
+
+        This method creates a new Results object with all tensor attributes (boxes, masks, probs, keypoints, obb)
+        transferred to CPU memory. It's useful for moving data from GPU to CPU for further processing or saving.
+
+        Returns:
+            (Results): A new Results object with all tensor attributes on CPU memory.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")  # Perform inference
+            >>> cpu_result = results[0].cpu()  # Move the first result to CPU
+            >>> print(cpu_result.boxes.device)  # Output: cpu
+        """
+        return self._apply("cpu")
+
+    def numpy(self):
+        """
+        Converts all tensors in the Results object to numpy arrays.
+
+        Returns:
+            (Results): A new Results object with all tensors converted to numpy arrays.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> numpy_result = results[0].numpy()
+            >>> type(numpy_result.boxes.data)
+            <class 'numpy.ndarray'>
+
+        Notes:
+            This method creates a new Results object, leaving the original unchanged. It's useful for
+            interoperability with numpy-based libraries or when CPU-based operations are required.
+        """
+        return self._apply("numpy")
+
+    def cuda(self):
+        """
+        Moves all tensors in the Results object to GPU memory.
+
+        Returns:
+            (Results): A new Results object with all tensors moved to CUDA device.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> cuda_results = results[0].cuda()  # Move first result to GPU
+            >>> for result in results:
+            ...     result_cuda = result.cuda()  # Move each result to GPU
+        """
+        return self._apply("cuda")
+
+    def to(self, *args, **kwargs):
+        """
+        Moves all tensors in the Results object to the specified device and dtype.
+
+        Args:
+            *args (Any): Variable length argument list to be passed to torch.Tensor.to().
+            **kwargs (Any): Arbitrary keyword arguments to be passed to torch.Tensor.to().
+
+        Returns:
+            (Results): A new Results object with all tensors moved to the specified device and dtype.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> result_cuda = results[0].to("cuda")  # Move first result to GPU
+            >>> result_cpu = results[0].to("cpu")  # Move first result to CPU
+            >>> result_half = results[0].to(dtype=torch.float16)  # Convert first result to half precision
+        """
+        return self._apply("to", *args, **kwargs)
+
+    def new(self):
+        """
+        Creates a new Results object with the same image, path, names, and speed attributes.
+
+        Returns:
+            (Results): A new Results object with copied attributes from the original instance.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> new_result = results[0].new()
+        """
+        return Results(orig_img=self.orig_img, path=self.path, names=self.names, speed=self.speed)
+
+    def plot(
+        self,
+        conf=True,
+        line_width=None,
+        font_size=None,
+        font="Arial.ttf",
+        pil=False,
+        img=None,
+        im_gpu=None,
+        kpt_radius=5,
+        kpt_line=True,
+        labels=True,
+        boxes=True,
+        masks=True,
+        probs=True,
+        show=False,
+        save=False,
+        filename=None,
+        color_mode="class",
+    ):
+        """
+        Plots detection results on an input RGB image.
+
+        Args:
+            conf (bool): Whether to plot detection confidence scores.
+            line_width (float | None): Line width of bounding boxes. If None, scaled to image size.
+            font_size (float | None): Font size for text. If None, scaled to image size.
+            font (str): Font to use for text.
+            pil (bool): Whether to return the image as a PIL Image.
+            img (np.ndarray | None): Image to plot on. If None, uses original image.
+            im_gpu (torch.Tensor | None): Normalized image on GPU for faster mask plotting.
+            kpt_radius (int): Radius of drawn keypoints.
+            kpt_line (bool): Whether to draw lines connecting keypoints.
+            labels (bool): Whether to plot labels of bounding boxes.
+            boxes (bool): Whether to plot bounding boxes.
+            masks (bool): Whether to plot masks.
+            probs (bool): Whether to plot classification probabilities.
+            show (bool): Whether to display the annotated image.
+            save (bool): Whether to save the annotated image.
+            filename (str | None): Filename to save image if save is True.
+            color_mode (str): Specify the color mode, e.g., 'instance' or 'class'. Defaults to 'class'.
+
+        Returns:
+            (np.ndarray): Annotated image as a numpy array.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> for result in results:
+            ...     im = result.plot()
+            ...     im.show()
+        """
+        assert color_mode in {"instance", "class"}, f"Expected color_mode='instance' or 'class', not {color_mode}."
+        if img is None and isinstance(self.orig_img, torch.Tensor):
+            img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).to(torch.uint8).cpu().numpy()
+
+        names = self.names
+        is_obb = self.obb is not None
+        pred_boxes, show_boxes = self.obb if is_obb else self.boxes, boxes
+        pred_masks, show_masks = self.masks, masks
+        pred_probs, show_probs = self.probs, probs
+        annotator = Annotator(
+            deepcopy(self.orig_img if img is None else img),
+            line_width,
+            font_size,
+            font,
+            pil or (pred_probs is not None and show_probs),  # Classify tasks default to pil=True
+            example=names,
+        )
+
+        # Plot Segment results
+        if pred_masks and show_masks:
+            if im_gpu is None:
+                img = LetterBox(pred_masks.shape[1:])(image=annotator.result())
+                im_gpu = (
+                    torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device)
+                    .permute(2, 0, 1)
+                    .flip(0)
+                    .contiguous()
+                    / 255
+                )
+            idx = (
+                pred_boxes.id
+                if pred_boxes.id is not None and color_mode == "instance"
+                else pred_boxes.cls
+                if pred_boxes and color_mode == "class"
+                else reversed(range(len(pred_masks)))
+            )
+            annotator.masks(pred_masks.data, colors=[colors(x, True) for x in idx], im_gpu=im_gpu)
+
+        # Plot Detect results
+        if pred_boxes is not None and show_boxes:
+            for i, d in enumerate(reversed(pred_boxes)):
+                c, d_conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())
+                name = ("" if id is None else f"id:{id} ") + names[c]
+                label = (f"{name} {d_conf:.2f}" if conf else name) if labels else None
+                box = d.xyxyxyxy.reshape(-1, 4, 2).squeeze() if is_obb else d.xyxy.squeeze()
+                annotator.box_label(
+                    box,
+                    label,
+                    color=colors(
+                        c
+                        if color_mode == "class"
+                        else id
+                        if id is not None
+                        else i
+                        if color_mode == "instance"
+                        else None,
+                        True,
+                    ),
+                    rotated=is_obb,
+                )
+
+        # Plot Classify results
+        if pred_probs is not None and show_probs:
+            text = ",\n".join(f"{names[j] if names else j} {pred_probs.data[j]:.2f}" for j in pred_probs.top5)
+            x = round(self.orig_shape[0] * 0.03)
+            annotator.text([x, x], text, txt_color=(255, 255, 255))  # TODO: allow setting colors
+
+        # Plot Pose results
+        if self.keypoints is not None:
+            for i, k in enumerate(reversed(self.keypoints.data)):
+                annotator.kpts(
+                    k,
+                    self.orig_shape,
+                    radius=kpt_radius,
+                    kpt_line=kpt_line,
+                    kpt_color=colors(i, True) if color_mode == "instance" else None,
+                )
+
+        # Show results
+        if show:
+            annotator.show(self.path)
+
+        # Save results
+        if save:
+            annotator.save(filename)
+
+        return annotator.result()
+
+    def show(self, *args, **kwargs):
+        """
+        Display the image with annotated inference results.
+
+        This method plots the detection results on the original image and displays it. It's a convenient way to
+        visualize the model's predictions directly.
+
+        Args:
+            *args (Any): Variable length argument list to be passed to the `plot()` method.
+            **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot()` method.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> results[0].show()  # Display the first result
+            >>> for result in results:
+            ...     result.show()  # Display all results
+        """
+        self.plot(show=True, *args, **kwargs)
+
+    def save(self, filename=None, *args, **kwargs):
+        """
+        Saves annotated inference results image to file.
+
+        This method plots the detection results on the original image and saves the annotated image to a file. It
+        utilizes the `plot` method to generate the annotated image and then saves it to the specified filename.
+
+        Args:
+            filename (str | Path | None): The filename to save the annotated image. If None, a default filename
+                is generated based on the original image path.
+            *args (Any): Variable length argument list to be passed to the `plot` method.
+            **kwargs (Any): Arbitrary keyword arguments to be passed to the `plot` method.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> for result in results:
+            ...     result.save("annotated_image.jpg")
+            >>> # Or with custom plot arguments
+            >>> for result in results:
+            ...     result.save("annotated_image.jpg", conf=False, line_width=2)
+        """
+        if not filename:
+            filename = f"results_{Path(self.path).name}"
+        self.plot(save=True, filename=filename, *args, **kwargs)
+        return filename
+
+    def verbose(self):
+        """
+        Returns a log string for each task in the results, detailing detection and classification outcomes.
+
+        This method generates a human-readable string summarizing the detection and classification results. It includes
+        the number of detections for each class and the top probabilities for classification tasks.
+
+        Returns:
+            (str): A formatted string containing a summary of the results. For detection tasks, it includes the
+                number of detections per class. For classification tasks, it includes the top 5 class probabilities.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> for result in results:
+            ...     print(result.verbose())
+            2 persons, 1 car, 3 traffic lights,
+            dog 0.92, cat 0.78, horse 0.64,
+
+        Notes:
+            - If there are no detections, the method returns "(no detections), " for detection tasks.
+            - For classification tasks, it returns the top 5 class probabilities and their corresponding class names.
+            - The returned string is comma-separated and ends with a comma and a space.
+        """
+        log_string = ""
+        probs = self.probs
+        if len(self) == 0:
+            return log_string if probs is not None else f"{log_string}(no detections), "
+        if probs is not None:
+            log_string += f"{', '.join(f'{self.names[j]} {probs.data[j]:.2f}' for j in probs.top5)}, "
+        if boxes := self.boxes:
+            for c in boxes.cls.unique():
+                n = (boxes.cls == c).sum()  # detections per class
+                log_string += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "
+        return log_string
+
+    def save_txt(self, txt_file, save_conf=False):
+        """
+        Save detection results to a text file.
+
+        Args:
+            txt_file (str | Path): Path to the output text file.
+            save_conf (bool): Whether to include confidence scores in the output.
+
+        Returns:
+            (str): Path to the saved text file.
+
+        Examples:
+            >>> from ultralytics import YOLO
+            >>> model = YOLO("yolo11n.pt")
+            >>> results = model("path/to/image.jpg")
+            >>> for result in results:
+            ...     result.save_txt("output.txt")
+
+        Notes:
+            - The file will contain one line per detection or classification with the following structure:
+              - For detections: `class x_center y_center width height`, with confidence appended when save_conf=True
+              - For classifications: `confidence class_name`
+              - For masks and keypoints, the specific formats will vary accordingly.
+            - The function will create the output directory if it does not exist.
+            - If save_conf is False, the confidence scores will be excluded from the output.
+            - Existing contents of the file will not be overwritten; new results will be appended.
+        """
+        is_obb = self.obb is not None
+        boxes = self.obb if is_obb else self.boxes
+        masks = self.masks
+        probs = self.probs
+        kpts = self.keypoints
+        texts = []
+        if probs is not None:
+            # Classify
+            texts.extend(f"{probs.data[j]:.2f} {self.names[j]}" for j in probs.top5)
+        elif boxes:
+            # Detect/segment/pose
+            for j, d in enumerate(boxes):
+                c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item())
+                line = (c, *(d.xyxyxyxyn.view(-1) if is_obb else d.xywhn.view(-1)))
+                if masks:
+                    seg = masks[j].xyn[0].copy().reshape(-1)  # reversed mask.xyn, (n,2) to (n*2)
+                    line = (c, *seg)
+                if kpts is not None:
+                    kpt = torch.cat((kpts[j].xyn, kpts[j].conf[..., None]), 2) if kpts[j].has_visible else kpts[j].xyn
+                    line += (*kpt.reshape(-1).tolist(),)
+                line += (conf,) * save_conf + (() if id is None else (id,))
+                texts.append(("%g " * len(line)).rstrip() % line)
+
+        if texts:
+            Path(txt_file).parent.mkdir(parents=True, exist_ok=True)  # make directory
+            with open(txt_file, "a") as f:
+                f.writelines(text + "\n" for text in texts)
+
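+    # Usage sketch (illustrative): parsing a line written by `save_txt()` for a plain detection
+    # task with `save_conf=True` and no masks, keypoints, or track IDs, i.e. the line format is
+    # "class x_center y_center width height confidence" with normalized coordinates.
+    #
+    #     with open("output.txt") as f:
+    #         for line in f:
+    #             cls_id, xc, yc, w, h, conf = map(float, line.split())
+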
+    def save_crop(self, save_dir, file_name=Path("im.jpg")):
+        """
+        Saves cropped detection images to specified directory.
+
+        This method saves cropped images of detected objects to a specified directory. Each crop is saved in a
+        subdirectory named after the object's class, with the filename based on the input file_name.
+
+        Args:
+            save_dir (str | Path): Directory path where cropped images will be saved.
+            file_name (str | Path): Base filename for the saved cropped images. Default is Path("im.jpg").
+
+        Notes:
+            - This method does not support Classify or Oriented Bounding Box (OBB) tasks.
+            - Crops are saved as 'save_dir/class_name/file_name.jpg'.
+            - The method will create necessary subdirectories if they don't exist.
+            - Original image is copied before cropping to avoid modifying the original.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> for result in results:
+            ...     result.save_crop(save_dir="path/to/crops", file_name="detection")
+        """
+        if self.probs is not None:
+            LOGGER.warning("WARNING ⚠️ Classify task do not support `save_crop`.")
+            return
+        if self.obb is not None:
+            LOGGER.warning("WARNING ⚠️ OBB task do not support `save_crop`.")
+            return
+        for d in self.boxes:
+            save_one_box(
+                d.xyxy,
+                self.orig_img.copy(),
+                file=Path(save_dir) / self.names[int(d.cls)] / Path(file_name).with_suffix(".jpg"),
+                BGR=True,
+            )
+
+    def summary(self, normalize=False, decimals=5):
+        """
+        Converts inference results to a summarized dictionary with optional normalization for box coordinates.
+
+        This method creates a list of detection dictionaries, each containing information about a single
+        detection or classification result. For classification tasks, it returns the top class and its
+        confidence. For detection tasks, it includes class information, bounding box coordinates, and
+        optionally mask segments and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize bounding box coordinates by image dimensions. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+
+        Returns:
+            (List[Dict]): A list of dictionaries, each containing summarized information for a single
+                detection or classification result. The structure of each dictionary varies based on the
+                task type (classification or detection) and available information (boxes, masks, keypoints).
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> summary = results[0].summary()
+            >>> print(summary)
+        """
+        # Create list of detection dictionaries
+        results = []
+        if self.probs is not None:
+            class_id = self.probs.top1
+            results.append(
+                {
+                    "name": self.names[class_id],
+                    "class": class_id,
+                    "confidence": round(self.probs.top1conf.item(), decimals),
+                }
+            )
+            return results
+
+        is_obb = self.obb is not None
+        data = self.obb if is_obb else self.boxes
+        h, w = self.orig_shape if normalize else (1, 1)
+        for i, row in enumerate(data):  # xyxy, track_id if tracking, conf, class_id
+            class_id, conf = int(row.cls), round(row.conf.item(), decimals)
+            box = (row.xyxyxyxy if is_obb else row.xyxy).squeeze().reshape(-1, 2).tolist()
+            xy = {}
+            for j, b in enumerate(box):
+                xy[f"x{j + 1}"] = round(b[0] / w, decimals)
+                xy[f"y{j + 1}"] = round(b[1] / h, decimals)
+            result = {"name": self.names[class_id], "class": class_id, "confidence": conf, "box": xy}
+            if data.is_track:
+                result["track_id"] = int(row.id.item())  # track ID
+            if self.masks:
+                result["segments"] = {
+                    "x": (self.masks.xy[i][:, 0] / w).round(decimals).tolist(),
+                    "y": (self.masks.xy[i][:, 1] / h).round(decimals).tolist(),
+                }
+            if self.keypoints is not None:
+                x, y, visible = self.keypoints[i].data[0].cpu().unbind(dim=1)  # torch Tensor
+                result["keypoints"] = {
+                    "x": (x / w).numpy().round(decimals).tolist(),  # decimals named argument required
+                    "y": (y / h).numpy().round(decimals).tolist(),
+                    "visible": visible.numpy().round(decimals).tolist(),
+                }
+            results.append(result)
+
+        return results
+
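+    # Usage sketch (illustrative): iterating over the list of dictionaries produced by `summary()`
+    # for a detection result; keys follow the structure built above.
+    #
+    #     for det in results[0].summary(normalize=True):
+    #         print(det["name"], det["confidence"], det["box"])  # box is {"x1": ..., "y1": ..., ...}
+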
+    def to_df(self, normalize=False, decimals=5):
+        """
+        Converts detection results to a Pandas DataFrame.
+
+        This method converts the detection results into Pandas DataFrame format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+
+        Returns:
+            (DataFrame): A Pandas DataFrame with one row per detection (or classification) result, containing
+                class, confidence, box coordinates, and any available mask or keypoint data.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> df_result = results[0].to_df()
+            >>> print(df_result)
+        """
+        import pandas as pd  # scope for faster 'import ultralytics'
+
+        return pd.DataFrame(self.summary(normalize=normalize, decimals=decimals))
+
+    def to_csv(self, normalize=False, decimals=5, *args, **kwargs):
+        """
+        Converts detection results to a CSV format.
+
+        This method serializes the detection results into a CSV format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+            *args (Any): Variable length argument list to be passed to pandas.DataFrame.to_csv().
+            **kwargs (Any): Arbitrary keyword arguments to be passed to pandas.DataFrame.to_csv().
+
+
+        Returns:
+            (str): A CSV-formatted string containing the serialized detection results.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> csv_result = results[0].to_csv()
+            >>> print(csv_result)
+        """
+        return self.to_df(normalize=normalize, decimals=decimals).to_csv(*args, **kwargs)
+
+    def to_xml(self, normalize=False, decimals=5, *args, **kwargs):
+        """
+        Converts detection results to XML format.
+
+        This method serializes the detection results into an XML format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+            *args (Any): Variable length argument list to be passed to pandas.DataFrame.to_xml().
+            **kwargs (Any): Arbitrary keyword arguments to be passed to pandas.DataFrame.to_xml().
+
+        Returns:
+            (str): An XML string containing the serialized detection results.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> xml_result = results[0].to_xml()
+            >>> print(xml_result)
+        """
+        check_requirements("lxml")
+        df = self.to_df(normalize=normalize, decimals=decimals)
+        return '<?xml version="1.0" encoding="utf-8"?>\n<root></root>' if df.empty else df.to_xml(*args, **kwargs)
+
+    def tojson(self, normalize=False, decimals=5):
+        """Deprecated version of to_json()."""
+        LOGGER.warning("WARNING ⚠️ 'result.tojson()' is deprecated, replace with 'result.to_json()'.")
+        return self.to_json(normalize, decimals)
+
+    def to_json(self, normalize=False, decimals=5):
+        """
+        Converts detection results to JSON format.
+
+        This method serializes the detection results into a JSON-compatible format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+
+        Returns:
+            (str): A JSON string containing the serialized detection results.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> json_result = results[0].to_json()
+            >>> print(json_result)
+
+        Notes:
+            - For classification tasks, the JSON will contain class probabilities instead of bounding boxes.
+            - For object detection tasks, the JSON will include bounding box coordinates, class names, and
+              confidence scores.
+            - If available, segmentation masks and keypoints will also be included in the JSON output.
+            - The method uses the `summary` method internally to generate the data structure before
+              converting it to JSON.
+        """
+        import json
+
+        return json.dumps(self.summary(normalize=normalize, decimals=decimals), indent=2)
+
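+# Usage sketch (illustrative): the JSON string produced by `Results.to_json()` parses back into
+# the same list-of-dicts structure returned by `Results.summary()`.
+#
+#     import json
+#     records = json.loads(results[0].to_json(normalize=True))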
+
+class Boxes(BaseTensor):
+    """
+    A class for managing and manipulating detection boxes.
+
+    This class provides functionality for handling detection boxes, including their coordinates, confidence scores,
+    class labels, and optional tracking IDs. It supports various box formats and offers methods for easy manipulation
+    and conversion between different coordinate systems.
+
+    Attributes:
+        data (torch.Tensor | numpy.ndarray): The raw tensor containing detection boxes and associated data.
+        orig_shape (Tuple[int, int]): The original image dimensions (height, width).
+        is_track (bool): Indicates whether tracking IDs are included in the box data.
+        xyxy (torch.Tensor | numpy.ndarray): Boxes in [x1, y1, x2, y2] format.
+        conf (torch.Tensor | numpy.ndarray): Confidence scores for each box.
+        cls (torch.Tensor | numpy.ndarray): Class labels for each box.
+        id (torch.Tensor | numpy.ndarray): Tracking IDs for each box (if available).
+        xywh (torch.Tensor | numpy.ndarray): Boxes in [x, y, width, height] format.
+        xyxyn (torch.Tensor | numpy.ndarray): Normalized [x1, y1, x2, y2] boxes relative to orig_shape.
+        xywhn (torch.Tensor | numpy.ndarray): Normalized [x, y, width, height] boxes relative to orig_shape.
+
+    Methods:
+        cpu(): Returns a copy of the object with all tensors on CPU memory.
+        numpy(): Returns a copy of the object with all tensors as numpy arrays.
+        cuda(): Returns a copy of the object with all tensors on GPU memory.
+        to(*args, **kwargs): Returns a copy of the object with tensors on specified device and dtype.
+
+    Examples:
+        >>> import torch
+        >>> boxes_data = torch.tensor([[100, 50, 150, 100, 0.9, 0], [200, 150, 300, 250, 0.8, 1]])
+        >>> orig_shape = (480, 640)  # height, width
+        >>> boxes = Boxes(boxes_data, orig_shape)
+        >>> print(boxes.xyxy)
+        >>> print(boxes.conf)
+        >>> print(boxes.cls)
+        >>> print(boxes.xywhn)
+    """
+
+    def __init__(self, boxes, orig_shape) -> None:
+        """
+        Initialize the Boxes class with detection box data and the original image shape.
+
+        This class manages detection boxes, providing easy access and manipulation of box coordinates,
+        confidence scores, class identifiers, and optional tracking IDs. It supports multiple formats
+        for box coordinates, including both absolute and normalized forms.
+
+        Args:
+            boxes (torch.Tensor | np.ndarray): A tensor or numpy array with detection boxes of shape
+                (num_boxes, 6) or (num_boxes, 7). Columns contain [x1, y1, x2, y2, confidence, class] or,
+                when tracking is enabled, [x1, y1, x2, y2, track_id, confidence, class].
+            orig_shape (Tuple[int, int]): The original image shape as (height, width). Used for normalization.
+
+        Attributes:
+            data (torch.Tensor): The raw tensor containing detection boxes and their associated data.
+            orig_shape (Tuple[int, int]): The original image size, used for normalization.
+            is_track (bool): Indicates whether tracking IDs are included in the box data.
+
+        Examples:
+            >>> import torch
+            >>> boxes = torch.tensor([[100, 50, 150, 100, 0.9, 0]])
+            >>> orig_shape = (480, 640)
+            >>> detection_boxes = Boxes(boxes, orig_shape)
+            >>> print(detection_boxes.xyxy)
+            tensor([[100.,  50., 150., 100.]])
+        """
+        if boxes.ndim == 1:
+            boxes = boxes[None, :]
+        n = boxes.shape[-1]
+        assert n in {6, 7}, f"expected 6 or 7 values but got {n}"  # xyxy, track_id, conf, cls
+        super().__init__(boxes, orig_shape)
+        self.is_track = n == 7
+        self.orig_shape = orig_shape
+
+    @property
+    def xyxy(self):
+        """
+        Returns bounding boxes in [x1, y1, x2, y2] format.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor or numpy array of shape (n, 4) containing bounding box
+                coordinates in [x1, y1, x2, y2] format, where n is the number of boxes.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> boxes = results[0].boxes
+            >>> xyxy = boxes.xyxy
+            >>> print(xyxy)
+        """
+        return self.data[:, :4]
+
+    @property
+    def conf(self):
+        """
+        Returns the confidence scores for each detection box.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A 1D tensor or array containing confidence scores for each detection,
+                with shape (N,) where N is the number of detections.
+
+        Examples:
+            >>> boxes = Boxes(torch.tensor([[10, 20, 30, 40, 0.9, 0]]), orig_shape=(100, 100))
+            >>> conf_scores = boxes.conf
+            >>> print(conf_scores)
+            tensor([0.9000])
+        """
+        return self.data[:, -2]
+
+    @property
+    def cls(self):
+        """
+        Returns the class ID tensor representing category predictions for each bounding box.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the class IDs for each detection box.
+                The shape is (N,), where N is the number of boxes.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> boxes = results[0].boxes
+            >>> class_ids = boxes.cls
+            >>> print(class_ids)  # tensor([0., 2., 1.])
+        """
+        return self.data[:, -1]
+
+    @property
+    def id(self):
+        """
+        Returns the tracking IDs for each detection box if available.
+
+        Returns:
+            (torch.Tensor | None): A tensor containing tracking IDs for each box if tracking is enabled,
+                otherwise None. Shape is (N,) where N is the number of boxes.
+
+        Examples:
+            >>> results = model.track("path/to/video.mp4")
+            >>> for result in results:
+            ...     boxes = result.boxes
+            ...     if boxes.is_track:
+            ...         track_ids = boxes.id
+            ...         print(f"Tracking IDs: {track_ids}")
+            ...     else:
+            ...         print("Tracking is not enabled for these boxes.")
+
+        Notes:
+            - This property is only available when tracking is enabled (i.e., when `is_track` is True).
+            - The tracking IDs are typically used to associate detections across multiple frames in video analysis.
+        """
+        return self.data[:, -3] if self.is_track else None
+
+    @property
+    @lru_cache(maxsize=2)  # maxsize 1 should suffice
+    def xywh(self):
+        """
+        Convert bounding boxes from [x1, y1, x2, y2] format to [x, y, width, height] format.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): Boxes in [x_center, y_center, width, height] format with shape (N, 4),
+                where N is the number of boxes, (x_center, y_center) is the center point of each box, and
+                (width, height) are its dimensions.
+
+        Examples:
+            >>> boxes = Boxes(torch.tensor([[100, 50, 150, 100], [200, 150, 300, 250]]), orig_shape=(480, 640))
+            >>> xywh = boxes.xywh
+            >>> print(xywh)
+            tensor([[100.0000,  50.0000,  50.0000,  50.0000],
+                    [200.0000, 150.0000, 100.0000, 100.0000]])
+        """
+        return ops.xyxy2xywh(self.xyxy)
+
+    @property
+    @lru_cache(maxsize=2)
+    def xyxyn(self):
+        """
+        Returns normalized bounding box coordinates relative to the original image size.
+
+        This property calculates and returns the bounding box coordinates in [x1, y1, x2, y2] format,
+        normalized to the range [0, 1] based on the original image dimensions.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): Normalized bounding box coordinates with shape (N, 4), where N is
+                the number of boxes. Each row contains [x1, y1, x2, y2] values normalized to [0, 1].
+
+        Examples:
+            >>> boxes = Boxes(torch.tensor([[100, 50, 300, 400, 0.9, 0]]), orig_shape=(480, 640))
+            >>> normalized = boxes.xyxyn
+            >>> print(normalized)
+            tensor([[0.1562, 0.1042, 0.4688, 0.8333]])
+        """
+        xyxy = self.xyxy.clone() if isinstance(self.xyxy, torch.Tensor) else np.copy(self.xyxy)
+        xyxy[..., [0, 2]] /= self.orig_shape[1]
+        xyxy[..., [1, 3]] /= self.orig_shape[0]
+        return xyxy
+
+    @property
+    @lru_cache(maxsize=2)
+    def xywhn(self):
+        """
+        Returns normalized bounding boxes in [x, y, width, height] format.
+
+        This property calculates and returns the normalized bounding box coordinates in the format
+        [x_center, y_center, width, height], where all values are relative to the original image dimensions.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): Normalized bounding boxes with shape (N, 4), where N is the
+                number of boxes. Each row contains [x_center, y_center, width, height] values normalized
+                to [0, 1] based on the original image dimensions.
+
+        Examples:
+            >>> boxes = Boxes(torch.tensor([[100, 50, 150, 100, 0.9, 0]]), orig_shape=(480, 640))
+            >>> normalized = boxes.xywhn
+            >>> print(normalized)
+            tensor([[0.1953, 0.1562, 0.0781, 0.1042]])
+        """
+        xywh = ops.xyxy2xywh(self.xyxy)
+        xywh[..., [0, 2]] /= self.orig_shape[1]
+        xywh[..., [1, 3]] /= self.orig_shape[0]
+        return xywh
+
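+# Usage sketch (illustrative): with tracking enabled a box row carries 7 values laid out as
+# [x1, y1, x2, y2, track_id, conf, cls], which is why `conf`, `cls`, and `id` above index
+# columns -2, -1, and -3 respectively.
+#
+#     tracked = Boxes(torch.tensor([[100.0, 50.0, 150.0, 100.0, 7.0, 0.9, 0.0]]), orig_shape=(480, 640))
+#     tracked.is_track, int(tracked.id.item()), float(tracked.conf.item())  # (True, 7, 0.9)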
+
+class Masks(BaseTensor):
+    """
+    A class for storing and manipulating detection masks.
+
+    This class extends BaseTensor and provides functionality for handling segmentation masks,
+    including methods for converting between pixel and normalized coordinates.
+
+    Attributes:
+        data (torch.Tensor | numpy.ndarray): The raw tensor or array containing mask data.
+        orig_shape (tuple): Original image shape in (height, width) format.
+        xy (List[numpy.ndarray]): A list of segments in pixel coordinates.
+        xyn (List[numpy.ndarray]): A list of normalized segments.
+
+    Methods:
+        cpu(): Returns a copy of the Masks object with the mask tensor on CPU memory.
+        numpy(): Returns a copy of the Masks object with the mask tensor as a numpy array.
+        cuda(): Returns a copy of the Masks object with the mask tensor on GPU memory.
+        to(*args, **kwargs): Returns a copy of the Masks object with the mask tensor on specified device and dtype.
+
+    Examples:
+        >>> masks_data = torch.rand(1, 160, 160)
+        >>> orig_shape = (720, 1280)
+        >>> masks = Masks(masks_data, orig_shape)
+        >>> pixel_coords = masks.xy
+        >>> normalized_coords = masks.xyn
+    """
+
+    def __init__(self, masks, orig_shape) -> None:
+        """
+        Initialize the Masks class with detection mask data and the original image shape.
+
+        Args:
+            masks (torch.Tensor | np.ndarray): Detection masks with shape (num_masks, height, width).
+            orig_shape (tuple): The original image shape as (height, width). Used for normalization.
+
+        Examples:
+            >>> import torch
+            >>> from ultralytics.engine.results import Masks
+            >>> masks = torch.rand(10, 160, 160)  # 10 masks of 160x160 resolution
+            >>> orig_shape = (720, 1280)  # Original image shape
+            >>> mask_obj = Masks(masks, orig_shape)
+        """
+        if masks.ndim == 2:
+            masks = masks[None, :]
+        super().__init__(masks, orig_shape)
+
+    @property
+    @lru_cache(maxsize=1)
+    def xyn(self):
+        """
+        Returns normalized xy-coordinates of the segmentation masks.
+
+        This property calculates and caches the normalized xy-coordinates of the segmentation masks. The coordinates
+        are normalized relative to the original image shape.
+
+        Returns:
+            (List[numpy.ndarray]): A list of numpy arrays, where each array contains the normalized xy-coordinates
+                of a single segmentation mask. Each array has shape (N, 2), where N is the number of points in the
+                mask contour.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> masks = results[0].masks
+            >>> normalized_coords = masks.xyn
+            >>> print(normalized_coords[0])  # Normalized coordinates of the first mask
+        """
+        return [
+            ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=True)
+            for x in ops.masks2segments(self.data)
+        ]
+
+    @property
+    @lru_cache(maxsize=1)
+    def xy(self):
+        """
+        Returns the [x, y] pixel coordinates for each segment in the mask tensor.
+
+        This property calculates and returns a list of pixel coordinates for each segmentation mask in the
+        Masks object. The coordinates are scaled to match the original image dimensions.
+
+        Returns:
+            (List[numpy.ndarray]): A list of numpy arrays, where each array contains the [x, y] pixel
+                coordinates for a single segmentation mask. Each array has shape (N, 2), where N is the
+                number of points in the segment.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> masks = results[0].masks
+            >>> xy_coords = masks.xy
+            >>> print(len(xy_coords))  # Number of masks
+            >>> print(xy_coords[0].shape)  # Shape of first mask's coordinates
+        """
+        return [
+            ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=False)
+            for x in ops.masks2segments(self.data)
+        ]
+
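+# Usage sketch (illustrative, assumes OpenCV is installed): the pixel polygons from `Masks.xy`
+# can be drawn directly onto the original image.
+#
+#     import cv2
+#     import numpy as np
+#     for polygon in results[0].masks.xy:
+#         cv2.polylines(results[0].orig_img, [polygon.astype(np.int32)], True, (0, 255, 0), 2)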
+
+class Keypoints(BaseTensor):
+    """
+    A class for storing and manipulating detection keypoints.
+
+    This class encapsulates functionality for handling keypoint data, including coordinate manipulation,
+    normalization, and confidence values.
+
+    Attributes:
+        data (torch.Tensor): The raw tensor containing keypoint data.
+        orig_shape (Tuple[int, int]): The original image dimensions (height, width).
+        has_visible (bool): Indicates whether visibility information is available for keypoints.
+        xy (torch.Tensor): Keypoint coordinates in [x, y] format.
+        xyn (torch.Tensor): Normalized keypoint coordinates in [x, y] format, relative to orig_shape.
+        conf (torch.Tensor): Confidence values for each keypoint, if available.
+
+    Methods:
+        cpu(): Returns a copy of the keypoints tensor on CPU memory.
+        numpy(): Returns a copy of the keypoints tensor as a numpy array.
+        cuda(): Returns a copy of the keypoints tensor on GPU memory.
+        to(*args, **kwargs): Returns a copy of the keypoints tensor with specified device and dtype.
+
+    Examples:
+        >>> import torch
+        >>> from ultralytics.engine.results import Keypoints
+        >>> keypoints_data = torch.rand(1, 17, 3)  # 1 detection, 17 keypoints, (x, y, conf)
+        >>> orig_shape = (480, 640)  # Original image shape (height, width)
+        >>> keypoints = Keypoints(keypoints_data, orig_shape)
+        >>> print(keypoints.xy.shape)  # Access xy coordinates
+        >>> print(keypoints.conf)  # Access confidence values
+        >>> keypoints_cpu = keypoints.cpu()  # Move keypoints to CPU
+    """
+
+    @smart_inference_mode()  # avoid keypoints < conf in-place error
+    def __init__(self, keypoints, orig_shape) -> None:
+        """
+        Initializes the Keypoints object with detection keypoints and original image dimensions.
+
+        This method processes the input keypoints tensor, handling both 2D and 3D formats. For 3D tensors
+        (x, y, confidence), it masks out low-confidence keypoints by setting their coordinates to zero.
+
+        Args:
+            keypoints (torch.Tensor): A tensor containing keypoint data. Shape can be either:
+                - (num_objects, num_keypoints, 2) for x, y coordinates only
+                - (num_objects, num_keypoints, 3) for x, y coordinates and confidence scores
+            orig_shape (Tuple[int, int]): The original image dimensions (height, width).
+
+        Examples:
+            >>> kpts = torch.rand(1, 17, 3)  # 1 object, 17 keypoints (COCO format), x,y,conf
+            >>> orig_shape = (720, 1280)  # Original image height, width
+            >>> keypoints = Keypoints(kpts, orig_shape)
+        """
+        if keypoints.ndim == 2:
+            keypoints = keypoints[None, :]
+        if keypoints.shape[2] == 3:  # x, y, conf
+            mask = keypoints[..., 2] < 0.5  # points with conf < 0.5 (not visible)
+            keypoints[..., :2][mask] = 0
+        super().__init__(keypoints, orig_shape)
+        self.has_visible = self.data.shape[-1] == 3
+
+    @property
+    @lru_cache(maxsize=1)
+    def xy(self):
+        """
+        Returns x, y coordinates of keypoints.
+
+        Returns:
+            (torch.Tensor): A tensor containing the x, y coordinates of keypoints with shape (N, K, 2), where N is
+                the number of detections and K is the number of keypoints per detection.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> keypoints = results[0].keypoints
+            >>> xy = keypoints.xy
+            >>> print(xy.shape)  # (N, K, 2)
+            >>> print(xy[0])  # x, y coordinates of keypoints for first detection
+
+        Notes:
+            - The returned coordinates are in pixel units relative to the original image dimensions.
+            - If keypoints were initialized with confidence values, keypoints with confidence < 0.5 have their coordinates zeroed.
+            - This property uses LRU caching to improve performance on repeated access.
+        """
+        return self.data[..., :2]
+
+    @property
+    @lru_cache(maxsize=1)
+    def xyn(self):
+        """
+        Returns normalized coordinates (x, y) of keypoints relative to the original image size.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor or array of shape (N, K, 2) containing normalized keypoint
+                coordinates, where N is the number of instances, K is the number of keypoints, and the last
+                dimension contains [x, y] values in the range [0, 1].
+
+        Examples:
+            >>> keypoints = Keypoints(torch.rand(1, 17, 2), orig_shape=(480, 640))
+            >>> normalized_kpts = keypoints.xyn
+            >>> print(normalized_kpts.shape)
+            torch.Size([1, 17, 2])
+        """
+        xy = self.xy.clone() if isinstance(self.xy, torch.Tensor) else np.copy(self.xy)
+        xy[..., 0] /= self.orig_shape[1]
+        xy[..., 1] /= self.orig_shape[0]
+        return xy
+
+    @property
+    @lru_cache(maxsize=1)
+    def conf(self):
+        """
+        Returns confidence values for each keypoint.
+
+        Returns:
+            (torch.Tensor | None): A tensor containing confidence scores for each keypoint if available,
+                otherwise None. Shape is (num_detections, num_keypoints) for batched data or (num_keypoints,)
+                for single detection.
+
+        Examples:
+            >>> keypoints = Keypoints(torch.rand(1, 17, 3), orig_shape=(640, 640))  # 1 detection, 17 keypoints
+            >>> conf = keypoints.conf
+            >>> print(conf.shape)  # torch.Size([1, 17])
+        """
+        return self.data[..., 2] if self.has_visible else None
+
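+# Usage sketch (illustrative): keypoints whose confidence is below 0.5 have their x, y
+# coordinates zeroed by the constructor above.
+#
+#     kpts = torch.tensor([[[10.0, 20.0, 0.9], [30.0, 40.0, 0.1]]])  # second point has low confidence
+#     Keypoints(kpts, orig_shape=(480, 640)).xy  # tensor([[[10., 20.], [0., 0.]]])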
+
+class Probs(BaseTensor):
+    """
+    A class for storing and manipulating classification probabilities.
+
+    This class extends BaseTensor and provides methods for accessing and manipulating
+    classification probabilities, including top-1 and top-5 predictions.
+
+    Attributes:
+        data (torch.Tensor | numpy.ndarray): The raw tensor or array containing classification probabilities.
+        orig_shape (tuple | None): The original image shape as (height, width). Not used in this class.
+        top1 (int): Index of the class with the highest probability.
+        top5 (List[int]): Indices of the top 5 classes by probability.
+        top1conf (torch.Tensor | numpy.ndarray): Confidence score of the top 1 class.
+        top5conf (torch.Tensor | numpy.ndarray): Confidence scores of the top 5 classes.
+
+    Methods:
+        cpu(): Returns a copy of the probabilities tensor on CPU memory.
+        numpy(): Returns a copy of the probabilities tensor as a numpy array.
+        cuda(): Returns a copy of the probabilities tensor on GPU memory.
+        to(*args, **kwargs): Returns a copy of the probabilities tensor with specified device and dtype.
+
+    Examples:
+        >>> probs = torch.tensor([0.1, 0.3, 0.6])
+        >>> p = Probs(probs)
+        >>> print(p.top1)
+        2
+        >>> print(p.top5)
+        [2, 1, 0]
+        >>> print(p.top1conf)
+        tensor(0.6000)
+        >>> print(p.top5conf)
+        tensor([0.6000, 0.3000, 0.1000])
+    """
+
+    def __init__(self, probs, orig_shape=None) -> None:
+        """
+        Initialize the Probs class with classification probabilities.
+
+        This class stores and manages classification probabilities, providing easy access to top predictions and their
+        confidences.
+
+        Args:
+            probs (torch.Tensor | np.ndarray): A 1D tensor or array of classification probabilities.
+            orig_shape (tuple | None): The original image shape as (height, width). Not used in this class but kept for
+                consistency with other result classes.
+
+        Attributes:
+            data (torch.Tensor | np.ndarray): The raw tensor or array containing classification probabilities.
+            top1 (int): Index of the top 1 class.
+            top5 (List[int]): Indices of the top 5 classes.
+            top1conf (torch.Tensor | np.ndarray): Confidence of the top 1 class.
+            top5conf (torch.Tensor | np.ndarray): Confidences of the top 5 classes.
+
+        Examples:
+            >>> import torch
+            >>> probs = torch.tensor([0.1, 0.3, 0.2, 0.4])
+            >>> p = Probs(probs)
+            >>> print(p.top1)
+            3
+            >>> print(p.top1conf)
+            tensor(0.4000)
+            >>> print(p.top5)
+            [3, 1, 2, 0]
+        """
+        super().__init__(probs, orig_shape)
+
+    @property
+    @lru_cache(maxsize=1)
+    def top1(self):
+        """
+        Returns the index of the class with the highest probability.
+
+        Returns:
+            (int): Index of the class with the highest probability.
+
+        Examples:
+            >>> probs = Probs(torch.tensor([0.1, 0.3, 0.6]))
+            >>> probs.top1
+            2
+        """
+        return int(self.data.argmax())
+
+    @property
+    @lru_cache(maxsize=1)
+    def top5(self):
+        """
+        Returns the indices of the top 5 class probabilities.
+
+        Returns:
+            (List[int]): A list containing the indices of the top 5 class probabilities, sorted in descending order.
+
+        Examples:
+            >>> probs = Probs(torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5]))
+            >>> print(probs.top5)
+            [4, 3, 2, 1, 0]
+        """
+        return (-self.data).argsort(0)[:5].tolist()  # this way works with both torch and numpy.
+
+    @property
+    @lru_cache(maxsize=1)
+    def top1conf(self):
+        """
+        Returns the confidence score of the highest probability class.
+
+        This property retrieves the confidence score (probability) of the class with the highest predicted probability
+        from the classification results.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor containing the confidence score of the top 1 class.
+
+        Examples:
+            >>> results = model("image.jpg")  # classify an image
+            >>> probs = results[0].probs  # get classification probabilities
+            >>> top1_confidence = probs.top1conf  # get confidence of top 1 class
+            >>> print(f"Top 1 class confidence: {top1_confidence.item():.4f}")
+        """
+        return self.data[self.top1]
+
+    @property
+    @lru_cache(maxsize=1)
+    def top5conf(self):
+        """
+        Returns confidence scores for the top 5 classification predictions.
+
+        This property retrieves the confidence scores corresponding to the top 5 class probabilities
+        predicted by the model. It provides a quick way to access the most likely class predictions
+        along with their associated confidence levels.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor or array containing the confidence scores for the
+                top 5 predicted classes, sorted in descending order of probability.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> probs = results[0].probs
+            >>> top5_conf = probs.top5conf
+            >>> print(top5_conf)  # Prints confidence scores for top 5 classes
+        """
+        return self.data[self.top5]
+
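+# Usage sketch (illustrative): `(-data).argsort(0)[:5].tolist()` behaves the same for torch
+# tensors and numpy arrays, which is why `top5` above avoids the torch-specific `topk`.
+#
+#     Probs(np.array([0.1, 0.3, 0.6])).top5  # [2, 1, 0]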
+
+class OBB(BaseTensor):
+    """
+    A class for storing and manipulating Oriented Bounding Boxes (OBB).
+
+    This class provides functionality to handle oriented bounding boxes, including conversion between
+    different formats, normalization, and access to various properties of the boxes.
+
+    Attributes:
+        data (torch.Tensor): The raw OBB tensor containing box coordinates and associated data.
+        orig_shape (tuple): Original image size as (height, width).
+        is_track (bool): Indicates whether tracking IDs are included in the box data.
+        xywhr (torch.Tensor | numpy.ndarray): Boxes in [x_center, y_center, width, height, rotation] format.
+        conf (torch.Tensor | numpy.ndarray): Confidence scores for each box.
+        cls (torch.Tensor | numpy.ndarray): Class labels for each box.
+        id (torch.Tensor | numpy.ndarray): Tracking IDs for each box, if available.
+        xyxyxyxy (torch.Tensor | numpy.ndarray): Boxes in 8-point [x1, y1, x2, y2, x3, y3, x4, y4] format.
+        xyxyxyxyn (torch.Tensor | numpy.ndarray): Normalized 8-point coordinates relative to orig_shape.
+        xyxy (torch.Tensor | numpy.ndarray): Axis-aligned bounding boxes in [x1, y1, x2, y2] format.
+
+    Methods:
+        cpu(): Returns a copy of the OBB object with all tensors on CPU memory.
+        numpy(): Returns a copy of the OBB object with all tensors as numpy arrays.
+        cuda(): Returns a copy of the OBB object with all tensors on GPU memory.
+        to(*args, **kwargs): Returns a copy of the OBB object with tensors on specified device and dtype.
+
+    Examples:
+        >>> boxes = torch.tensor([[100, 50, 150, 100, 30, 0.9, 0]])  # xywhr, conf, cls
+        >>> obb = OBB(boxes, orig_shape=(480, 640))
+        >>> print(obb.xyxyxyxy)
+        >>> print(obb.conf)
+        >>> print(obb.cls)
+    """
+
+    def __init__(self, boxes, orig_shape) -> None:
+        """
+        Initialize an OBB (Oriented Bounding Box) instance with oriented bounding box data and original image shape.
+
+        This class stores and manipulates Oriented Bounding Boxes (OBB) for object detection tasks. It provides
+        various properties and methods to access and transform the OBB data.
+
+        Args:
+            boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes,
+                with shape (num_boxes, 7) or (num_boxes, 8). The first five columns contain
+                [x_center, y_center, width, height, rotation]; the last two columns contain confidence and class
+                values, and when tracking is enabled the third-to-last column contains track IDs.
+            orig_shape (Tuple[int, int]): Original image size, in the format (height, width).
+
+        Attributes:
+            data (torch.Tensor | numpy.ndarray): The raw OBB tensor.
+            orig_shape (Tuple[int, int]): The original image shape.
+            is_track (bool): Whether the boxes include tracking IDs.
+
+        Raises:
+            AssertionError: If the number of values per box is not 7 or 8.
+
+        Examples:
+            >>> import torch
+            >>> boxes = torch.rand(3, 7)  # 3 boxes with 7 values each
+            >>> orig_shape = (640, 480)
+            >>> obb = OBB(boxes, orig_shape)
+            >>> print(obb.xywhr)  # Access the boxes in xywhr format
+        """
+        if boxes.ndim == 1:
+            boxes = boxes[None, :]
+        n = boxes.shape[-1]
+        assert n in {7, 8}, f"expected 7 or 8 values but got {n}"  # xywh, rotation, track_id, conf, cls
+        super().__init__(boxes, orig_shape)
+        self.is_track = n == 8
+        self.orig_shape = orig_shape
+
+    @property
+    def xywhr(self):
+        """
+        Returns boxes in [x_center, y_center, width, height, rotation] format.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the oriented bounding boxes with format
+                [x_center, y_center, width, height, rotation]. The shape is (N, 5) where N is the number of boxes.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> obb = results[0].obb
+            >>> xywhr = obb.xywhr
+            >>> print(xywhr.shape)
+            torch.Size([3, 5])
+        """
+        return self.data[:, :5]
+
+    @property
+    def conf(self):
+        """
+        Returns the confidence scores for Oriented Bounding Boxes (OBBs).
+
+        This property retrieves the confidence values associated with each OBB detection. The confidence score
+        represents the model's certainty in the detection.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor or numpy array of shape (N,) containing confidence scores
+                for N detections, where each score is in the range [0, 1].
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> obb_result = results[0].obb
+            >>> confidence_scores = obb_result.conf
+            >>> print(confidence_scores)
+        """
+        return self.data[:, -2]
+
+    @property
+    def cls(self):
+        """
+        Returns the class values of the oriented bounding boxes.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the class values for each oriented
+                bounding box. The shape is (N,), where N is the number of boxes.
+
+        Examples:
+            >>> results = model("image.jpg")
+            >>> result = results[0]
+            >>> obb = result.obb
+            >>> class_values = obb.cls
+            >>> print(class_values)
+        """
+        return self.data[:, -1]
+
+    @property
+    def id(self):
+        """
+        Returns the tracking IDs of the oriented bounding boxes (if available).
+
+        Returns:
+            (torch.Tensor | numpy.ndarray | None): A tensor or numpy array containing the tracking IDs for each
+                oriented bounding box. Returns None if tracking IDs are not available.
+
+        Examples:
+            >>> results = model("image.jpg", tracker=True)  # Run inference with tracking
+            >>> for result in results:
+            ...     if result.obb is not None:
+            ...         track_ids = result.obb.id
+            ...         if track_ids is not None:
+            ...             print(f"Tracking IDs: {track_ids}")
+        """
+        return self.data[:, -3] if self.is_track else None
+
+    @property
+    @lru_cache(maxsize=2)
+    def xyxyxyxy(self):
+        """
+        Converts OBB format to 8-point (xyxyxyxy) coordinate format for rotated bounding boxes.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): Rotated bounding boxes in xyxyxyxy format with shape (N, 4, 2), where N is
+                the number of boxes. Each box is represented by 4 points (x, y), starting from the top-left corner and
+                moving clockwise.
+
+        Examples:
+            >>> obb = OBB(torch.tensor([[100, 100, 50, 30, 0.5, 0.9, 0]]), orig_shape=(640, 640))
+            >>> xyxyxyxy = obb.xyxyxyxy
+            >>> print(xyxyxyxy.shape)
+            torch.Size([1, 4, 2])
+        """
+        return ops.xywhr2xyxyxyxy(self.xywhr)
+
+    @property
+    @lru_cache(maxsize=2)
+    def xyxyxyxyn(self):
+        """
+        Converts rotated bounding boxes to normalized xyxyxyxy format.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): Normalized rotated bounding boxes in xyxyxyxy format with shape (N, 4, 2),
+                where N is the number of boxes. Each box is represented by 4 points (x, y), normalized relative to
+                the original image dimensions.
+
+        Examples:
+            >>> obb = OBB(torch.rand(10, 7), orig_shape=(640, 480))  # 10 random OBBs
+            >>> normalized_boxes = obb.xyxyxyxyn
+            >>> print(normalized_boxes.shape)
+            torch.Size([10, 4, 2])
+        """
+        xyxyxyxyn = self.xyxyxyxy.clone() if isinstance(self.xyxyxyxy, torch.Tensor) else np.copy(self.xyxyxyxy)
+        xyxyxyxyn[..., 0] /= self.orig_shape[1]
+        xyxyxyxyn[..., 1] /= self.orig_shape[0]
+        return xyxyxyxyn
+
+    @property
+    @lru_cache(maxsize=2)
+    def xyxy(self):
+        """
+        Converts oriented bounding boxes (OBB) to axis-aligned bounding boxes in xyxy format.
+
+        This property calculates the minimal enclosing rectangle for each oriented bounding box and returns it in
+        xyxy format (x1, y1, x2, y2). This is useful for operations that require axis-aligned bounding boxes, such
+        as IoU calculation with non-rotated boxes.
+
+        Returns:
+            (torch.Tensor | numpy.ndarray): Axis-aligned bounding boxes in xyxy format with shape (N, 4), where N
+                is the number of boxes. Each row contains [x1, y1, x2, y2] coordinates.
+
+        Examples:
+            >>> import torch
+            >>> from ultralytics import YOLO
+            >>> model = YOLO("yolov8n-obb.pt")
+            >>> results = model("path/to/image.jpg")
+            >>> for result in results:
+            ...     obb = result.obb
+            ...     if obb is not None:
+            ...         xyxy_boxes = obb.xyxy
+            ...         print(xyxy_boxes.shape)  # (N, 4)
+
+        Notes:
+            - This method approximates the OBB by its minimal enclosing rectangle.
+            - The returned format is compatible with standard object detection metrics and visualization tools.
+            - The property uses caching to improve performance for repeated access.
+        """
+        x = self.xyxyxyxy[..., 0]
+        y = self.xyxyxyxy[..., 1]
+        return (
+            torch.stack([x.amin(1), y.amin(1), x.amax(1), y.amax(1)], -1)
+            if isinstance(x, torch.Tensor)
+            else np.stack([x.min(1), y.min(1), x.max(1), y.max(1)], -1)
+        )

+ 820 - 0
ultralytics/engine/trainer.py

@@ -0,0 +1,820 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Train a model on a dataset.
+
+Usage:
+    $ yolo mode=train model=yolov8n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
+"""
+
+import gc
+import math
+import os
+import subprocess
+import time
+import warnings
+from copy import copy, deepcopy
+from datetime import datetime, timedelta
+from pathlib import Path
+
+import numpy as np
+import torch
+from torch import distributed as dist
+from torch import nn, optim
+
+from ultralytics.cfg import get_cfg, get_save_dir
+from ultralytics.data.utils import check_cls_dataset, check_det_dataset
+from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
+from ultralytics.utils import (
+    DEFAULT_CFG,
+    LOCAL_RANK,
+    LOGGER,
+    RANK,
+    TQDM,
+    __version__,
+    callbacks,
+    clean_url,
+    colorstr,
+    emojis,
+    yaml_save,
+)
+from ultralytics.utils.autobatch import check_train_batch_size
+from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_model_file_from_stem, print_args
+from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
+from ultralytics.utils.files import get_latest_run
+from ultralytics.utils.torch_utils import (
+    TORCH_2_4,
+    EarlyStopping,
+    ModelEMA,
+    autocast,
+    convert_optimizer_state_dict_to_fp16,
+    init_seeds,
+    one_cycle,
+    select_device,
+    strip_optimizer,
+    torch_distributed_zero_first,
+)
+
+
+class BaseTrainer:
+    """
+    A base class for creating trainers.
+
+    Attributes:
+        args (SimpleNamespace): Configuration for the trainer.
+        validator (BaseValidator): Validator instance.
+        model (nn.Module): Model instance.
+        callbacks (defaultdict): Dictionary of callbacks.
+        save_dir (Path): Directory to save results.
+        wdir (Path): Directory to save weights.
+        last (Path): Path to the last checkpoint.
+        best (Path): Path to the best checkpoint.
+        save_period (int): Save checkpoint every x epochs (disabled if < 1).
+        batch_size (int): Batch size for training.
+        epochs (int): Number of epochs to train for.
+        start_epoch (int): Starting epoch for training.
+        device (torch.device): Device to use for training.
+        amp (bool): Flag to enable AMP (Automatic Mixed Precision).
+        scaler (amp.GradScaler): Gradient scaler for AMP.
+        data (str): Path to data.
+        trainset (torch.utils.data.Dataset): Training dataset.
+        testset (torch.utils.data.Dataset): Testing dataset.
+        ema (nn.Module): EMA (Exponential Moving Average) of the model.
+        resume (bool): Resume training from a checkpoint.
+        lf (nn.Module): Loss function.
+        scheduler (torch.optim.lr_scheduler._LRScheduler): Learning rate scheduler.
+        best_fitness (float): The best fitness value achieved.
+        fitness (float): Current fitness value.
+        loss (float): Current loss value.
+        tloss (float): Total loss value.
+        loss_names (list): List of loss names.
+        csv (Path): Path to results CSV file.
+    """
+
+    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+        """
+        Initializes the BaseTrainer class.
+
+        Args:
+            cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG.
+            overrides (dict, optional): Configuration overrides. Defaults to None.
+            _callbacks (list, optional): List of callback functions. Defaults to None.
+        """
+        self.args = get_cfg(cfg, overrides)
+        self.check_resume(overrides)
+        self.device = select_device(self.args.device, self.args.batch)
+        self.validator = None
+        self.metrics = None
+        self.plots = {}
+        init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic)
+
+        # Dirs
+        self.save_dir = get_save_dir(self.args)
+        self.args.name = self.save_dir.name  # update name for loggers
+        self.wdir = self.save_dir / "weights"  # weights dir
+        if RANK in {-1, 0}:
+            self.wdir.mkdir(parents=True, exist_ok=True)  # make dir
+            self.args.save_dir = str(self.save_dir)
+            yaml_save(self.save_dir / "args.yaml", vars(self.args))  # save run args
+        self.last, self.best = self.wdir / "last.pt", self.wdir / "best.pt"  # checkpoint paths
+        self.save_period = self.args.save_period
+
+        self.batch_size = self.args.batch
+        self.epochs = self.args.epochs or 100  # in case users accidentally pass epochs=None with timed training
+        self.start_epoch = 0
+        if RANK == -1:
+            print_args(vars(self.args))
+
+        # Device
+        if self.device.type in {"cpu", "mps"}:
+            self.args.workers = 0  # faster CPU training as time dominated by inference, not dataloading
+
+        # Model and Dataset
+        self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolov8n -> yolov8n.pt
+        with torch_distributed_zero_first(LOCAL_RANK):  # avoid auto-downloading dataset multiple times
+            self.trainset, self.testset = self.get_dataset()
+        self.ema = None
+
+        # Optimization utils init
+        self.lf = None
+        self.scheduler = None
+
+        # Epoch level metrics
+        self.best_fitness = None
+        self.fitness = None
+        self.loss = None
+        self.tloss = None
+        self.loss_names = ["Loss"]
+        self.csv = self.save_dir / "results.csv"
+        self.plot_idx = [0, 1, 2]
+
+        # HUB
+        self.hub_session = None
+
+        # Callbacks
+        self.callbacks = _callbacks or callbacks.get_default_callbacks()
+        if RANK in {-1, 0}:
+            callbacks.add_integration_callbacks(self)
+
+    def add_callback(self, event: str, callback):
+        """Appends the given callback."""
+        self.callbacks[event].append(callback)
+
+    def set_callback(self, event: str, callback):
+        """Overrides the existing callbacks with the given callback."""
+        self.callbacks[event] = [callback]
+
+    def run_callbacks(self, event: str):
+        """Run all existing callbacks associated with a particular event."""
+        for callback in self.callbacks.get(event, []):
+            callback(self)
+
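+    # Usage sketch (illustrative, assuming `trainer` is an instance of a concrete trainer subclass
+    # and "on_train_epoch_end" is a registered event name): adding a custom callback; each callback
+    # receives the trainer instance when the event fires.
+    #
+    #     def log_epoch(trainer):
+    #         print("epoch finished, fitness:", trainer.fitness)
+    #
+    #     trainer.add_callback("on_train_epoch_end", log_epoch)
+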
+    def train(self):
+        """Allow device='', device=None on Multi-GPU systems to default to device=0."""
+        if isinstance(self.args.device, str) and len(self.args.device):  # i.e. device='0' or device='0,1,2,3'
+            world_size = len(self.args.device.split(","))
+        elif isinstance(self.args.device, (tuple, list)):  # i.e. device=[0, 1, 2, 3] (multi-GPU from CLI is list)
+            world_size = len(self.args.device)
+        elif self.args.device in {"cpu", "mps"}:  # i.e. device='cpu' or 'mps'
+            world_size = 0
+        elif torch.cuda.is_available():  # i.e. device=None or device='' or device=number
+            world_size = 1  # default to device 0
+        else:  # i.e. device=None or device=''
+            world_size = 0
+
+        # Run subprocess if DDP training, else train normally
+        if world_size > 1 and "LOCAL_RANK" not in os.environ:
+            # Argument checks
+            if self.args.rect:
+                LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
+                self.args.rect = False
+            if self.args.batch < 1.0:
+                LOGGER.warning(
+                    "WARNING ⚠️ 'batch<1' for AutoBatch is incompatible with Multi-GPU training, setting "
+                    "default 'batch=16'"
+                )
+                self.args.batch = 16
+
+            # Command
+            cmd, file = generate_ddp_command(world_size, self)
+            try:
+                LOGGER.info(f"{colorstr('DDP:')} debug command {' '.join(cmd)}")
+                subprocess.run(cmd, check=True)
+            except Exception as e:
+                raise e
+            finally:
+                ddp_cleanup(self, str(file))
+
+        else:
+            self._do_train(world_size)
+
+    def _setup_scheduler(self):
+        """Initialize training learning rate scheduler."""
+        if self.args.cos_lr:
+            self.lf = one_cycle(1, self.args.lrf, self.epochs)  # cosine 1->hyp['lrf']
+        else:
+            self.lf = lambda x: max(1 - x / self.epochs, 0) * (1.0 - self.args.lrf) + self.args.lrf  # linear
+        self.scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)
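+        # Illustrative sketch (assuming the default lrf=0.01 and epochs=100): the linear lambda above
+        # gives lf(0)=1.0, lf(50)=0.505, lf(100)=0.01, so the learning rate decays linearly from lr0
+        # to lr0 * lrf over the run; cos_lr=True swaps this for a cosine curve with the same endpoints.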
+
+    def _setup_ddp(self, world_size):
+        """Initializes and sets the DistributedDataParallel parameters for training."""
+        torch.cuda.set_device(RANK)
+        self.device = torch.device("cuda", RANK)
+        # LOGGER.info(f'DDP info: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}')
+        os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"  # set to enforce timeout
+        dist.init_process_group(
+            backend="nccl" if dist.is_nccl_available() else "gloo",
+            timeout=timedelta(seconds=10800),  # 3 hours
+            rank=RANK,
+            world_size=world_size,
+        )
+
+    def _setup_train(self, world_size):
+        """Builds dataloaders and optimizer on correct rank process."""
+        # Model
+        self.run_callbacks("on_pretrain_routine_start")
+        ckpt = self.setup_model()
+        self.model = self.model.to(self.device)
+        self.set_model_attributes()
+
+        # Freeze layers
+        freeze_list = (
+            self.args.freeze
+            if isinstance(self.args.freeze, list)
+            else range(self.args.freeze)
+            if isinstance(self.args.freeze, int)
+            else []
+        )
+        always_freeze_names = [".dfl"]  # always freeze these layers
+        freeze_layer_names = [f"model.{x}." for x in freeze_list] + always_freeze_names
+        for k, v in self.model.named_parameters():
+            # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
+            if any(x in k for x in freeze_layer_names):
+                LOGGER.info(f"Freezing layer '{k}'")
+                v.requires_grad = False
+            elif not v.requires_grad and v.dtype.is_floating_point:  # only floating point Tensor can require gradients
+                LOGGER.info(
+                    f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. "
+                    "See ultralytics.engine.trainer for customization of frozen layers."
+                )
+                v.requires_grad = True
+
+        # Check AMP
+        self.amp = torch.tensor(self.args.amp).to(self.device)  # True or False
+        if self.amp and RANK in {-1, 0}:  # Single-GPU and DDP
+            callbacks_backup = callbacks.default_callbacks.copy()  # backup callbacks as check_amp() resets them
+            self.amp = torch.tensor(check_amp(self.model), device=self.device)
+            callbacks.default_callbacks = callbacks_backup  # restore callbacks
+        if RANK > -1 and world_size > 1:  # DDP
+            dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
+        self.amp = bool(self.amp)  # as boolean
+        self.scaler = (
+            torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
+        )
+        if world_size > 1:
+            self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
+            self.set_model_attributes()  # set again after DDP wrapper
+
+        # Check imgsz
+        gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32)  # grid size (max stride)
+        self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1)
+        self.stride = gs  # for multiscale training
+
+        # Batch size
+        if self.batch_size < 1 and RANK == -1:  # single-GPU only, estimate best batch size
+            self.args.batch = self.batch_size = self.auto_batch()
+
+        # Dataloaders
+        batch_size = self.batch_size // max(world_size, 1)
+        self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=LOCAL_RANK, mode="train")
+        if RANK in {-1, 0}:
+            # Note: When training the DOTA dataset, doubling the batch size could cause OOM on images with >2000 objects.
+            self.test_loader = self.get_dataloader(
+                self.testset, batch_size=batch_size if self.args.task == "obb" else batch_size * 2, rank=-1, mode="val"
+            )
+            self.validator = self.get_validator()
+            metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix="val")
+            self.metrics = dict(zip(metric_keys, [0] * len(metric_keys)))
+            self.ema = ModelEMA(self.model)
+            if self.args.plots:
+                self.plot_training_labels()
+
+        # Optimizer
+        self.accumulate = max(round(self.args.nbs / self.batch_size), 1)  # accumulate loss before optimizing
+        weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs  # scale weight_decay
+        iterations = math.ceil(len(self.train_loader.dataset) / max(self.batch_size, self.args.nbs)) * self.epochs
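+        # Illustrative sketch (assuming the default nbs=64 and batch=16): accumulate = round(64 / 16) = 4,
+        # so gradients from 4 batches are accumulated per optimizer step, and weight_decay is scaled by
+        # 16 * 4 / 64 = 1.0, keeping the effective decay tied to the nominal batch size nbs.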
+        self.optimizer = self.build_optimizer(
+            model=self.model,
+            name=self.args.optimizer,
+            lr=self.args.lr0,
+            momentum=self.args.momentum,
+            decay=weight_decay,
+            iterations=iterations,
+        )
+        # Scheduler
+        self._setup_scheduler()
+        self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
+        self.resume_training(ckpt)
+        self.scheduler.last_epoch = self.start_epoch - 1  # do not move
+        self.run_callbacks("on_pretrain_routine_end")
+
+    def _do_train(self, world_size=1):
+        """Train completed, evaluate and plot if specified by arguments."""
+        if world_size > 1:
+            self._setup_ddp(world_size)
+        self._setup_train(world_size)
+
+        nb = len(self.train_loader)  # number of batches
+        nw = max(round(self.args.warmup_epochs * nb), 100) if self.args.warmup_epochs > 0 else -1  # warmup iterations
+        last_opt_step = -1
+        self.epoch_time = None
+        self.epoch_time_start = time.time()
+        self.train_time_start = time.time()
+        self.run_callbacks("on_train_start")
+        LOGGER.info(
+            f"Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n"
+            f"Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n"
+            f"Logging results to {colorstr('bold', self.save_dir)}\n"
+            f"Starting training for " + (f"{self.args.time} hours..." if self.args.time else f"{self.epochs} epochs...")
+        )
+        if self.args.close_mosaic:
+            base_idx = (self.epochs - self.args.close_mosaic) * nb
+            self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2])
+        epoch = self.start_epoch
+        self.optimizer.zero_grad()  # zero any resumed gradients to ensure stability on train start
+        while True:
+            self.epoch = epoch
+            self.run_callbacks("on_train_epoch_start")
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore")  # suppress 'Detected lr_scheduler.step() before optimizer.step()'
+                self.scheduler.step()
+
+            self.model.train()
+            if RANK != -1:
+                self.train_loader.sampler.set_epoch(epoch)
+            pbar = enumerate(self.train_loader)
+            # Update dataloader attributes (optional)
+            if epoch == (self.epochs - self.args.close_mosaic):
+                self._close_dataloader_mosaic()
+                self.train_loader.reset()
+
+            if RANK in {-1, 0}:
+                LOGGER.info(self.progress_string())
+                pbar = TQDM(enumerate(self.train_loader), total=nb)
+            self.tloss = None
+            for i, batch in pbar:
+                self.run_callbacks("on_train_batch_start")
+                # Warmup
+                ni = i + nb * epoch
+                if ni <= nw:
+                    xi = [0, nw]  # x interp
+                    self.accumulate = max(1, int(np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()))
+                    for j, x in enumerate(self.optimizer.param_groups):
+                        # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
+                        x["lr"] = np.interp(
+                            ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x["initial_lr"] * self.lf(epoch)]
+                        )
+                        if "momentum" in x:
+                            x["momentum"] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])
+
+                # Forward
+                with autocast(self.amp):
+                    batch = self.preprocess_batch(batch)
+                    self.loss, self.loss_items = self.model(batch)
+                    if RANK != -1:
+                        self.loss *= world_size
+                    self.tloss = (
+                        (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None else self.loss_items
+                    )
+
+                # Backward
+                self.scaler.scale(self.loss).backward()
+
+                # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
+                if ni - last_opt_step >= self.accumulate:
+                    self.optimizer_step()
+                    last_opt_step = ni
+
+                    # Timed stopping
+                    if self.args.time:
+                        self.stop = (time.time() - self.train_time_start) > (self.args.time * 3600)
+                        if RANK != -1:  # if DDP training
+                            broadcast_list = [self.stop if RANK == 0 else None]
+                            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
+                            self.stop = broadcast_list[0]
+                        if self.stop:  # training time exceeded
+                            break
+
+                # Log
+                if RANK in {-1, 0}:
+                    loss_length = self.tloss.shape[0] if len(self.tloss.shape) else 1
+                    pbar.set_description(
+                        ("%11s" * 2 + "%11.4g" * (2 + loss_length))
+                        % (
+                            f"{epoch + 1}/{self.epochs}",
+                            f"{self._get_memory():.3g}G",  # (GB) GPU memory util
+                            *(self.tloss if loss_length > 1 else torch.unsqueeze(self.tloss, 0)),  # losses
+                            batch["cls"].shape[0],  # batch size, i.e. 8
+                            batch["img"].shape[-1],  # imgsz, i.e 640
+                        )
+                    )
+                    self.run_callbacks("on_batch_end")
+                    if self.args.plots and ni in self.plot_idx:
+                        self.plot_training_samples(batch, ni)
+
+                self.run_callbacks("on_train_batch_end")
+
+            self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)}  # for loggers
+            self.run_callbacks("on_train_epoch_end")
+            if RANK in {-1, 0}:
+                final_epoch = epoch + 1 >= self.epochs
+                self.ema.update_attr(self.model, include=["yaml", "nc", "args", "names", "stride", "class_weights"])
+
+                # Validation
+                if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
+                    self.metrics, self.fitness = self.validate()
+                self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
+                self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch
+                if self.args.time:
+                    self.stop |= (time.time() - self.train_time_start) > (self.args.time * 3600)
+
+                # Save model
+                if self.args.save or final_epoch:
+                    self.save_model()
+                    self.run_callbacks("on_model_save")
+
+            # Scheduler
+            t = time.time()
+            self.epoch_time = t - self.epoch_time_start
+            self.epoch_time_start = t
+            if self.args.time:
+                mean_epoch_time = (t - self.train_time_start) / (epoch - self.start_epoch + 1)
+                self.epochs = self.args.epochs = math.ceil(self.args.time * 3600 / mean_epoch_time)
+                self._setup_scheduler()
+                self.scheduler.last_epoch = self.epoch  # do not move
+                self.stop |= epoch >= self.epochs  # stop if exceeded epochs
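+            # Illustrative sketch (assuming time=1.0 hours and a measured mean epoch time of 120 s):
+            # epochs is re-estimated as ceil(1.0 * 3600 / 120) = 30 and the scheduler is rebuilt,
+            # so training finishes within the requested wall-clock budget.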
+            self.run_callbacks("on_fit_epoch_end")
+            self._clear_memory()
+
+            # Early Stopping
+            if RANK != -1:  # if DDP training
+                broadcast_list = [self.stop if RANK == 0 else None]
+                dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
+                self.stop = broadcast_list[0]
+            if self.stop:
+                break  # must break all DDP ranks
+            epoch += 1
+
+        if RANK in {-1, 0}:
+            # Do final val with best.pt
+            seconds = time.time() - self.train_time_start
+            LOGGER.info(f"\n{epoch - self.start_epoch + 1} epochs completed in {seconds / 3600:.3f} hours.")
+            self.final_eval()
+            if self.args.plots:
+                self.plot_metrics()
+            self.run_callbacks("on_train_end")
+        self._clear_memory()
+        self.run_callbacks("teardown")
+
+    def auto_batch(self, max_num_obj=0):
+        """Get batch size by calculating memory occupation of model."""
+        return check_train_batch_size(
+            model=self.model,
+            imgsz=self.args.imgsz,
+            amp=self.amp,
+            batch=self.batch_size,
+            max_num_obj=max_num_obj,
+        )  # returns batch size
+
+    def _get_memory(self):
+        """Get accelerator memory utilization in GB."""
+        if self.device.type == "mps":
+            memory = torch.mps.driver_allocated_memory()
+        elif self.device.type == "cpu":
+            memory = 0
+        else:
+            memory = torch.cuda.memory_reserved()
+        return memory / 1e9
+
+    def _clear_memory(self):
+        """Clear accelerator memory on different platforms."""
+        gc.collect()
+        if self.device.type == "mps":
+            torch.mps.empty_cache()
+        elif self.device.type == "cpu":
+            return
+        else:
+            torch.cuda.empty_cache()
+
+    def read_results_csv(self):
+        """Read results.csv into a dict using pandas."""
+        import pandas as pd  # scope for faster 'import ultralytics'
+
+        return pd.read_csv(self.csv).to_dict(orient="list")
+
+    def save_model(self):
+        """Save model training checkpoints with additional metadata."""
+        import io
+
+        # Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)
+        buffer = io.BytesIO()
+        torch.save(
+            {
+                "epoch": self.epoch,
+                "best_fitness": self.best_fitness,
+                "model": None,  # resume and final checkpoints derive from EMA
+                "ema": deepcopy(self.ema.ema).half(),
+                "updates": self.ema.updates,
+                "optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
+                "train_args": vars(self.args),  # save as dict
+                "train_metrics": {**self.metrics, **{"fitness": self.fitness}},
+                "train_results": self.read_results_csv(),
+                "date": datetime.now().isoformat(),
+                "version": __version__,
+                "license": "AGPL-3.0 (https://ultralytics.com/license)",
+                "docs": "https://docs.ultralytics.com",
+            },
+            buffer,
+        )
+        serialized_ckpt = buffer.getvalue()  # get the serialized content to save
+
+        # Save checkpoints
+        self.last.write_bytes(serialized_ckpt)  # save last.pt
+        if self.best_fitness == self.fitness:
+            self.best.write_bytes(serialized_ckpt)  # save best.pt
+        if (self.save_period > 0) and (self.epoch % self.save_period == 0):
+            (self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt)  # save epoch, i.e. 'epoch3.pt'
+        # if self.args.close_mosaic and self.epoch == (self.epochs - self.args.close_mosaic - 1):
+        #    (self.wdir / "last_mosaic.pt").write_bytes(serialized_ckpt)  # save mosaic checkpoint
+
+    def get_dataset(self):
+        """
+        Get train and val paths from the data dict if it exists.
+
+        Raises RuntimeError if the dataset could not be loaded or parsed.
+        """
+        try:
+            if self.args.task == "classify":
+                data = check_cls_dataset(self.args.data)
+            elif self.args.data.split(".")[-1] in {"yaml", "yml"} or self.args.task in {
+                "detect",
+                "segment",
+                "pose",
+                "obb",
+            }:
+                data = check_det_dataset(self.args.data)
+                if "yaml_file" in data:
+                    self.args.data = data["yaml_file"]  # for validating 'yolo train data=url.zip' usage
+        except Exception as e:
+            raise RuntimeError(emojis(f"Dataset '{clean_url(self.args.data)}' error ❌ {e}")) from e
+        self.data = data
+        return data["train"], data.get("val") or data.get("test")
+
+    def setup_model(self):
+        """Load/create/download model for any task."""
+        if isinstance(self.model, torch.nn.Module):  # if model is loaded beforehand. No setup needed
+            return
+
+        cfg, weights = self.model, None
+        ckpt = None
+        if str(self.model).endswith(".pt"):
+            weights, ckpt = attempt_load_one_weight(self.model)
+            cfg = weights.yaml
+        elif isinstance(self.args.pretrained, (str, Path)):
+            weights, _ = attempt_load_one_weight(self.args.pretrained)
+        self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1)  # calls Model(cfg, weights)
+        return ckpt
+
+    def optimizer_step(self):
+        """Perform a single step of the training optimizer with gradient clipping and EMA update."""
+        self.scaler.unscale_(self.optimizer)  # unscale gradients
+        torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)  # clip gradients
+        self.scaler.step(self.optimizer)
+        self.scaler.update()
+        self.optimizer.zero_grad()
+        if self.ema:
+            self.ema.update(self.model)
+
+    def preprocess_batch(self, batch):
+        """Allows custom preprocessing model inputs and ground truths depending on task type."""
+        return batch
+
+    def validate(self):
+        """
+        Runs validation on test set using self.validator.
+
+        The returned dict is expected to contain "fitness" key.
+        """
+        metrics = self.validator(self)
+        fitness = metrics.pop("fitness", -self.loss.detach().cpu().numpy())  # use loss as fitness measure if not found
+        if not self.best_fitness or self.best_fitness < fitness:
+            self.best_fitness = fitness
+        return metrics, fitness
+
+    def get_model(self, cfg=None, weights=None, verbose=True):
+        """Get model and raise NotImplementedError for loading cfg files."""
+        raise NotImplementedError("This task trainer doesn't support loading cfg files")
+
+    def get_validator(self):
+        """Returns a NotImplementedError when the get_validator function is called."""
+        raise NotImplementedError("get_validator function not implemented in trainer")
+
+    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
+        """Returns dataloader derived from torch.data.Dataloader."""
+        raise NotImplementedError("get_dataloader function not implemented in trainer")
+
+    def build_dataset(self, img_path, mode="train", batch=None):
+        """Build dataset."""
+        raise NotImplementedError("build_dataset function not implemented in trainer")
+
+    def label_loss_items(self, loss_items=None, prefix="train"):
+        """
+        Returns a loss dict with labelled training loss items tensor.
+
+        Note:
+            This is not needed for classification but necessary for segmentation & detection
+        """
+        return {"loss": loss_items} if loss_items is not None else ["loss"]
+
+    def set_model_attributes(self):
+        """To set or update model parameters before training."""
+        self.model.names = self.data["names"]
+
+    def build_targets(self, preds, targets):
+        """Builds target tensors for training YOLO model."""
+        pass
+
+    def progress_string(self):
+        """Returns a string describing training progress."""
+        return ""
+
+    # TODO: may need to put these following functions into callback
+    def plot_training_samples(self, batch, ni):
+        """Plots training samples during YOLO training."""
+        pass
+
+    def plot_training_labels(self):
+        """Plots training labels for YOLO model."""
+        pass
+
+    def save_metrics(self, metrics):
+        """Saves training metrics to a CSV file."""
+        keys, vals = list(metrics.keys()), list(metrics.values())
+        n = len(metrics) + 2  # number of cols
+        s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n")  # header
+        t = time.time() - self.train_time_start
+        with open(self.csv, "a") as f:
+            f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t] + vals)).rstrip(",") + "\n")
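+        # Illustrative sketch of the resulting CSV (columns depend on the task's loss and metric keys):
+        #   epoch,time,<loss columns>,<metric columns>,lr/pg0,lr/pg1,lr/pg2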
+
+    def plot_metrics(self):
+        """Plot and display metrics visually."""
+        pass
+
+    def on_plot(self, name, data=None):
+        """Registers plots (e.g. to be consumed in callbacks)."""
+        path = Path(name)
+        self.plots[path] = {"data": data, "timestamp": time.time()}
+
+    def final_eval(self):
+        """Performs final evaluation and validation for object detection YOLO model."""
+        ckpt = {}
+        for f in self.last, self.best:
+            if f.exists():
+                if f is self.last:
+                    ckpt = strip_optimizer(f)
+                elif f is self.best:
+                    k = "train_results"  # update best.pt train_metrics from last.pt
+                    strip_optimizer(f, updates={k: ckpt[k]} if k in ckpt else None)
+                    LOGGER.info(f"\nValidating {f}...")
+                    self.validator.args.plots = self.args.plots
+                    self.metrics = self.validator(model=f)
+                    self.metrics.pop("fitness", None)
+                    self.run_callbacks("on_fit_epoch_end")
+
+    def check_resume(self, overrides):
+        """Check if resume checkpoint exists and update arguments accordingly."""
+        resume = self.args.resume
+        if resume:
+            try:
+                exists = isinstance(resume, (str, Path)) and Path(resume).exists()
+                last = Path(check_file(resume) if exists else get_latest_run())
+
+                # Check that resume data YAML exists, otherwise strip to force re-download of dataset
+                ckpt_args = attempt_load_weights(last).args
+                if not Path(ckpt_args["data"]).exists():
+                    ckpt_args["data"] = self.args.data
+
+                resume = True
+                self.args = get_cfg(ckpt_args)
+                self.args.model = self.args.resume = str(last)  # reinstate model
+                for k in (
+                    "imgsz",
+                    "batch",
+                    "device",
+                    "close_mosaic",
+                ):  # allow arg updates to reduce memory or update device on resume
+                    if k in overrides:
+                        setattr(self.args, k, overrides[k])
+
+            except Exception as e:
+                raise FileNotFoundError(
+                    "Resume checkpoint not found. Please pass a valid checkpoint to resume from, "
+                    "i.e. 'yolo train resume model=path/to/last.pt'"
+                ) from e
+        self.resume = resume
+
+    def resume_training(self, ckpt):
+        """Resume YOLO training from given epoch and best fitness."""
+        if ckpt is None or not self.resume:
+            return
+        best_fitness = 0.0
+        start_epoch = ckpt.get("epoch", -1) + 1
+        if ckpt.get("optimizer", None) is not None:
+            self.optimizer.load_state_dict(ckpt["optimizer"])  # optimizer
+            best_fitness = ckpt["best_fitness"]
+        if self.ema and ckpt.get("ema"):
+            self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())  # EMA
+            self.ema.updates = ckpt["updates"]
+        assert start_epoch > 0, (
+            f"{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n"
+            f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'"
+        )
+        LOGGER.info(f"Resuming training {self.args.model} from epoch {start_epoch + 1} to {self.epochs} total epochs")
+        if self.epochs < start_epoch:
+            LOGGER.info(
+                f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs."
+            )
+            self.epochs += ckpt["epoch"]  # finetune additional epochs
+        self.best_fitness = best_fitness
+        self.start_epoch = start_epoch
+        if start_epoch > (self.epochs - self.args.close_mosaic):
+            self._close_dataloader_mosaic()
+
+    def _close_dataloader_mosaic(self):
+        """Update dataloaders to stop using mosaic augmentation."""
+        if hasattr(self.train_loader.dataset, "mosaic"):
+            self.train_loader.dataset.mosaic = False
+        if hasattr(self.train_loader.dataset, "close_mosaic"):
+            LOGGER.info("Closing dataloader mosaic")
+            self.train_loader.dataset.close_mosaic(hyp=copy(self.args))
+
+    def build_optimizer(self, model, name="auto", lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5):
+        """
+        Constructs an optimizer for the given model, based on the specified optimizer name, learning rate, momentum,
+        weight decay, and number of iterations.
+
+        Args:
+            model (torch.nn.Module): The model for which to build an optimizer.
+            name (str, optional): The name of the optimizer to use. If 'auto', the optimizer is selected
+                based on the number of iterations. Default: 'auto'.
+            lr (float, optional): The learning rate for the optimizer. Default: 0.001.
+            momentum (float, optional): The momentum factor for the optimizer. Default: 0.9.
+            decay (float, optional): The weight decay for the optimizer. Default: 1e-5.
+            iterations (float, optional): The number of iterations, which determines the optimizer if
+                name is 'auto'. Default: 1e5.
+
+        Returns:
+            (torch.optim.Optimizer): The constructed optimizer.
+        """
+        g = [], [], []  # optimizer parameter groups
+        bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k)  # normalization layers, i.e. BatchNorm2d()
+        if name == "auto":
+            LOGGER.info(
+                f"{colorstr('optimizer:')} 'optimizer=auto' found, "
+                f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
+                f"determining best 'optimizer', 'lr0' and 'momentum' automatically... "
+            )
+            nc = getattr(model, "nc", 10)  # number of classes
+            lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
+            name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
+            self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam
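+            # Illustrative sketch: with nc=80 classes, lr_fit = round(0.002 * 5 / (4 + 80), 6) = 0.000119,
+            # so short runs (<= 10k iterations) default to AdamW with this small class-scaled LR, while
+            # longer runs fall back to SGD(lr=0.01, momentum=0.9).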
+
+        for module_name, module in model.named_modules():
+            for param_name, param in module.named_parameters(recurse=False):
+                fullname = f"{module_name}.{param_name}" if module_name else param_name
+                if "bias" in fullname:  # bias (no decay)
+                    g[2].append(param)
+                elif isinstance(module, bn):  # weight (no decay)
+                    g[1].append(param)
+                else:  # weight (with decay)
+                    g[0].append(param)
+
+        optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "auto"}
+        name = {x.lower(): x for x in optimizers}.get(name.lower())
+        if name in {"Adam", "Adamax", "AdamW", "NAdam", "RAdam"}:
+            optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
+        elif name == "RMSProp":
+            optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum)
+        elif name == "SGD":
+            optimizer = optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
+        else:
+            raise NotImplementedError(
+                f"Optimizer '{name}' not found in list of available optimizers {optimizers}. "
+                "Request support for addition optimizers at https://github.com/ultralytics/ultralytics."
+            )
+
+        optimizer.add_param_group({"params": g[0], "weight_decay": decay})  # add g0 with weight_decay
+        optimizer.add_param_group({"params": g[1], "weight_decay": 0.0})  # add g1 (BatchNorm2d weights)
+        LOGGER.info(
+            f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
+            f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)"
+        )
+        return optimizer

+ 242 - 0
ultralytics/engine/tuner.py

@@ -0,0 +1,242 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+This module provides functionality for hyperparameter tuning of Ultralytics YOLO models for object detection,
+instance segmentation, image classification, pose estimation, and multi-object tracking.
+
+Hyperparameter tuning is the process of systematically searching for the optimal set of hyperparameters
+that yield the best model performance. This is particularly crucial in deep learning models like YOLO,
+where small changes in hyperparameters can lead to significant differences in model accuracy and efficiency.
+
+Example:
+    Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
+    ```python
+    from ultralytics import YOLO
+
+    model = YOLO("yolo11n.pt")
+    model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
+    ```
+"""
+
+import random
+import shutil
+import subprocess
+import time
+
+import numpy as np
+import torch
+
+from ultralytics.cfg import get_cfg, get_save_dir
+from ultralytics.utils import DEFAULT_CFG, LOGGER, callbacks, colorstr, remove_colorstr, yaml_print, yaml_save
+from ultralytics.utils.plotting import plot_tune_results
+
+
+class Tuner:
+    """
+    Class responsible for hyperparameter tuning of YOLO models.
+
+    The class evolves YOLO model hyperparameters over a given number of iterations
+    by mutating them according to the search space and retraining the model to evaluate their performance.
+
+    Attributes:
+        space (dict): Hyperparameter search space containing bounds and scaling factors for mutation.
+        tune_dir (Path): Directory where evolution logs and results will be saved.
+        tune_csv (Path): Path to the CSV file where evolution logs are saved.
+
+    Methods:
+        _mutate(hyp: dict) -> dict:
+            Mutates the given hyperparameters within the bounds specified in `self.space`.
+
+        __call__():
+            Executes the hyperparameter evolution across multiple iterations.
+
+    Example:
+        Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
+        ```python
+        from ultralytics import YOLO
+
+        model = YOLO("yolo11n.pt")
+        model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
+        ```
+
+        Tune with custom search space.
+        ```python
+        from ultralytics import YOLO
+
+        model = YOLO("yolo11n.pt")
+        model.tune(space={key1: val1, key2: val2})  # custom search space dictionary
+        ```
+    """
+
+    def __init__(self, args=DEFAULT_CFG, _callbacks=None):
+        """
+        Initialize the Tuner with configurations.
+
+        Args:
+            args (dict, optional): Configuration for hyperparameter evolution.
+        """
+        self.space = args.pop("space", None) or {  # key: (min, max, gain(optional))
+            # 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']),
+            "lr0": (1e-5, 1e-1),  # initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
+            "lrf": (0.0001, 0.1),  # final OneCycleLR learning rate (lr0 * lrf)
+            "momentum": (0.7, 0.98, 0.3),  # SGD momentum/Adam beta1
+            "weight_decay": (0.0, 0.001),  # optimizer weight decay 5e-4
+            "warmup_epochs": (0.0, 5.0),  # warmup epochs (fractions ok)
+            "warmup_momentum": (0.0, 0.95),  # warmup initial momentum
+            "box": (1.0, 20.0),  # box loss gain
+            "cls": (0.2, 4.0),  # cls loss gain (scale with pixels)
+            "dfl": (0.4, 6.0),  # dfl loss gain
+            "hsv_h": (0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+            "hsv_s": (0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+            "hsv_v": (0.0, 0.9),  # image HSV-Value augmentation (fraction)
+            "degrees": (0.0, 45.0),  # image rotation (+/- deg)
+            "translate": (0.0, 0.9),  # image translation (+/- fraction)
+            "scale": (0.0, 0.95),  # image scale (+/- gain)
+            "shear": (0.0, 10.0),  # image shear (+/- deg)
+            "perspective": (0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+            "flipud": (0.0, 1.0),  # image flip up-down (probability)
+            "fliplr": (0.0, 1.0),  # image flip left-right (probability)
+            "bgr": (0.0, 1.0),  # image channel bgr (probability)
+            "mosaic": (0.0, 1.0),  # image mixup (probability)
+            "mixup": (0.0, 1.0),  # image mixup (probability)
+            "copy_paste": (0.0, 1.0),  # segment copy-paste (probability)
+        }
+        self.args = get_cfg(overrides=args)
+        self.tune_dir = get_save_dir(self.args, name=self.args.name or "tune")
+        self.args.name = None  # reset to not affect training directory
+        self.tune_csv = self.tune_dir / "tune_results.csv"
+        self.callbacks = _callbacks or callbacks.get_default_callbacks()
+        self.prefix = colorstr("Tuner: ")
+        callbacks.add_integration_callbacks(self)
+        LOGGER.info(
+            f"{self.prefix}Initialized Tuner instance with 'tune_dir={self.tune_dir}'\n"
+            f"{self.prefix}💡 Learn about tuning at https://docs.ultralytics.com/guides/hyperparameter-tuning"
+        )
+
+    def _mutate(self, parent="single", n=5, mutation=0.8, sigma=0.2):
+        """
+        Mutates the hyperparameters based on bounds and scaling factors specified in `self.space`.
+
+        Args:
+            parent (str): Parent selection method: 'single' or 'weighted'.
+            n (int): Number of parents to consider.
+            mutation (float): Probability of a parameter mutation in any given iteration.
+            sigma (float): Standard deviation for Gaussian random number generator.
+
+        Returns:
+            (dict): A dictionary containing mutated hyperparameters.
+        """
+        if self.tune_csv.exists():  # if CSV file exists: select best hyps and mutate
+            # Select parent(s)
+            x = np.loadtxt(self.tune_csv, ndmin=2, delimiter=",", skiprows=1)
+            fitness = x[:, 0]  # first column
+            n = min(n, len(x))  # number of previous results to consider
+            x = x[np.argsort(-fitness)][:n]  # top n mutations
+            w = x[:, 0] - x[:, 0].min() + 1e-6  # weights (sum > 0)
+            if parent == "single" or len(x) == 1:
+                # x = x[random.randint(0, n - 1)]  # random selection
+                x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
+            elif parent == "weighted":
+                x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
+
+            # Mutate
+            r = np.random  # method
+            r.seed(int(time.time()))
+            g = np.array([v[2] if len(v) == 3 else 1.0 for v in self.space.values()])  # gains 0-1
+            ng = len(self.space)
+            v = np.ones(ng)
+            while all(v == 1):  # mutate until a change occurs (prevent duplicates)
+                v = (g * (r.random(ng) < mutation) * r.randn(ng) * r.random() * sigma + 1).clip(0.3, 3.0)
+            hyp = {k: float(x[i + 1] * v[i]) for i, k in enumerate(self.space.keys())}
+        else:
+            hyp = {k: getattr(self.args, k) for k in self.space.keys()}
+
+        # Constrain to limits
+        for k, v in self.space.items():
+            hyp[k] = max(hyp[k], v[0])  # lower limit
+            hyp[k] = min(hyp[k], v[1])  # upper limit
+            hyp[k] = round(hyp[k], 5)  # significant digits
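+        # Illustrative sketch: a mutated lr0 of 0.2 would be clipped to its 0.1 upper bound above,
+        # and a value such as 0.0312345 would be rounded to 0.03123 (5 decimal places).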
+
+        return hyp
+
+    def __call__(self, model=None, iterations=10, cleanup=True):
+        """
+        Executes the hyperparameter evolution process when the Tuner instance is called.
+
+        This method runs the evolution loop for the given number of iterations, performing the following steps in each iteration:
+        1. Load the existing hyperparameters or initialize new ones.
+        2. Mutate the hyperparameters using the `_mutate` method.
+        3. Train a YOLO model with the mutated hyperparameters.
+        4. Log the fitness score and mutated hyperparameters to a CSV file.
+
+        Args:
+           model (Model): A pre-initialized YOLO model to be used for training.
+           iterations (int): The number of generations to run the evolution for.
+           cleanup (bool): Whether to delete iteration weights to reduce storage space used during tuning.
+
+        Note:
+           The method utilizes the `self.tune_csv` Path object to read and log hyperparameters and fitness scores.
+           Ensure this path is set correctly in the Tuner instance.
+        """
+        t0 = time.time()
+        best_save_dir, best_metrics = None, None
+        (self.tune_dir / "weights").mkdir(parents=True, exist_ok=True)
+        for i in range(iterations):
+            # Mutate hyperparameters
+            mutated_hyp = self._mutate()
+            LOGGER.info(f"{self.prefix}Starting iteration {i + 1}/{iterations} with hyperparameters: {mutated_hyp}")
+
+            metrics = {}
+            train_args = {**vars(self.args), **mutated_hyp}
+            save_dir = get_save_dir(get_cfg(train_args))
+            weights_dir = save_dir / "weights"
+            try:
+                # Train YOLO model with mutated hyperparameters (run in subprocess to avoid dataloader hang)
+                cmd = ["yolo", "train", *(f"{k}={v}" for k, v in train_args.items())]
+                return_code = subprocess.run(" ".join(cmd), check=True, shell=True).returncode
+                ckpt_file = weights_dir / ("best.pt" if (weights_dir / "best.pt").exists() else "last.pt")
+                metrics = torch.load(ckpt_file)["train_metrics"]
+                assert return_code == 0, "training failed"
+
+            except Exception as e:
+                LOGGER.warning(f"WARNING ❌️ training failure for hyperparameter tuning iteration {i + 1}\n{e}")
+
+            # Save results and mutated_hyp to CSV
+            fitness = metrics.get("fitness", 0.0)
+            log_row = [round(fitness, 5)] + [mutated_hyp[k] for k in self.space.keys()]
+            headers = "" if self.tune_csv.exists() else (",".join(["fitness"] + list(self.space.keys())) + "\n")
+            with open(self.tune_csv, "a") as f:
+                f.write(headers + ",".join(map(str, log_row)) + "\n")
+
+            # Get best results
+            x = np.loadtxt(self.tune_csv, ndmin=2, delimiter=",", skiprows=1)
+            fitness = x[:, 0]  # first column
+            best_idx = fitness.argmax()
+            best_is_current = best_idx == i
+            if best_is_current:
+                best_save_dir = save_dir
+                best_metrics = {k: round(v, 5) for k, v in metrics.items()}
+                for ckpt in weights_dir.glob("*.pt"):
+                    shutil.copy2(ckpt, self.tune_dir / "weights")
+            elif cleanup:
+                shutil.rmtree(weights_dir, ignore_errors=True)  # remove iteration weights/ dir to reduce storage space
+
+            # Plot tune results
+            plot_tune_results(self.tune_csv)
+
+            # Save and print tune results
+            header = (
+                f"{self.prefix}{i + 1}/{iterations} iterations complete ✅ ({time.time() - t0:.2f}s)\n"
+                f"{self.prefix}Results saved to {colorstr('bold', self.tune_dir)}\n"
+                f"{self.prefix}Best fitness={fitness[best_idx]} observed at iteration {best_idx + 1}\n"
+                f"{self.prefix}Best fitness metrics are {best_metrics}\n"
+                f"{self.prefix}Best fitness model is {best_save_dir}\n"
+                f"{self.prefix}Best fitness hyperparameters are printed below.\n"
+            )
+            LOGGER.info("\n" + header)
+            data = {k: float(x[best_idx, i + 1]) for i, k in enumerate(self.space.keys())}
+            yaml_save(
+                self.tune_dir / "best_hyperparameters.yaml",
+                data=data,
+                header=remove_colorstr(header.replace(self.prefix, "# ")) + "\n",
+            )
+            yaml_print(self.tune_dir / "best_hyperparameters.yaml")

+ 341 - 0
ultralytics/engine/validator.py

@@ -0,0 +1,341 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+"""
+Check a model's accuracy on a test or val split of a dataset.
+
+Usage:
+    $ yolo mode=val model=yolov8n.pt data=coco8.yaml imgsz=640
+
+Usage - formats:
+    $ yolo mode=val model=yolov8n.pt                 # PyTorch
+                          yolov8n.torchscript        # TorchScript
+                          yolov8n.onnx               # ONNX Runtime or OpenCV DNN with dnn=True
+                          yolov8n_openvino_model     # OpenVINO
+                          yolov8n.engine             # TensorRT
+                          yolov8n.mlpackage          # CoreML (macOS-only)
+                          yolov8n_saved_model        # TensorFlow SavedModel
+                          yolov8n.pb                 # TensorFlow GraphDef
+                          yolov8n.tflite             # TensorFlow Lite
+                          yolov8n_edgetpu.tflite     # TensorFlow Edge TPU
+                          yolov8n_paddle_model       # PaddlePaddle
+                          yolov8n.mnn                # MNN
+                          yolov8n_ncnn_model         # NCNN
+"""
+
+import json
+import time
+from pathlib import Path
+
+import numpy as np
+import torch
+
+from ultralytics.cfg import get_cfg, get_save_dir
+from ultralytics.data.utils import check_cls_dataset, check_det_dataset
+from ultralytics.nn.autobackend import AutoBackend
+from ultralytics.utils import LOGGER, TQDM, callbacks, colorstr, emojis
+from ultralytics.utils.checks import check_imgsz
+from ultralytics.utils.ops import Profile
+from ultralytics.utils.torch_utils import de_parallel, select_device, smart_inference_mode
+
+
+class BaseValidator:
+    """
+    BaseValidator.
+
+    A base class for creating validators.
+
+    Attributes:
+        args (SimpleNamespace): Configuration for the validator.
+        dataloader (DataLoader): Dataloader to use for validation.
+        pbar (tqdm): Progress bar to update during validation.
+        model (nn.Module): Model to validate.
+        data (dict): Data dictionary.
+        device (torch.device): Device to use for validation.
+        batch_i (int): Current batch index.
+        training (bool): Whether the model is in training mode.
+        names (dict): Class names.
+        seen: Records the number of images seen so far during validation.
+        stats: Placeholder for statistics during validation.
+        confusion_matrix: Placeholder for a confusion matrix.
+        nc: Number of classes.
+        iouv (torch.Tensor): IoU thresholds from 0.50 to 0.95 in steps of 0.05.
+        jdict (dict): Dictionary to store JSON validation results.
+        speed (dict): Dictionary with keys 'preprocess', 'inference', 'loss', 'postprocess' and their respective
+                      batch processing times in milliseconds.
+        save_dir (Path): Directory to save results.
+        plots (dict): Dictionary to store plots for visualization.
+        callbacks (dict): Dictionary to store various callback functions.
+    """
+
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
+        """
+        Initializes a BaseValidator instance.
+
+        Args:
+            dataloader (torch.utils.data.DataLoader): Dataloader to be used for validation.
+            save_dir (Path, optional): Directory to save results.
+            pbar (tqdm.tqdm): Progress bar for displaying progress.
+            args (SimpleNamespace): Configuration for the validator.
+            _callbacks (dict): Dictionary to store various callback functions.
+        """
+        self.args = get_cfg(overrides=args)
+        self.dataloader = dataloader
+        self.pbar = pbar
+        self.stride = None
+        self.data = None
+        self.device = None
+        self.batch_i = None
+        self.training = True
+        self.names = None
+        self.seen = None
+        self.stats = None
+        self.confusion_matrix = None
+        self.nc = None
+        self.iouv = None
+        self.jdict = None
+        self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0}
+
+        self.save_dir = save_dir or get_save_dir(self.args)
+        (self.save_dir / "labels" if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
+        if self.args.conf is None:
+            self.args.conf = 0.001  # default conf=0.001
+        self.args.imgsz = check_imgsz(self.args.imgsz, max_dim=1)
+
+        self.plots = {}
+        self.callbacks = _callbacks or callbacks.get_default_callbacks()
+
+    @smart_inference_mode()
+    def __call__(self, trainer=None, model=None):
+        """Executes validation process, running inference on dataloader and computing performance metrics."""
+        self.training = trainer is not None
+        augment = self.args.augment and (not self.training)
+        if self.training:
+            self.device = trainer.device
+            self.data = trainer.data
+            # force FP16 val during training
+            self.args.half = self.device.type != "cpu" and trainer.amp
+            model = trainer.ema.ema or trainer.model
+            model = model.half() if self.args.half else model.float()
+            # self.model = model
+            self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device)
+            self.args.plots &= trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1)
+            model.eval()
+        else:
+            if str(self.args.model).endswith(".yaml") and model is None:
+                LOGGER.warning("WARNING ⚠️ validating an untrained model YAML will result in 0 mAP.")
+            callbacks.add_integration_callbacks(self)
+            model = AutoBackend(
+                weights=model or self.args.model,
+                device=select_device(self.args.device, self.args.batch),
+                dnn=self.args.dnn,
+                data=self.args.data,
+                fp16=self.args.half,
+            )
+            # self.model = model
+            self.device = model.device  # update device
+            self.args.half = model.fp16  # update half
+            stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
+            imgsz = check_imgsz(self.args.imgsz, stride=stride)
+            if engine:
+                self.args.batch = model.batch_size
+            elif not pt and not jit:
+                self.args.batch = model.metadata.get("batch", 1)  # export.py models default to batch-size 1
+                LOGGER.info(f"Setting batch={self.args.batch} input of shape ({self.args.batch}, 3, {imgsz}, {imgsz})")
+
+            if str(self.args.data).split(".")[-1] in {"yaml", "yml"}:
+                self.data = check_det_dataset(self.args.data)
+            elif self.args.task == "classify":
+                self.data = check_cls_dataset(self.args.data, split=self.args.split)
+            else:
+                raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' for task={self.args.task} not found ❌"))
+
+            if self.device.type in {"cpu", "mps"}:
+                self.args.workers = 0  # faster CPU val as time dominated by inference, not dataloading
+            if not pt:
+                self.args.rect = False
+            self.stride = model.stride  # used in get_dataloader() for padding
+            self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch)
+
+            model.eval()
+            model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz))  # warmup
+
+        self.run_callbacks("on_val_start")
+        dt = (
+            Profile(device=self.device),
+            Profile(device=self.device),
+            Profile(device=self.device),
+            Profile(device=self.device),
+        )
+        bar = TQDM(self.dataloader, desc=self.get_desc(), total=len(self.dataloader))
+        self.init_metrics(de_parallel(model))
+        self.jdict = []  # empty before each val
+        for batch_i, batch in enumerate(bar):
+            self.run_callbacks("on_val_batch_start")
+            self.batch_i = batch_i
+            # Preprocess
+            with dt[0]:
+                batch = self.preprocess(batch)
+
+            # Inference
+            with dt[1]:
+                preds = model(batch["img"], augment=augment)
+
+            # Loss
+            with dt[2]:
+                if self.training:
+                    self.loss += model.loss(batch, preds)[1]
+
+            # Postprocess
+            with dt[3]:
+                preds = self.postprocess(preds)
+
+            self.update_metrics(preds, batch)
+            if self.args.plots and batch_i < 3:
+                self.plot_val_samples(batch, batch_i)
+                self.plot_predictions(batch, preds, batch_i)
+
+            self.run_callbacks("on_val_batch_end")
+        stats = self.get_stats()
+        self.check_stats(stats)
+        self.speed = dict(zip(self.speed.keys(), (x.t / len(self.dataloader.dataset) * 1e3 for x in dt)))
+        self.finalize_metrics()
+        self.print_results()
+        self.run_callbacks("on_val_end")
+        if self.training:
+            model.float()
+            results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix="val")}
+            return {k: round(float(v), 5) for k, v in results.items()}  # return results as 5 decimal place floats
+        else:
+            LOGGER.info(
+                "Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
+                    *tuple(self.speed.values())
+                )
+            )
+            if self.args.save_json and self.jdict:
+                with open(str(self.save_dir / "predictions.json"), "w") as f:
+                    LOGGER.info(f"Saving {f.name}...")
+                    json.dump(self.jdict, f)  # flatten and save
+                stats = self.eval_json(stats)  # update stats
+            if self.args.plots or self.args.save_json:
+                LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
+            return stats
+
+    def match_predictions(self, pred_classes, true_classes, iou, use_scipy=False):
+        """
+        Matches predictions to ground truth objects (pred_classes, true_classes) using IoU.
+
+        Args:
+            pred_classes (torch.Tensor): Predicted class indices of shape(N,).
+            true_classes (torch.Tensor): Target class indices of shape(M,).
+            iou (torch.Tensor): An NxM tensor containing the pairwise IoU values for predictions and ground truth.
+            use_scipy (bool): Whether to use scipy for matching (more precise).
+
+        Returns:
+            (torch.Tensor): Correct tensor of shape(N,10) for 10 IoU thresholds.
+        """
+        # Dx10 matrix, where D - detections, 10 - IoU thresholds
+        correct = np.zeros((pred_classes.shape[0], self.iouv.shape[0])).astype(bool)
+        # LxD matrix where L - labels (rows), D - detections (columns)
+        correct_class = true_classes[:, None] == pred_classes
+        iou = iou * correct_class  # zero out the wrong classes
+        iou = iou.cpu().numpy()
+        for i, threshold in enumerate(self.iouv.cpu().tolist()):
+            if use_scipy:
+                # WARNING: known issue that reduces mAP in https://github.com/ultralytics/ultralytics/pull/4708
+                import scipy  # scope import to avoid importing for all commands
+
+                cost_matrix = iou * (iou >= threshold)
+                if cost_matrix.any():
+                    labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix)
+                    valid = cost_matrix[labels_idx, detections_idx] > 0
+                    if valid.any():
+                        correct[detections_idx[valid], i] = True
+            else:
+                matches = np.nonzero(iou >= threshold)  # IoU > threshold and classes match
+                matches = np.array(matches).T
+                if matches.shape[0]:
+                    if matches.shape[0] > 1:
+                        matches = matches[iou[matches[:, 0], matches[:, 1]].argsort()[::-1]]
+                        matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+                        # matches = matches[matches[:, 2].argsort()[::-1]]
+                        matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+                    correct[matches[:, 1].astype(int), i] = True
+        return torch.tensor(correct, dtype=torch.bool, device=pred_classes.device)
+
+    def add_callback(self, event: str, callback):
+        """Appends the given callback."""
+        self.callbacks[event].append(callback)
+
+    def run_callbacks(self, event: str):
+        """Runs all callbacks associated with a specified event."""
+        for callback in self.callbacks.get(event, []):
+            callback(self)
+
+    def get_dataloader(self, dataset_path, batch_size):
+        """Get data loader from dataset path and batch size."""
+        raise NotImplementedError("get_dataloader function not implemented for this validator")
+
+    def build_dataset(self, img_path):
+        """Build dataset."""
+        raise NotImplementedError("build_dataset function not implemented in validator")
+
+    def preprocess(self, batch):
+        """Preprocesses an input batch."""
+        return batch
+
+    def postprocess(self, preds):
+        """Preprocesses the predictions."""
+        return preds
+
+    def init_metrics(self, model):
+        """Initialize performance metrics for the YOLO model."""
+        pass
+
+    def update_metrics(self, preds, batch):
+        """Updates metrics based on predictions and batch."""
+        pass
+
+    def finalize_metrics(self, *args, **kwargs):
+        """Finalizes and returns all metrics."""
+        pass
+
+    def get_stats(self):
+        """Returns statistics about the model's performance."""
+        return {}
+
+    def check_stats(self, stats):
+        """Checks statistics."""
+        pass
+
+    def print_results(self):
+        """Prints the results of the model's predictions."""
+        pass
+
+    def get_desc(self):
+        """Get description of the YOLO model."""
+        pass
+
+    @property
+    def metric_keys(self):
+        """Returns the metric keys used in YOLO training/validation."""
+        return []
+
+    def on_plot(self, name, data=None):
+        """Registers plots (e.g. to be consumed in callbacks)."""
+        self.plots[Path(name)] = {"data": data, "timestamp": time.time()}
+
+    # TODO: may need to put these following functions into callback
+    def plot_val_samples(self, batch, ni):
+        """Plots validation samples during training."""
+        pass
+
+    def plot_predictions(self, batch, preds, ni):
+        """Plots YOLO model predictions on batch images."""
+        pass
+
+    def pred_to_json(self, preds, batch):
+        """Convert predictions to JSON format."""
+        pass
+
+    def eval_json(self, stats):
+        """Evaluate and return JSON format of prediction statistics."""
+        pass

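The non-scipy branch of `match_predictions` above performs a greedy one-to-one assignment: candidate (label, detection) pairs that pass each IoU threshold are sorted by IoU, then duplicate detections and duplicate labels are discarded. A minimal standalone sketch of that scheme, using made-up class vectors and an illustrative IoU matrix rather than real validator output:

```python
import numpy as np
import torch

iouv = torch.linspace(0.5, 0.95, 10)  # the 10 IoU thresholds used for mAP50-95

# Hypothetical toy example: 2 labels (rows) vs 3 detections (columns)
pred_classes = torch.tensor([0, 0, 1])
true_classes = torch.tensor([0, 1])
iou = torch.tensor([[0.9, 0.6, 0.0], [0.0, 0.1, 0.7]])

correct = np.zeros((pred_classes.shape[0], iouv.shape[0]), dtype=bool)
iou = (iou * (true_classes[:, None] == pred_classes)).numpy()  # zero out class-mismatched pairs
for i, threshold in enumerate(iouv.tolist()):
    matches = np.array(np.nonzero(iou >= threshold)).T  # (label_idx, detection_idx) candidate pairs
    if matches.shape[0] > 1:
        matches = matches[iou[matches[:, 0], matches[:, 1]].argsort()[::-1]]  # highest IoU first
        matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # keep one label per detection
        matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # keep one detection per label
    if matches.shape[0]:
        correct[matches[:, 1].astype(int), i] = True

print(correct[:, 0])  # [True, False, True]: detection 1 loses label 0 to the higher-IoU detection 0
```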
+ 146 - 0
ultralytics/hub/__init__.py

@@ -0,0 +1,146 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import requests
+
+from ultralytics.data.utils import HUBDatasetStats
+from ultralytics.hub.auth import Auth
+from ultralytics.hub.session import HUBTrainingSession
+from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, events
+from ultralytics.utils import LOGGER, SETTINGS, checks
+
+__all__ = (
+    "PREFIX",
+    "HUB_WEB_ROOT",
+    "HUBTrainingSession",
+    "login",
+    "logout",
+    "reset_model",
+    "export_fmts_hub",
+    "export_model",
+    "get_export",
+    "check_dataset",
+    "events",
+)
+
+
+def login(api_key: str = None, save=True) -> bool:
+    """
+    Log in to the Ultralytics HUB API using the provided API key.
+
+    The session itself is not stored; once authentication succeeds, a new session is created when needed using the
+    API key saved in SETTINGS or the HUB_API_KEY environment variable.
+
+    Args:
+        api_key (str, optional): API key to use for authentication.
+            If not provided, it will be retrieved from SETTINGS or HUB_API_KEY environment variable.
+        save (bool, optional): Whether to save the API key to SETTINGS if authentication is successful.
+
+    Returns:
+        (bool): True if authentication is successful, False otherwise.
+    """
+    checks.check_requirements("hub-sdk>=0.0.12")
+    from hub_sdk import HUBClient
+
+    api_key_url = f"{HUB_WEB_ROOT}/settings?tab=api+keys"  # set the redirect URL
+    saved_key = SETTINGS.get("api_key")
+    active_key = api_key or saved_key
+    credentials = {"api_key": active_key} if active_key and active_key != "" else None  # set credentials
+
+    client = HUBClient(credentials)  # initialize HUBClient
+
+    if client.authenticated:
+        # Successfully authenticated with HUB
+
+        if save and client.api_key != saved_key:
+            SETTINGS.update({"api_key": client.api_key})  # update settings with valid API key
+
+        # Set message based on whether key was provided or retrieved from settings
+        log_message = (
+            "New authentication successful ✅" if client.api_key == api_key or not credentials else "Authenticated ✅"
+        )
+        LOGGER.info(f"{PREFIX}{log_message}")
+
+        return True
+    else:
+        # Failed to authenticate with HUB
+        LOGGER.info(f"{PREFIX}Get API key from {api_key_url} and then run 'yolo login API_KEY'")
+        return False
+
+
+def logout():
+    """
+    Log out of Ultralytics HUB by removing the API key from the settings file. To log in again, use 'yolo login'.
+
+    Example:
+        ```python
+        from ultralytics import hub
+
+        hub.logout()
+        ```
+    """
+    SETTINGS["api_key"] = ""
+    LOGGER.info(f"{PREFIX}logged out ✅. To log in again, use 'yolo login'.")
+
+
+def reset_model(model_id=""):
+    """Reset a trained model to an untrained state."""
+    r = requests.post(f"{HUB_API_ROOT}/model-reset", json={"modelId": model_id}, headers={"x-api-key": Auth().api_key})
+    if r.status_code == 200:
+        LOGGER.info(f"{PREFIX}Model reset successfully")
+        return
+    LOGGER.warning(f"{PREFIX}Model reset failure {r.status_code} {r.reason}")
+
+
+def export_fmts_hub():
+    """Returns a list of HUB-supported export formats."""
+    from ultralytics.engine.exporter import export_formats
+
+    return list(export_formats()["Argument"][1:]) + ["ultralytics_tflite", "ultralytics_coreml"]
+
+
+def export_model(model_id="", format="torchscript"):
+    """Export a model to all formats."""
+    assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
+    r = requests.post(
+        f"{HUB_API_ROOT}/v1/models/{model_id}/export", json={"format": format}, headers={"x-api-key": Auth().api_key}
+    )
+    assert r.status_code == 200, f"{PREFIX}{format} export failure {r.status_code} {r.reason}"
+    LOGGER.info(f"{PREFIX}{format} export started ✅")
+
+
+def get_export(model_id="", format="torchscript"):
+    """Get an exported model dictionary with download URL."""
+    assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}"
+    r = requests.post(
+        f"{HUB_API_ROOT}/get-export",
+        json={"apiKey": Auth().api_key, "modelId": model_id, "format": format},
+        headers={"x-api-key": Auth().api_key},
+    )
+    assert r.status_code == 200, f"{PREFIX}{format} get_export failure {r.status_code} {r.reason}"
+    return r.json()
+
+
+def check_dataset(path: str, task: str) -> None:
+    """
+    Check a HUB dataset zip file for errors before it is uploaded to the HUB. Usage examples are given below.
+
+    Args:
+        path (str): Path to data.zip (with data.yaml inside data.zip).
+        task (str): Dataset task. Options are 'detect', 'segment', 'pose', 'classify', 'obb'.
+
+    Example:
+        Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
+            i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
+        ```python
+        from ultralytics.hub import check_dataset
+
+        check_dataset("path/to/coco8.zip", task="detect")  # detect dataset
+        check_dataset("path/to/coco8-seg.zip", task="segment")  # segment dataset
+        check_dataset("path/to/coco8-pose.zip", task="pose")  # pose dataset
+        check_dataset("path/to/dota8.zip", task="obb")  # OBB dataset
+        check_dataset("path/to/imagenet10.zip", task="classify")  # classification dataset
+        ```
+    """
+    HUBDatasetStats(path=path, task=task).get_json()
+    LOGGER.info(f"Checks completed correctly ✅. Upload this dataset to {HUB_WEB_ROOT}/datasets/.")

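A short end-to-end sketch of the module above: authenticate, validate a dataset zip, inspect the supported export formats, and request an export. The API key, model ID, and zip path are placeholders, not real values:

```python
from ultralytics import hub

if hub.login("YOUR_API_KEY"):  # placeholder key; saved to SETTINGS on success
    hub.check_dataset("path/to/coco8.zip", task="detect")  # validate a dataset zip before upload
    print(hub.export_fmts_hub())  # formats accepted by export_model(), e.g. 'onnx', 'engine', ...
    hub.export_model(model_id="YOUR_MODEL_ID", format="onnx")  # placeholder model ID
    hub.logout()
```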
+ 140 - 0
ultralytics/hub/auth.py

@@ -0,0 +1,140 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import requests
+
+from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, request_with_credentials
+from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, emojis
+
+API_KEY_URL = f"{HUB_WEB_ROOT}/settings?tab=api+keys"
+
+
+class Auth:
+    """
+    Manages authentication processes including API key handling, cookie-based authentication, and header generation.
+
+    The class supports different methods of authentication:
+    1. Directly using an API key.
+    2. Authenticating using browser cookies (specifically in Google Colab).
+    3. Prompting the user to enter an API key.
+
+    Attributes:
+        id_token (str or bool): Token used for identity verification, initialized as False.
+        api_key (str or bool): API key for authentication, initialized as False.
+        model_key (bool): Placeholder for model key, initialized as False.
+    """
+
+    id_token = api_key = model_key = False
+
+    def __init__(self, api_key="", verbose=False):
+        """
+        Initialize Auth class and authenticate user.
+
+        Handles API key validation, Google Colab authentication, and new key requests. Updates SETTINGS upon successful
+        authentication.
+
+        Args:
+            api_key (str): API key or combined key_id format.
+            verbose (bool): Enable verbose logging.
+        """
+        # Split the input API key in case it contains a combined key_model and keep only the API key part
+        api_key = api_key.split("_")[0]
+
+        # Set API key attribute as value passed or SETTINGS API key if none passed
+        self.api_key = api_key or SETTINGS.get("api_key", "")
+
+        # If an API key is provided
+        if self.api_key:
+            # If the provided API key matches the API key in the SETTINGS
+            if self.api_key == SETTINGS.get("api_key"):
+                # Log that the user is already logged in
+                if verbose:
+                    LOGGER.info(f"{PREFIX}Authenticated ✅")
+                return
+            else:
+                # Attempt to authenticate with the provided API key
+                success = self.authenticate()
+        # If the API key is not provided and the environment is a Google Colab notebook
+        elif IS_COLAB:
+            # Attempt to authenticate using browser cookies
+            success = self.auth_with_cookies()
+        else:
+            # Request an API key
+            success = self.request_api_key()
+
+        # Update SETTINGS with the new API key after successful authentication
+        if success:
+            SETTINGS.update({"api_key": self.api_key})
+            # Log that the new login was successful
+            if verbose:
+                LOGGER.info(f"{PREFIX}New authentication successful ✅")
+        elif verbose:
+            LOGGER.info(f"{PREFIX}Get API key from {API_KEY_URL} and then run 'yolo login API_KEY'")
+
+    def request_api_key(self, max_attempts=3):
+        """
+        Prompt the user to input their API key.
+
+        Returns True if authentication succeeds; raises ConnectionError after max_attempts failed attempts.
+        """
+        import getpass
+
+        for attempts in range(max_attempts):
+            LOGGER.info(f"{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}")
+            input_key = getpass.getpass(f"Enter API key from {API_KEY_URL} ")
+            self.api_key = input_key.split("_")[0]  # remove model id if present
+            if self.authenticate():
+                return True
+        raise ConnectionError(emojis(f"{PREFIX}Failed to authenticate ❌"))
+
+    def authenticate(self) -> bool:
+        """
+        Attempt to authenticate with the server using either id_token or API key.
+
+        Returns:
+            (bool): True if authentication is successful, False otherwise.
+        """
+        try:
+            if header := self.get_auth_header():
+                r = requests.post(f"{HUB_API_ROOT}/v1/auth", headers=header)
+                if not r.json().get("success", False):
+                    raise ConnectionError("Unable to authenticate.")
+                return True
+            raise ConnectionError("User has not authenticated locally.")
+        except ConnectionError:
+            self.id_token = self.api_key = False  # reset invalid
+            LOGGER.warning(f"{PREFIX}Invalid API key ⚠️")
+            return False
+
+    def auth_with_cookies(self) -> bool:
+        """
+        Attempt to fetch authentication via cookies and set id_token. User must be logged in to HUB and running in a
+        supported browser.
+
+        Returns:
+            (bool): True if authentication is successful, False otherwise.
+        """
+        if not IS_COLAB:
+            return False  # Currently only works with Colab
+        try:
+            authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto")
+            if authn.get("success", False):
+                self.id_token = authn.get("data", {}).get("idToken", None)
+                self.authenticate()
+                return True
+            raise ConnectionError("Unable to fetch browser authentication details.")
+        except ConnectionError:
+            self.id_token = False  # reset invalid
+            return False
+
+    def get_auth_header(self):
+        """
+        Get the authentication header for making API requests.
+
+        Returns:
+            (dict): The authentication header if id_token or API key is set, None otherwise.
+        """
+        if self.id_token:
+            return {"authorization": f"Bearer {self.id_token}"}
+        elif self.api_key:
+            return {"x-api-key": self.api_key}
+        # else returns None

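The Auth class above resolves an API key from its argument, SETTINGS, Colab cookies, or an interactive prompt, and then exposes the credentials via get_auth_header(). A minimal usage sketch with a placeholder key (an invalid key simply logs a warning and leaves the header empty):

```python
from ultralytics.hub.auth import Auth

auth = Auth(api_key="YOUR_API_KEY", verbose=True)  # placeholder key; validated against the HUB API
headers = auth.get_auth_header()  # {"x-api-key": ...}, {"authorization": "Bearer ..."}, or None
if headers:
    print("Authenticated; subsequent HUB requests can send:", list(headers))
else:
    print("Not authenticated; run 'yolo login API_KEY' or pass a valid key")
```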
+ 159 - 0
ultralytics/hub/google/__init__.py

@@ -0,0 +1,159 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import concurrent.futures
+import statistics
+import time
+from typing import List, Optional, Tuple
+
+import requests
+
+
+class GCPRegions:
+    """
+    A class for managing and analyzing Google Cloud Platform (GCP) regions.
+
+    This class provides functionality to initialize, categorize, and analyze GCP regions based on their
+    geographical location, tier classification, and network latency.
+
+    Attributes:
+        regions (Dict[str, Tuple[int, str, str]]): A dictionary of GCP regions with their tier, city, and country.
+
+    Methods:
+        tier1: Returns a list of tier 1 GCP regions.
+        tier2: Returns a list of tier 2 GCP regions.
+        lowest_latency: Determines the GCP region(s) with the lowest network latency.
+
+    Examples:
+        >>> from ultralytics.hub.google import GCPRegions
+        >>> regions = GCPRegions()
+        >>> lowest_latency_region = regions.lowest_latency(verbose=True, attempts=3)
+        >>> print(f"Lowest latency region: {lowest_latency_region[0][0]}")
+    """
+
+    def __init__(self):
+        """Initializes the GCPRegions class with predefined Google Cloud Platform regions and their details."""
+        self.regions = {
+            "asia-east1": (1, "Taiwan", "China"),
+            "asia-east2": (2, "Hong Kong", "China"),
+            "asia-northeast1": (1, "Tokyo", "Japan"),
+            "asia-northeast2": (1, "Osaka", "Japan"),
+            "asia-northeast3": (2, "Seoul", "South Korea"),
+            "asia-south1": (2, "Mumbai", "India"),
+            "asia-south2": (2, "Delhi", "India"),
+            "asia-southeast1": (2, "Jurong West", "Singapore"),
+            "asia-southeast2": (2, "Jakarta", "Indonesia"),
+            "australia-southeast1": (2, "Sydney", "Australia"),
+            "australia-southeast2": (2, "Melbourne", "Australia"),
+            "europe-central2": (2, "Warsaw", "Poland"),
+            "europe-north1": (1, "Hamina", "Finland"),
+            "europe-southwest1": (1, "Madrid", "Spain"),
+            "europe-west1": (1, "St. Ghislain", "Belgium"),
+            "europe-west10": (2, "Berlin", "Germany"),
+            "europe-west12": (2, "Turin", "Italy"),
+            "europe-west2": (2, "London", "United Kingdom"),
+            "europe-west3": (2, "Frankfurt", "Germany"),
+            "europe-west4": (1, "Eemshaven", "Netherlands"),
+            "europe-west6": (2, "Zurich", "Switzerland"),
+            "europe-west8": (1, "Milan", "Italy"),
+            "europe-west9": (1, "Paris", "France"),
+            "me-central1": (2, "Doha", "Qatar"),
+            "me-west1": (1, "Tel Aviv", "Israel"),
+            "northamerica-northeast1": (2, "Montreal", "Canada"),
+            "northamerica-northeast2": (2, "Toronto", "Canada"),
+            "southamerica-east1": (2, "São Paulo", "Brazil"),
+            "southamerica-west1": (2, "Santiago", "Chile"),
+            "us-central1": (1, "Iowa", "United States"),
+            "us-east1": (1, "South Carolina", "United States"),
+            "us-east4": (1, "Northern Virginia", "United States"),
+            "us-east5": (1, "Columbus", "United States"),
+            "us-south1": (1, "Dallas", "United States"),
+            "us-west1": (1, "Oregon", "United States"),
+            "us-west2": (2, "Los Angeles", "United States"),
+            "us-west3": (2, "Salt Lake City", "United States"),
+            "us-west4": (2, "Las Vegas", "United States"),
+        }
+
+    def tier1(self) -> List[str]:
+        """Returns a list of GCP regions classified as tier 1 based on predefined criteria."""
+        return [region for region, info in self.regions.items() if info[0] == 1]
+
+    def tier2(self) -> List[str]:
+        """Returns a list of GCP regions classified as tier 2 based on predefined criteria."""
+        return [region for region, info in self.regions.items() if info[0] == 2]
+
+    @staticmethod
+    def _ping_region(region: str, attempts: int = 1) -> Tuple[str, float, float, float, float]:
+        """Pings a specified GCP region and returns latency statistics: mean, min, max, and standard deviation."""
+        url = f"https://{region}-docker.pkg.dev"
+        latencies = []
+        for _ in range(attempts):
+            try:
+                start_time = time.time()
+                _ = requests.head(url, timeout=5)
+                latency = (time.time() - start_time) * 1000  # convert latency to milliseconds
+                if latency != float("inf"):
+                    latencies.append(latency)
+            except requests.RequestException:
+                pass
+        if not latencies:
+            return region, float("inf"), float("inf"), float("inf"), float("inf")
+
+        std_dev = statistics.stdev(latencies) if len(latencies) > 1 else 0
+        return region, statistics.mean(latencies), std_dev, min(latencies), max(latencies)
+
+    def lowest_latency(
+        self,
+        top: int = 1,
+        verbose: bool = False,
+        tier: Optional[int] = None,
+        attempts: int = 1,
+    ) -> List[Tuple[str, float, float, float, float]]:
+        """
+        Determines the GCP regions with the lowest latency based on ping tests.
+
+        Args:
+            top (int): Number of top regions to return.
+            verbose (bool): If True, prints detailed latency information for all tested regions.
+            tier (int | None): Filter regions by tier (1 or 2). If None, all regions are tested.
+            attempts (int): Number of ping attempts per region.
+
+        Returns:
+            (List[Tuple[str, float, float, float, float]]): List of tuples containing region information and
+            latency statistics. Each tuple contains (region, mean_latency, std_dev, min_latency, max_latency).
+
+        Examples:
+            >>> regions = GCPRegions()
+            >>> results = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=2)
+            >>> print(results[0][0])  # Print the name of the lowest latency region
+        """
+        if verbose:
+            print(f"Testing GCP regions for latency (with {attempts} {'retry' if attempts == 1 else 'attempts'})...")
+
+        regions_to_test = [k for k, v in self.regions.items() if v[0] == tier] if tier else list(self.regions.keys())
+        with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
+            results = list(executor.map(lambda r: self._ping_region(r, attempts), regions_to_test))
+
+        sorted_results = sorted(results, key=lambda x: x[1])
+
+        if verbose:
+            print(f"{'Region':<25} {'Location':<35} {'Tier':<5} Latency (ms)")
+            for region, mean, std, min_, max_ in sorted_results:
+                tier, city, country = self.regions[region]
+                location = f"{city}, {country}"
+                if mean == float("inf"):
+                    print(f"{region:<25} {location:<35} {tier:<5} Timeout")
+                else:
+                    print(f"{region:<25} {location:<35} {tier:<5} {mean:.0f} ± {std:.0f} ({min_:.0f} - {max_:.0f})")
+            print(f"\nLowest latency region{'s' if top > 1 else ''}:")
+            for region, mean, std, min_, max_ in sorted_results[:top]:
+                tier, city, country = self.regions[region]
+                location = f"{city}, {country}"
+                print(f"{region} ({location}, {mean:.0f} ± {std:.0f} ms ({min_:.0f} - {max_:.0f}))")
+
+        return sorted_results[:top]
+
+
+# Usage example
+if __name__ == "__main__":
+    regions = GCPRegions()
+    top_3_latency_tier1 = regions.lowest_latency(top=3, verbose=True, tier=1, attempts=3)

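lowest_latency() fans the pings out over a thread pool and sorts regions by mean latency. A stripped-down sketch of that same pattern against a couple of endpoints (the URLs are just the Artifact Registry hosts used above, chosen for illustration):

```python
import concurrent.futures
import statistics
import time

import requests


def ping(url: str, attempts: int = 3):
    """Return (url, mean, std, min, max) latency in milliseconds over several HEAD requests."""
    latencies = []
    for _ in range(attempts):
        try:
            t0 = time.time()
            requests.head(url, timeout=5)
            latencies.append((time.time() - t0) * 1000)
        except requests.RequestException:
            pass
    if not latencies:
        return url, float("inf"), float("inf"), float("inf"), float("inf")
    std = statistics.stdev(latencies) if len(latencies) > 1 else 0
    return url, statistics.mean(latencies), std, min(latencies), max(latencies)


urls = ["https://us-central1-docker.pkg.dev", "https://europe-west4-docker.pkg.dev"]
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
    results = sorted(executor.map(ping, urls), key=lambda r: r[1])  # lowest mean latency first
for url, mean, std, lo, hi in results:
    print(f"{url}: {mean:.0f} ± {std:.0f} ms" if mean != float("inf") else f"{url}: timeout")
```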
+ 390 - 0
ultralytics/hub/session.py

@@ -0,0 +1,390 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import shutil
+import threading
+import time
+from http import HTTPStatus
+from pathlib import Path
+from urllib.parse import parse_qs, urlparse
+
+import requests
+
+from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX, TQDM
+from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, __version__, checks, emojis
+from ultralytics.utils.errors import HUBModelError
+
+AGENT_NAME = f"python-{__version__}-colab" if IS_COLAB else f"python-{__version__}-local"
+
+
+class HUBTrainingSession:
+    """
+    HUB training session for Ultralytics HUB YOLO models. Handles model initialization, heartbeats, and checkpointing.
+
+    Attributes:
+        model_id (str): Identifier for the YOLO model being trained.
+        model_url (str): URL for the model in Ultralytics HUB.
+        rate_limits (dict): Rate limits for different API calls (in seconds).
+        timers (dict): Timers for rate limiting.
+        metrics_queue (dict): Queue for the model's metrics.
+        model (dict): Model data fetched from Ultralytics HUB.
+    """
+
+    def __init__(self, identifier):
+        """
+        Initialize the HUBTrainingSession with the provided model identifier.
+
+        Args:
+            identifier (str): Model identifier used to initialize the HUB training session.
+                It can be a URL string or a model key in a specific format.
+
+        Raises:
+            ValueError: If the provided model identifier is invalid.
+            ConnectionError: If connecting with global API key is not supported.
+            ModuleNotFoundError: If hub-sdk package is not installed.
+        """
+        from hub_sdk import HUBClient
+
+        self.rate_limits = {"metrics": 3, "ckpt": 900, "heartbeat": 300}  # rate limits (seconds)
+        self.metrics_queue = {}  # holds metrics for each epoch until upload
+        self.metrics_upload_failed_queue = {}  # holds metrics for each epoch if upload failed
+        self.timers = {}  # holds timers in ultralytics/utils/callbacks/hub.py
+        self.model = None
+        self.model_url = None
+        self.model_file = None
+        self.train_args = None
+
+        # Parse input
+        api_key, model_id, self.filename = self._parse_identifier(identifier)
+
+        # Get credentials
+        active_key = api_key or SETTINGS.get("api_key")
+        credentials = {"api_key": active_key} if active_key else None  # set credentials
+
+        # Initialize client
+        self.client = HUBClient(credentials)
+
+        # Load models
+        try:
+            if model_id:
+                self.load_model(model_id)  # load existing model
+            else:
+                self.model = self.client.model()  # load empty model
+        except Exception:
+            if identifier.startswith(f"{HUB_WEB_ROOT}/models/") and not self.client.authenticated:
+                LOGGER.warning(
+                    f"{PREFIX}WARNING ⚠️ Please log in using 'yolo login API_KEY'. "
+                    "You can find your API Key at: https://hub.ultralytics.com/settings?tab=api+keys."
+                )
+
+    @classmethod
+    def create_session(cls, identifier, args=None):
+        """Class method to create an authenticated HUBTrainingSession or return None."""
+        try:
+            session = cls(identifier)
+            if args and not identifier.startswith(f"{HUB_WEB_ROOT}/models/"):  # not a HUB model URL
+                session.create_model(args)
+                assert session.model.id, "HUB model not loaded correctly"
+            return session
+        # PermissionError and ModuleNotFoundError indicate hub-sdk not installed
+        except (PermissionError, ModuleNotFoundError, AssertionError):
+            return None
+
+    def load_model(self, model_id):
+        """Loads an existing model from Ultralytics HUB using the provided model identifier."""
+        self.model = self.client.model(model_id)
+        if not self.model.data:  # then model does not exist
+            raise ValueError(emojis("❌ The specified HUB model does not exist"))  # TODO: improve error handling
+
+        self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}"
+        if self.model.is_trained():
+            print(emojis(f"Loading trained HUB model {self.model_url} 🚀"))
+            url = self.model.get_weights_url("best")  # download URL with auth
+            self.model_file = checks.check_file(url, download_dir=Path(SETTINGS["weights_dir"]) / "hub" / self.model.id)
+            return
+
+        # Set training args and start heartbeats for HUB to monitor agent
+        self._set_train_args()
+        self.model.start_heartbeat(self.rate_limits["heartbeat"])
+        LOGGER.info(f"{PREFIX}View model at {self.model_url} 🚀")
+
+    def create_model(self, model_args):
+        """Initializes a HUB training session with the specified model identifier."""
+        payload = {
+            "config": {
+                "batchSize": model_args.get("batch", -1),
+                "epochs": model_args.get("epochs", 300),
+                "imageSize": model_args.get("imgsz", 640),
+                "patience": model_args.get("patience", 100),
+                "device": str(model_args.get("device", "")),  # convert None to string
+                "cache": str(model_args.get("cache", "ram")),  # convert True, False, None to string
+            },
+            "dataset": {"name": model_args.get("data")},
+            "lineage": {
+                "architecture": {"name": self.filename.replace(".pt", "").replace(".yaml", "")},
+                "parent": {},
+            },
+            "meta": {"name": self.filename},
+        }
+
+        if self.filename.endswith(".pt"):
+            payload["lineage"]["parent"]["name"] = self.filename
+
+        self.model.create_model(payload)
+
+        # Model could not be created
+        # TODO: improve error handling
+        if not self.model.id:
+            return None
+
+        self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}"
+
+        # Start heartbeats for HUB to monitor agent
+        self.model.start_heartbeat(self.rate_limits["heartbeat"])
+
+        LOGGER.info(f"{PREFIX}View model at {self.model_url} 🚀")
+
+    @staticmethod
+    def _parse_identifier(identifier):
+        """
+        Parses the given identifier to determine the type of identifier and extract relevant components.
+
+        The method supports different identifier formats:
+            - A HUB model URL https://hub.ultralytics.com/models/MODEL
+            - A HUB model URL with API Key https://hub.ultralytics.com/models/MODEL?api_key=APIKEY
+            - A local filename that ends with '.pt' or '.yaml'
+
+        Args:
+            identifier (str): The identifier string to be parsed.
+
+        Returns:
+            (tuple): A tuple containing the API key, model ID, and filename as applicable.
+
+        Raises:
+            HUBModelError: If the identifier format is not recognized.
+        """
+        api_key, model_id, filename = None, None, None
+        if Path(identifier).suffix in {".pt", ".yaml"}:
+            filename = identifier
+        elif identifier.startswith(f"{HUB_WEB_ROOT}/models/"):
+            parsed_url = urlparse(identifier)
+            model_id = Path(parsed_url.path).stem  # handle a possible trailing slash robustly
+            query_params = parse_qs(parsed_url.query)  # dictionary, i.e. {"api_key": ["API_KEY_HERE"]}
+            api_key = query_params.get("api_key", [None])[0]
+        else:
+            raise HUBModelError(f"model='{identifier} invalid, correct format is {HUB_WEB_ROOT}/models/MODEL_ID")
+        return api_key, model_id, filename
+
+    def _set_train_args(self):
+        """
+        Initializes training arguments and creates a model entry on the Ultralytics HUB.
+
+        This method sets up training arguments based on the model's state and updates them with any additional
+        arguments provided. It handles different states of the model, such as whether it's resumable, pretrained,
+        or requires specific file setup.
+
+        Raises:
+            ValueError: If the model is already trained, if required dataset information is missing, or if there are
+                issues with the provided training arguments.
+        """
+        if self.model.is_resumable():
+            # Model has saved weights
+            self.train_args = {"data": self.model.get_dataset_url(), "resume": True}
+            self.model_file = self.model.get_weights_url("last")
+        else:
+            # Model has no saved weights
+            self.train_args = self.model.data.get("train_args")  # new response
+
+            # Set the model file as either a *.pt or *.yaml file
+            self.model_file = (
+                self.model.get_weights_url("parent") if self.model.is_pretrained() else self.model.get_architecture()
+            )
+
+        if "data" not in self.train_args:
+            # RF bug - datasets are sometimes not exported
+            raise ValueError("Dataset may still be processing. Please wait a minute and try again.")
+
+        self.model_file = checks.check_yolov5u_filename(self.model_file, verbose=False)  # YOLOv5->YOLOv5u
+        self.model_id = self.model.id
+
+    def request_queue(
+        self,
+        request_func,
+        retry=3,
+        timeout=30,
+        thread=True,
+        verbose=True,
+        progress_total=None,
+        stream_response=None,
+        *args,
+        **kwargs,
+    ):
+        """Attempts to execute `request_func` with retries, timeout handling, optional threading, and progress."""
+
+        def retry_request():
+            """Attempts to call `request_func` with retries, timeout, and optional threading."""
+            t0 = time.time()  # Record the start time for the timeout
+            response = None
+            for i in range(retry + 1):
+                if (time.time() - t0) > timeout:
+                    LOGGER.warning(f"{PREFIX}Timeout for request reached. {HELP_MSG}")
+                    break  # Timeout reached, exit loop
+
+                response = request_func(*args, **kwargs)
+                if response is None:
+                    LOGGER.warning(f"{PREFIX}Received no response from the request. {HELP_MSG}")
+                    time.sleep(2**i)  # Exponential backoff before retrying
+                    continue  # Skip further processing and retry
+
+                if progress_total:
+                    self._show_upload_progress(progress_total, response)
+                elif stream_response:
+                    self._iterate_content(response)
+
+                if HTTPStatus.OK <= response.status_code < HTTPStatus.MULTIPLE_CHOICES:
+                    # if request related to metrics upload
+                    if kwargs.get("metrics"):
+                        self.metrics_upload_failed_queue = {}
+                    return response  # Success, no need to retry
+
+                if i == 0:
+                    # Initial attempt, check status code and provide messages
+                    message = self._get_failure_message(response, retry, timeout)
+
+                    if verbose:
+                        LOGGER.warning(f"{PREFIX}{message} {HELP_MSG} ({response.status_code})")
+
+                if not self._should_retry(response.status_code):
+                    LOGGER.warning(f"{PREFIX}Request failed. {HELP_MSG} ({response.status_code}")
+                    break  # Not an error that should be retried, exit loop
+
+                time.sleep(2**i)  # Exponential backoff for retries
+
+            # if request related to metrics upload and exceed retries
+            if response is None and kwargs.get("metrics"):
+                self.metrics_upload_failed_queue.update(kwargs.get("metrics"))
+
+            return response
+
+        if thread:
+            # Start a new thread to run the retry_request function
+            threading.Thread(target=retry_request, daemon=True).start()
+        else:
+            # If running in the main thread, call retry_request directly
+            return retry_request()
+
+    @staticmethod
+    def _should_retry(status_code):
+        """Determines if a request should be retried based on the HTTP status code."""
+        retry_codes = {
+            HTTPStatus.REQUEST_TIMEOUT,
+            HTTPStatus.BAD_GATEWAY,
+            HTTPStatus.GATEWAY_TIMEOUT,
+        }
+        return status_code in retry_codes
+
+    def _get_failure_message(self, response: requests.Response, retry: int, timeout: int):
+        """
+        Generate a retry message based on the response status code.
+
+        Args:
+            response: The HTTP response object.
+            retry: The number of retry attempts allowed.
+            timeout: The maximum timeout duration.
+
+        Returns:
+            (str): The retry message.
+        """
+        if self._should_retry(response.status_code):
+            return f"Retrying {retry}x for {timeout}s." if retry else ""
+        elif response.status_code == HTTPStatus.TOO_MANY_REQUESTS:  # rate limit
+            headers = response.headers
+            return (
+                f"Rate limit reached ({headers['X-RateLimit-Remaining']}/{headers['X-RateLimit-Limit']}). "
+                f"Please retry after {headers['Retry-After']}s."
+            )
+        else:
+            try:
+                return response.json().get("message", "No JSON message.")
+            except AttributeError:
+                return "Unable to read JSON."
+
+    def upload_metrics(self):
+        """Upload model metrics to Ultralytics HUB."""
+        return self.request_queue(self.model.upload_metrics, metrics=self.metrics_queue.copy(), thread=True)
+
+    def upload_model(
+        self,
+        epoch: int,
+        weights: str,
+        is_best: bool = False,
+        map: float = 0.0,
+        final: bool = False,
+    ) -> None:
+        """
+        Upload a model checkpoint to Ultralytics HUB.
+
+        Args:
+            epoch (int): The current training epoch.
+            weights (str): Path to the model weights file.
+            is_best (bool): Indicates if the current model is the best one so far.
+            map (float): Mean average precision of the model.
+            final (bool): Indicates if the model is the final model after training.
+        """
+        weights = Path(weights)
+        if not weights.is_file():
+            last = weights.with_name(f"last{weights.suffix}")
+            if final and last.is_file():
+                LOGGER.warning(
+                    f"{PREFIX} WARNING ⚠️ Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
+                    "This often happens when resuming training in transient environments like Google Colab. "
+                    "For more reliable training, consider using Ultralytics HUB Cloud. "
+                    "Learn more at https://docs.ultralytics.com/hub/cloud-training."
+                )
+                shutil.copy(last, weights)  # copy last.pt to best.pt
+            else:
+                LOGGER.warning(f"{PREFIX} WARNING ⚠️ Model upload issue. Missing model {weights}.")
+                return
+
+        self.request_queue(
+            self.model.upload_model,
+            epoch=epoch,
+            weights=str(weights),
+            is_best=is_best,
+            map=map,
+            final=final,
+            retry=10,
+            timeout=3600,
+            thread=not final,
+            progress_total=weights.stat().st_size if final else None,  # only show progress if final
+            stream_response=True,
+        )
+
+    @staticmethod
+    def _show_upload_progress(content_length: int, response: requests.Response) -> None:
+        """
+        Display a progress bar to track the progress of a file upload.
+
+        Args:
+            content_length (int): The total size of the content being uploaded, in bytes.
+            response (requests.Response): The streamed response object for the upload request.
+
+        Returns:
+            None
+        """
+        with TQDM(total=content_length, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
+            for data in response.iter_content(chunk_size=1024):
+                pbar.update(len(data))
+
+    @staticmethod
+    def _iterate_content(response: requests.Response) -> None:
+        """
+        Process the streamed HTTP response data.
+
+        Args:
+            response (requests.Response): The response object from the file download request.
+
+        Returns:
+            None
+        """
+        for _ in response.iter_content(chunk_size=1024):
+            pass  # Do nothing with data chunks

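The identifier parsing in `_parse_identifier` above accepts three formats. A quick sketch of what each form resolves to, assuming the default HUB_WEB_ROOT (model ID and API key values are placeholders):

```python
from ultralytics.hub.session import HUBTrainingSession

# Local weights or YAML config -> only a filename is returned
print(HUBTrainingSession._parse_identifier("yolo11n.pt"))
# (None, None, 'yolo11n.pt')

# HUB model URL -> the model ID is the last path component
print(HUBTrainingSession._parse_identifier("https://hub.ultralytics.com/models/MODEL_ID"))
# (None, 'MODEL_ID', None)

# HUB model URL with an api_key query parameter
print(HUBTrainingSession._parse_identifier("https://hub.ultralytics.com/models/MODEL_ID?api_key=API_KEY"))
# ('API_KEY', 'MODEL_ID', None)
```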
+ 246 - 0
ultralytics/hub/utils.py

@@ -0,0 +1,246 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import os
+import platform
+import random
+import threading
+import time
+from pathlib import Path
+
+import requests
+
+from ultralytics.utils import (
+    ARGV,
+    ENVIRONMENT,
+    IS_COLAB,
+    IS_GIT_DIR,
+    IS_PIP_PACKAGE,
+    LOGGER,
+    ONLINE,
+    RANK,
+    SETTINGS,
+    TESTS_RUNNING,
+    TQDM,
+    TryExcept,
+    __version__,
+    colorstr,
+    get_git_origin_url,
+)
+from ultralytics.utils.downloads import GITHUB_ASSETS_NAMES
+
+HUB_API_ROOT = os.environ.get("ULTRALYTICS_HUB_API", "https://api.ultralytics.com")
+HUB_WEB_ROOT = os.environ.get("ULTRALYTICS_HUB_WEB", "https://hub.ultralytics.com")
+
+PREFIX = colorstr("Ultralytics HUB: ")
+HELP_MSG = "If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance."
+
+
+def request_with_credentials(url: str) -> any:
+    """
+    Make an AJAX request with cookies attached in a Google Colab environment.
+
+    Args:
+        url (str): The URL to make the request to.
+
+    Returns:
+        (any): The response data from the AJAX request.
+
+    Raises:
+        OSError: If the function is not run in a Google Colab environment.
+    """
+    if not IS_COLAB:
+        raise OSError("request_with_credentials() must run in a Colab environment")
+    from google.colab import output  # noqa
+    from IPython import display  # noqa
+
+    display.display(
+        display.Javascript(
+            f"""
+            window._hub_tmp = new Promise((resolve, reject) => {{
+                const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
+                fetch("{url}", {{
+                    method: 'POST',
+                    credentials: 'include'
+                }})
+                    .then((response) => resolve(response.json()))
+                    .then((json) => {{
+                    clearTimeout(timeout);
+                    }}).catch((err) => {{
+                    clearTimeout(timeout);
+                    reject(err);
+                }});
+            }});
+            """
+        )
+    )
+    return output.eval_js("_hub_tmp")
+
+
+def requests_with_progress(method, url, **kwargs):
+    """
+    Make an HTTP request using the specified method and URL, with an optional progress bar.
+
+    Args:
+        method (str): The HTTP method to use (e.g. 'GET', 'POST').
+        url (str): The URL to send the request to.
+        **kwargs (any): Additional keyword arguments to pass to the underlying `requests.request` function.
+
+    Returns:
+        (requests.Response): The response object from the HTTP request.
+
+    Note:
+        - If 'progress' is set to True, the progress bar will display the download progress for responses with a known
+        content length.
+        - If 'progress' is a number, the progress bar will display assuming content length = progress.
+    """
+    progress = kwargs.pop("progress", False)
+    if not progress:
+        return requests.request(method, url, **kwargs)
+    response = requests.request(method, url, stream=True, **kwargs)
+    total = int(response.headers.get("content-length", 0) if isinstance(progress, bool) else progress)  # total size
+    try:
+        pbar = TQDM(total=total, unit="B", unit_scale=True, unit_divisor=1024)
+        for data in response.iter_content(chunk_size=1024):
+            pbar.update(len(data))
+        pbar.close()
+    except requests.exceptions.ChunkedEncodingError:  # avoid 'Connection broken: IncompleteRead' warnings
+        response.close()
+    return response
+
+
+def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbose=True, progress=False, **kwargs):
+    """
+    Makes an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout.
+
+    Args:
+        method (str): The HTTP method to use for the request. Choices are 'post' and 'get'.
+        url (str): The URL to make the request to.
+        retry (int, optional): Number of retries to attempt before giving up. Default is 3.
+        timeout (int, optional): Timeout in seconds after which the function will give up retrying. Default is 30.
+        thread (bool, optional): Whether to execute the request in a separate daemon thread. Default is True.
+        code (int, optional): An identifier for the request, used for logging purposes. Default is -1.
+        verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True.
+        progress (bool, optional): Whether to show a progress bar during the request. Default is False.
+        **kwargs (any): Keyword arguments to be passed to the requests function specified in method.
+
+    Returns:
+        (requests.Response): The HTTP response object. If the request is executed in a separate thread, returns None.
+    """
+    retry_codes = (408, 500)  # retry only these codes
+
+    @TryExcept(verbose=verbose)
+    def func(func_method, func_url, **func_kwargs):
+        """Make HTTP requests with retries and timeouts, with optional progress tracking."""
+        r = None  # response
+        t0 = time.time()  # initial time for timer
+        for i in range(retry + 1):
+            if (time.time() - t0) > timeout:
+                break
+            r = requests_with_progress(func_method, func_url, **func_kwargs)  # i.e. get(url, data, json, files)
+            if r.status_code < 300:  # return codes in the 2xx range are generally considered "good" or "successful"
+                break
+            try:
+                m = r.json().get("message", "No JSON message.")
+            except AttributeError:
+                m = "Unable to read JSON."
+            if i == 0:
+                if r.status_code in retry_codes:
+                    m += f" Retrying {retry}x for {timeout}s." if retry else ""
+                elif r.status_code == 429:  # rate limit
+                    h = r.headers  # response headers
+                    m = (
+                        f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). "
+                        f"Please retry after {h['Retry-After']}s."
+                    )
+                if verbose:
+                    LOGGER.warning(f"{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})")
+                if r.status_code not in retry_codes:
+                    return r
+            time.sleep(2**i)  # exponential backoff
+        return r
+
+    args = method, url
+    kwargs["progress"] = progress
+    if thread:
+        threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start()
+    else:
+        return func(*args, **kwargs)
+
+
+class Events:
+    """
+    A class for collecting anonymous event analytics. Event analytics are enabled when sync=True in settings and
+    disabled when sync=False. Run 'yolo settings' to see and update settings.
+
+    Attributes:
+        url (str): The URL to send anonymous events.
+        rate_limit (float): The rate limit in seconds for sending events.
+        metadata (dict): A dictionary containing metadata about the environment.
+        enabled (bool): A flag to enable or disable Events based on certain conditions.
+    """
+
+    url = "https://www.google-analytics.com/mp/collect?measurement_id=G-X8NCJYTQXM&api_secret=QLQrATrNSwGRFRLE-cbHJw"
+
+    def __init__(self):
+        """Initializes the Events object with default values for events, rate_limit, and metadata."""
+        self.events = []  # events list
+        self.rate_limit = 30.0  # rate limit (seconds)
+        self.t = 0.0  # rate limit timer (seconds)
+        self.metadata = {
+            "cli": Path(ARGV[0]).name == "yolo",
+            "install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
+            "python": ".".join(platform.python_version_tuple()[:2]),  # i.e. 3.10
+            "version": __version__,
+            "env": ENVIRONMENT,
+            "session_id": round(random.random() * 1e15),
+            "engagement_time_msec": 1000,
+        }
+        self.enabled = (
+            SETTINGS["sync"]
+            and RANK in {-1, 0}
+            and not TESTS_RUNNING
+            and ONLINE
+            and (IS_PIP_PACKAGE or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git")
+        )
+
+    def __call__(self, cfg):
+        """
+        Attempts to add a new event to the events list and send events if the rate limit is reached.
+
+        Args:
+            cfg (IterableSimpleNamespace): The configuration object containing mode and task information.
+        """
+        if not self.enabled:
+            # Events disabled, do nothing
+            return
+
+        # Attempt to add to events
+        if len(self.events) < 25:  # Events list limited to 25 events (drop any events past this)
+            params = {
+                **self.metadata,
+                "task": cfg.task,
+                "model": cfg.model if cfg.model in GITHUB_ASSETS_NAMES else "custom",
+            }
+            if cfg.mode == "export":
+                params["format"] = cfg.format
+            self.events.append({"name": cfg.mode, "params": params})
+
+        # Check rate limit
+        t = time.time()
+        if (t - self.t) < self.rate_limit:
+            # Time is under rate limiter, wait to send
+            return
+
+        # Time is over rate limiter, send now
+        data = {"client_id": SETTINGS["uuid"], "events": self.events}  # SHA-256 anonymized UUID hash and events list
+
+        # POST equivalent to requests.post(self.url, json=data)
+        smart_request("post", self.url, json=data, retry=0, verbose=False)
+
+        # Reset events and rate limit timer
+        self.events = []
+        self.t = t
+
+
+# Run below code on hub/utils init -------------------------------------------------------------------------------------
+events = Events()
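
Both `smart_request()` and `HUBTrainingSession.request_queue()` share the same retry shape: a bounded number of attempts, a wall-clock timeout, retries only on selected status codes, and `2**i` seconds of backoff between tries. A generic standalone sketch of that pattern (the `request_func` argument is a stand-in for any callable returning a `requests.Response`, not part of the library API):

```python
import time


def retry_with_backoff(request_func, retry=3, timeout=30, retry_codes=(408, 500)):
    """Call request_func() until success, a non-retryable status, the retry budget, or the timeout is hit."""
    t0 = time.time()
    response = None
    for i in range(retry + 1):
        if (time.time() - t0) > timeout:
            break  # wall-clock budget exhausted
        response = request_func()
        if response is not None and response.status_code < 300:
            return response  # 2xx: success, stop retrying
        if response is not None and response.status_code not in retry_codes:
            return response  # e.g. 401 or 404: retrying will not help
        time.sleep(2**i)  # exponential backoff: 1s, 2s, 4s, ...
    return response


# Example: response = retry_with_backoff(lambda: requests.post(url, json=data))
```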