train.py

# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import math
import random
from copy import copy

import numpy as np
import torch.nn as nn

from ultralytics.data import build_dataloader, build_yolo_dataset
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import DetectionModel
from ultralytics.utils import LOGGER, RANK
from ultralytics.utils.plotting import plot_images, plot_labels, plot_results
from ultralytics.utils.torch_utils import de_parallel, torch_distributed_zero_first


class DetectionTrainer(BaseTrainer):
    """
    A class extending the BaseTrainer class for training based on a detection model.

    Example:
        ```python
        from ultralytics.models.yolo.detect import DetectionTrainer

        args = dict(model="yolo11n.pt", data="coco8.yaml", epochs=3)
        trainer = DetectionTrainer(overrides=args)
        trainer.train()
        ```
    """

    def build_dataset(self, img_path, mode="train", batch=None):
        """
        Build YOLO Dataset.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
            batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
        """
        gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
        return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
        """Construct and return dataloader."""
        assert mode in {"train", "val"}, f"Mode must be 'train' or 'val', not {mode}."
        with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
            dataset = self.build_dataset(dataset_path, mode, batch_size)
        shuffle = mode == "train"
        if getattr(dataset, "rect", False) and shuffle:
            LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
            shuffle = False
        workers = self.args.workers if mode == "train" else self.args.workers * 2
        return build_dataloader(dataset, batch_size, workers, shuffle, rank)  # return dataloader

    def preprocess_batch(self, batch):
        """Preprocesses a batch of images by scaling and converting to float."""
        batch["img"] = batch["img"].to(self.device, non_blocking=True).float() / 255
        if self.args.multi_scale:
            imgs = batch["img"]
            sz = (
                random.randrange(int(self.args.imgsz * 0.5), int(self.args.imgsz * 1.5 + self.stride))
                // self.stride
                * self.stride
            )  # size
            sf = sz / max(imgs.shape[2:])  # scale factor
            if sf != 1:
                ns = [
                    math.ceil(x * sf / self.stride) * self.stride for x in imgs.shape[2:]
                ]  # new shape (stretched to gs-multiple)
                imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
            batch["img"] = imgs
        return batch
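
    # Illustrative worked example of the multi-scale branch above (added commentary, not upstream code):
    # assuming imgsz=640 and stride=32, random.randrange(320, 992) might return 743, and
    # 743 // 32 * 32 == 736, so sz = 736. For a 640x640 batch, sf = 736 / 640 = 1.15, and each side
    # becomes math.ceil(640 * 1.15 / 32) * 32 == 736, so the batch is bilinearly resized to 736x736
    # (always a multiple of the model stride).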

    def set_model_attributes(self):
        """Set model attributes (number of classes, class names, hyperparameters) from the dataset."""
        # nl = de_parallel(self.model).model[-1].nl  # number of detection layers (to scale hyps)
        # self.args.box *= 3 / nl  # scale to layers
        # self.args.cls *= self.data["nc"] / 80 * 3 / nl  # scale to classes and layers
        # self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
        self.model.nc = self.data["nc"]  # attach number of classes to model
        self.model.names = self.data["names"]  # attach class names to model
        self.model.args = self.args  # attach hyperparameters to model
        # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Return a YOLO detection model."""
        model = DetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
        if weights:
            model.load(weights)
        return model

    def get_validator(self):
        """Returns a DetectionValidator for YOLO model validation."""
        self.loss_names = "box_loss", "cls_loss", "dfl_loss"
        return yolo.detect.DetectionValidator(
            self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
        )

    def label_loss_items(self, loss_items=None, prefix="train"):
        """
        Returns a loss dict with labelled training loss items tensor.

        Not needed for classification but necessary for segmentation & detection.
        """
        keys = [f"{prefix}/{x}" for x in self.loss_names]
        if loss_items is not None:
            loss_items = [round(float(x), 5) for x in loss_items]  # convert tensors to 5 decimal place floats
            return dict(zip(keys, loss_items))
        else:
            return keys

    def progress_string(self):
        """Returns a formatted string of training progress with epoch, GPU memory, loss, instances and size."""
        return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
            "Epoch",
            "GPU_mem",
            *self.loss_names,
            "Instances",
            "Size",
        )

    def plot_training_samples(self, batch, ni):
        """Plots training samples with their annotations."""
        plot_images(
            images=batch["img"],
            batch_idx=batch["batch_idx"],
            cls=batch["cls"].squeeze(-1),
            bboxes=batch["bboxes"],
            paths=batch["im_file"],
            fname=self.save_dir / f"train_batch{ni}.jpg",
            on_plot=self.on_plot,
        )

    def plot_metrics(self):
        """Plots metrics from a CSV file."""
        plot_results(file=self.csv, on_plot=self.on_plot)  # save results.png

    def plot_training_labels(self):
        """Create a labeled training plot of the YOLO model."""
        boxes = np.concatenate([lb["bboxes"] for lb in self.train_loader.dataset.labels], 0)
        cls = np.concatenate([lb["cls"] for lb in self.train_loader.dataset.labels], 0)
        plot_labels(boxes, cls.squeeze(), names=self.data["names"], save_dir=self.save_dir, on_plot=self.on_plot)

    def auto_batch(self):
        """Get batch size by calculating memory occupation of model."""
        train_dataset = self.build_dataset(self.trainset, mode="train", batch=16)
        max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4  # 4 for mosaic augmentation
        return super().auto_batch(max_num_obj)
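
Below is a minimal usage sketch, not part of train.py itself: it reuses the `yolo11n.pt` / `coco8.yaml` arguments from the class docstring example, while the `CustomDetectionTrainer` subclass and its logging override are illustrative assumptions showing how `DetectionTrainer` methods can be customized.

```python
# Usage sketch (assumes the ultralytics package is installed).
from ultralytics.models.yolo.detect import DetectionTrainer


class CustomDetectionTrainer(DetectionTrainer):
    """Hypothetical subclass that logs the tensor shape of each preprocessed batch."""

    def preprocess_batch(self, batch):
        batch = super().preprocess_batch(batch)  # images scaled to [0, 1]; multi-scale applied if enabled
        print(f"preprocessed batch shape: {tuple(batch['img'].shape)}")
        return batch


if __name__ == "__main__":
    args = dict(model="yolo11n.pt", data="coco8.yaml", epochs=3, imgsz=640)
    trainer = CustomDetectionTrainer(overrides=args)
    trainer.train()
```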