# _meta.py — meta-transforms for bounding-box tv_tensors (format conversion, clamping).
  1. from typing import Any, Dict, Union
  2. from torchvision import tv_tensors
  3. from torchvision.transforms.v2 import functional as F, Transform
  4. class ConvertBoundingBoxFormat(Transform):
  5. """Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
  6. Args:
  7. format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
  8. Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
  9. string values match the enums, e.g. "XYXY" or "XYWH" etc.
  10. """
  11. _transformed_types = (tv_tensors.BoundingBoxes,)
  12. def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
  13. super().__init__()
  14. self.format = format
  15. def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
  16. return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
  17. class ClampBoundingBoxes(Transform):
  18. """Clamp bounding boxes to their corresponding image dimensions.
  19. The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
  20. """
  21. _transformed_types = (tv_tensors.BoundingBoxes,)
  22. def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
  23. return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]