# presets.py
  1. from collections import defaultdict
  2. import torch
  3. from tools import transforms as reference_transforms
  4. # import transforms as reference_transforms
  5. def get_modules(use_v2):
  6. # We need a protected import to avoid the V2 warning in case just V1 is used
  7. if use_v2:
  8. import torchvision.transforms.v2
  9. import torchvision.tv_tensors
  10. return torchvision.transforms.v2, torchvision.tv_tensors
  11. else:
  12. return reference_transforms, None
  13. class DetectionPresetTrain:
  14. # Note: this transform assumes that the input to forward() are always PIL
  15. # images, regardless of the backend parameter.
  16. def __init__(
  17. self,
  18. *,
  19. data_augmentation,
  20. hflip_prob=0.5,
  21. mean=(123.0, 117.0, 104.0),
  22. backend="pil",
  23. use_v2=False,
  24. ):
  25. T, tv_tensors = get_modules(use_v2)
  26. transforms = []
  27. backend = backend.lower()
  28. if backend == "tv_tensor":
  29. transforms.append(T.ToImage())
  30. elif backend == "tensor":
  31. transforms.append(T.PILToTensor())
  32. elif backend != "pil":
  33. raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")
  34. if data_augmentation == "hflip":
  35. transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
  36. elif data_augmentation == "lsj":
  37. transforms += [
  38. T.ScaleJitter(target_size=(1024, 1024), antialias=True),
  39. # TODO: FixedSizeCrop below doesn't work on tensors!
  40. reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
  41. T.RandomHorizontalFlip(p=hflip_prob),
  42. ]
  43. elif data_augmentation == "multiscale":
  44. transforms += [
  45. T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
  46. T.RandomHorizontalFlip(p=hflip_prob),
  47. ]
  48. elif data_augmentation == "ssd":
  49. fill = defaultdict(lambda: mean, {tv_tensors.Mask: 0}) if use_v2 else list(mean)
  50. transforms += [
  51. T.RandomPhotometricDistort(),
  52. T.RandomZoomOut(fill=fill),
  53. T.RandomIoUCrop(),
  54. T.RandomHorizontalFlip(p=hflip_prob),
  55. ]
  56. elif data_augmentation == "ssdlite":
  57. transforms += [
  58. T.RandomIoUCrop(),
  59. T.RandomHorizontalFlip(p=hflip_prob),
  60. ]
  61. else:
  62. raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
  63. if backend == "pil":
  64. # Note: we could just convert to pure tensors even in v2.
  65. transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
  66. transforms += [T.ToDtype(torch.float, scale=True)]
  67. if use_v2:
  68. transforms += [
  69. T.ConvertBoundingBoxFormat(tv_tensors.BoundingBoxFormat.XYXY),
  70. T.SanitizeBoundingBoxes(),
  71. T.ToPureTensor(),
  72. ]
  73. self.transforms = T.Compose(transforms)
  74. def __call__(self, img, target):
  75. return self.transforms(img, target)
  76. class DetectionPresetEval:
  77. def __init__(self, backend="pil", use_v2=False):
  78. T, _ = get_modules(use_v2)
  79. transforms = []
  80. backend = backend.lower()
  81. if backend == "pil":
  82. # Note: we could just convert to pure tensors even in v2?
  83. transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
  84. elif backend == "tensor":
  85. transforms += [T.PILToTensor()]
  86. elif backend == "tv_tensor":
  87. transforms += [T.ToImage()]
  88. else:
  89. raise ValueError(f"backend can be 'tv_tensor', 'tensor' or 'pil', but got {backend}")
  90. transforms += [T.ToDtype(torch.float, scale=True)]
  91. if use_v2:
  92. transforms += [T.ToPureTensor()]
  93. self.transforms = T.Compose(transforms)
  94. def __call__(self, img, target):
  95. return self.transforms(img, target)