# line_rcnn.py

from typing import Any, Optional

import torch
from torch import nn
from torchvision.ops import MultiScaleRoIAlign

from libs.vision_libs.ops import misc as misc_nn_ops
from libs.vision_libs.transforms._presets import ObjectDetection
from .roi_heads import RoIHeads
from libs.vision_libs.models._api import register_model, Weights, WeightsEnum
from libs.vision_libs.models._meta import _COCO_PERSON_CATEGORIES, _COCO_PERSON_KEYPOINT_NAMES
from libs.vision_libs.models._utils import _ovewrite_value_param, handle_legacy_interface
from libs.vision_libs.models.resnet import resnet50, ResNet50_Weights
from libs.vision_libs.models.detection._utils import overwrite_eps
from libs.vision_libs.models.detection.backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from libs.vision_libs.models.detection.faster_rcnn import FasterRCNN

__all__ = [
    "LineRCNN",
    "LineRCNN_ResNet50_FPN_Weights",
    "linercnn_resnet50_fpn",
]


class LineRCNN(FasterRCNN):
    """
    Implements Line R-CNN, adapted from Keypoint R-CNN.

    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    image, and should be in 0-1 range. Different images can have different sizes.

    The behavior of the model changes depending on whether it is in training or evaluation mode.

    During training, the model expects both the input tensors and targets (list of dictionary),
    containing:

    - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
      ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
    - labels (``Int64Tensor[N]``): the class label for each ground-truth box
    - keypoints (``FloatTensor[N, K, 3]``): the K keypoints location for each of the N instances, in the
      format [x, y, visibility], where visibility=0 means that the keypoint is not visible.

    The model returns a Dict[Tensor] during training, containing the classification and regression
    losses for both the RPN and the R-CNN, and the keypoint loss.

    During inference, the model requires only the input tensors, and returns the post-processed
    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    follows:

    - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
      ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
    - labels (``Int64Tensor[N]``): the predicted labels for each image
    - scores (``Tensor[N]``): the scores of each prediction
    - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in [x, y, v] format.

    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            It should contain an out_channels attribute, which indicates the number of output
            channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
        num_classes (int): number of output classes of the model (including the background).
            If box_predictor is specified, num_classes should be None.
        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained
        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        rpn_score_thresh (float): during inference, only return proposals with a classification score
            greater than rpn_score_thresh
        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
            the locations indicated by the bounding boxes
        box_head (nn.Module): module that takes the cropped feature maps as input
        box_predictor (nn.Module): module that takes the output of box_head and returns the
            classification logits and box regression deltas.
        box_score_thresh (float): during inference, only return proposals with a classification score
            greater than box_score_thresh
        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
        box_detections_per_img (int): maximum number of detections per image, for all classes.
        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
            considered as positive during training of the classification head
        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
            considered as negative during training of the classification head
        box_batch_size_per_image (int): number of proposals that are sampled during training of the
            classification head
        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
            of the classification head
        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
            bounding boxes
        line_head (nn.Module): module for the line branch that takes the features as input
        line_predictor (nn.Module): module that takes the output of line_head and returns the
            line predictions

    Example::

        >>> import torch
        >>> import torchvision
        >>> from torchvision.models import MobileNet_V2_Weights
        >>> from torchvision.models.detection import KeypointRCNN
        >>> from torchvision.models.detection.anchor_utils import AnchorGenerator
        >>>
        >>> # load a pre-trained model for classification and return
        >>> # only the features
        >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
        >>> # KeypointRCNN needs to know the number of
        >>> # output channels in a backbone. For mobilenet_v2, it's 1280,
        >>> # so we need to add it here
        >>> backbone.out_channels = 1280
        >>>
        >>> # let's make the RPN generate 5 x 3 anchors per spatial
        >>> # location, with 5 different sizes and 3 different aspect
        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
        >>> # map could potentially have different sizes and
        >>> # aspect ratios
        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
        >>>
        >>> # let's define what are the feature maps that we will
        >>> # use to perform the region of interest cropping, as well as
        >>> # the size of the crop after rescaling.
        >>> # if your backbone returns a Tensor, featmap_names is expected to
        >>> # be ['0']. More generally, the backbone should return an
        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
        >>> # feature maps to use.
        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                 output_size=7,
        >>>                                                 sampling_ratio=2)
        >>>
        >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                          output_size=14,
        >>>                                                          sampling_ratio=2)
        >>> # put the pieces together inside a KeypointRCNN model
        >>> model = KeypointRCNN(backbone,
        >>>                      num_classes=2,
        >>>                      rpn_anchor_generator=anchor_generator,
        >>>                      box_roi_pool=roi_pooler,
        >>>                      keypoint_roi_pool=keypoint_roi_pooler)
        >>> model.eval()
        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
        >>> predictions = model(x)
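        >>>
        >>> # Training requires per-image targets in the format described above; a sketch
        >>> # with dummy values (illustrative only, not a meaningful training signal):
        >>> model.train()
        >>> targets = [{"boxes": torch.tensor([[10.0, 20.0, 100.0, 200.0]]),
        >>>             "labels": torch.tensor([1]),
        >>>             "keypoints": torch.rand(1, 17, 3)} for _ in x]
        >>> loss_dict = model(x, targets)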
  145. """

    def __init__(
        self,
        backbone,
        num_classes=None,
        # transform parameters
        min_size=None,
        max_size=1333,
        image_mean=None,
        image_std=None,
        # RPN parameters
        rpn_anchor_generator=None,
        rpn_head=None,
        rpn_pre_nms_top_n_train=2000,
        rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7,
        rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256,
        rpn_positive_fraction=0.5,
        rpn_score_thresh=0.0,
        # Box parameters
        box_roi_pool=None,
        box_head=None,
        box_predictor=None,
        box_score_thresh=0.05,
        box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5,
        box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512,
        box_positive_fraction=0.25,
        bbox_reg_weights=None,
        # line parameters
        line_head=None,
        line_predictor=None,
        **kwargs,
    ):
        # if not isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None))):
        #     raise TypeError(
        #         "keypoint_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(keypoint_roi_pool)}"
        #     )
        # if min_size is None:
        #     min_size = (640, 672, 704, 736, 768, 800)
        #
        # if num_keypoints is not None:
        #     if keypoint_predictor is not None:
        #         raise ValueError("num_keypoints should be None when keypoint_predictor is specified")
        #     else:
        #         num_keypoints = 17
        out_channels = backbone.out_channels

        if line_head is None:
            keypoint_layers = tuple(512 for _ in range(8))
            line_head = LineRCNNHeads(out_channels, keypoint_layers)

        if line_predictor is None:
            keypoint_dim_reduced = 512  # == keypoint_layers[-1]
            line_predictor = LineRCNNPredictor(keypoint_dim_reduced)

        super().__init__(
            backbone,
            num_classes,
            # transform parameters
            min_size,
            max_size,
            image_mean,
            image_std,
            # RPN-specific parameters
            rpn_anchor_generator,
            rpn_head,
            rpn_pre_nms_top_n_train,
            rpn_pre_nms_top_n_test,
            rpn_post_nms_top_n_train,
            rpn_post_nms_top_n_test,
            rpn_nms_thresh,
            rpn_fg_iou_thresh,
            rpn_bg_iou_thresh,
            rpn_batch_size_per_image,
            rpn_positive_fraction,
            rpn_score_thresh,
            # Box parameters
            box_roi_pool,
            box_head,
            box_predictor,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            **kwargs,
        )
        roi_heads = RoIHeads(
            # Box
            box_roi_pool,
            box_head,
            box_predictor,
            line_head,
            line_predictor,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
        )
        # Replace the RoIHeads built by FasterRCNN's __init__ with the line-aware version.
        self.roi_heads = roi_heads
        # self.roi_heads.line_head = line_head
        # self.roi_heads.line_predictor = line_predictor


class LineRCNNHeads(nn.Sequential):
    # Placeholder: LineRCNN.__init__ builds the default head as LineRCNNHeads(out_channels, layers),
    # so an implementation accepting (in_channels, layers) still needs to be provided here.
    # The commented-out code below is the Keypoint R-CNN convolutional head, kept as a reference.
    pass

    # def __init__(self, in_channels, layers):
    #     d = []
    #     next_feature = in_channels
    #     for out_channels in layers:
    #         d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
    #         d.append(nn.ReLU(inplace=True))
    #         next_feature = out_channels
    #     super().__init__(*d)
    #     for m in self.children():
    #         if isinstance(m, nn.Conv2d):
    #             nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
    #             nn.init.constant_(m.bias, 0)
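    # For reference, the Keypoint R-CNN head above is a stack of len(layers) 3x3 convolutions
    # (stride 1, padding 1) with ReLU activations, so it only changes the channel dimension:
    # an input of shape [*, in_channels, H, W] becomes [*, layers[-1], H, W].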


class LineRCNNPredictor(nn.Module):
    # Placeholder: LineRCNN.__init__ instantiates this as LineRCNNPredictor(keypoint_dim_reduced),
    # so a line-specific predictor still needs to be implemented. The commented-out code below is
    # the Keypoint R-CNN predictor (note its different signature), kept as a reference.
    pass

    # def __init__(self, in_channels, num_keypoints):
    #     super().__init__()
    #     input_features = in_channels
    #     deconv_kernel = 4
    #     self.kps_score_lowres = nn.ConvTranspose2d(
    #         input_features,
    #         num_keypoints,
    #         deconv_kernel,
    #         stride=2,
    #         padding=deconv_kernel // 2 - 1,
    #     )
    #     nn.init.kaiming_normal_(self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu")
    #     nn.init.constant_(self.kps_score_lowres.bias, 0)
    #     self.up_scale = 2
    #     self.out_channels = num_keypoints
    #
    # def forward(self, x):
    #     x = self.kps_score_lowres(x)
    #     return torch.nn.functional.interpolate(
    #         x, scale_factor=float(self.up_scale), mode="bilinear", align_corners=False, recompute_scale_factor=False
    #     )
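    # For reference, the Keypoint R-CNN predictor above turns the head output into per-keypoint
    # heatmap logits: a ConvTranspose2d doubles the spatial resolution while mapping the channels
    # to num_keypoints, and forward() upsamples by another factor of 2 with bilinear interpolation.
    # A line predictor will presumably need a different output parameterization.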


_COMMON_META = {
    "categories": _COCO_PERSON_CATEGORIES,
    "keypoint_names": _COCO_PERSON_KEYPOINT_NAMES,
    "min_size": (1, 1),
}


class LineRCNN_ResNet50_FPN_Weights(WeightsEnum):
    COCO_LEGACY = Weights(
        url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 59137258,
            "recipe": "https://github.com/pytorch/vision/issues/1606",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 50.6,
                    "kp_map": 61.1,
                }
            },
            "_ops": 133.924,
            "_file_size": 226.054,
            "_docs": """
                These weights were produced by following a similar training recipe as on the paper but use a
                checkpoint from an early epoch.
            """,
        },
    )
    COCO_V1 = Weights(
        url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 59137258,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#keypoint-r-cnn",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 54.6,
                    "kp_map": 65.0,
                }
            },
            "_ops": 137.42,
            "_file_size": 226.054,
            "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
        },
    )
    DEFAULT = COCO_V1
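
# Note: both weight entries above reuse torchvision's Keypoint R-CNN ResNet50-FPN checkpoints
# (see the URLs), together with their COCO person/keypoint metadata. The metadata can be
# inspected through the enum members, e.g. (illustrative):
#
#     LineRCNN_ResNet50_FPN_Weights.DEFAULT.meta["_metrics"]["COCO-val2017"]["box_map"]  # 54.6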


@register_model()
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: LineRCNN_ResNet50_FPN_Weights.COCO_LEGACY
        if kwargs["pretrained"] == "legacy"
        else LineRCNN_ResNet50_FPN_Weights.COCO_V1,
    ),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def linercnn_resnet50_fpn(
    *,
    weights: Optional[LineRCNN_ResNet50_FPN_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    num_keypoints: Optional[int] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    trainable_backbone_layers: Optional[int] = None,
    **kwargs: Any,
) -> LineRCNN:
  362. """
  363. Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.
  364. .. betastatus:: detection module
  365. Reference: `Mask R-CNN <https://arxiv.org/abs/1703.06870>`__.
  366. The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
  367. image, and should be in ``0-1`` range. Different images can have different sizes.
  368. The behavior of the model changes depending on if it is in training or evaluation mode.
  369. During training, the model expects both the input tensors and targets (list of dictionary),
  370. containing:
  371. - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
  372. ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
  373. - labels (``Int64Tensor[N]``): the class label for each ground-truth box
  374. - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoints location for each of the ``N`` instances, in the
  375. format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.
  376. The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
  377. losses for both the RPN and the R-CNN, and the keypoint loss.
  378. During inference, the model requires only the input tensors, and returns the post-processed
  379. predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
  380. follows, where ``N`` is the number of detected instances:
  381. - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
  382. ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
  383. - labels (``Int64Tensor[N]``): the predicted labels for each instance
  384. - scores (``Tensor[N]``): the scores or each instance
  385. - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.
  386. For more details on the output, you may refer to :ref:`instance_seg_output`.
  387. Keypoint R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.
  388. Example::
  389. >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(weights=KeypointRCNN_ResNet50_FPN_Weights.DEFAULT)
  390. >>> model.eval()
  391. >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
  392. >>> predictions = model(x)
  393. >>>
  394. >>> # optionally, if you want to export the model to ONNX:
  395. >>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11)
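        >>>
        >>> # A model for fine-tuning without the detection checkpoint can be built as follows
        >>> # (illustrative values; with ``weights=None`` only the ImageNet backbone weights
        >>> # are downloaded):
        >>> model = linercnn_resnet50_fpn(weights=None,
        >>>                               weights_backbone=ResNet50_Weights.IMAGENET1K_V1,
        >>>                               num_classes=2,
        >>>                               trainable_backbone_layers=2)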

    Args:
        weights (:class:`LineRCNN_ResNet50_FPN_Weights`, optional): The
            pretrained weights to use. See
            :class:`LineRCNN_ResNet50_FPN_Weights` below for more details,
            and possible values. By default, no pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int, optional): number of output classes of the model (including the background)
        num_keypoints (int, optional): number of keypoints
        weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights for the backbone.
        trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block.
            Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
            passed (the default) this value is set to 3.

    .. autoclass:: LineRCNN_ResNet50_FPN_Weights
        :members:
    """
    weights = LineRCNN_ResNet50_FPN_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        # The checkpoint fixes the number of categories and keypoints, so user-supplied values must match.
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        num_keypoints = _ovewrite_value_param("num_keypoints", num_keypoints, len(weights.meta["keypoint_names"]))
    else:
        if num_classes is None:
            num_classes = 2
        if num_keypoints is None:
            num_keypoints = 17

    is_trained = weights is not None or weights_backbone is not None
    trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
    norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d

    backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
    backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
    model = LineRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if weights == LineRCNN_ResNet50_FPN_Weights.COCO_V1:
            # overwrite_eps keeps the FrozenBatchNorm eps consistent with how this checkpoint was trained.
            overwrite_eps(model, 0.0)

    return model
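

# Usage note (assumption: the vendored libs.vision_libs copy exposes the same model-registry
# helpers as torchvision): because the builder is decorated with @register_model(), it can also
# be obtained by name, e.g.
#
#     from libs.vision_libs.models import get_model
#     model = get_model("linercnn_resnet50_fpn", weights=None, num_classes=2)
#
# Direct calls to linercnn_resnet50_fpn(...) work the same way, as shown in the docstring above.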