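"""LineRCNN: Faster R-CNN with a line/wireframe branch.

Adapted from torchvision's Keypoint R-CNN implementation: the keypoint branch is replaced
by a line head (``LineRCNNHeads``) and a line predictor (``LineRCNNPredictor``) that samples
candidate line segments from junction/line heatmaps and scores them.
"""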
from typing import Any, Optional

import torch
from torch import nn
from torchvision.ops import MultiScaleRoIAlign

from libs.vision_libs.ops import misc as misc_nn_ops
from libs.vision_libs.transforms._presets import ObjectDetection

from .roi_heads import RoIHeads

from libs.vision_libs.models._api import register_model, Weights, WeightsEnum
from libs.vision_libs.models._meta import _COCO_PERSON_CATEGORIES, _COCO_PERSON_KEYPOINT_NAMES
from libs.vision_libs.models._utils import _ovewrite_value_param, handle_legacy_interface
from libs.vision_libs.models.resnet import resnet50, ResNet50_Weights
from libs.vision_libs.models.detection._utils import overwrite_eps
from libs.vision_libs.models.detection.backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from libs.vision_libs.models.detection.faster_rcnn import FasterRCNN, TwoMLPHead, FastRCNNPredictor
from models.config.config_tool import read_yaml

import numpy as np
import torch.nn.functional as F

# Size of the hand-crafted per-line descriptor built in LineRCNNPredictor.sample_lines:
# two endpoint coordinates (2 + 2), a unit direction vector (2) and two junction-type flags (1 + 1).
FEATURE_DIM = 8

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

__all__ = [
    "LineRCNN",
    "LineRCNN_ResNet50_FPN_Weights",
    "linercnn_resnet50_fpn",
]

def non_maximum_suppression(a):
    """Zero out every heatmap value that is not the maximum of its 3x3 neighbourhood."""
    ap = F.max_pool2d(a, 3, stride=1, padding=1)
    mask = (a == ap).float().clamp(min=0.0)
    return a * mask
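
# Usage sketch (illustrative only): keep only local peaks of a junction heatmap before
# top-k selection, e.g. `peaks = non_maximum_suppression(jmap)` where `jmap` is a
# [N_TYPE, H, W] or [N, C, H, W] tensor of scores.
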
class Bottleneck1D(nn.Module):
    def __init__(self, inplanes, outplanes):
        super(Bottleneck1D, self).__init__()
        planes = outplanes // 2
        self.op = nn.Sequential(
            nn.BatchNorm1d(inplanes),
            nn.ReLU(inplace=True),
            nn.Conv1d(inplanes, planes, kernel_size=1),
            nn.BatchNorm1d(planes),
            nn.ReLU(inplace=True),
            nn.Conv1d(planes, planes, kernel_size=3, padding=1),
            nn.BatchNorm1d(planes),
            nn.ReLU(inplace=True),
            nn.Conv1d(planes, outplanes, kernel_size=1),
        )

    def forward(self, x):
        return x + self.op(x)
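
# Bottleneck1D is used by LineRCNNPredictor (when `use_conv` is set) to refine the pooled
# line-of-interest features; the residual connection requires inplanes == outplanes.
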
class LineRCNN(FasterRCNN):
    """
    Implements Line R-CNN, a Faster R-CNN variant with a line/wireframe branch, adapted from
    torchvision's Keypoint R-CNN.

    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    image, and should be in 0-1 range. Different images can have different sizes.

    The behavior of the model changes depending on if it is in training or evaluation mode.

    During training, the model expects both the input tensors and targets (list of dictionary),
    containing:

        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the class label for each ground-truth box
        - keypoints (FloatTensor[N, K, 3]): the K keypoints location for each of the N instances, in the
          format [x, y, visibility], where visibility=0 means that the keypoint is not visible.

    The model returns a Dict[Tensor] during training, containing the classification and regression
    losses for both the RPN and the R-CNN, and the keypoint loss.

    During inference, the model requires only the input tensors, and returns the post-processed
    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    follows:

        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores of each prediction
        - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.

    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            It should contain an out_channels attribute, which indicates the number of output
            channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
        num_classes (int): number of output classes of the model (including the background).
            If box_predictor is specified, num_classes should be None.
        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained on
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained on
        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        rpn_score_thresh (float): during inference, only return proposals with a classification score
            greater than rpn_score_thresh
        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
            the locations indicated by the bounding boxes
        box_head (nn.Module): module that takes the cropped feature maps as input
        box_predictor (nn.Module): module that takes the output of box_head and returns the
            classification logits and box regression deltas.
        box_score_thresh (float): during inference, only return proposals with a classification score
            greater than box_score_thresh
        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
        box_detections_per_img (int): maximum number of detections per image, for all classes.
        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
            considered as positive during training of the classification head
        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
            considered as negative during training of the classification head
        box_batch_size_per_image (int): number of proposals that are sampled during training of the
            classification head
        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
            of the classification head
        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
            bounding boxes
        line_head (nn.Module): module that computes junction/line feature maps from the backbone
            features (defaults to LineRCNNHeads)
        line_predictor (nn.Module): module that takes the line head output and the pooled features
            and returns line scores and junction predictions (defaults to LineRCNNPredictor)

    Example (construction mirrors torchvision's KeypointRCNN)::

        >>> import torch
        >>> import torchvision
        >>> from torchvision.models.detection import KeypointRCNN
        >>> from torchvision.models.detection.anchor_utils import AnchorGenerator
        >>>
        >>> # load a pre-trained model for classification and return
        >>> # only the features
        >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
        >>> # KeypointRCNN needs to know the number of
        >>> # output channels in a backbone. For mobilenet_v2, it's 1280,
        >>> # so we need to add it here
        >>> backbone.out_channels = 1280
        >>>
        >>> # let's make the RPN generate 5 x 3 anchors per spatial
        >>> # location, with 5 different sizes and 3 different aspect
        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
        >>> # map could potentially have different sizes and
        >>> # aspect ratios
        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
        >>>
        >>> # let's define what are the feature maps that we will
        >>> # use to perform the region of interest cropping, as well as
        >>> # the size of the crop after rescaling.
        >>> # if your backbone returns a Tensor, featmap_names is expected to
        >>> # be ['0']. More generally, the backbone should return an
        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
        >>> # feature maps to use.
        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                 output_size=7,
        >>>                                                 sampling_ratio=2)
        >>>
        >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                          output_size=14,
        >>>                                                          sampling_ratio=2)
        >>> # put the pieces together inside a KeypointRCNN model
        >>> model = KeypointRCNN(backbone,
        >>>                      num_classes=2,
        >>>                      rpn_anchor_generator=anchor_generator,
        >>>                      box_roi_pool=roi_pooler,
        >>>                      keypoint_roi_pool=keypoint_roi_pooler)
        >>> model.eval()
        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
        >>> predictions = model(x)
    """
    def __init__(
        self,
        backbone,
        num_classes=None,
        # transform parameters
        min_size=512,  # originally None
        max_size=1333,
        image_mean=None,
        image_std=None,
        # RPN parameters
        rpn_anchor_generator=None,
        rpn_head=None,
        rpn_pre_nms_top_n_train=2000,
        rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7,
        rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256,
        rpn_positive_fraction=0.5,
        rpn_score_thresh=0.0,
        # Box parameters
        box_roi_pool=None,
        box_head=None,
        box_predictor=None,
        box_score_thresh=0.05,
        box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5,
        box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512,
        box_positive_fraction=0.25,
        bbox_reg_weights=None,
        # line parameters
        line_head=None,
        line_predictor=None,
        **kwargs,
    ):
        out_channels = backbone.out_channels

        if line_head is None:
            num_class = 5
            line_head = LineRCNNHeads(out_channels, num_class)
        if line_predictor is None:
            line_predictor = LineRCNNPredictor()

        super().__init__(
            backbone,
            num_classes,
            # transform parameters
            min_size,
            max_size,
            image_mean,
            image_std,
            # RPN-specific parameters
            rpn_anchor_generator,
            rpn_head,
            rpn_pre_nms_top_n_train,
            rpn_pre_nms_top_n_test,
            rpn_post_nms_top_n_train,
            rpn_post_nms_top_n_test,
            rpn_nms_thresh,
            rpn_fg_iou_thresh,
            rpn_bg_iou_thresh,
            rpn_batch_size_per_image,
            rpn_positive_fraction,
            rpn_score_thresh,
            # Box parameters
            box_roi_pool,
            box_head,
            box_predictor,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            **kwargs,
        )

        # Rebuild the RoI heads so that the line head and line predictor are wired into them.
        if box_roi_pool is None:
            box_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=14, sampling_ratio=2)
        if box_head is None:
            resolution = box_roi_pool.output_size[0]
            representation_size = 1024
            box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
        if box_predictor is None:
            representation_size = 1024
            box_predictor = FastRCNNPredictor(representation_size, num_classes)

        roi_heads = RoIHeads(
            # Box
            box_roi_pool,
            box_head,
            box_predictor,
            line_head,
            line_predictor,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
        )
        self.roi_heads = roi_heads
        self.roi_heads.line_head = line_head
        self.roi_heads.line_predictor = line_predictor

class LineRCNNHeads(nn.Sequential):
    def __init__(self, input_channels, num_class):
        super(LineRCNNHeads, self).__init__()
        # print("input channel dimension:", input_channels)
        m = int(input_channels / 4)
        heads = []
        self.head_size = [[2], [1], [2]]
        for output_channels in sum(self.head_size, []):
            heads.append(
                nn.Sequential(
                    nn.Conv2d(input_channels, m, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(m, output_channels, kernel_size=1),
                )
            )
        self.heads = nn.ModuleList(heads)
        assert num_class == sum(sum(self.head_size, []))

    def forward(self, x):
        return torch.cat([head(x) for head in self.heads], dim=1)
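
# Channel layout of the LineRCNNHeads output (head_size = [[2], [1], [2]], 5 channels total):
#   channels 0-1: junction-map logits (treated as a 2-way softmax per junction type),
#   channel 2:    line map,
#   channels 3-4: junction sub-pixel offsets (used as y and x in sample_lines).
# LineRCNNPredictor.forward slices this tensor with the cumulative offsets of its configured
# head_size, which is expected to match this layout (cumsum([2, 1, 2]) = [2, 3, 5]).
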
class LineRCNNPredictor(nn.Module):
    def __init__(self):
        super().__init__()
        # self.backbone = backbone
        # self.cfg = read_yaml(cfg)
        self.cfg = read_yaml(r'./config/wireframe.yaml')
        self.n_pts0 = self.cfg['model']['n_pts0']
        self.n_pts1 = self.cfg['model']['n_pts1']
        self.n_stc_posl = self.cfg['model']['n_stc_posl']
        self.dim_loi = self.cfg['model']['dim_loi']
        self.use_conv = self.cfg['model']['use_conv']
        self.dim_fc = self.cfg['model']['dim_fc']
        self.n_out_line = self.cfg['model']['n_out_line']
        self.n_out_junc = self.cfg['model']['n_out_junc']
        self.loss_weight = self.cfg['model']['loss_weight']
        self.n_dyn_junc = self.cfg['model']['n_dyn_junc']
        self.eval_junc_thres = self.cfg['model']['eval_junc_thres']
        self.n_dyn_posl = self.cfg['model']['n_dyn_posl']
        self.n_dyn_negl = self.cfg['model']['n_dyn_negl']
        self.n_dyn_othr = self.cfg['model']['n_dyn_othr']
        self.use_cood = self.cfg['model']['use_cood']
        self.use_slop = self.cfg['model']['use_slop']
        self.n_stc_negl = self.cfg['model']['n_stc_negl']
        self.head_size = self.cfg['model']['head_size']
        self.num_class = sum(sum(self.head_size, []))
        self.head_off = np.cumsum([sum(h) for h in self.head_size])

        lambda_ = torch.linspace(0, 1, self.n_pts0)[:, None]
        self.register_buffer("lambda_", lambda_)
        self.do_static_sampling = self.n_stc_posl + self.n_stc_negl > 0

        self.fc1 = nn.Conv2d(256, self.dim_loi, 1)
        scale_factor = self.n_pts0 // self.n_pts1
        if self.use_conv:
            self.pooling = nn.Sequential(
                nn.MaxPool1d(scale_factor, scale_factor),
                Bottleneck1D(self.dim_loi, self.dim_loi),
            )
            self.fc2 = nn.Sequential(
                nn.ReLU(inplace=True), nn.Linear(self.dim_loi * self.n_pts1 + FEATURE_DIM, 1)
            )
        else:
            self.pooling = nn.MaxPool1d(scale_factor, scale_factor)
            self.fc2 = nn.Sequential(
                nn.Linear(self.dim_loi * self.n_pts1 + FEATURE_DIM, self.dim_fc),
                nn.ReLU(inplace=True),
                nn.Linear(self.dim_fc, self.dim_fc),
                nn.ReLU(inplace=True),
                nn.Linear(self.dim_fc, 1),
            )
        self.loss = nn.BCEWithLogitsLoss(reduction="none")
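
    # Expected keys under `model:` in ./config/wireframe.yaml (names taken from the reads
    # above; the example values are typical LCNN-style settings and may differ in this repo):
    #   n_pts0 / n_pts1: points sampled per line before/after pooling (e.g. 32 / 8)
    #   dim_loi / dim_fc: width of the line-of-interest features / FC head (e.g. 128 / 1024)
    #   n_stc_posl / n_stc_negl: statically sampled positive/negative lines per image
    #   n_dyn_junc / n_dyn_posl / n_dyn_negl / n_dyn_othr: dynamic sampling budgets
    #   eval_junc_thres: junction score threshold used at inference
    #   use_cood / use_slop / use_conv: feature and architecture switches (0 or 1)
    #   head_size: e.g. [[2], [1], [2]]; loss_weight: per-head loss weights
    #   n_out_line / n_out_junc: number of lines/junctions kept in the output
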
    def forward(self, inputs, features, targets=None):
        batch, channel, row, col = inputs.shape

        if targets is not None:
            self.training = True
            # Collect the wireframe annotations ('junc_map', 'junc_offset', 'line_map') from each target.
            wires_targets = [t["wires"] for t in targets]
            junc_maps = [d["junc_map"] for d in wires_targets]
            junc_offsets = [d["junc_offset"] for d in wires_targets]
            line_maps = [d["line_map"] for d in wires_targets]
            junc_map_tensor = torch.stack(junc_maps, dim=0)
            junc_offset_tensor = torch.stack(junc_offsets, dim=0)
            line_map_tensor = torch.stack(line_maps, dim=0)
            wires_meta = {
                "junc_map": junc_map_tensor,
                "junc_offset": junc_offset_tensor,
                # "line_map": line_map_tensor,
            }
        else:
            self.training = False
            # No annotations at inference: fall back to empty placeholders with the expected shapes.
            t = {
                "junc_coords": torch.zeros(1, 2),
                "jtyp": torch.zeros(1, dtype=torch.uint8),
                "line_pos_idx": torch.zeros(2, 2, dtype=torch.uint8),
                "line_neg_idx": torch.zeros(2, 2, dtype=torch.uint8),
                "junc_map": torch.zeros([1, 1, 128, 128]),
                "junc_offset": torch.zeros([1, 1, 2, 128, 128]),
            }
            wires_targets = [t for b in range(inputs.size(0))]
            wires_meta = {
                "junc_map": torch.zeros([1, 1, 128, 128]),
                "junc_offset": torch.zeros([1, 1, 2, 128, 128]),
            }

        T = wires_meta.copy()
        n_jtyp = T["junc_map"].shape[1]
        offset = self.head_off
        result = {}
        for stack, output in enumerate([inputs]):
            output = output.transpose(0, 1).reshape([-1, batch, row, col]).contiguous()
            # Split the stacked head output into junction map, line map and junction offsets.
            jmap = output[0: offset[0]].reshape(n_jtyp, 2, batch, row, col)
            lmap = output[offset[0]: offset[1]].squeeze(0)
            joff = output[offset[1]: offset[2]].reshape(n_jtyp, 2, batch, row, col)
            if stack == 0:
                result["preds"] = {
                    "jmap": jmap.permute(2, 0, 1, 3, 4).softmax(2)[:, :, 1],
                    "lmap": lmap.sigmoid(),
                    "joff": joff.permute(2, 0, 1, 3, 4).sigmoid() - 0.5,
                }

        h = result["preds"]
        x = self.fc1(features)
        n_batch, n_channel, row, col = x.shape

        xs, ys, fs, ps, idx, jcs = [], [], [], [], [0], []
        for i, meta in enumerate(wires_targets):
            p, label, feat, jc = self.sample_lines(
                meta, h["jmap"][i], h["joff"][i],
            )
            ys.append(label)
            if self.training and self.do_static_sampling:
                p = torch.cat([p, meta["lpre"]])
                feat = torch.cat([feat, meta["lpre_feat"]])
                ys.append(meta["lpre_label"])
                del jc
            else:
                jcs.append(jc)
                ps.append(p)
            fs.append(feat)

            # Sample n_pts0 points along each line and bilinearly interpolate the LoI features.
            p = p[:, 0:1, :] * self.lambda_ + p[:, 1:2, :] * (1 - self.lambda_) - 0.5
            p = p.reshape(-1, 2)  # [N_LINE x N_POINT, 2_XY]
            px, py = p[:, 0].contiguous(), p[:, 1].contiguous()
            px0 = px.floor().clamp(min=0, max=127)
            py0 = py.floor().clamp(min=0, max=127)
            px1 = (px0 + 1).clamp(min=0, max=127)
            py1 = (py0 + 1).clamp(min=0, max=127)
            px0l, py0l, px1l, py1l = px0.long(), py0.long(), px1.long(), py1.long()

            # xp: [N_LINE, N_CHANNEL, N_POINT]
            xp = (
                (
                    x[i, :, px0l, py0l] * (px1 - px) * (py1 - py)
                    + x[i, :, px1l, py0l] * (px - px0) * (py1 - py)
                    + x[i, :, px0l, py1l] * (px1 - px) * (py - py0)
                    + x[i, :, px1l, py1l] * (px - px0) * (py - py0)
                )
                .reshape(n_channel, -1, self.n_pts0)
                .permute(1, 0, 2)
            )
            xp = self.pooling(xp)
            xs.append(xp)
            idx.append(idx[-1] + xp.shape[0])

        x, y = torch.cat(xs), torch.cat(ys)
        f = torch.cat(fs)
        x = x.reshape(-1, self.n_pts1 * self.dim_loi)
        x = torch.cat([x, f], 1)
        x = x.to(dtype=torch.float32)
        x = self.fc2(x).flatten()
        return x, y, idx, jcs, n_batch, ps, self.n_out_line, self.n_out_junc
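
    # forward() returns, in order:
    #   x       -- per-line classification logits, flattened over all images in the batch,
    #   y       -- the matching labels (only meaningful when targets were given),
    #   idx     -- prefix offsets that split x/y back into per-image chunks,
    #   jcs     -- per-image junction coordinates kept on the inference path,
    #   n_batch -- batch size of the pooled feature map,
    #   ps      -- per-image sampled line endpoints (inference path),
    #   n_out_line, n_out_junc -- output budgets taken from the config.
    # The downstream RoIHeads is expected to turn these into losses or detections.
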
    def sample_lines(self, meta, jmap, joff):
        with torch.no_grad():
            junc = meta["junc_coords"]  # [N, 2]
            jtyp = meta["jtyp"]  # [N]
            Lpos = meta["line_pos_idx"]
            Lneg = meta["line_neg_idx"]

            n_type = jmap.shape[0]
            jmap = non_maximum_suppression(jmap).reshape(n_type, -1)
            joff = joff.reshape(n_type, 2, -1)
            max_K = self.n_dyn_junc // n_type
            N = len(junc)
            if not self.training:
                K = min(int((jmap > self.eval_junc_thres).float().sum().item()), max_K)
            else:
                K = min(int(N * 2 + 2), max_K)
            if K < 2:
                K = 2
            device = jmap.device

            # index: [N_TYPE, K]
            score, index = torch.topk(jmap, k=K)
            y = (index // 128).float() + torch.gather(joff[:, 0], 1, index) + 0.5
            x = (index % 128).float() + torch.gather(joff[:, 1], 1, index) + 0.5

            # xy: [N_TYPE, K, 2]
            xy = torch.cat([y[..., None], x[..., None]], dim=-1)
            xy_ = xy[..., None, :]
            del x, y, index

            # dist: [N_TYPE, K, N]
            dist = torch.sum((xy_ - junc) ** 2, -1)
            cost, match = torch.min(dist, -1)

            # xy: [N_TYPE * K, 2]
            # match: [N_TYPE, K]
            for t in range(n_type):
                match[t, jtyp[match[t]] != t] = N
            match[cost > 1.5 * 1.5] = N
            match = match.flatten()

            _ = torch.arange(n_type * K, device=device)
            u, v = torch.meshgrid(_, _)
            u, v = u.flatten(), v.flatten()
            up, vp = match[u], match[v]
            label = Lpos[up, vp]

            if self.training:
                c = torch.zeros_like(label, dtype=torch.bool)

                # sample positive lines
                cdx = label.nonzero().flatten()
                if len(cdx) > self.n_dyn_posl:
                    # too many positive lines: subsample
                    perm = torch.randperm(len(cdx), device=device)[: self.n_dyn_posl]
                    cdx = cdx[perm]
                c[cdx] = 1

                # sample negative lines
                cdx = Lneg[up, vp].nonzero().flatten()
                if len(cdx) > self.n_dyn_negl:
                    # too many negative lines: subsample
                    perm = torch.randperm(len(cdx), device=device)[: self.n_dyn_negl]
                    cdx = cdx[perm]
                c[cdx] = 1

                # sample other (unmatched) lines
                cdx = torch.randint(len(c), (self.n_dyn_othr,), device=device)
                c[cdx] = 1
            else:
                c = (u < v).flatten()

            # sample lines
            u, v, label = u[c], v[c], label[c]
            xy = xy.reshape(n_type * K, 2)
            xyu, xyv = xy[u], xy[v]

            u2v = xyu - xyv
            u2v /= torch.sqrt((u2v ** 2).sum(-1, keepdim=True)).clamp(min=1e-6)
            # FEATURE_DIM = 8: endpoint coords (2 + 2), unit direction (2), junction-type flags (1 + 1).
            feat = torch.cat(
                [
                    xyu / 128 * self.use_cood,
                    xyv / 128 * self.use_cood,
                    u2v * self.use_slop,
                    (u[:, None] > K).float(),
                    (v[:, None] > K).float(),
                ],
                1,
            )

            line = torch.cat([xyu[:, None], xyv[:, None]], 1)
            xy = xy.reshape(n_type, K, 2)
            jcs = [xy[i, score[i] > 0.03] for i in range(n_type)]
            return line, label.float(), feat, jcs

_COMMON_META = {
    "categories": _COCO_PERSON_CATEGORIES,
    "keypoint_names": _COCO_PERSON_KEYPOINT_NAMES,
    "min_size": (1, 1),
}


class LineRCNN_ResNet50_FPN_Weights(WeightsEnum):
    COCO_LEGACY = Weights(
        url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 59137258,
            "recipe": "https://github.com/pytorch/vision/issues/1606",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 50.6,
                    "kp_map": 61.1,
                }
            },
            "_ops": 133.924,
            "_file_size": 226.054,
            "_docs": """
                These weights were produced by following a similar training recipe as on the paper but use a checkpoint
                from an early epoch.
            """,
        },
    )
    COCO_V1 = Weights(
        url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 59137258,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#keypoint-r-cnn",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 54.6,
                    "kp_map": 65.0,
                }
            },
            "_ops": 137.42,
            "_file_size": 226.054,
            "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
        },
    )
    DEFAULT = COCO_V1
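
# Note: both weight entries above reuse torchvision's Keypoint R-CNN ResNet-50-FPN
# checkpoints and their COCO keypoint metadata; they do not include parameters for the
# line head or line predictor defined in this module.
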
@register_model()
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: LineRCNN_ResNet50_FPN_Weights.COCO_LEGACY
        if kwargs["pretrained"] == "legacy"
        else LineRCNN_ResNet50_FPN_Weights.COCO_V1,
    ),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def linercnn_resnet50_fpn(
    *,
    weights: Optional[LineRCNN_ResNet50_FPN_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    num_keypoints: Optional[int] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    trainable_backbone_layers: Optional[int] = None,
    **kwargs: Any,
) -> LineRCNN:
    """
    Constructs a Line R-CNN model with a ResNet-50-FPN backbone, following the layout of
    torchvision's Keypoint R-CNN builder.

    .. betastatus:: detection module

    Reference: `Mask R-CNN <https://arxiv.org/abs/1703.06870>`__.

    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
    image, and should be in ``0-1`` range. Different images can have different sizes.

    The behavior of the model changes depending on if it is in training or evaluation mode.

    During training, the model expects both the input tensors and targets (list of dictionary),
    containing:

        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
        - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoints location for each of the ``N`` instances, in the
          format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.

    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
    losses for both the RPN and the R-CNN, and the keypoint loss.

    During inference, the model requires only the input tensors, and returns the post-processed
    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
    follows, where ``N`` is the number of detected instances:

        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (``Int64Tensor[N]``): the predicted labels for each instance
        - scores (``Tensor[N]``): the scores of each instance
        - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.

    For more details on the output, you may refer to :ref:`instance_seg_output`.

    Keypoint R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.

    Example::

        >>> model = linercnn_resnet50_fpn(weights=LineRCNN_ResNet50_FPN_Weights.DEFAULT)
        >>> model.eval()
        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
        >>> predictions = model(x)
        >>>
        >>> # optionally, if you want to export the model to ONNX:
        >>> torch.onnx.export(model, x, "line_rcnn.onnx", opset_version=11)

    Args:
        weights (:class:`LineRCNN_ResNet50_FPN_Weights`, optional): The
            pretrained weights to use. See :class:`LineRCNN_ResNet50_FPN_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int, optional): number of output classes of the model (including the background)
        num_keypoints (int, optional): number of keypoints
        weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights for the backbone.
        trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block.
            Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
            passed (the default) this value is set to 3.

    .. autoclass:: LineRCNN_ResNet50_FPN_Weights
        :members:
    """
    weights = LineRCNN_ResNet50_FPN_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        num_keypoints = _ovewrite_value_param("num_keypoints", num_keypoints, len(weights.meta["keypoint_names"]))
    else:
        if num_classes is None:
            num_classes = 2
        if num_keypoints is None:
            num_keypoints = 17

    is_trained = weights is not None or weights_backbone is not None
    trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
    norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d

    backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
    backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
    model = LineRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if weights == LineRCNN_ResNet50_FPN_Weights.COCO_V1:
            overwrite_eps(model, 0.0)

    return model
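
# Minimal usage sketch (illustrative only; run as a module from the package root so the
# relative import of `.roi_heads` resolves, and with `./config/wireframe.yaml` present).
# No pretrained weights are requested because the listed checkpoints come from
# Keypoint R-CNN and do not cover the line branch.
if __name__ == "__main__":
    model = linercnn_resnet50_fpn(weights=None, weights_backbone=None, num_classes=2)
    model.eval()
    images = [torch.rand(3, 512, 512)]
    with torch.no_grad():
        outputs = model(images)
    print(type(outputs))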