# line_rcnn.py

from typing import Any, Optional

import torch
from torch import nn
from torchvision.ops import MultiScaleRoIAlign

from libs.vision_libs.ops import misc as misc_nn_ops
from libs.vision_libs.transforms._presets import ObjectDetection
from .roi_heads import RoIHeads
from libs.vision_libs.models._api import register_model, Weights, WeightsEnum
from libs.vision_libs.models._meta import _COCO_PERSON_CATEGORIES, _COCO_PERSON_KEYPOINT_NAMES
from libs.vision_libs.models._utils import _ovewrite_value_param, handle_legacy_interface
from libs.vision_libs.models.resnet import resnet50, ResNet50_Weights
from libs.vision_libs.models.detection._utils import overwrite_eps
from libs.vision_libs.models.detection.backbone_utils import _resnet_fpn_extractor, _validate_trainable_layers
from libs.vision_libs.models.detection.faster_rcnn import FasterRCNN
from models.config.config_tool import read_yaml

import numpy as np
import torch.nn.functional as F

FEATURE_DIM = 8

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

__all__ = [
    "LineRCNN",
    "LineRCNN_ResNet50_FPN_Weights",
    "linercnn_resnet50_fpn",
]


def non_maximum_suppression(a):
    ap = F.max_pool2d(a, 3, stride=1, padding=1)
    mask = (a == ap).float().clamp(min=0.0)
    return a * mask
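

# Worked example (illustrative): non_maximum_suppression keeps only the entries
# that equal their local 3x3 maximum and zeroes everything else. For a 1x1x3x3
# heatmap
#     [[1, 2, 1],
#      [2, 5, 2],
#      [1, 2, 1]]
# only the center 5 survives, since every other cell has a larger value inside
# its 3x3 neighborhood; values tied with the local maximum are kept, so plateaus
# are not thinned to a single point.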


class Bottleneck1D(nn.Module):
    def __init__(self, inplanes, outplanes):
        super(Bottleneck1D, self).__init__()
        planes = outplanes // 2
        self.op = nn.Sequential(
            nn.BatchNorm1d(inplanes),
            nn.ReLU(inplace=True),
            nn.Conv1d(inplanes, planes, kernel_size=1),
            nn.BatchNorm1d(planes),
            nn.ReLU(inplace=True),
            nn.Conv1d(planes, planes, kernel_size=3, padding=1),
            nn.BatchNorm1d(planes),
            nn.ReLU(inplace=True),
            nn.Conv1d(planes, outplanes, kernel_size=1),
        )

    def forward(self, x):
        return x + self.op(x)


class LineRCNN(FasterRCNN):
    """
    Implements Line R-CNN (documentation adapted from torchvision's Keypoint R-CNN).

    The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each
    image, and should be in 0-1 range. Different images can have different sizes.

    The behavior of the model changes depending on if it is in training or evaluation mode.

    During training, the model expects both the input tensors and targets (list of dictionary),
    containing:

        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the class label for each ground-truth box
        - keypoints (FloatTensor[N, K, 3]): the K keypoints location for each of the N instances, in the
          format [x, y, visibility], where visibility=0 means that the keypoint is not visible.

    The model returns a Dict[Tensor] during training, containing the classification and regression
    losses for both the RPN and the R-CNN, and the keypoint loss.

    During inference, the model requires only the input tensors, and returns the post-processed
    predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as
    follows:

        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (Int64Tensor[N]): the predicted labels for each image
        - scores (Tensor[N]): the scores of each prediction
        - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.

    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            It should contain an out_channels attribute, which indicates the number of output
            channels that each feature map has (and it should be the same for all feature maps).
            The backbone should return a single Tensor or an OrderedDict[Tensor].
        num_classes (int): number of output classes of the model (including the background).
            If box_predictor is specified, num_classes should be None.
        min_size (int): minimum size of the image to be rescaled before feeding it to the backbone
        max_size (int): maximum size of the image to be rescaled before feeding it to the backbone
        image_mean (Tuple[float, float, float]): mean values used for input normalization.
            They are generally the mean values of the dataset on which the backbone has been trained on
        image_std (Tuple[float, float, float]): std values used for input normalization.
            They are generally the std values of the dataset on which the backbone has been trained on
        rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN
        rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training
        rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing
        rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training
        rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing
        rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
        rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        rpn_score_thresh (float): during inference, only return proposals with a classification score
            greater than rpn_score_thresh
        box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
            the locations indicated by the bounding boxes
        box_head (nn.Module): module that takes the cropped feature maps as input
        box_predictor (nn.Module): module that takes the output of box_head and returns the
            classification logits and box regression deltas.
        box_score_thresh (float): during inference, only return proposals with a classification score
            greater than box_score_thresh
        box_nms_thresh (float): NMS threshold for the prediction head. Used during inference
        box_detections_per_img (int): maximum number of detections per image, for all classes.
        box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be
            considered as positive during training of the classification head
        box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be
            considered as negative during training of the classification head
        box_batch_size_per_image (int): number of proposals that are sampled during training of the
            classification head
        box_positive_fraction (float): proportion of positive proposals in a mini-batch during training
            of the classification head
        bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the
            bounding boxes
        keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in
            the locations indicated by the bounding boxes, which will be used for the keypoint head.
        keypoint_head (nn.Module): module that takes the cropped feature maps as input
        keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the
            heatmap logits

    Example::

        >>> import torch
        >>> import torchvision
        >>> from torchvision.models.detection import KeypointRCNN
        >>> from torchvision.models.detection.anchor_utils import AnchorGenerator
        >>>
        >>> # load a pre-trained model for classification and return
        >>> # only the features
        >>> backbone = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.DEFAULT).features
        >>> # KeypointRCNN needs to know the number of
        >>> # output channels in a backbone. For mobilenet_v2, it's 1280,
        >>> # so we need to add it here
        >>> backbone.out_channels = 1280
        >>>
        >>> # let's make the RPN generate 5 x 3 anchors per spatial
        >>> # location, with 5 different sizes and 3 different aspect
        >>> # ratios. We have a Tuple[Tuple[int]] because each feature
        >>> # map could potentially have different sizes and
        >>> # aspect ratios
        >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
        >>>                                    aspect_ratios=((0.5, 1.0, 2.0),))
        >>>
        >>> # let's define what are the feature maps that we will
        >>> # use to perform the region of interest cropping, as well as
        >>> # the size of the crop after rescaling.
        >>> # if your backbone returns a Tensor, featmap_names is expected to
        >>> # be ['0']. More generally, the backbone should return an
        >>> # OrderedDict[Tensor], and in featmap_names you can choose which
        >>> # feature maps to use.
        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                 output_size=7,
        >>>                                                 sampling_ratio=2)
        >>>
        >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
        >>>                                                          output_size=14,
        >>>                                                          sampling_ratio=2)
        >>> # put the pieces together inside a KeypointRCNN model
        >>> model = KeypointRCNN(backbone,
        >>>                      num_classes=2,
        >>>                      rpn_anchor_generator=anchor_generator,
        >>>                      box_roi_pool=roi_pooler,
        >>>                      keypoint_roi_pool=keypoint_roi_pooler)
        >>> model.eval()
        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
        >>> predictions = model(x)
    """

    def __init__(
        self,
        backbone,
        num_classes=None,
        # transform parameters
        min_size=None,
        max_size=1333,
        image_mean=None,
        image_std=None,
        # RPN parameters
        rpn_anchor_generator=None,
        rpn_head=None,
        rpn_pre_nms_top_n_train=2000,
        rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7,
        rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256,
        rpn_positive_fraction=0.5,
        rpn_score_thresh=0.0,
        # Box parameters
        box_roi_pool=None,
        box_head=None,
        box_predictor=None,
        box_score_thresh=0.05,
        box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5,
        box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512,
        box_positive_fraction=0.25,
        bbox_reg_weights=None,
        # line parameters
        line_head=None,
        line_predictor=None,
        **kwargs,
    ):
        # if not isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None))):
        #     raise TypeError(
        #         "keypoint_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(keypoint_roi_pool)}"
        #     )
        # if min_size is None:
        #     min_size = (640, 672, 704, 736, 768, 800)
        #
        # if num_keypoints is not None:
        #     if keypoint_predictor is not None:
        #         raise ValueError("num_keypoints should be None when keypoint_predictor is specified")
        # else:
        #     num_keypoints = 17

        out_channels = backbone.out_channels

        if line_head is None:
            # keypoint_layers = tuple(512 for _ in range(8))
            num_class = 5
            line_head = LineRCNNHeads(out_channels, num_class)

        if line_predictor is None:
            keypoint_dim_reduced = 512  # == keypoint_layers[-1]
            line_predictor = LineRCNNPredictor()

        super().__init__(
            backbone,
            num_classes,
            # transform parameters
            min_size,
            max_size,
            image_mean,
            image_std,
            # RPN-specific parameters
            rpn_anchor_generator,
            rpn_head,
            rpn_pre_nms_top_n_train,
            rpn_pre_nms_top_n_test,
            rpn_post_nms_top_n_train,
            rpn_post_nms_top_n_test,
            rpn_nms_thresh,
            rpn_fg_iou_thresh,
            rpn_bg_iou_thresh,
            rpn_batch_size_per_image,
            rpn_positive_fraction,
            rpn_score_thresh,
            # Box parameters
            box_roi_pool,
            box_head,
            box_predictor,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            **kwargs,
        )

        roi_heads = RoIHeads(
            # Box
            box_roi_pool,
            box_head,
            box_predictor,
            line_head,
            line_predictor,
            box_fg_iou_thresh,
            box_bg_iou_thresh,
            box_batch_size_per_image,
            box_positive_fraction,
            bbox_reg_weights,
            box_score_thresh,
            box_nms_thresh,
            box_detections_per_img,
        )
        # super().roi_heads = roi_heads
        self.roi_heads = roi_heads
        self.roi_heads.line_head = line_head
        self.roi_heads.line_predictor = line_predictor


class LineRCNNHeads(nn.Sequential):
    def __init__(self, input_channels, num_class):
        super(LineRCNNHeads, self).__init__()
        # print("The input dimension is:", input_channels)
        m = int(input_channels / 4)
        heads = []
        self.head_size = [[2], [1], [2]]
        for output_channels in sum(self.head_size, []):
            heads.append(
                nn.Sequential(
                    nn.Conv2d(input_channels, m, kernel_size=3, padding=1),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(m, output_channels, kernel_size=1),
                )
            )
        self.heads = nn.ModuleList(heads)
        assert num_class == sum(sum(self.head_size, []))

    def forward(self, x):
        return torch.cat([head(x) for head in self.heads], dim=1)

    # def __init__(self, in_channels, layers):
    #     d = []
    #     next_feature = in_channels
    #     for out_channels in layers:
    #         d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))
    #         d.append(nn.ReLU(inplace=True))
    #         next_feature = out_channels
    #     super().__init__(*d)
    #     for m in self.children():
    #         if isinstance(m, nn.Conv2d):
    #             nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
    #             nn.init.constant_(m.bias, 0)


class LineRCNNPredictor(nn.Module):
    def __init__(self):
        super().__init__()
        # self.backbone = backbone
        # self.cfg = read_yaml(cfg)
        self.cfg = read_yaml(r'D:\python\PycharmProjects\lcnn-master\lcnn_\MultiVisionModels\models\line_detect\wireframe.yaml')
        self.n_pts0 = self.cfg['model']['n_pts0']
        self.n_pts1 = self.cfg['model']['n_pts1']
        self.n_stc_posl = self.cfg['model']['n_stc_posl']
        self.dim_loi = self.cfg['model']['dim_loi']
        self.use_conv = self.cfg['model']['use_conv']
        self.dim_fc = self.cfg['model']['dim_fc']
        self.n_out_line = self.cfg['model']['n_out_line']
        self.n_out_junc = self.cfg['model']['n_out_junc']
        self.loss_weight = self.cfg['model']['loss_weight']
        self.n_dyn_junc = self.cfg['model']['n_dyn_junc']
        self.eval_junc_thres = self.cfg['model']['eval_junc_thres']
        self.n_dyn_posl = self.cfg['model']['n_dyn_posl']
        self.n_dyn_negl = self.cfg['model']['n_dyn_negl']
        self.n_dyn_othr = self.cfg['model']['n_dyn_othr']
        self.use_cood = self.cfg['model']['use_cood']
        self.use_slop = self.cfg['model']['use_slop']
        self.n_stc_negl = self.cfg['model']['n_stc_negl']
        self.head_size = self.cfg['model']['head_size']
        self.num_class = sum(sum(self.head_size, []))
        self.head_off = np.cumsum([sum(h) for h in self.head_size])

        lambda_ = torch.linspace(0, 1, self.n_pts0)[:, None]
        self.register_buffer("lambda_", lambda_)
        self.do_static_sampling = self.n_stc_posl + self.n_stc_negl > 0

        self.fc1 = nn.Conv2d(256, self.dim_loi, 1)
        scale_factor = self.n_pts0 // self.n_pts1
        if self.use_conv:
            self.pooling = nn.Sequential(
                nn.MaxPool1d(scale_factor, scale_factor),
                Bottleneck1D(self.dim_loi, self.dim_loi),
            )
            self.fc2 = nn.Sequential(
                nn.ReLU(inplace=True), nn.Linear(self.dim_loi * self.n_pts1 + FEATURE_DIM, 1)
            )
        else:
            self.pooling = nn.MaxPool1d(scale_factor, scale_factor)
            self.fc2 = nn.Sequential(
                nn.Linear(self.dim_loi * self.n_pts1 + FEATURE_DIM, self.dim_fc),
                nn.ReLU(inplace=True),
                nn.Linear(self.dim_fc, self.dim_fc),
                nn.ReLU(inplace=True),
                nn.Linear(self.dim_fc, 1),
            )
        self.loss = nn.BCEWithLogitsLoss(reduction="none")

    def forward(self, result, targets=None):
        # result = self.backbone(input_dict)
        h = result["preds"]
        x = self.fc1(result["feature"])
        n_batch, n_channel, row, col = x.shape

        if targets is not None:
            self.training = True
            # print(f'target:{targets}')
            wires_targets = [t["wires"] for t in targets]
            # print(f'wires_target:{wires_targets}')
            # Collect the 'junc_map', 'junc_offset' and 'line_map' tensors.
            junc_maps = [d["junc_map"] for d in wires_targets]
            junc_offsets = [d["junc_offset"] for d in wires_targets]
            line_maps = [d["line_map"] for d in wires_targets]
            junc_map_tensor = torch.stack(junc_maps, dim=0)
            junc_offset_tensor = torch.stack(junc_offsets, dim=0)
            line_map_tensor = torch.stack(line_maps, dim=0)
            wires_meta = {
                "junc_map": junc_map_tensor,
                "junc_offset": junc_offset_tensor,
                # "line_map": line_map_tensor,
            }
        else:
            self.training = False
            t = {
                "junc_coords": torch.zeros(1, 2).to(device),
                "jtyp": torch.zeros(1, dtype=torch.uint8).to(device),
                "line_pos_idx": torch.zeros(2, 2, dtype=torch.uint8).to(device),
                "line_neg_idx": torch.zeros(2, 2, dtype=torch.uint8).to(device),
                "junc_map": torch.zeros([1, 1, 128, 128]).to(device),
                "junc_offset": torch.zeros([1, 1, 2, 128, 128]).to(device),
            }
            wires_targets = [t for b in range(n_batch)]
            wires_meta = {
                "junc_map": torch.zeros([1, 1, 128, 128]).to(device),
                "junc_offset": torch.zeros([1, 1, 2, 128, 128]).to(device),
            }

        xs, ys, fs, ps, idx, jcs = [], [], [], [], [0], []
        for i, meta in enumerate(wires_targets):
            p, label, feat, jc = self.sample_lines(meta, h["jmap"][i], h["joff"][i])
            # print("p.shape:", p.shape)
            ys.append(label)
            if self.training and self.do_static_sampling:
                p = torch.cat([p, meta["lpre"]])
                feat = torch.cat([feat, meta["lpre_feat"]])
                ys.append(meta["lpre_label"])
                del jc
            else:
                jcs.append(jc)
                ps.append(p)
            fs.append(feat)

            # Sample n_pts0 points along each candidate line and bilinearly
            # interpolate the LoI feature map at those points.
            p = p[:, 0:1, :] * self.lambda_ + p[:, 1:2, :] * (1 - self.lambda_) - 0.5
            p = p.reshape(-1, 2)  # [N_LINE x N_POINT, 2_XY]
            px, py = p[:, 0].contiguous(), p[:, 1].contiguous()
            px0 = px.floor().clamp(min=0, max=127)
            py0 = py.floor().clamp(min=0, max=127)
            px1 = (px0 + 1).clamp(min=0, max=127)
            py1 = (py0 + 1).clamp(min=0, max=127)
            px0l, py0l, px1l, py1l = px0.long(), py0.long(), px1.long(), py1.long()

            # xp: [N_LINE, N_CHANNEL, N_POINT]
            xp = (
                (
                    x[i, :, px0l, py0l] * (px1 - px) * (py1 - py)
                    + x[i, :, px1l, py0l] * (px - px0) * (py1 - py)
                    + x[i, :, px0l, py1l] * (px1 - px) * (py - py0)
                    + x[i, :, px1l, py1l] * (px - px0) * (py - py0)
                )
                .reshape(n_channel, -1, self.n_pts0)
                .permute(1, 0, 2)
            )
            xp = self.pooling(xp)
            xs.append(xp)
            idx.append(idx[-1] + xp.shape[0])

        x, y = torch.cat(xs), torch.cat(ys)
        f = torch.cat(fs)
        x = x.reshape(-1, self.n_pts1 * self.dim_loi)
        x = torch.cat([x, f], 1)
        x = x.to(dtype=torch.float32)
        x = self.fc2(x).flatten()

        # return x, idx, jcs, n_batch, ps, self.n_out_line, self.n_out_junc
        all = [x, ys, idx, jcs, n_batch, ps, self.n_out_line, self.n_out_junc]
        return all
        # return x, y, idx, jcs, n_batch, ps, self.n_out_line, self.n_out_junc

        # if mode != "training":
        #     self.inference(x, idx, jcs, n_batch, ps)

        # return result

    def sample_lines(self, meta, jmap, joff):
        with torch.no_grad():
            junc = meta["junc_coords"]  # [N, 2]
            jtyp = meta["jtyp"]  # [N]
            Lpos = meta["line_pos_idx"]
            Lneg = meta["line_neg_idx"]

            n_type = jmap.shape[0]
            jmap = non_maximum_suppression(jmap).reshape(n_type, -1)
            joff = joff.reshape(n_type, 2, -1)
            max_K = self.n_dyn_junc // n_type
            N = len(junc)
            # if mode != "training":
            if not self.training:
                K = min(int((jmap > self.eval_junc_thres).float().sum().item()), max_K)
            else:
                K = min(int(N * 2 + 2), max_K)
            if K < 2:
                K = 2
            device = jmap.device

            # index: [N_TYPE, K]
            score, index = torch.topk(jmap, k=K)
            y = (index // 128).float() + torch.gather(joff[:, 0], 1, index) + 0.5
            x = (index % 128).float() + torch.gather(joff[:, 1], 1, index) + 0.5

            # xy: [N_TYPE, K, 2]
            xy = torch.cat([y[..., None], x[..., None]], dim=-1)
            xy_ = xy[..., None, :]
            del x, y, index

            # print(f"xy_.is_cuda: {xy_.is_cuda}")
            # print(f"junc.is_cuda: {junc.is_cuda}")

            # dist: [N_TYPE, K, N]
            dist = torch.sum((xy_ - junc) ** 2, -1)
            cost, match = torch.min(dist, -1)

            # xy: [N_TYPE * K, 2]
            # match: [N_TYPE, K]
            for t in range(n_type):
                match[t, jtyp[match[t]] != t] = N
            match[cost > 1.5 * 1.5] = N
            match = match.flatten()

            _ = torch.arange(n_type * K, device=device)
            u, v = torch.meshgrid(_, _)
            u, v = u.flatten(), v.flatten()
            up, vp = match[u], match[v]
            label = Lpos[up, vp]

            # if mode == "training":
            if self.training:
                c = torch.zeros_like(label, dtype=torch.bool)

                # sample positive lines
                cdx = label.nonzero().flatten()
                if len(cdx) > self.n_dyn_posl:
                    # print("too many positive lines")
                    perm = torch.randperm(len(cdx), device=device)[: self.n_dyn_posl]
                    cdx = cdx[perm]
                c[cdx] = 1

                # sample negative lines
                cdx = Lneg[up, vp].nonzero().flatten()
                if len(cdx) > self.n_dyn_negl:
                    # print("too many negative lines")
                    perm = torch.randperm(len(cdx), device=device)[: self.n_dyn_negl]
                    cdx = cdx[perm]
                c[cdx] = 1

                # sample other (unmatched) lines
                cdx = torch.randint(len(c), (self.n_dyn_othr,), device=device)
                c[cdx] = 1
            else:
                c = (u < v).flatten()

            # sample lines
            u, v, label = u[c], v[c], label[c]
            xy = xy.reshape(n_type * K, 2)
            xyu, xyv = xy[u], xy[v]

            u2v = xyu - xyv
            u2v /= torch.sqrt((u2v ** 2).sum(-1, keepdim=True)).clamp(min=1e-6)
            # Per-line descriptor, 8 values in total (== FEATURE_DIM): scaled endpoint
            # coordinates (2 + 2), unit direction (2), and two endpoint-index indicator bits.
            feat = torch.cat(
                [
                    xyu / 128 * self.use_cood,
                    xyv / 128 * self.use_cood,
                    u2v * self.use_slop,
                    (u[:, None] > K).float(),
                    (v[:, None] > K).float(),
                ],
                1,
            )
            line = torch.cat([xyu[:, None], xyv[:, None]], 1)

            xy = xy.reshape(n_type, K, 2)
            jcs = [xy[i, score[i] > 0.03] for i in range(n_type)]
            return line, label.float(), feat, jcs


_COMMON_META = {
    "categories": _COCO_PERSON_CATEGORIES,
    "keypoint_names": _COCO_PERSON_KEYPOINT_NAMES,
    "min_size": (1, 1),
}


class LineRCNN_ResNet50_FPN_Weights(WeightsEnum):
    COCO_LEGACY = Weights(
        url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 59137258,
            "recipe": "https://github.com/pytorch/vision/issues/1606",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 50.6,
                    "kp_map": 61.1,
                }
            },
            "_ops": 133.924,
            "_file_size": 226.054,
            "_docs": """
                These weights were produced by following a similar training recipe as on the paper but use a checkpoint
                from an early epoch.
            """,
        },
    )
    COCO_V1 = Weights(
        url="https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth",
        transforms=ObjectDetection,
        meta={
            **_COMMON_META,
            "num_params": 59137258,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/detection#keypoint-r-cnn",
            "_metrics": {
                "COCO-val2017": {
                    "box_map": 54.6,
                    "kp_map": 65.0,
                }
            },
            "_ops": 137.42,
            "_file_size": 226.054,
            "_docs": """These weights were produced by following a similar training recipe as on the paper.""",
        },
    )
    DEFAULT = COCO_V1


@register_model()
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: LineRCNN_ResNet50_FPN_Weights.COCO_LEGACY
        if kwargs["pretrained"] == "legacy"
        else LineRCNN_ResNet50_FPN_Weights.COCO_V1,
    ),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def linercnn_resnet50_fpn(
    *,
    weights: Optional[LineRCNN_ResNet50_FPN_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    num_keypoints: Optional[int] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    trainable_backbone_layers: Optional[int] = None,
    **kwargs: Any,
) -> LineRCNN:
    """
    Constructs a Line R-CNN model with a ResNet-50-FPN backbone (documentation adapted from
    torchvision's Keypoint R-CNN).

    .. betastatus:: detection module

    Reference: `Mask R-CNN <https://arxiv.org/abs/1703.06870>`__.

    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
    image, and should be in ``0-1`` range. Different images can have different sizes.

    The behavior of the model changes depending on if it is in training or evaluation mode.

    During training, the model expects both the input tensors and targets (list of dictionary),
    containing:

        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
        - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoints location for each of the ``N`` instances, in the
          format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.

    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
    losses for both the RPN and the R-CNN, and the keypoint loss.

    During inference, the model requires only the input tensors, and returns the post-processed
    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
    follows, where ``N`` is the number of detected instances:

        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
        - labels (``Int64Tensor[N]``): the predicted labels for each instance
        - scores (``Tensor[N]``): the scores of each instance
        - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.

    For more details on the output, you may refer to :ref:`instance_seg_output`.

    Keypoint R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size.

    Example::

        >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(weights=KeypointRCNN_ResNet50_FPN_Weights.DEFAULT)
        >>> model.eval()
        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
        >>> predictions = model(x)
        >>>
        >>> # optionally, if you want to export the model to ONNX:
        >>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11)

    Args:
        weights (:class:`~torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr
        num_classes (int, optional): number of output classes of the model (including the background)
        num_keypoints (int, optional): number of keypoints
        weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights for the backbone.
        trainable_backbone_layers (int, optional): number of trainable (not frozen) layers starting from final block.
            Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. If ``None`` is
            passed (the default) this value is set to 3.

    .. autoclass:: torchvision.models.detection.KeypointRCNN_ResNet50_FPN_Weights
        :members:
    """
    weights = LineRCNN_ResNet50_FPN_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        num_keypoints = _ovewrite_value_param("num_keypoints", num_keypoints, len(weights.meta["keypoint_names"]))
    else:
        if num_classes is None:
            num_classes = 2
        if num_keypoints is None:
            num_keypoints = 17

    is_trained = weights is not None or weights_backbone is not None
    trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
    norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d

    backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
    backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
    model = LineRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if weights == LineRCNN_ResNet50_FPN_Weights.COCO_V1:
            overwrite_eps(model, 0.0)

    return model
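

if __name__ == "__main__":
    # Minimal usage sketch mirroring the docstring example above; it is an
    # illustration, not part of the library API. Assumptions: the repository-local
    # dependencies (libs.vision_libs, models.config, the custom RoIHeads in
    # .roi_heads) are importable, the wireframe.yaml path hard-coded in
    # LineRCNNPredictor exists, and the custom RoIHeads supports standard inference.
    # min_size is passed explicitly because LineRCNN defaults it to None; with
    # weights=None and weights_backbone=None, an untrained model is built without
    # any download.
    model = linercnn_resnet50_fpn(
        weights=None,
        weights_backbone=None,
        num_classes=2,
        min_size=512,
    )
    model.eval()
    images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    with torch.no_grad():
        predictions = model(images)
    print(predictions)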