
debug train line

RenLiqiang 5 months ago
parent commit e974ae5475

+ 5 - 3
models/line_detect/heads/head_losses.py

@@ -131,7 +131,7 @@ def line_points_to_heatmap(keypoints, rois, heatmap_size):
     x = keypoints[..., 0]
     y = keypoints[..., 1]
 
-    gs = generate_gaussian_heatmaps(x, y, heatmap_size, 1.0)
+    gs = generate_gaussian_heatmaps(x, y, heatmap_size,num_points=2, sigma=1.0)
     # show_heatmap(gs[0],'target')
     all_roi_heatmap = []
     for roi, heatmap in zip(rois, gs):
@@ -364,7 +364,7 @@ def lines_point_pair_loss(line_logits, proposals, gt_lines, line_matched_idxs):
     # type: (Tensor, List[Tensor], List[Tensor], List[Tensor]) -> Tensor
     N, K, H, W = line_logits.shape
     len_proposals = len(proposals)
-    print(f'lines_point_pair_loss line_logits.shape:{line_logits.shape},len_proposals:{len_proposals}')
+    print(f'lines_point_pair_loss line_logits.shape:{line_logits.shape},len_proposals:{len_proposals},line_matched_idxs:{line_matched_idxs}')
     if H != W:
         raise ValueError(
             f"line_logits height and width (last two elements of shape) should be equal. Instead got H = {H} and W = {W}"
@@ -402,7 +402,9 @@ def lines_point_pair_loss(line_logits, proposals, gt_lines, line_matched_idxs):
 
     # line_logits = line_logits.view(N * K, H * W)
     # print(f'line_logits[valid]:{line_logits[valid].shape}')
-    line_logits = line_logits.squeeze(1)
+    print(f'loss1 line_logits:{line_logits.shape}')
+    line_logits = line_logits[:,2,:,:]
+    print(f'loss2 line_logits:{line_logits.shape}')
 
     # line_loss = F.cross_entropy(line_logits[valid], line_targets[valid])
     line_loss = F.cross_entropy(line_logits, gs_heatmaps)
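Two things happen in this file: the Gaussian-target call now passes an explicit num_points=2 (one peak per line endpoint), and the loss picks channel 2 of the K-channel logits instead of squeezing a single channel. The repo's generate_gaussian_heatmaps is not shown in the diff; the following is only a minimal sketch of what a function with that signature could compute (the name gaussian_heatmap_sketch and all shapes are assumptions, not the repo's code):

import torch

def gaussian_heatmap_sketch(x, y, heatmap_size, num_points=2, sigma=1.0):
    # x, y: (N, num_points) endpoint coordinates already in heatmap space.
    ys = torch.arange(heatmap_size, dtype=torch.float32).view(-1, 1)  # (H, 1)
    xs = torch.arange(heatmap_size, dtype=torch.float32).view(1, -1)  # (1, W)
    heatmaps = []
    for xi, yi in zip(x, y):  # per line instance
        hm = torch.zeros(heatmap_size, heatmap_size)
        for p in range(num_points):  # one Gaussian peak per endpoint
            g = torch.exp(-((xs - xi[p]) ** 2 + (ys - yi[p]) ** 2) / (2 * sigma ** 2))
            hm = torch.maximum(hm, g)  # keep the strongest response per pixel
        heatmaps.append(hm)
    return torch.stack(heatmaps)  # (N, H, W)

x = torch.tensor([[3.0, 12.0]])  # one line, two endpoints
y = torch.tensor([[4.0, 10.0]])
print(gaussian_heatmap_sketch(x, y, heatmap_size=16).shape)  # torch.Size([1, 16, 16])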

+ 23 - 8
models/line_detect/line_dataset.py

@@ -80,23 +80,32 @@ class LineDataset(BaseDataset):
         target = {}
 
         target["image_id"] = torch.tensor(item)
-
-        target["boxes"], lines,target["points"], target["labels"] = get_boxes_lines(objs,shape)
+        boxes, lines, points, labels = get_boxes_lines(objs, shape)
+
+        if points is not None:
+            target["points"]=points
+        if lines is not None:
+            a = torch.full((lines.shape[0],), 2).unsqueeze(1)
+            lines = torch.cat((lines, a), dim=1)
+            target["lines"] = lines.to(torch.float32).view(-1, 2, 3)
+
+        target["boxes"]=boxes
+        target["labels"]=labels
+        # target["boxes"], lines,target["points"], target["labels"] = get_boxes_lines(objs,shape)
         # print(f'lines:{lines}')
         # target["labels"] = torch.ones(len(target["boxes"]), dtype=torch.int64)
         # print(f'target points:{target["points"]}')
 
-        a = torch.full((lines.shape[0],), 2).unsqueeze(1)
-        lines = torch.cat((lines, a), dim=1)
 
-        target["lines"] = lines.to(torch.float32).view(-1,2,3)
 
+        # target["lines"] = lines.to(torch.float32).view(-1,2,3)
 
+        print(f'')
 
         print(f'lines:{target["lines"].shape}')
         target["img_size"]=shape
 
-        validate_keypoints(lines, shape[0], shape[1])
+        # validate_keypoints(lines, shape[0], shape[1])
         return target
 
     def show(self, idx,show_type='all'):
@@ -186,10 +195,16 @@ def get_boxes_lines(objs,shape):
     boxes=torch.tensor(boxes)
     print(f'boxes:{boxes.shape}')
     labels=torch.tensor(labels)
-    points=torch.tensor(points)
+    if len(points)==0:
+        points=None
+    else:
+        points=torch.tensor(points)
     # print(f'read labels:{labels}')
     # print(f'read points:{points}')
-    line_point_pairs=torch.tensor(line_point_pairs)
+    if len(line_point_pairs)==0:
+        line_point_pairs=None
+    else:
+        line_point_pairs=torch.tensor(line_point_pairs)
 
     # print(f'boxes:{boxes.shape},line_point_pairs:{line_point_pairs.shape}')
     return boxes,line_point_pairs,points,labels
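The dataset change makes points and line annotations optional: get_boxes_lines now returns None for an empty list, and the target dict only gets a "points"/"lines" key when the annotation exists. Lines are stored COCO-keypoint style: a constant flag of 2 is appended to each endpoint row, then the flat tensor is reshaped to (N, 2, 3) = N lines x 2 endpoints x (x, y, flag). A minimal sketch of that assembly (shapes are inferred from the diff, not confirmed by the repo):

import torch

endpoints = torch.tensor([[10., 20.], [30., 40.],   # line 0
                          [ 5., 15.], [25., 35.]])  # line 1: flat (2N, 2)
vis = torch.full((endpoints.shape[0], 1), 2.0)      # constant visibility-style flag
lines = torch.cat((endpoints, vis), dim=1).to(torch.float32).view(-1, 2, 3)
print(lines.shape)  # torch.Size([2, 2, 3])

target = {}
if lines is not None:  # key is populated only when annotations exist
    target["lines"] = lines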

+ 68 - 35
models/line_detect/loi_heads.py

@@ -13,7 +13,7 @@ import libs.vision_libs.models.detection._utils as det_utils
 from collections import OrderedDict
 
 from models.line_detect.heads.head_losses import point_inference, compute_point_loss, line_iou_loss, \
-    lines_point_pair_loss, features_align
+    lines_point_pair_loss, features_align, line_inference
 
 
 def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
@@ -902,12 +902,6 @@ class RoIHeads(nn.Module):
 
             print(f'line_proposals:{len(line_proposals)}')
 
-            # line_features = self.line_roi_pool(features, line_proposals, image_shapes)
-
-            # print(f'line_features from line_roi_pool:{line_features.shape}')
-            #(b,256,512,512)
-            # cs_features = self.channel_compress(features['0'])
-            #(b.8,512,512)
             cs_features= features['0']
 
 
@@ -933,45 +927,74 @@ class RoIHeads(nn.Module):
             point_proposals_tensor=torch.cat(point_proposals)
             print(f'point_proposals_tensor:{point_proposals_tensor.shape}')
 
-            line_features=None
+            # line_features=None
 
             feature_logits = self.line_predictor(cs_features)
             print(f'feature_logits from line_predictor:{feature_logits.shape}')
 
             point_features = features_align(feature_logits, point_proposals, image_shapes)
-            print(f'feature_logits  features_align:{point_features.shape}')
-            feature_logits=point_features
+
+
+
+            line_features = features_align(feature_logits, line_proposals, image_shapes)
+
+            if line_features is not None:
+                print(f'line_features from align:{line_features.shape}')
+
+            if point_features is not None:
+                print(f'feature_logits  features_align:{point_features.shape}')
+            # feature_logits=point_features
 
             # line_logits = combine_features
             # print(f'line_logits:{line_logits.shape}')
 
-            loss_line = {}
-            loss_line_iou = {}
-            loss_point = {}
+            loss_line = None
+            loss_line_iou =None
+            loss_point = None
+
+
             if self.training:
 
                 if targets is None or pos_matched_idxs is None:
                     raise ValueError("both targets and pos_matched_idxs should not be None when in training mode")
 
-                gt_lines = [t["lines"] for t in targets]
-                gt_points = [t["points"] for t in targets]
+                gt_lines = [t["lines"] for t in targets if "lines" in t]
+                gt_points = [t["points"] for t in targets if "points" in t]
+                #
+                # line_pos_matched_idxs = []
+                # point_pos_matched_idxs = []
+
+
+
+
                 print(f'gt_lines:{gt_lines[0].shape}')
                 h, w = targets[0]["img_size"]
                 img_size = h
 
-                gt_lines_tensor=torch.cat(gt_lines)
-                gt_points_tensor = torch.cat(gt_points)
-                print(f'gt_lines_tensor:{gt_lines_tensor.shape}')
-                print(f'gt_points_tensor:{gt_points_tensor.shape}')
-                if gt_lines_tensor.shape[0]>0  and line_features is not None:
+                gt_lines_tensor=torch.zeros(0,0)
+                gt_points_tensor=torch.zeros(0,0)
+                if len(gt_lines)>0:
+                    gt_lines_tensor = torch.cat(gt_lines)
+                    print(f'gt_lines_tensor:{gt_lines_tensor.shape}')
+
+                if len(gt_points)>0:
+                    gt_points_tensor = torch.cat(gt_points)
+                    print(f'gt_points_tensor:{gt_points_tensor.shape}')
+
+
+
+
+                if gt_lines_tensor.shape[0]>0 :
+                    print(f'start to lines_point_pair_loss')
                     loss_line = lines_point_pair_loss(
-                        feature_logits, line_proposals, gt_lines, line_pos_matched_idxs
+                        line_features, line_proposals, gt_lines, line_pos_matched_idxs
                     )
-                    loss_line_iou = line_iou_loss(feature_logits, line_proposals, gt_lines, line_pos_matched_idxs, img_size)
+                    loss_line_iou = line_iou_loss(line_features, line_proposals, gt_lines, line_pos_matched_idxs, img_size)
 
-                if gt_points_tensor.shape[0]>0 and point_features is not None:
+                if gt_points_tensor.shape[0]>0 :
+                    print(f'start to compute_point_loss ')
                     loss_point = compute_point_loss(
-                        feature_logits, point_proposals, gt_points, point_pos_matched_idxs
+                        point_features, point_proposals, gt_points, point_pos_matched_idxs
                     )
 
                 if not loss_line:
@@ -982,7 +1005,11 @@ class RoIHeads(nn.Module):
 
                 loss_line = {"loss_line": loss_line}
                 loss_line_iou = {'loss_line_iou': loss_line_iou}
-                loss_point = {"loss_point": loss_point}
+
+                if loss_point is None:
+                    loss_point = {"loss_point": torch.tensor(0.0,device=feature_logits.device)}
+                else:
+                    loss_point = {"loss_point": loss_point}
 
             else:
                 if targets is not None:
@@ -994,18 +1021,20 @@ class RoIHeads(nn.Module):
                     gt_lines_tensor = torch.cat(gt_lines)
                     gt_points_tensor = torch.cat(gt_points)
 
+                    line_pos_matched_idxs = []
+                    point_pos_matched_idxs = []
 
 
                     if gt_lines_tensor.shape[0] > 0 and line_features is not None:
                         loss_line = lines_point_pair_loss(
-                            feature_logits, line_proposals, gt_lines, line_pos_matched_idxs
+                            line_features, line_proposals, gt_lines, line_pos_matched_idxs
                         )
-                        loss_line_iou = line_iou_loss(feature_logits, line_proposals, gt_lines, line_pos_matched_idxs,
+                        loss_line_iou = line_iou_loss(line_features , line_proposals, gt_lines, line_pos_matched_idxs,
                                                       img_size)
 
                     if gt_points_tensor.shape[0] > 0 and point_features is not None:
                         loss_point = compute_point_loss(
-                            feature_logits, point_proposals, gt_points, point_pos_matched_idxs
+                            point_features, point_proposals, gt_points, point_pos_matched_idxs
                         )
 
                     if not loss_line :
@@ -1019,7 +1048,11 @@ class RoIHeads(nn.Module):
 
                     loss_line = {"loss_line": loss_line}
                     loss_line_iou = {'loss_line_iou': loss_line_iou}
-                    loss_point={"loss_point":loss_point}
+                    
+                    if loss_point is None:
+                        loss_point = {"loss_point": torch.tensor(0.0, device=feature_logits.device)}
+                    else:
+                        loss_point = {"loss_point": loss_point}
 
 
 
@@ -1029,14 +1062,14 @@ class RoIHeads(nn.Module):
                             "both keypoint_logits and keypoint_proposals should not be None when not in training mode"
                         )
 
-                    # if line_features is not None:
-                    #     lines_probs, lines_scores = line_inference(combine_features,line_proposals)
-                    #     for keypoint_prob, kps, r in zip(lines_probs, lines_scores, result):
-                    #         r["lines"] = keypoint_prob
-                    #         r["liness_scores"] = kps
+                    if line_features is not None:
+                        lines_probs, lines_scores = line_inference(line_features,line_proposals)
+                        for keypoint_prob, kps, r in zip(lines_probs, lines_scores, result):
+                            r["lines"] = keypoint_prob
+                            r["liness_scores"] = kps
 
                     if point_features is not None:
-                        point_probs, points_scores=point_inference(feature_logits, point_proposals, )
+                        point_probs, points_scores=point_inference(point_features, point_proposals, )
                         for  points, ps, r in zip(point_probs,points_scores, result):
                             print(f'points_prob :{points.shape}')
 

+ 1 - 1
models/line_detect/train.yaml

@@ -1,6 +1,6 @@
 io:
   logdir: train_results
-  datadir: \\192.168.50.222/share/rlq/datasets/singepoint_Dataset0709_2
+  datadir: \\192.168.50.222/share/zjh/Dataset_correct_xanylabel
   data_type: rgb
 #  datadir: D:\python\PycharmProjects\data_20250223\0423_
 #  datadir: I:\datasets\wirenet_1000

+ 2 - 2
models/line_detect/trainer.py

@@ -315,13 +315,13 @@ class Trainer(BaseTrainer):
             targets = self.move_to_device(targets, device)
             if phase== 'val':
                 result,loss_dict = model(imgs, targets)
-                losses = sum(loss_dict.values()) if loss_dict else torch.tensor(0.0, device=device)
+                losses = sum(loss_dict.values())
 
                 print(f'val losses:{losses}')
                 # print(f'val result:{result}')
             else:
                 loss_dict = model(imgs, targets)
-                losses = sum(loss_dict.values()) if loss_dict else torch.tensor(0.0, device=device)
+                losses = sum(loss_dict.values())
                 print(f'train losses:{losses}')
 
             # loss = _loss(losses)
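This trainer change depends on the zero backfill above: because the RoI heads now always return a complete loss dict, the empty-dict fallback around sum() is no longer needed. A minimal illustration (loss values are made up):

import torch

loss_dict = {"loss_line": torch.tensor(1.2),
             "loss_line_iou": torch.tensor(0.3),
             "loss_point": torch.tensor(0.0)}  # backfilled zero, key always present
losses = sum(loss_dict.values())               # tensor(1.5000)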