line_dataset.py

from torch.utils.data.dataset import T_co
from libs.vision_libs.utils import draw_keypoints
from models.base.base_dataset import BaseDataset
import json
import os
import PIL.Image
import matplotlib as mpl
from torchvision.utils import draw_bounding_boxes
import torchvision.transforms.v2 as transforms
import torch
import matplotlib.pyplot as plt
from models.base.transforms import get_transforms


def validate_keypoints(keypoints, image_width, image_height):
    """Raise if any (x, y, v) keypoint falls outside the image bounds."""
    for kp in keypoints:
        x, y, v = kp
        if not (0 <= x < image_width and 0 <= y < image_height):
            raise ValueError(f"Keypoint ({x}, {y}) is out of bounds for image size ({image_width}, {image_height})")
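

# Annotation layout expected by this dataset, sketched from what get_boxes_lines() reads.
# Only "shapes", "label", "points" and the arc "xmin"/"xmax"/"ymin"/"ymax" fields are
# actually accessed; everything else shown here is illustrative, not guaranteed by the
# labelling tool:
#
# {
#   "shapes": [
#     {"label": "line",  "points": [[x1, y1], [x2, y2]]},
#     {"label": "point", "points": [[x, y]]},
#     {"label": "arc",   "points": [[x, y], ...],
#      "xmin": ..., "xmax": ..., "ymin": ..., "ymax": ...}
#   ]
# }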
  18. """
  19. 直接读取xanlabel标注的数据集json格式
  20. """
  21. class LineDataset(BaseDataset):

    def __init__(self, dataset_path, data_type, transforms=None, augmentation=False,
                 dataset_type=None, img_type='rgb', target_type='pixel'):
        super().__init__(dataset_path)
        self.data_path = dataset_path
        self.data_type = data_type
        print(f'data_path:{dataset_path}')
        self.transforms = transforms
        self.img_path = os.path.join(dataset_path, "images", dataset_type)
        self.lbl_path = os.path.join(dataset_path, "labels", dataset_type)
        # Sort for a deterministic sample order across platforms.
        self.imgs = sorted(os.listdir(self.img_path))
        self.lbls = sorted(os.listdir(self.lbl_path))
        self.target_type = target_type
        self.img_type = img_type
        self.augmentation = augmentation
        print(f'augmentation:{augmentation}')

    def __getitem__(self, index) -> T_co:
        img_path = os.path.join(self.img_path, self.imgs[index])
        # The label file shares the image's base name, with a .json extension.
        lbl_path = os.path.join(self.lbl_path, os.path.splitext(self.imgs[index])[0] + '.json')
        img = PIL.Image.open(img_path).convert('RGB')
        w, h = img.size
        target = self.read_target(item=index, lbl_path=lbl_path, shape=(h, w))
        # Transforms are rebuilt on every call so the augmentation flag is always honoured.
        self.transforms = get_transforms(augmention=self.augmentation)
        img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        return len(self.imgs)

    def read_target(self, item, lbl_path, shape, extra=None):
        with open(lbl_path, 'r') as file:
            label_all = json.load(file)
        objs = label_all["shapes"]
        target = {}
        target["image_id"] = torch.tensor(item)
        boxes, lines, points, arc_mask, labels = get_boxes_lines(objs, shape)
        if points is not None:
            target["points"] = points
        if lines is not None:
            # Append a visibility flag of 2 to every endpoint, then group the endpoints in
            # pairs so that "lines" has shape (num_lines, 2, 3) as (x, y, visibility).
            a = torch.full((lines.shape[0],), 2).unsqueeze(1)
            lines = torch.cat((lines, a), dim=1)
            target["lines"] = lines.to(torch.float32).view(-1, 2, 3)
        if arc_mask is not None:
            target['arc_mask'] = arc_mask
            print('arc_mask dataset')
        else:
            print('not arc_mask dataset')
        target["boxes"] = boxes
        target["labels"] = labels
        target["img_size"] = shape
        return target
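
    # read_target() returns a dict with keys "image_id", "boxes", "labels" and "img_size",
    # plus "points", "lines" ((N, 2, 3) endpoints with a visibility flag) and "arc_mask"
    # when the corresponding annotations are present.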

    def show(self, idx, show_type='all'):
        image, target = self.__getitem__(idx)
        cmap = plt.get_cmap("jet")
        norm = mpl.colors.Normalize(vmin=0.4, vmax=1.0)
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])
        img = image
        if show_type == 'all':
            boxed_image = draw_bounding_boxes((img * 255).to(torch.uint8), target["boxes"],
                                              colors="yellow", width=1)
            keypoint_img = draw_keypoints(boxed_image, target['points'].unsqueeze(1), colors='red', width=3)
            plt.imshow(keypoint_img.permute(1, 2, 0).numpy())
            plt.show()
        if show_type == 'points':
            print(f"points:{target['points'].shape}")
            keypoint_img = draw_keypoints((img * 255).to(torch.uint8), target['points'].unsqueeze(1),
                                          colors='red', width=3)
            plt.imshow(keypoint_img.permute(1, 2, 0).numpy())
            plt.show()
        if show_type == 'boxes':
            boxed_image = draw_bounding_boxes((img * 255).to(torch.uint8), target["boxes"],
                                              colors="yellow", width=1)
            plt.imshow(boxed_image.permute(1, 2, 0).numpy())
            plt.show()

    def show_img(self, img_path):
        pass
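

# Label ids assigned below: 1 = point, 2 = line / dseam1, 3 = arc.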
def get_boxes_lines(objs, shape):
    boxes = []
    labels = []
    h, w = shape
    line_point_pairs = []
    points = []
    line_mask = []
    for obj in objs:
        label = obj['label']
        if label == 'line' or label == 'dseam1':
            # Store both endpoints and a bounding box padded by 6 px, clamped to the image.
            a, b = obj['points'][0], obj['points'][1]
            line_point_pairs.append(a)
            line_point_pairs.append(b)
            xmin = max(0, min(a[0], b[0]) - 6)
            xmax = min(w, max(a[0], b[0]) + 6)
            ymin = max(0, min(a[1], b[1]) - 6)
            ymax = min(h, max(a[1], b[1]) + 6)
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(2)
        elif label == 'point':
            # A point gets a 24 x 24 box centred on it, clamped to the image.
            p = obj['points'][0]
            xmin = max(0, p[0] - 12)
            xmax = min(w, p[0] + 12)
            ymin = max(0, p[1] - 12)
            ymax = min(h, p[1] + 12)
            points.append(p)
            labels.append(1)
            boxes.append([xmin, ymin, xmax, ymax])
        elif label == 'arc':
            # Arcs carry their own bounding box in the annotation; keep the raw polyline as a mask.
            line_mask.append(obj['points'])
            xmin = obj['xmin']
            xmax = obj['xmax']
            ymin = obj['ymin']
            ymax = obj['ymax']
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(3)
    boxes = torch.tensor(boxes)
    print(f'boxes:{boxes.shape}')
    labels = torch.tensor(labels, dtype=torch.int64)
    # Empty annotation groups are returned as None so callers can skip them.
    points = torch.tensor(points) if len(points) > 0 else None
    line_point_pairs = torch.tensor(line_point_pairs) if len(line_point_pairs) > 0 else None
    line_mask = torch.tensor(line_mask) if len(line_mask) > 0 else None
    return boxes, line_point_pairs, points, line_mask, labels


if __name__ == '__main__':
    path = r"\\192.168.50.222\share\rlq\datasets\Dataset0709_"
    dataset = LineDataset(dataset_path=path, dataset_type='train', augmentation=False, data_type='jpg')
    dataset.show(1, show_type='all')
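
    # A minimal batching sketch (not part of the original script): targets are dicts of
    # variable-sized tensors, so a collate_fn that keeps samples in per-sample lists is assumed.
    # from torch.utils.data import DataLoader
    # loader = DataLoader(dataset, batch_size=2, shuffle=True,
    #                     collate_fn=lambda batch: tuple(zip(*batch)))
    # images, targets = next(iter(loader))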