# dataset_LD.py
  1. from torch.utils.data.dataset import T_co
  2. from models.base.base_dataset import BaseDataset
  3. import glob
  4. import json
  5. import math
  6. import os
  7. import random
  8. import cv2
  9. import PIL
  10. import matplotlib.pyplot as plt
  11. import matplotlib as mpl
  12. from torchvision.utils import draw_bounding_boxes
  13. import numpy as np
  14. import numpy.linalg as LA
  15. import torch
  16. from skimage import io
  17. from torch.utils.data import Dataset
  18. from torch.utils.data.dataloader import default_collate
  19. import matplotlib.pyplot as plt
  20. from models.dataset_tool import line_boxes, read_masks_from_txt_wire, read_masks_from_pixels_wire, adjacency_matrix
  21. from tools.presets import DetectionPresetTrain
  22. def line_boxes1(target):
  23. boxs = []
  24. lines = target.cpu().numpy()
  25. if len(lines) > 0 and not (lines[0] == 0).all():
  26. for i, ((a, b)) in enumerate(lines):
  27. if i > 0 and (lines[i] == lines[0]).all():
  28. break
  29. if a[1] > b[1]:
  30. ymax = a[1] + 10
  31. ymin = b[1] - 10
  32. else:
  33. ymin = a[1] - 10
  34. ymax = b[1] + 10
  35. if a[0] > b[0]:
  36. xmax = a[0] + 10
  37. xmin = b[0] - 10
  38. else:
  39. xmin = a[0] - 10
  40. xmax = b[0] + 10
  41. boxs.append([ymin, xmin, ymax, xmax])
  42. # if boxs == []:
  43. # print(target)
  44. return torch.tensor(boxs)
  45. class WirePointDataset(BaseDataset):
  46. def __init__(self, dataset_path, transforms=None, dataset_type=None, target_type='pixel'):
  47. super().__init__(dataset_path)
  48. self.data_path = dataset_path
  49. print(f'data_path:{dataset_path}')
  50. self.transforms = transforms
  51. self.img_path = os.path.join(dataset_path, "images/" + dataset_type)
  52. self.lbl_path = os.path.join(dataset_path, "labels/" + dataset_type)
  53. self.imgs = os.listdir(self.img_path)
  54. self.lbls = os.listdir(self.lbl_path)
  55. self.target_type = target_type
  56. # self.default_transform = DefaultTransform()
  57. def __getitem__(self, index) -> T_co:
  58. img_path = os.path.join(self.img_path, self.imgs[index])
  59. lbl_path = os.path.join(self.lbl_path, self.imgs[index][:-3] + 'json')
  60. img = PIL.Image.open(img_path).convert('RGB')
  61. w, h = img.size
  62. # wire_labels, target = self.read_target(item=index, lbl_path=lbl_path, shape=(h, w))
  63. target = self.read_target(item=index, lbl_path=lbl_path, shape=(h, w))
  64. if self.transforms:
  65. img, target = self.transforms(img, target)
  66. else:
  67. img = self.default_transform(img)
  68. # new_channel = torch.zeros(1, 512, 512)
  69. # img=torch.cat((img,new_channel),dim=0)
  70. # print(f'img:{img.shape}')
  71. return img, target
  72. def __len__(self):
  73. return len(self.imgs)
  74. def read_target(self, item, lbl_path, shape, extra=None):
  75. # print(f'lbl_path:{lbl_path}')
  76. with open(lbl_path, 'r') as file:
  77. lable_all = json.load(file)
  78. n_stc_posl = 300
  79. n_stc_negl = 40
  80. use_cood = 0
  81. use_slop = 0
  82. wire = lable_all["wires"][0] # ??
  83. line_pos_coords = np.random.permutation(wire["line_pos_coords"]["content"])[: n_stc_posl] # ?????????
  84. line_neg_coords = np.random.permutation(wire["line_neg_coords"]["content"])[: n_stc_negl]
  85. npos, nneg = len(line_pos_coords), len(line_neg_coords)
  86. lpre = np.concatenate([line_pos_coords, line_neg_coords], 0) # ??????????
  87. for i in range(len(lpre)):
  88. if random.random() > 0.5:
  89. lpre[i] = lpre[i, ::-1]
  90. ldir = lpre[:, 0, :2] - lpre[:, 1, :2]
  91. ldir /= np.clip(LA.norm(ldir, axis=1, keepdims=True), 1e-6, None)
  92. feat = [
  93. lpre[:, :, :2].reshape(-1, 4) / 512 * use_cood,
  94. ldir * use_slop,
  95. lpre[:, :, 2],
  96. ]
  97. feat = np.concatenate(feat, 1)
  98. wire_labels = {
  99. "junc_coords": torch.tensor(wire["junc_coords"]["content"])[:, :2],
  100. "jtyp": torch.tensor(wire["junc_coords"]["content"])[:, 2].byte(),
  101. "line_pos_idx": adjacency_matrix(len(wire["junc_coords"]["content"]), wire["line_pos_idx"]["content"]),
  102. # ???????????
  103. "line_neg_idx": adjacency_matrix(len(wire["junc_coords"]["content"]), wire["line_neg_idx"]["content"]),
  104. # ??????????
  105. "lpre": torch.tensor(lpre)[:, :, :2],
  106. "lpre_label": torch.cat([torch.ones(npos), torch.zeros(nneg)]), # ?????? 1?0
  107. "lpre_feat": torch.from_numpy(feat),
  108. "junc_map": torch.tensor(wire['junc_map']["content"]),
  109. "junc_offset": torch.tensor(wire['junc_offset']["content"]),
  110. "line_map": torch.tensor(wire['line_map']["content"]),
  111. }
  112. labels = []
  113. #
  114. # if self.target_type == 'polygon':
  115. # labels, masks = read_masks_from_txt_wire(lbl_path, shape)
  116. # elif self.target_type == 'pixel':
  117. # labels = read_masks_from_pixels_wire(lbl_path, shape)
  118. # print(torch.stack(masks).shape) # [???, 512, 512]
  119. target = {}
  120. # target["labels"] = torch.stack(labels)
  121. target["image_id"] = torch.tensor(item)
  122. # return wire_labels, target
  123. target["wires"] = wire_labels
  124. # target["boxes"] = line_boxes(target)
  125. target["boxes"] = line_boxes1(torch.tensor(wire["line_pos_coords"]["content"]))
  126. target["labels"]= torch.ones(len(target["boxes"]),dtype=torch.int64)
  127. # print(f'target["labels"]:{ target["labels"]}')
  128. # print(f'boxes:{target["boxes"].shape}')
  129. if target["boxes"].numel() == 0:
  130. print("Tensor is empty")
  131. print(f'path:{lbl_path}')
  132. return target
  133. def show(self, idx):
  134. image, target = self.__getitem__(idx)
  135. cmap = plt.get_cmap("jet")
  136. norm = mpl.colors.Normalize(vmin=0.4, vmax=1.0)
  137. sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
  138. sm.set_array([])
  139. def imshow(im):
  140. plt.close()
  141. plt.tight_layout()
  142. plt.imshow(im)
  143. plt.colorbar(sm, fraction=0.046)
  144. plt.xlim([0, im.shape[0]])
  145. plt.ylim([im.shape[0], 0])
  146. def draw_vecl(lines, sline, juncs, junts, fn=None):
  147. img_path = os.path.join(self.img_path, self.imgs[idx])
  148. imshow(io.imread(img_path))
  149. if len(lines) > 0 and not (lines[0] == 0).all():
  150. for i, ((a, b), s) in enumerate(zip(lines, sline)):
  151. if i > 0 and (lines[i] == lines[0]).all():
  152. break
  153. plt.plot([a[1], b[1]], [a[0], b[0]], c="red", linewidth=1) # a[1], b[1]?????
  154. if not (juncs[0] == 0).all():
  155. for i, j in enumerate(juncs):
  156. if i > 0 and (i == juncs[0]).all():
  157. break
  158. plt.scatter(j[1], j[0], c="red", s=2, zorder=100) # ? s=64
  159. img_path = os.path.join(self.img_path, self.imgs[idx])
  160. img = PIL.Image.open(img_path).convert('RGB')
  161. boxed_image = draw_bounding_boxes((self.default_transform(img) * 255).to(torch.uint8), target["boxes"],
  162. colors="yellow", width=1)
  163. plt.imshow(boxed_image.permute(1, 2, 0).numpy())
  164. plt.show()
  165. plt.show()
  166. if fn != None:
  167. plt.savefig(fn)
  168. junc = target['wires']['junc_coords'].cpu().numpy()
  169. jtyp = target['wires']['jtyp'].cpu().numpy()
  170. juncs = junc[jtyp == 0]
  171. junts = junc[jtyp == 1]
  172. lpre = target['wires']["lpre"].cpu().numpy()
  173. vecl_target = target['wires']["lpre_label"].cpu().numpy()
  174. lpre = lpre[vecl_target == 1]
  175. # draw_vecl(lpre, np.ones(lpre.shape[0]), juncs, junts, save_path)
  176. draw_vecl(lpre, np.ones(lpre.shape[0]), juncs, junts)
  177. def show_img(self, img_path):
  178. pass