# line_dataset.py

import cv2
import imageio
import numpy as np
from skimage.draw import ellipse
from torch.utils.data.dataset import T_co
from libs.vision_libs.utils import draw_keypoints
from models.base.base_dataset import BaseDataset
import json
import os
import PIL
import matplotlib as mpl
from torchvision.utils import draw_bounding_boxes
import torchvision.transforms.v2 as transforms
import torch
import matplotlib.pyplot as plt
from models.base.transforms import get_transforms
from utils.data_process.mask.show_mask import save_full_mask
from utils.data_process.show_prams import print_params


def validate_keypoints(keypoints, image_width, image_height):
    for kp in keypoints:
        x, y, v = kp
        if not (0 <= x < image_width and 0 <= y < image_height):
            raise ValueError(f"Key point ({x}, {y}) is out of bounds for image size ({image_width}, {image_height})")
  24. """
  25. 直接读取xanlabel标注的数据集json格式
  26. """


class LineDataset(BaseDataset):
    def __init__(self, dataset_path, data_type, transforms=None, augmentation=False, dataset_type=None, img_type='rgb',
                 target_type='pixel'):
        super().__init__(dataset_path)
        self.data_path = dataset_path
        self.data_type = data_type
        print(f'data_path:{dataset_path}')
        self.transforms = transforms
        self.img_path = os.path.join(dataset_path, "images/" + dataset_type)
        self.lbl_path = os.path.join(dataset_path, "labels/" + dataset_type)
        self.imgs = os.listdir(self.img_path)
        self.lbls = os.listdir(self.lbl_path)
        self.target_type = target_type
        self.img_type = img_type
        self.augmentation = augmentation
        print(f'augmentation:{augmentation}')
        # self.default_transform = DefaultTransform()

    def __getitem__(self, index) -> T_co:
        img_path = os.path.join(self.img_path, self.imgs[index])
        if self.data_type == 'tiff':
            lbl_path = os.path.join(self.lbl_path, self.imgs[index][:-4] + 'json')
            img = imageio.v3.imread(img_path)[:, :, 0]
            print(f'img shape:{img.shape}')
            h, w = img.shape[:2]
            img = img.reshape(h, w, 1)
            # Put the single channel into the third channel of an HxWx3 array.
            img_3channel = np.zeros((h, w, 3), dtype=img.dtype)
            img_3channel[:, :, 2] = img[:, :, 0]
            img = torch.from_numpy(img_3channel).permute(2, 0, 1)  # HWC -> CHW
        else:
            lbl_path = os.path.join(self.lbl_path, self.imgs[index][:-3] + 'json')
            img = PIL.Image.open(img_path).convert('RGB')
            w, h = img.size
        # wire_labels, target = self.read_target(item=index, lbl_path=lbl_path, shape=(h, w))
        target = self.read_target(item=index, lbl_path=lbl_path, shape=(h, w))
        self.transforms = get_transforms(augmention=self.augmentation)
        img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        return len(self.imgs)

    def read_target(self, item, lbl_path, shape, extra=None):
        # print(f'shape:{shape}')
        # print(f'lbl_path:{lbl_path}')
        with open(lbl_path, 'r') as file:
            label_all = json.load(file)
        objs = label_all["shapes"]
        point_pairs = objs[0]['points']
        # print(f'point_pairs:{point_pairs}')
        target = {}
        target["image_id"] = torch.tensor(item)
        # boxes, line_point_pairs, points, labels, mask_ends, mask_params
        boxes, lines, points, labels, arc_ends, arc_params = get_boxes_lines(objs, shape)
        # print_params(arc_ends, arc_params)
        if points is not None:
            target["points"] = points
        # if lines is not None:
        #     a = torch.full((lines.shape[0],), 2).unsqueeze(1)
        #     lines = torch.cat((lines, a), dim=1)
        #     target["lines"] = lines.to(torch.float32).view(-1, 2, 3)
        if lines is not None:
            # Append the class label to both endpoints of every line: [N,2,2] + [N,2,1] -> [N,2,3].
            label_3d = labels.view(-1, 1, 1).expand(-1, 2, -1).to(lines.dtype)  # [N] -> [N,2,1]
            line1 = torch.cat([lines, label_3d], dim=-1)  # [N,2,3]
            target["lines"] = line1.to(torch.float32)
        if arc_ends is not None:
            target['mask_ends'] = arc_ends
        if arc_params is not None:
            target['mask_params'] = arc_params
            arc_angles = compute_arc_angles(arc_ends, arc_params)
            # print_params(arc_angles)
            arc_masks = []
            for i in range(len(arc_params)):
                arc_param_i = arc_params[i].view(-1)  # shape (5,)
                arc_angle_i = arc_angles[i].view(-1)  # shape (2,)
                arc7 = torch.cat([arc_param_i, arc_angle_i], dim=0)  # shape (7,)
                # print_params(arc7)
                mask = arc_to_mask(arc7, shape, line_width=1)
                arc_masks.append(mask)
                # arc7 = arc_params[i] + arc_angles[i].tolist()
                # arc_masks.append(arc_to_mask(arc7, shape, line_width=1))
            # print(f'circle_masks:{torch.stack(arc_masks, dim=0).shape}')
            target['circle_masks'] = torch.stack(arc_masks, dim=0)
            save_full_mask(target['circle_masks'], "arc_masks",
                           "/home/zhaoyinghan/py_ws/code/circle_huayan/MultiVisionModels/models/line_detect/out_feature_dataset")
        target["boxes"] = boxes
        target["labels"] = labels
        # target["boxes"], lines, target["points"], target["labels"] = get_boxes_lines(objs, shape)
        # print(f'lines:{lines}')
        # target["labels"] = torch.ones(len(target["boxes"]), dtype=torch.int64)
        # print(f'target points:{target["points"]}')
        # target["lines"] = lines.to(torch.float32).view(-1, 2, 3)
        # print(f'lines:{target["lines"].shape}')
        target["img_size"] = shape
        # validate_keypoints(lines, shape[0], shape[1])
        return target

    def show(self, idx, show_type='all'):
        image, target = self.__getitem__(idx)
        cmap = plt.get_cmap("jet")
        norm = mpl.colors.Normalize(vmin=0.4, vmax=1.0)
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])
        # img_path = os.path.join(self.img_path, self.imgs[idx])
        # print(f'boxes:{target["boxes"]}')
        img = image
        if show_type == 'circle_masks':
            boxed_image = draw_bounding_boxes((img * 255).to(torch.uint8), target["boxes"],
                                              colors="yellow", width=1)
            # arc = target['arc']
            arc_mask = target['circle_masks']
            # print(f'target circle:{arc.shape}')
            print(f'target circle_masks:{arc_mask.shape}')
            combined = torch.cat(list(arc_mask), dim=1)
            plt.imshow(combined)
            plt.show()
        if show_type == 'circle_masks11':
            boxed_image = draw_bounding_boxes((img * 255).to(torch.uint8), target["boxes"],
                                              colors="yellow", width=1)
            circle = target['circles']
            circle_mask = target['circle_masks']
            print(f'target circle:{circle.shape}')
            print(f'target circle_masks:{circle_mask.shape}')
            plt.imshow(circle_mask.squeeze(0))
            keypoint_img = draw_keypoints(boxed_image, circle, colors='red', width=3)
            # plt.imshow(keypoint_img.permute(1, 2, 0).numpy())
            plt.show()
        # if show_type == 'lines':
        #     keypoint_img = draw_keypoints((img * 255).to(torch.uint8), target['lines'], colors='red', width=3)
        #     plt.imshow(keypoint_img.permute(1, 2, 0).numpy())
        #     plt.show()
        if show_type == 'points':
            # print(f'points:{target["points"].shape}')
            keypoint_img = draw_keypoints((img * 255).to(torch.uint8), target['points'].unsqueeze(1), colors='red',
                                          width=3)
            plt.imshow(keypoint_img.permute(1, 2, 0).numpy())
            plt.show()
        if show_type == 'boxes':
            boxed_image = draw_bounding_boxes((img * 255).to(torch.uint8), target["boxes"],
                                              colors="yellow", width=1)
            plt.imshow(boxed_image.permute(1, 2, 0).numpy())
            plt.show()

    def show_img(self, img_path):
        pass
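

# Minimal usage sketch: because __getitem__ returns (image, dict-of-tensors)
# pairs whose targets differ in size per image, a DataLoader needs a collate_fn
# that keeps samples in lists instead of stacking them. The dataset path below
# is a placeholder.
def _example_dataloader():
    from torch.utils.data import DataLoader

    dataset = LineDataset(dataset_path='/path/to/dataset', dataset_type='train',
                          augmentation=False, data_type='jpg')
    loader = DataLoader(dataset, batch_size=2, shuffle=True,
                        collate_fn=lambda batch: tuple(zip(*batch)))
    images, targets = next(iter(loader))
    print(len(images), targets[0]['boxes'].shape)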


def draw_el(params):
    """Plot an ellipse given (xc, yc, a, b, angle_deg, phi1_deg, phi2_deg) and mark two points on it."""
    # Parse the ellipse parameters.
    if isinstance(params, torch.Tensor):
        params = params.cpu().numpy()
    x, y, a, b, q, q1, q2 = params
    theta = np.radians(q)
    phi1 = np.radians(q1)  # parametric angle of the first point
    phi2 = np.radians(q2)  # parametric angle of the second point
    # Sample points along the full ellipse.
    phi = np.linspace(0, 2 * np.pi, 500)
    x_ellipse = x + a * np.cos(phi) * np.cos(theta) - b * np.sin(phi) * np.sin(theta)
    y_ellipse = y + a * np.cos(phi) * np.sin(theta) + b * np.sin(phi) * np.cos(theta)

    # Convert a parametric angle to a point on the rotated ellipse.
    def param_to_point(phi, xc, yc, a, b, theta):
        x = xc + a * np.cos(phi) * np.cos(theta) - b * np.sin(phi) * np.sin(theta)
        y = yc + a * np.cos(phi) * np.sin(theta) + b * np.sin(phi) * np.cos(theta)
        return x, y

    P1 = param_to_point(phi1, x, y, a, b, theta)
    P2 = param_to_point(phi2, x, y, a, b, theta)
    # Create the figure (optionally show a background image of shape [H, W, C]).
    plt.figure(figsize=(10, 10))
    # plt.imshow(background_img)  # show the background image directly
    # Draw the ellipse, its centre, and the two marked points.
    plt.plot(x_ellipse, y_ellipse, 'b-', linewidth=2)
    plt.plot(x, y, 'ko', markersize=8)
    plt.plot(P1[0], P1[1], 'ro', markersize=10)
    plt.plot(P2[0], P2[1], 'go', markersize=10)
    plt.show()
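

# Minimal usage sketch for draw_el: angles are given in degrees, e.g. an
# axis-aligned ellipse centred at (100, 80) with semi-axes 60 and 30 and marked
# points at 0 and 90 degrees. The values are illustrative only.
def _example_draw_el():
    draw_el(torch.tensor([100.0, 80.0, 60.0, 30.0, 0.0, 0.0, 90.0]))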


def arc_to_mask(arc7, shape, line_width=1):
    """
    Generate a binary mask of an elliptical arc.
    Args:
        arc7: (xc, yc, a, b, theta, phi1, phi2) where
            xc, yc (float): ellipse centre
            a, b (float): semi-major and semi-minor axes (a >= b)
            theta (float): ellipse rotation angle (**radians**, counter-clockwise, w.r.t. the x axis)
            phi1, phi2 (float): start and end parametric angles (**radians**, in [0, 2*pi))
        shape: (H, W), height and width of the output mask
        line_width (int): arc line width in pixels
    Returns:
        mask (Tensor): [H, W], float32, values 0 or 255
    """
    # print_params(arc7)
    # An all-zero arc7 is the placeholder used for non-arc objects: return an empty mask.
    if torch.all(arc7 == 0):
        return torch.zeros(shape, dtype=torch.float32)
    xc, yc, a, b, theta, phi1, phi2 = arc7
    H, W = shape
    # Make sure phi1 -> phi2 runs in the positive direction (handles wrap-around past 2*pi).
    if phi2 < phi1:
        phi2 += 2 * np.pi
    # Sample the parametric angle densely enough to avoid gaps in the drawn arc.
    num_points = max(int(200 * abs(phi2 - phi1) / (2 * np.pi)), 10)
    phi = np.linspace(phi1, phi2, num_points)
    # Parametric equation of the ellipse in its local (unrotated) frame.
    x_local = a * np.cos(phi)
    y_local = b * np.sin(phi)
    # Apply rotation and translation.
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    x_rot = x_local * cos_t - y_local * sin_t + xc
    y_rot = x_local * sin_t + y_local * cos_t + yc
    # Convert to integer coordinates (OpenCV expects int32).
    points = np.stack([x_rot, y_rot], axis=1).astype(np.int32)
    # Create a blank image and draw the polyline without anti-aliasing so the mask stays strictly 0/255.
    img = np.zeros((H, W), dtype=np.uint8)
    cv2.polylines(img, [points], isClosed=False, color=255, thickness=line_width, lineType=cv2.LINE_8)
    return torch.from_numpy(img).float()  # [H, W], values: 0 or 255
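

# Minimal usage sketch for arc_to_mask: rasterise a quarter arc of an
# axis-aligned ellipse into a 128x128 mask. The numbers are illustrative only.
def _example_arc_to_mask():
    arc7 = torch.tensor([64.0, 64.0, 50.0, 25.0, 0.0, 0.0, np.pi / 2])
    mask = arc_to_mask(arc7, (128, 128), line_width=2)
    print(mask.shape, mask.max())  # torch.Size([128, 128]) tensor(255.)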


def compute_arc_angles(gt_mask_ends, gt_mask_params):
    """
    Given the endpoints of each arc and its ellipse parameters, compute the
    corresponding parametric angles phi (radians).
    Parameters:
        gt_mask_ends: per-object endpoint tensor (all zeros for non-arc objects)
        gt_mask_params: per-object ellipse parameters (xc, yc, a, b, theta)
    Returns:
        list of tensors, one per object, each holding two angles in [0, 2*pi)
    """
    # print_params(gt_mask_ends, gt_mask_params)
    results = []
    if not isinstance(gt_mask_params, torch.Tensor):
        gt_mask_params_tensor = torch.tensor(gt_mask_params, dtype=gt_mask_ends.dtype, device=gt_mask_ends.device)
    else:
        gt_mask_params_tensor = gt_mask_params.clone().detach().to(gt_mask_ends)
    for ends_img, params_img in zip(gt_mask_ends, gt_mask_params_tensor):
        # print(f'params_img:{params_img}')
        if torch.norm(params_img) < 1e-6:  # L2 norm near zero: placeholder entry, no arc
            results.append(torch.zeros(2, device=params_img.device, dtype=params_img.dtype))
            continue
        # NOTE: this unpacking assumes ends_img stores the endpoints as
        # [[x1, x2], [y1, y2]], so x and y are 2-vectors and both angles are
        # computed in one vectorised pass.
        x, y = ends_img
        xc, yc, a, b, theta = params_img
        # 1. Translate to the ellipse centre.
        dx = x - xc
        dy = y - yc
        # 2. Rotate by -theta into the ellipse's local frame.
        cos_t = torch.cos(theta)
        sin_t = torch.sin(theta)
        X = dx * cos_t + dy * sin_t
        Y = -dx * sin_t + dy * cos_t
        # 3. Normalise to the unit circle (divide by a and b).
        cos_phi = X / a
        sin_phi = Y / b
        # 4. atan2 recovers the angle with the correct quadrant.
        phi = torch.atan2(sin_phi, cos_phi)
        # 5. Map to [0, 2*pi).
        phi = torch.where(phi < 0, phi + 2 * torch.pi, phi)
        results.append(phi)
    return results
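

# Minimal usage sketch for compute_arc_angles, assuming the [[x1, x2], [y1, y2]]
# endpoint layout noted above: two endpoints of an axis-aligned ellipse
# (a=50, b=25) centred at (64, 64), lying at phi=0 and phi=pi/2.
def _example_compute_arc_angles():
    ends = torch.tensor([[[114.0, 64.0], [64.0, 89.0]]])    # [[x1, x2], [y1, y2]]
    params = torch.tensor([[64.0, 64.0, 50.0, 25.0, 0.0]])  # (xc, yc, a, b, theta)
    print(compute_arc_angles(ends, params))  # ~[tensor([0.0000, 1.5708])]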


def points_to_ellipse(points):
    """
    Estimate ellipse parameters from four points.
    :param points: Tensor of shape (4, 2) with four points on the ellipse
    :return: (cx, cy, r1, r2, orientation) where cx, cy is the centre, r1, r2 are the
             semi-major and semi-minor axes, and orientation is the ellipse angle (radians)
    """
    # Convert to a numpy array for the computation.
    pts = points.numpy()
    pts = pts.reshape(-1, 2)
    center = np.mean(pts, axis=0)
    # Least-squares fit of the general conic a*x^2 + b*x*y + c*y^2 + d*x + f*y + g = 1.
    A = np.hstack(
        [pts[:, 0:1] ** 2, pts[:, 0:1] * pts[:, 1:2], pts[:, 1:2] ** 2, pts[:, :2], np.ones((pts.shape[0], 1))])
    b = np.ones(pts.shape[0])
    x = np.linalg.lstsq(A, b, rcond=None)[0]
    # Closed-form axes from the conic coefficients, see https://en.wikipedia.org/wiki/Ellipse#General_ellipse
    # (computed but not used below; the returned radii come from the point-to-centre distances instead).
    a, b, c, d, f, g = x.ravel()
    numerator = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
    denominator1 = (b * b - a * c) * ((c - a) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    denominator2 = (b * b - a * c) * ((a - c) * np.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
    major_axis = np.sqrt(numerator / denominator1)
    minor_axis = np.sqrt(numerator / denominator2)
    distances = np.linalg.norm(pts - center, axis=1)
    long_axis_length = np.max(distances) * 2
    short_axis_length = np.min(distances) * 2
    orientation = np.arctan2(pts[1, 1] - pts[0, 1], pts[1, 0] - pts[0, 0])
    return center[0], center[1], long_axis_length / 2, short_axis_length / 2, orientation
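

# Minimal usage sketch for points_to_ellipse: four points on an axis-aligned
# ellipse centred at (10, 20) with semi-axes 8 and 4; expect a centre near
# (10, 20) and radii near 8 and 4 (the orientation follows the first two points).
def _example_points_to_ellipse():
    pts = torch.tensor([[18.0, 20.0], [10.0, 24.0], [2.0, 20.0], [10.0, 16.0]])
    print(points_to_ellipse(pts))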


def generate_ellipse_mask(shape, ellipse_params):
    """
    Generate an ellipse mask on an image of the given shape.
    :param shape: shape of the output mask (H, W)
    :param ellipse_params: ellipse parameters (cx, cy, rx, ry, orientation)
    :return: ellipse mask
    """
    cx, cy, rx, ry, orientation = ellipse_params
    img = np.zeros(shape, dtype=np.uint8)
    cx, cy, rx, ry = int(cx), int(cy), int(rx), int(ry)
    # skimage.draw.ellipse takes (row, col) order; pass the orientation as the rotation angle.
    rr, cc = ellipse(cy, cx, ry, rx, shape=shape, rotation=orientation)
    img[rr, cc] = 1
    return img
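

# Minimal usage sketch for generate_ellipse_mask: a filled, axis-aligned ellipse;
# the pixel count should be roughly pi * rx * ry (about 2500 here).
def _example_generate_ellipse_mask():
    mask = generate_ellipse_mask((128, 128), (64, 64, 40, 20, 0.0))
    print(mask.shape, mask.sum())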


def sort_points_clockwise(points):
    points = np.array(points)
    # Use the top-left-most point (smallest y, then smallest x) as the reference.
    top_left_idx = np.lexsort((points[:, 0], points[:, 1]))[0]
    reference_point = points[top_left_idx]

    def angle_to_reference(point):
        return np.arctan2(point[1] - reference_point[1], point[0] - reference_point[0])

    angles = np.apply_along_axis(angle_to_reference, 1, points)
    angles[angles < 0] += 2 * np.pi
    sorted_indices = np.argsort(angles)
    sorted_points = points[sorted_indices]
    return sorted_points.tolist()
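

# Minimal usage sketch for sort_points_clockwise: the corners of a unit square,
# ordered by angle around the top-left-most corner.
def _example_sort_points_clockwise():
    print(sort_points_clockwise([[1, 1], [0, 0], [1, 0], [0, 1]]))  # starts from [0, 0]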


def get_boxes_lines(objs, shape):
    boxes = []
    labels = []
    h, w = shape
    line_point_pairs = []
    points = []
    mask_ends = []
    mask_params = []
    for obj in objs:
        # plt.plot([a[1], b[1]], [a[0], b[0]], c="red", linewidth=1)
        # print(f"points:{obj['points']}")
        label = obj['label']
        if label == 'line' or label == 'dseam1':
            a, b = obj['points'][0], obj['points'][1]
            # line_point_pairs.append(a)
            # line_point_pairs.append(b)
            line_point_pairs.append([a, b])
            # Box around the segment, padded by 6 px and clipped to the image.
            xmin = max(0, (min(a[0], b[0]) - 6))
            xmax = min(w, (max(a[0], b[0]) + 6))
            ymin = max(0, (min(a[1], b[1]) - 6))
            ymax = min(h, (max(a[1], b[1]) + 6))
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(2)
            points.append([0.0, 0.0])
            mask_ends.append([[0, 0], [0, 0]])
            mask_params.append([0, 0, 0, 0, 0])
            # circle_4points.append([[0, 0], [0, 0], [0, 0], [0, 0]])
        elif label == 'point':
            p = obj['points'][0]
            # 24x24 box centred on the point, clipped to the image.
            xmin = max(0, p[0] - 12)
            xmax = min(w, p[0] + 12)
            ymin = max(0, p[1] - 12)
            ymax = min(h, p[1] + 12)
            points.append(p)
            labels.append(1)
            boxes.append([xmin, ymin, xmax, ymax])
            line_point_pairs.append([[0, 0], [0, 0]])
            mask_ends.append([[0, 0], [0, 0]])
            mask_params.append([0, 0, 0, 0, 0])
            # circle_4points.append([[0, 0], [0, 0], [0, 0], [0, 0]])
        # elif label == 'arc':
        #     arc_points = obj['points']
        #     arc_params = obj['params']
        #     arc_ends = obj['ends']
        #     line_mask.append(arc_points)
        #     mask_ends.append(arc_ends)
        #     mask_params.append(arc_params)
        #
        #     xs = [p[0] for p in arc_points]
        #     ys = [p[1] for p in arc_points]
        #     xmin, xmax = min(xs), max(xs)
        #     ymin, ymax = min(ys), max(ys)
        #
        #     boxes.append([xmin, ymin, xmax, ymax])
        #     labels.append(torch.tensor(3))
        #
        #     points.append(torch.tensor([0.0]))
        #     line_point_pairs.append([[0, 0], [0, 0]])
        #     circle_4points.append([[0, 0], [0, 0], [0, 0], [0, 0]])
        elif label == 'arc':
            arc_params = obj['params']
            arc_ends = obj['ends']
            mask_ends.append(arc_ends)
            mask_params.append(arc_params)
            arc3points = obj['points']
            # Box around the annotated arc points, padded by 40 px and clipped to the image.
            xs = [p[0] for p in arc3points]
            ys = [p[1] for p in arc3points]
            xmin_raw = min(xs)
            xmax_raw = max(xs)
            ymin_raw = min(ys)
            ymax_raw = max(ys)
            xmin = max(xmin_raw - 40, 0)
            xmax = min(xmax_raw + 40, w)
            ymin = max(ymin_raw - 40, 0)
            ymax = min(ymax_raw + 40, h)
            boxes.append([xmin, ymin, xmax, ymax])
            labels.append(4)
            points.append([0.0, 0.0])
            line_point_pairs.append([[0, 0], [0, 0]])
    boxes = torch.tensor(boxes, dtype=torch.float32)
    print(f'boxes:{boxes.shape}')
    labels = torch.tensor(labels, dtype=torch.int64)
    if points:
        points = torch.tensor(points, dtype=torch.float32)
    else:
        points = None
    if line_point_pairs:
        line_point_pairs = torch.tensor(line_point_pairs, dtype=torch.float32)
    else:
        line_point_pairs = None
    if mask_ends:
        mask_ends = torch.tensor(mask_ends, dtype=torch.float32)
    else:
        mask_ends = None
    if mask_params:
        mask_params = torch.tensor(mask_params, dtype=torch.float32)
    else:
        mask_params = None
    return boxes, line_point_pairs, points, labels, mask_ends, mask_params
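

# Minimal usage sketch for get_boxes_lines: a hand-written "shapes" list in the
# layout the parser expects. The coordinates and the arc 'params'/'ends' values
# are illustrative assumptions, not taken from a real annotation file.
def _example_get_boxes_lines():
    objs = [
        {'label': 'line', 'points': [[10.0, 20.0], [60.0, 40.0]]},
        {'label': 'point', 'points': [[80.0, 90.0]]},
        {'label': 'arc',
         'points': [[100.0, 50.0], [120.0, 70.0], [100.0, 90.0]],
         'params': [100.0, 70.0, 20.0, 20.0, 0.0],
         'ends': [[100.0, 100.0], [50.0, 90.0]]},
    ]
    boxes, lines, points, labels, mask_ends, mask_params = get_boxes_lines(objs, (128, 128))
    print(boxes.shape, labels, lines.shape)  # torch.Size([3, 4]) tensor([2, 1, 4]) torch.Size([3, 2, 2])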


if __name__ == '__main__':
    path = r'/data/share/zyh/master_dataset/pokou/251115/a_dataset_pokou_mask'
    dataset = LineDataset(dataset_path=path, dataset_type='train', augmentation=False, data_type='jpg')
    dataset.show(9, show_type='circle_masks')