# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import numpy as np

from ..utils import LOGGER
from ..utils.ops import xywh2ltwh
from .basetrack import BaseTrack, TrackState
from .utils import matching
from .utils.kalman_filter import KalmanFilterXYAH


class STrack(BaseTrack):
    """
    Single object tracking representation that uses Kalman filtering for state estimation.

    This class is responsible for storing all the information regarding individual tracklets and performs state updates
    and predictions based on Kalman filter.

    Attributes:
        shared_kalman (KalmanFilterXYAH): Shared Kalman filter that is used across all STrack instances for prediction.
        _tlwh (np.ndarray): Private attribute to store top-left corner coordinates and width and height of bounding box.
        kalman_filter (KalmanFilterXYAH): Instance of Kalman filter used for this particular object track.
        mean (np.ndarray): Mean state estimate vector.
        covariance (np.ndarray): Covariance of state estimate.
        is_activated (bool): Boolean flag indicating if the track has been activated.
        score (float): Confidence score of the track.
        tracklet_len (int): Length of the tracklet.
        cls (Any): Class label for the object.
        idx (int): Index or identifier for the object.
        frame_id (int): Current frame ID.
        start_frame (int): Frame where the object was first detected.

    Methods:
        predict(): Predict the next state of the object using Kalman filter.
        multi_predict(stracks): Predict the next states for multiple tracks.
        multi_gmc(stracks, H): Update multiple track states using a homography matrix.
        activate(kalman_filter, frame_id): Activate a new tracklet.
        re_activate(new_track, frame_id, new_id): Reactivate a previously lost tracklet.
        update(new_track, frame_id): Update the state of a matched track.
        convert_coords(tlwh): Convert bounding box to x-y-aspect-height format.
        tlwh_to_xyah(tlwh): Convert tlwh bounding box to xyah format.

    Examples:
        Initialize and activate a new track
        >>> track = STrack(xywh=[100, 200, 50, 80, 0], score=0.9, cls="person")
        >>> track.activate(kalman_filter=KalmanFilterXYAH(), frame_id=1)
    """

    shared_kalman = KalmanFilterXYAH()

    def __init__(self, xywh, score, cls):
        """
        Initialize a new STrack instance.

        Args:
            xywh (List[float]): Bounding box coordinates and dimensions in the format (x, y, w, h, [a], idx), where
                (x, y) is the center, (w, h) are width and height, [a] is the optional box angle, and idx is the id.
            score (float): Confidence score of the detection.
            cls (Any): Class label for the detected object.

        Examples:
            >>> xywh = [100.0, 150.0, 50.0, 75.0, 1]
            >>> score = 0.9
            >>> cls = "person"
            >>> track = STrack(xywh, score, cls)
        """
        super().__init__()
        # xywh+idx or xywha+idx
        assert len(xywh) in {5, 6}, f"expected 5 or 6 values but got {len(xywh)}"
        self._tlwh = np.asarray(xywh2ltwh(xywh[:4]), dtype=np.float32)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0
        self.cls = cls
        self.idx = xywh[-1]
        self.angle = xywh[4] if len(xywh) == 6 else None

    def predict(self):
        """Predicts the next state (mean and covariance) of the object using the Kalman filter."""
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        """Perform multi-object predictive tracking using Kalman filter for the provided list of STrack instances."""
        if len(stracks) <= 0:
            return
        multi_mean = np.asarray([st.mean.copy() for st in stracks])
        multi_covariance = np.asarray([st.covariance for st in stracks])
        for i, st in enumerate(stracks):
            if st.state != TrackState.Tracked:
                multi_mean[i][7] = 0
        multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
        for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
            stracks[i].mean = mean
            stracks[i].covariance = cov

    @staticmethod
    def multi_gmc(stracks, H=np.eye(2, 3)):
        """Update the positions and covariances of multiple tracks using a homography matrix."""
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
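
            # H is the 2x3 warp from global motion compensation (see the `gmc` usage in BYTETracker.update):
            # R is its 2x2 rotation/scale block and t its translation. kron(I4, R) applies R to each
            # consecutive pair of the 8-D Kalman state [x, y, a, h, vx, vy, va, vh].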
            R = H[:2, :2]
            R8x8 = np.kron(np.eye(4, dtype=float), R)
            t = H[:2, 2]

            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                mean = R8x8.dot(mean)
                mean[:2] += t
                cov = R8x8.dot(cov).dot(R8x8.transpose())

                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Activate a new tracklet using the provided Kalman filter and initialize its state and covariance."""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        if frame_id == 1:
            self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        """Reactivates a previously lost track using new detection data and updates its state and attributes."""
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.convert_coords(new_track.tlwh)
        )
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()
        self.score = new_track.score
        self.cls = new_track.cls
        self.angle = new_track.angle
        self.idx = new_track.idx
    def update(self, new_track, frame_id):
        """
        Update the state of a matched track.

        Args:
            new_track (STrack): The new track containing updated information.
            frame_id (int): The ID of the current frame.

        Examples:
            Update the state of a track with new detection information
            >>> track = STrack([100, 200, 50, 80, 0], score=0.9, cls="person")
            >>> new_track = STrack([105, 205, 55, 85, 0], score=0.95, cls="person")
            >>> track.update(new_track, 2)
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.convert_coords(new_tlwh)
        )
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        self.cls = new_track.cls
        self.angle = new_track.angle
        self.idx = new_track.idx
    def convert_coords(self, tlwh):
        """Convert a bounding box's top-left-width-height format to its x-y-aspect-height equivalent."""
        return self.tlwh_to_xyah(tlwh)

    @property
    def tlwh(self):
        """Returns the bounding box in top-left-width-height format from the current state estimate."""
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    def xyxy(self):
        """Converts bounding box from (top left x, top left y, width, height) to (min x, min y, max x, max y) format."""
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    def tlwh_to_xyah(tlwh):
        """Convert bounding box from tlwh format to center-x-center-y-aspect-height (xyah) format."""
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
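        # Illustrative example: tlwh [10, 20, 40, 80] gives center (30, 60) and aspect
        # ratio 40 / 80 = 0.5, i.e. xyah [30, 60, 0.5, 80].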
        return ret

    @property
    def xywh(self):
        """Returns the current position of the bounding box in (center x, center y, width, height) format."""
        ret = np.asarray(self.tlwh).copy()
        ret[:2] += ret[2:] / 2
        return ret

    @property
    def xywha(self):
        """Returns position in (center x, center y, width, height, angle) format, warning if angle is missing."""
        if self.angle is None:
            LOGGER.warning("WARNING ⚠️ `angle` attr not found, returning `xywh` instead.")
            return self.xywh
        return np.concatenate([self.xywh, self.angle[None]])

    @property
    def result(self):
        """Returns the current tracking results in the appropriate bounding box format."""
        coords = self.xyxy if self.angle is None else self.xywha
        return coords.tolist() + [self.track_id, self.score, self.cls, self.idx]

    def __repr__(self):
        """Returns a string representation of the STrack object including start frame, end frame, and track ID."""
        return f"OT_{self.track_id}_({self.start_frame}-{self.end_frame})"
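

# Illustrative STrack lifecycle (comments only, not executed): a track is built from an
# (x, y, w, h, idx) detection, activated with a Kalman filter on its first frame, then
# predicted and updated on later frames. The values below are arbitrary example numbers.
#
#   kf = KalmanFilterXYAH()
#   track = STrack([100, 200, 50, 80, 0], score=0.9, cls=0)
#   track.activate(kf, frame_id=1)   # initializes mean/covariance and assigns a track_id
#   track.predict()                  # propagate the Kalman state to the next frame
#   det = STrack([102, 203, 50, 80, 0], score=0.92, cls=0)
#   track.update(det, frame_id=2)    # Kalman update with the newly matched detection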


class BYTETracker:
    """
    BYTETracker: A tracking algorithm built on top of YOLOv8 for object detection and tracking.

    Responsible for initializing, updating, and managing the tracks for detected objects in a video sequence.
    It maintains the state of tracked, lost, and removed tracks over frames, utilizes Kalman filtering for predicting
    the new object locations, and performs data association.

    Attributes:
        tracked_stracks (List[STrack]): List of successfully activated tracks.
        lost_stracks (List[STrack]): List of lost tracks.
        removed_stracks (List[STrack]): List of removed tracks.
        frame_id (int): The current frame ID.
        args (Namespace): Command-line arguments.
        max_time_lost (int): The maximum frames for a track to be considered as 'lost'.
        kalman_filter (KalmanFilterXYAH): Kalman Filter object.

    Methods:
        update(results, img=None): Updates object tracker with new detections.
        get_kalmanfilter(): Returns a Kalman filter object for tracking bounding boxes.
        init_track(dets, scores, cls, img=None): Initialize object tracking with detections.
        get_dists(tracks, detections): Calculates the distance between tracks and detections.
        multi_predict(tracks): Predicts the location of tracks.
        reset_id(): Resets the ID counter of STrack.
        joint_stracks(tlista, tlistb): Combines two lists of stracks.
        sub_stracks(tlista, tlistb): Filters out the stracks present in the second list from the first list.
        remove_duplicate_stracks(stracksa, stracksb): Removes duplicate stracks based on IoU.

    Examples:
        Initialize BYTETracker and update with detection results
        >>> tracker = BYTETracker(args, frame_rate=30)
        >>> results = yolo_model.detect(image)
        >>> tracked_objects = tracker.update(results)
    """

    def __init__(self, args, frame_rate=30):
        """
        Initialize a BYTETracker instance for object tracking.

        Args:
            args (Namespace): Command-line arguments containing tracking parameters.
            frame_rate (int): Frame rate of the video sequence.

        Examples:
            Initialize BYTETracker with command-line arguments and a frame rate of 30
            >>> args = Namespace(track_buffer=30)
            >>> tracker = BYTETracker(args, frame_rate=30)
        """
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.args = args
        self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer)
        self.kalman_filter = self.get_kalmanfilter()
        self.reset_id()

    def update(self, results, img=None):
        """Updates the tracker with new detections and returns the current list of tracked objects."""
        self.frame_id += 1
        activated_stracks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        scores = results.conf
        bboxes = results.xywhr if hasattr(results, "xywhr") else results.xywh
        # Add index
        bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)
        cls = results.cls
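
        # Split detections into a high-confidence set (score >= track_high_thresh) used for the
        # first association and a low-confidence set (between track_low_thresh and
        # track_high_thresh) kept for BYTE's second association.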
        remain_inds = scores >= self.args.track_high_thresh
        inds_low = scores > self.args.track_low_thresh
        inds_high = scores < self.args.track_high_thresh

        inds_second = inds_low & inds_high
        dets_second = bboxes[inds_second]
        dets = bboxes[remain_inds]
        scores_keep = scores[remain_inds]
        scores_second = scores[inds_second]
        cls_keep = cls[remain_inds]
        cls_second = cls[inds_second]

        detections = self.init_track(dets, scores_keep, cls_keep, img)
        # Add newly detected tracklets to tracked_stracks
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        # Step 2: First association, with high score detection boxes
        strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        self.multi_predict(strack_pool)
        if hasattr(self, "gmc") and img is not None:
            warp = self.gmc.apply(img, dets)
            STrack.multi_gmc(strack_pool, warp)
            STrack.multi_gmc(unconfirmed, warp)

        dists = self.get_dists(strack_pool, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_stracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        # Step 3: Second association, matching the remaining tracked stracks to the low score detections
        detections_second = self.init_track(dets_second, scores_second, cls_second, img)
        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
        # TODO
        dists = matching.iou_distance(r_tracked_stracks, detections_second)
        matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections_second[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_stracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        for it in u_track:
            track = r_tracked_stracks[it]
            if track.state != TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)
        # Deal with unconfirmed tracks, usually tracks with only one beginning frame
        detections = [detections[i] for i in u_detection]
        dists = self.get_dists(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_stracks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        # Step 4: Init new stracks
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.args.new_track_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_stracks.append(track)
        # Step 5: Update state
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
        self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_stracks)
        self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks)
        self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
        self.removed_stracks.extend(removed_stracks)
        if len(self.removed_stracks) > 1000:
            self.removed_stracks = self.removed_stracks[-999:]  # clip removed stracks to 1000 maximum
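
        # Each row of the returned array follows the layout built by `STrack.result`:
        #   [x1, y1, x2, y2, track_id, conf, cls, det_idx]       for axis-aligned boxes
        #   [xc, yc, w, h, angle, track_id, conf, cls, det_idx]  for rotated (OBB) boxes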
        return np.asarray([x.result for x in self.tracked_stracks if x.is_activated], dtype=np.float32)

    def get_kalmanfilter(self):
        """Returns a Kalman filter object for tracking bounding boxes using KalmanFilterXYAH."""
        return KalmanFilterXYAH()

    def init_track(self, dets, scores, cls, img=None):
        """Initializes object tracking with given detections, scores, and class labels using the STrack algorithm."""
        return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else []  # detections

    def get_dists(self, tracks, detections):
        """Calculates the distance between tracks and detections using IoU and optionally fuses scores."""
        dists = matching.iou_distance(tracks, detections)
        if self.args.fuse_score:
            dists = matching.fuse_score(dists, detections)
        return dists

    def multi_predict(self, tracks):
        """Predict the next states for multiple tracks using Kalman filter."""
        STrack.multi_predict(tracks)

    @staticmethod
    def reset_id():
        """Resets the ID counter for STrack instances to ensure unique track IDs across tracking sessions."""
        STrack.reset_id()

    def reset(self):
        """Resets the tracker by clearing all tracked, lost, and removed tracks and reinitializing the Kalman filter."""
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]
        self.frame_id = 0
        self.kalman_filter = self.get_kalmanfilter()
        self.reset_id()

    @staticmethod
    def joint_stracks(tlista, tlistb):
        """Combines two lists of STrack objects into a single list, ensuring no duplicates based on track IDs."""
        exists = {}
        res = []
        for t in tlista:
            exists[t.track_id] = 1
            res.append(t)
        for t in tlistb:
            tid = t.track_id
            if not exists.get(tid, 0):
                exists[tid] = 1
                res.append(t)
        return res

    @staticmethod
    def sub_stracks(tlista, tlistb):
        """Filters out the stracks present in the second list from the first list."""
        track_ids_b = {t.track_id for t in tlistb}
        return [t for t in tlista if t.track_id not in track_ids_b]

    @staticmethod
    def remove_duplicate_stracks(stracksa, stracksb):
        """Removes duplicate stracks from two lists based on Intersection over Union (IoU) distance."""
        pdist = matching.iou_distance(stracksa, stracksb)
        pairs = np.where(pdist < 0.15)
        dupa, dupb = [], []
        for p, q in zip(*pairs):
            timep = stracksa[p].frame_id - stracksa[p].start_frame
            timeq = stracksb[q].frame_id - stracksb[q].start_frame
            if timep > timeq:
                dupb.append(q)
            else:
                dupa.append(p)
        resa = [t for i, t in enumerate(stracksa) if i not in dupa]
        resb = [t for i, t in enumerate(stracksb) if i not in dupb]
        return resa, resb
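

# ---------------------------------------------------------------------------
# Usage sketch (not part of the library API): a minimal, hedged example of driving
# BYTETracker with hand-built detections. The SimpleNamespace fields mirror the
# tracker arguments referenced in __init__() and update(); `DummyResults` is a
# hypothetical stand-in for a detection result exposing `conf`, `xywh`, and `cls`
# as numpy arrays. Because of the relative imports above, run this as a module
# (e.g. `python -m ultralytics.trackers.byte_tracker`, assuming the standard
# package layout) rather than as a standalone script.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace

    class DummyResults:
        """Hypothetical detection container with the attributes BYTETracker.update reads."""

        def __init__(self, xywh, conf, cls):
            self.xywh = np.asarray(xywh, dtype=np.float32)  # (N, 4) boxes as (cx, cy, w, h)
            self.conf = np.asarray(conf, dtype=np.float32)  # (N,) confidence scores
            self.cls = np.asarray(cls, dtype=np.float32)  # (N,) class ids

    args = SimpleNamespace(
        track_high_thresh=0.5,  # minimum confidence for the first association
        track_low_thresh=0.1,  # minimum confidence for the second (low-score) association
        new_track_thresh=0.6,  # minimum confidence to start a brand-new track
        match_thresh=0.8,  # linear-assignment threshold for the first association
        track_buffer=30,  # frames to keep lost tracks before removal
        fuse_score=True,  # fuse detection scores into the IoU cost
    )
    tracker = BYTETracker(args, frame_rate=30)

    # Two frames with a single detection that drifts slightly; the same track_id should persist.
    for frame in (
        DummyResults([[100, 100, 40, 80]], [0.9], [0]),
        DummyResults([[104, 102, 40, 80]], [0.88], [0]),
    ):
        tracks = tracker.update(frame)
        print(tracks)  # rows: [x1, y1, x2, y2, track_id, conf, cls, det_idx]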