utils.py

import time

import torch

import utils  # MetricLogger / collate_fn helpers from torchvision's detection references
from coco_eval import CocoEvaluator
from coco_utils import get_coco_api_from_dataset
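
# `_get_iou_types` is used by evaluate() below but is not defined in this
# snippet. The sketch here mirrors the helper of the same name in
# torchvision's detection references (references/detection/engine.py); treat
# it as an assumption about the intended implementation.
import torchvision


def _get_iou_types(model):
    # Unwrap DistributedDataParallel to inspect the underlying model class.
    model_without_ddp = model
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model_without_ddp = model.module
    # Boxes are always evaluated; add masks/keypoints when the model predicts them.
    iou_types = ["bbox"]
    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
        iou_types.append("segm")
    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
        iou_types.append("keypoint")
    return iou_types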

@torch.no_grad()  # evaluation must not track gradients
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = "Test:"

    # Build the COCO ground-truth API once, plus an evaluator for every IoU
    # type (bbox/segm/keypoint) the model supports.
    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    print("start to evaluate!!!")
    for images, targets in metric_logger.log_every(data_loader, 10, header):
        images = list(img.to(device) for img in images)

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        # Move predictions to the CPU so COCO evaluation does not hold GPU memory.
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"]: output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
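

# Usage sketch (illustrative, with assumptions): drives evaluate() with a
# pretrained torchvision detection model. `dataset_test` is a placeholder for
# a CocoDetection-style test dataset, and utils.collate_fn is the batching
# helper from torchvision's detection references.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="DEFAULT")
    model.to(device)

    dataset_test = ...  # plug in your COCO-format test dataset here
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False, collate_fn=utils.collate_fn
    )
    evaluate(model, data_loader_test, device=device)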