@@ -180,30 +180,30 @@ def evaluate(model, data_loader, epoch, writer, device):
         model_time = time.time()
         outputs = model(images)

-        print(f'outputs:{outputs}')
+        # print(f'outputs:{outputs}')

         if batch_idx == 0:
             show_line(images[0], outputs[0], epoch, writer)

-        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
-        model_time = time.time() - model_time
-
-        res = {target["image_id"]: output for target, output in zip(targets, outputs)}
-        evaluator_time = time.time()
-        coco_evaluator.update(res)
-        evaluator_time = time.time() - evaluator_time
-        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
-
-    # gather the stats from all processes
-    metric_logger.synchronize_between_processes()
-    print("Averaged stats:", metric_logger)
-    coco_evaluator.synchronize_between_processes()
-
-    # accumulate predictions from all images
-    coco_evaluator.accumulate()
-    coco_evaluator.summarize()
-    torch.set_num_threads(n_threads)
-    return coco_evaluator
+        # outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
+        # # model_time = time.time() - model_time
+        # #
+        # # res = {target["image_id"]: output for target, output in zip(targets, outputs)}
+        # # evaluator_time = time.time()
+        # # coco_evaluator.update(res)
+        # # evaluator_time = time.time() - evaluator_time
+        # # metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
+        # #
+        # # # gather the stats from all processes
+        # # metric_logger.synchronize_between_processes()
+        # # print("Averaged stats:", metric_logger)
+        # # coco_evaluator.synchronize_between_processes()
+        # #
+        # # # accumulate predictions from all images
+        # # coco_evaluator.accumulate()
+        # # coco_evaluator.summarize()
+        # # torch.set_num_threads(n_threads)
+        # # return coco_evaluator


 def train_cfg(model, cfg):
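
# With the COCO bookkeeping commented out above, `evaluate` is effectively a
# visualization-only pass: run the model and render the first batch of each
# epoch to TensorBoard. Below is a minimal standalone sketch of that reduced
# loop, under assumptions: the loader yields (images, targets) pairs as in
# the torchvision detection references, and the repo's `show_line` plotting
# helper is passed in as an argument rather than imported.
import time

import torch


@torch.no_grad()
def evaluate_visual_only(model, data_loader, epoch, writer, device, show_line):
    model.eval()
    for batch_idx, (images, _targets) in enumerate(data_loader):
        images = [img.to(device) for img in images]

        # Time the forward pass, as the original loop did.
        model_time = time.time()
        outputs = model(images)
        model_time = time.time() - model_time

        # Mirror the `if batch_idx == 0` branch in the diff: draw predictions
        # for the first batch only, once per epoch.
        if batch_idx == 0:
            show_line(images[0], outputs[0], epoch, writer)
            # Assumed addition for illustration: surface the timing as well.
            writer.add_scalar('time/model_forward', model_time, epoch)
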
@@ -352,7 +352,15 @@ def get_transform(is_train, **kwargs):
 def write_metric_logs(epoch, metric_logger, writer):
     writer.add_scalar(f'loss_classifier:', metric_logger.meters['loss_classifier'].global_avg, epoch)
     writer.add_scalar(f'loss_box_reg:', metric_logger.meters['loss_box_reg'].global_avg, epoch)
-    writer.add_scalar(f'loss_mask:', metric_logger.meters['loss_mask'].global_avg, epoch)
+    # writer.add_scalar(f'loss_mask:', metric_logger.meters['loss_mask'].global_avg, epoch)
+    writer.add_scalar(f'loss_keypoint:', metric_logger.meters['loss_keypoint'].global_avg, epoch)
     writer.add_scalar(f'loss_objectness:', metric_logger.meters['loss_objectness'].global_avg, epoch)
     writer.add_scalar(f'loss_rpn_box_reg:', metric_logger.meters['loss_rpn_box_reg'].global_avg, epoch)
     writer.add_scalar(f'train loss:', metric_logger.meters['loss'].global_avg, epoch)
+
+# def log_losses_to_tensorboard(writer, result, step):
+#     writer.add_scalar('Loss/classifier', result['loss_classifier'].item(), step)
+#     writer.add_scalar('Loss/box_reg', result['loss_box_reg'].item(), step)
+#     writer.add_scalar('Loss/keypoint', result['loss_keypoint'].item(), step)
+#     writer.add_scalar('Loss/objectness', result['loss_objectness'].item(), step)
+#     writer.add_scalar('Loss/rpn_box_reg', result['loss_rpn_box_reg'].item(), step)
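
# A runnable sketch of the per-step helper kept in comments above, assuming
# torchvision-style loss-dict keys and a torch.utils.tensorboard
# SummaryWriter. The tag table and the key-existence check are additions for
# illustration: each loss term gets one unique tag, since two add_scalar
# calls against the same tag interleave into a single noisy curve.
from torch.utils.tensorboard import SummaryWriter


def log_losses_to_tensorboard(writer: SummaryWriter, result: dict, step: int) -> None:
    # One stable tag per loss term; terms absent from `result` (e.g. a model
    # without a keypoint head) are simply skipped.
    tags = {
        'loss_classifier': 'Loss/classifier',
        'loss_box_reg': 'Loss/box_reg',
        'loss_keypoint': 'Loss/keypoint',
        'loss_objectness': 'Loss/objectness',
        'loss_rpn_box_reg': 'Loss/rpn_box_reg',
    }
    for key, tag in tags.items():
        if key in result:
            writer.add_scalar(tag, result[key].item(), step)


# Typical use inside the training loop (names assumed):
#     log_losses_to_tensorboard(writer, loss_dict, global_step)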