From 2a561bcf01c52d75c4ae9e90b81dda2abe96a758 Mon Sep 17 00:00:00 2001
From: Ting-Jun Wang
Date: Thu, 22 Feb 2024 22:29:44 +0800
Subject: [PATCH] feat: log "success" in prediction file

Why:
The visualization tool needs to check whether the agent arrived at the target
viewpoint. That would require computing the distance between the GT viewpoint
and the predicted viewpoint, but this is hard to do without the simulator: the
visualization tool runs in a Jupyter notebook outside the Docker container, so
the simulator is not available there.

How:
After gathering the results from the env, we run eval_metrics() to compute the
success rate, FOUND score, etc. The per-item "success" value produced by
eval_metrics() is now written back into the prediction file, so the
visualization tool can read the "success" status directly from that file
(a notebook-side sketch follows after the diff).
---
 map_nav_src/reverie/env.py         | 14 +++++++++++++-
 map_nav_src/scripts/run_reverie.sh |  6 ++++--
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/map_nav_src/reverie/env.py b/map_nav_src/reverie/env.py
index 65e9848..7234ebc 100644
--- a/map_nav_src/reverie/env.py
+++ b/map_nav_src/reverie/env.py
@@ -370,7 +370,11 @@ class ReverieObjectNavBatch(object):
             assert len(goal_viewpoints) > 0, '%s_%s'%(scan, str(gt_objid))
 
         scores['success'] = float(path[-1] in goal_viewpoints)
-        scores['found_success'] = float(pred_found == gt_found)
+        if scores['success'] == 1.0:
+            scores['found_success'] = float(pred_found == gt_found)
+        else:
+            scores['found_success'] = 0.0
+
         scores['oracle_success'] = float(any(x in goal_viewpoints for x in path))
         scores['spl'] = scores['success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01)
         scores['sspl'] = scores['spl'] * scores['found_success']
@@ -385,6 +389,7 @@ class ReverieObjectNavBatch(object):
         print('eval %d predictions' % (len(preds)))
         print(preds[0])
+
         metrics = defaultdict(list)
         for item in preds:
             instr_id = item['instr_id']
@@ -393,7 +398,14 @@ class ReverieObjectNavBatch(object):
             scan, gt_traj, gt_objid = self.gt_trajs[instr_id]
             pred_found = item['found']
             gt_found = item['gt_found']
+
             traj_scores = self._eval_item(scan, traj, pred_objid, gt_traj, gt_objid, pred_found, gt_found)
+
+            # record "success" in the result file
+            # so the visualization tool can read the success status
+            item['success'] = traj_scores['success']
+
             for k, v in traj_scores.items():
                 metrics[k].append(v)
             metrics['instr_id'].append(instr_id)
diff --git a/map_nav_src/scripts/run_reverie.sh b/map_nav_src/scripts/run_reverie.sh
index 34f84bc..56b5f01 100644
--- a/map_nav_src/scripts/run_reverie.sh
+++ b/map_nav_src/scripts/run_reverie.sh
@@ -10,7 +10,7 @@ obj_ft_dim=768
 ngpus=1
 seed=0
 
-name=${train_alg}-${features}
+name=${train_alg}-${features}-advanced-adversarial
 name=${name}-seed.${seed} #-${ngpus}gpus
 
 outdir=${DATA_ROOT}/REVERIE/exprs_map/finetune/${name}
@@ -63,8 +63,10 @@ flag="--root_dir ${DATA_ROOT}
       # --tokenizer bert \
      # --bert_ckpt_file '../datasets/REVERIE/exprs_map/pretrain/cmt-vitbase-mlm.mrc.sap.og-init.lxmert-aug.speaker/ckpts/model_step_100000.pt' \
       # --eval_first
+
 # test
+echo /root/mount/Matterport3DSimulator/VLN-DUET/datasets/REVERIE/exprs_map/finetune/${name}/ckpts/best_val_unseen
 CUDA_VISIBLE_DEVICES='0' python3 reverie/main_nav_obj.py $flag \
       --tokenizer bert \
-      --resume_file /root/mount/Matterport3DSimulator/VLN-DUET/datasets/REVERIE/exprs_map/finetune/dagger-vitbase-seed.0/ckpts/best_val_unseen \
+      --resume_file /root/mount/Matterport3DSimulator/VLN-DUET/datasets/REVERIE/exprs_map/finetune/${name}/ckpts/best_val_unseen \
       --test --submit
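Notebook-side usage sketch (not part of the patch): once "success" is logged
per item, the visualization notebook can read it straight from the prediction
JSON without touching the simulator. The file path and the helper name below
are assumptions for illustration only; the "instr_id" and "success" keys are
the ones written by the patch above.

    # sketch: consume the per-item "success" flag in the visualization notebook
    import json

    # hypothetical path to the submitted prediction file; adjust to your setup
    pred_file = 'datasets/REVERIE/exprs_map/finetune/<experiment-name>/preds/submit_val_unseen.json'

    def load_success_by_instr(path):
        """Return {instr_id: success} read from the prediction file."""
        with open(path) as f:
            preds = json.load(f)
        # item['success'] is 1.0 if the predicted path ends at a goal viewpoint, else 0.0
        return {item['instr_id']: item.get('success', 0.0) for item in preds}

    success = load_success_by_instr(pred_file)
    print(sum(success.values()), 'of', len(success), 'episodes reached the target viewpoint')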