feat: log "success" in prediction file
Why: The visualization tool needs to check whether the agent arrived at the target viewpoint. That requires the distance between the GT viewpoint and the predicted viewpoint, which is hard to compute without the simulator (the visualization tool runs in a Jupyter notebook outside the docker container, so the simulator is unavailable).

How: After gathering the results from the env, we run eval_metrics() to compute the success rate, FOUND score, etc. We take the "success" value produced by eval_metrics() and log it in the prediction file, so the visualization tool can read the "success" status directly from that file.
parent 0135ab3ac8
commit 2a561bcf01
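
For context, a minimal sketch of the write side described in the message, assuming the predictions are serialized to JSON as in typical VLN-DUET submit runs (the helper name and output path here are hypothetical, not part of the commit):

import json

def dump_predictions(env, preds, out_path='submit_val_unseen.json'):
    # eval_metrics() mutates each item in preds, adding item['success']
    # (see the diff below), so the flag travels with the dumped file.
    env.eval_metrics(preds)
    with open(out_path, 'w') as f:
        json.dump(preds, f, indent=2)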
@@ -370,7 +370,11 @@ class ReverieObjectNavBatch(object):
        assert len(goal_viewpoints) > 0, '%s_%s'%(scan, str(gt_objid))

        scores['success'] = float(path[-1] in goal_viewpoints)
        if scores['success'] == 1.0:
            scores['found_success'] = float(pred_found == gt_found)
        else:
            scores['found_success'] = 0.0

        scores['oracle_success'] = float(any(x in goal_viewpoints for x in path))
        scores['spl'] = scores['success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01)
        scores['sspl'] = scores['spl'] * scores['found_success']
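
For reference, the spl line above matches the standard SPL metric (success weighted by the ratio of shortest-path length to actual path length, with 0.01 guarding against division by zero). A toy check with made-up numbers:

# Toy values, not taken from any real episode:
success = 1.0
gt_lengths = 8.0             # ground-truth (shortest) path length
trajectory_lengths = 10.0    # length the agent actually traveled
spl = success * gt_lengths / max(trajectory_lengths, gt_lengths, 0.01)
print(spl)  # 0.8: full success, discounted for the longer route taken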
@@ -385,6 +389,7 @@ class ReverieObjectNavBatch(object):
        print('eval %d predictions' % (len(preds)))
        print(preds[0])

        metrics = defaultdict(list)
        for item in preds:
            instr_id = item['instr_id']
@@ -393,7 +398,14 @@ class ReverieObjectNavBatch(object):
            scan, gt_traj, gt_objid = self.gt_trajs[instr_id]
            pred_found = item['found']
            gt_found = item['gt_found']

            traj_scores = self._eval_item(scan, traj, pred_objid, gt_traj, gt_objid, pred_found, gt_found)

            # record "success" in the result file
            # so that the visualization tool can read the success status
            item['success'] = traj_scores['success']

            for k, v in traj_scores.items():
                metrics[k].append(v)
            metrics['instr_id'].append(instr_id)
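
On the read side, the Jupyter-based visualization tool can now check success without the simulator; a minimal sketch (the prediction file name is an assumption):

import json

# File name is hypothetical; use whatever prediction file the run wrote.
with open('submit_val_unseen.json') as f:
    preds = json.load(f)

# No simulator or distance computation needed: 'success' was logged above.
num_success = sum(item['success'] for item in preds)
print('%d / %d episodes succeeded' % (num_success, len(preds)))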
@@ -10,7 +10,7 @@ obj_ft_dim=768
ngpus=1
seed=0

name=${train_alg}-${features}
name=${train_alg}-${features}-advanced-adversarial
name=${name}-seed.${seed} #-${ngpus}gpus

outdir=${DATA_ROOT}/REVERIE/exprs_map/finetune/${name}
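
Since name feeds both outdir and the checkpoint path, the rename expands as follows (the train_alg and features values are inferred from the old checkpoint path in the next hunk):

train_alg=dagger; features=vitbase; seed=0
name=${train_alg}-${features}-advanced-adversarial
name=${name}-seed.${seed}
echo $name   # dagger-vitbase-advanced-adversarial-seed.0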
@@ -63,8 +63,10 @@ flag="--root_dir ${DATA_ROOT}
# --tokenizer bert \
# --bert_ckpt_file '../datasets/REVERIE/exprs_map/pretrain/cmt-vitbase-mlm.mrc.sap.og-init.lxmert-aug.speaker/ckpts/model_step_100000.pt' \
# --eval_first

# test
echo /root/mount/Matterport3DSimulator/VLN-DUET/datasets/REVERIE/exprs_map/finetune/${name}/ckpts/best_val_unseen
CUDA_VISIBLE_DEVICES='0' python3 reverie/main_nav_obj.py $flag \
      --tokenizer bert \
      --resume_file /root/mount/Matterport3DSimulator/VLN-DUET/datasets/REVERIE/exprs_map/finetune/dagger-vitbase-seed.0/ckpts/best_val_unseen \
      --resume_file /root/mount/Matterport3DSimulator/VLN-DUET/datasets/REVERIE/exprs_map/finetune/${name}/ckpts/best_val_unseen \
      --test --submit
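
As a usage note, the edited script can then be launched once DATA_ROOT and the other variables at its top are set; a hypothetical invocation (the script name and working directory are assumptions, not shown on this page):

# Hypothetical: save the edited script as run_reverie_test.sh and run it
# from the directory containing reverie/main_nav_obj.py.
bash run_reverie_test.sh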