feat: add new metrics for new reverie

This commit is contained in:
Ting-Jun Wang 2024-09-01 14:13:05 +08:00
parent 8b5a7438a0
commit bfcb1f49ea
Signed by: snsd0805
GPG Key ID: 48D331A3D6160354
6 changed files with 92 additions and 9 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -87,6 +87,7 @@ def construct_instrs(anno_dir, dataset, splits, tokenizer, max_instr_len=512):
new_item['objId'] = None
new_item['instruction'] = instr
new_item['instr_encoding'] = item['instr_encodings'][j][:max_instr_len]
new_item['path'] = item['path'][j]
new_item['found'] = item['found'][j]
del new_item['instructions']
del new_item['instr_encodings']

View File

@ -19,6 +19,11 @@ from utils.data import angle_feature, get_all_point_angle_feature
with open('./node_region.json') as fp:
node_region = json.load(fp)
with open('region2objs.json') as fp:
region2objs = json.load(fp)
with open('vp2objs.json') as fp:
vp2objs = json.load(fp)
class EnvBatch(object):
''' A simple wrapper for a batch of MatterSim environments,
@ -368,6 +373,7 @@ class ReverieObjectNavBatch(object):
pred_stop_region = node_region[scan][path[-1]]
gt_stop_region = node_region[scan][gt_path[-1]]
scores['action_steps'] = len(pred_path) - 1
scores['trajectory_steps'] = len(path) - 1
scores['trajectory_lengths'] = np.sum([shortest_distances[a][b] for a, b in zip(path[:-1], path[1:])])
@ -382,16 +388,86 @@ class ReverieObjectNavBatch(object):
scores['success'] = float(path[-1] in goal_viewpoints)
scores['room_success'] = float(pred_stop_region == gt_stop_region)
scores['oracle_success'] = float(any(x in goal_viewpoints for x in path))
gt_room_start_vp = None
gt_back_path = []
gt_front_path = []
exit_room = False
for vp in gt_path[::-1]:
if node_region[scan][vp] == gt_stop_region and not exit_room:
gt_back_path.append(vp)
gt_room_start_vp = vp
else:
exit_room = True
gt_front_path.append(vp)
gt_front_path.reverse()
gt_back_path.reverse()
assert (gt_front_path + gt_back_path) == gt_path, "Front path & Back path error"
gt_front_path += [gt_room_start_vp]
'''
if scores['success'] == 1.0:
scores['found_success'] = float(pred_found == gt_found)
else:
scores['found_success'] = 0.0
'''
gt_reach_length = np.sum([shortest_distances[a][b] for a, b in zip(gt_front_path[:-1], gt_front_path[1:])]) if len(gt_front_path) != 1 else 0.01
gt_explore_length = np.sum([shortest_distances[a][b] for a, b in zip(gt_back_path[:-1], gt_back_path[1:])]) if len(gt_back_path) != 1 else 0.01
if scores['room_success'] != 0.0:
# coarse-grained
# get the reach_path & explore_path
room_start_vp = None
back_path = []
front_path = []
exit_room = False
for vp in path[::-1]:
if node_region[scan][vp] == gt_stop_region and not exit_room:
back_path.append(vp)
room_start_vp = vp
else:
exit_room = True
front_path.append(vp)
front_path.reverse()
back_path.reverse()
assert (front_path + back_path) == path, "Front path & Back path error"
# front_path = ... room_start_vp
# back_path = room_start_vp ...
front_path += [room_start_vp]
reach_length = np.sum([shortest_distances[a][b] for a, b in zip(front_path[:-1], front_path[1:])]) if len(front_path) != 1 else 0.01
explore_length = np.sum([shortest_distances[a][b] for a, b in zip(back_path[:-1], back_path[1:])]) if len(back_path) != 1 else 0.01
scores['room_spl'] = scores['room_success'] * gt_reach_length / max(reach_length, gt_reach_length, 0.01)
if scores['found_success'] != 0.0:
# fine-grained score
# p is the coverage rate
if gt_found:
p = 1.0
else:
explore_objs = set()
for vp in back_path:
explore_objs.update(vp2objs[vp])
p = len(explore_objs) / len(region2objs[scan][gt_stop_region])
scores['coverage_rate'] = p
scores['explore_spl'] = scores['room_success'] * scores['found_success'] * gt_explore_length / max(gt_explore_length, explore_length, 0.01) * p
else:
scores['coverage_rate'] = 0
scores['explore_spl'] = 0
else:
scores['room_spl'] = 0.0
scores['coverage_rate'] = 0
scores['explore_spl'] = 0
scores['spl'] = scores['success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01)
'''
scores['sspl_1'] = scores['success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01) * scores['found_success']
scores['sspl_2'] = scores['room_success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01) * scores['found_success']
scores['sspl_3'] = scores['oracle_success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01) * scores['found_success']
@ -399,7 +475,8 @@ class ReverieObjectNavBatch(object):
scores['ss_1'] = scores['success'] * scores['found_success']
scores['ss_2'] = scores['room_success'] * scores['found_success']
scores['ss_3'] = scores['oracle_success'] * scores['found_success']
# scores['sspl'] = scores['spl'] * scores['found_success']
'''
scores['sspl'] = scores['spl'] * scores['found_success']
scores['rgs'] = str(pred_objid) == str(gt_objid)
scores['rgspl'] = scores['rgs'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01)
@ -439,16 +516,15 @@ class ReverieObjectNavBatch(object):
'sr': np.mean(metrics['success']) * 100,
'oracle_sr': np.mean(metrics['oracle_success']) * 100,
'spl': np.mean(metrics['spl']) * 100,
'sspl': np.mean(metrics['sspl']) * 100,
'rgs': np.mean(metrics['rgs']) * 100,
'rgspl': np.mean(metrics['rgspl']) * 100,
'sspl_1': np.mean(metrics['sspl_1']) * 100,
'sspl_2': np.mean(metrics['sspl_2']) * 100,
'sspl_3': np.mean(metrics['sspl_3']) * 100,
'ss_1': np.mean(metrics['ss_1']) * 100,
'ss_2': np.mean(metrics['ss_2']) * 100,
'ss_3': np.mean(metrics['ss_3']) * 100,
'found_sr': np.mean(metrics['found_success']) * 100,
'room_sr': np.mean(metrics['room_success']) * 100,
'room_spl': np.mean(metrics['room_spl']) * 100,
'coverage_rate': np.mean(metrics['coverage_rate']) * 100,
'explore_spl': np.mean(metrics['explore_spl']) * 100,
}
return avg_metrics, metrics

View File

@ -138,7 +138,7 @@ def train(args, train_env, val_envs, aug_env=None, rank=-1):
'\nListener training starts, start iteration: %s' % str(start_iter), record_file
)
best_val = {'val_unseen': {"spl": 0., "sr": 0., "room_sr": 0., "state":"", "sspl": 0., 'found_sr': 0.}}
best_val = {'val_unseen': {"spl": 0., "sr": 0., "room_sr": 0., "state":"", "sspl": 0., 'found_sr': 0., 'explore_spl': 0.}}
for idx in range(start_iter, start_iter+args.iters, args.log_every):
listner.logs = defaultdict(list)
@ -203,9 +203,12 @@ def train(args, train_env, val_envs, aug_env=None, rank=-1):
# select model by spl
if env_name in best_val:
if score_summary['room_sr'] >= best_val[env_name]['room_sr']:
if score_summary['explore_spl'] >= best_val[env_name]['explore_spl']:
best_val[env_name]['spl'] = score_summary['spl']
best_val[env_name]['sspl'] = score_summary['sspl']
best_val[env_name]['explore_spl'] = score_summary['explore_spl']
best_val[env_name]['coverage_rate'] = score_summary['coverage_rate']
best_val[env_name]['room_spl'] = score_summary['room_spl']
best_val[env_name]['sr'] = score_summary['sr']
best_val[env_name]['found_sr'] = score_summary['found_sr']
best_val[env_name]['room_sr'] = score_summary['room_sr']

1
map_nav_src/vp2objs.json Normal file

File diff suppressed because one or more lines are too long