Compare commits


No commits in common. "8b5a7438a0f31c27b61506e4b75998e950287b8f" and "2a561bcf01c52d75c4ae9e90b81dda2abe96a758" have entirely different histories.

3 changed files with 10 additions and 39 deletions

View File

@@ -443,9 +443,9 @@ class GMapObjectNavAgent(Seq2SeqAgent):
 )
 ml_loss += self.criterion(nav_outs['local_logits'], local_nav_targets) # local
 # objec grounding
-# obj_targets = self._teacher_object(obs, ended, pano_inputs['view_lens'], obj_logits)
+obj_targets = self._teacher_object(obs, ended, pano_inputs['view_lens'], obj_logits)
 # print(t, obj_targets[6], obj_logits[6], obs[6]['obj_ids'], pano_inputs['view_lens'][i], obs[6]['gt_obj_id'])
-# og_loss += self.criterion(obj_logits, obj_targets)
+og_loss += self.criterion(obj_logits, obj_targets)
 # print(F.cross_entropy(obj_logits, obj_targets, reduction='none'))
 # print(t, 'og_loss', og_loss.item(), self.criterion(obj_logits, obj_targets).item())
@@ -532,11 +532,11 @@ class GMapObjectNavAgent(Seq2SeqAgent):
 if train_ml is not None:
     ml_loss = ml_loss * train_ml / batch_size
-    # og_loss = og_loss * train_ml / batch_size
+    og_loss = og_loss * train_ml / batch_size
     self.loss += ml_loss
-    # self.loss += og_loss
+    self.loss += og_loss
     self.logs['IL_loss'].append(ml_loss.item())
-    # self.logs['OG_loss'].append(og_loss.item())
+    self.logs['OG_loss'].append(og_loss.item())
 '''
 print("TRAJ:")

View File

@@ -8,17 +8,12 @@ import random
 import networkx as nx
 from collections import defaultdict
 import copy
-from glob import glob
 import MatterSim
 from utils.data import load_nav_graphs, new_simulator
 from utils.data import angle_feature, get_all_point_angle_feature
-with open('./node_region.json') as fp:
-    node_region = json.load(fp)
 class EnvBatch(object):
     ''' A simple wrapper for a batch of MatterSim environments,
@@ -365,9 +360,6 @@ class ReverieObjectNavBatch(object):
 path = sum(pred_path, [])
 assert gt_path[0] == path[0], 'Result trajectories should include the start position'
-pred_stop_region = node_region[scan][path[-1]]
-gt_stop_region = node_region[scan][gt_path[-1]]
 scores['action_steps'] = len(pred_path) - 1
 scores['trajectory_steps'] = len(path) - 1
 scores['trajectory_lengths'] = np.sum([shortest_distances[a][b] for a, b in zip(path[:-1], path[1:])])
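
For context, the trajectory statistics kept by the unchanged lines above can be sketched as follows; shortest_distances is assumed to be the all-pairs geodesic distance table built from load_nav_graphs, and the viewpoint ids are hypothetical:

    import numpy as np

    # Hypothetical predicted path of viewpoint ids and pairwise geodesic distances.
    path = ['vp_start', 'vp_a', 'vp_b']
    shortest_distances = {'vp_start': {'vp_a': 2.1}, 'vp_a': {'vp_b': 1.4}}

    trajectory_steps = len(path) - 1
    trajectory_lengths = np.sum([shortest_distances[a][b]
                                 for a, b in zip(path[:-1], path[1:])])
    # trajectory_steps == 2, trajectory_lengths == 3.5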
@@ -377,29 +369,15 @@ class ReverieObjectNavBatch(object):
 goal_viewpoints = set(self.obj2vps['%s_%s'%(scan, str(gt_objid))])
 assert len(goal_viewpoints) > 0, '%s_%s'%(scan, str(gt_objid))
-scores['found_success'] = float(pred_found == gt_found)
 scores['success'] = float(path[-1] in goal_viewpoints)
-scores['room_success'] = float(pred_stop_region == gt_stop_region)
-scores['oracle_success'] = float(any(x in goal_viewpoints for x in path))
-'''
 if scores['success'] == 1.0:
     scores['found_success'] = float(pred_found == gt_found)
 else:
     scores['found_success'] = 0.0
-'''
+scores['oracle_success'] = float(any(x in goal_viewpoints for x in path))
 scores['spl'] = scores['success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01)
-scores['sspl_1'] = scores['success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01) * scores['found_success']
-scores['sspl_2'] = scores['room_success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01) * scores['found_success']
-scores['sspl_3'] = scores['oracle_success'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01) * scores['found_success']
-scores['ss_1'] = scores['success'] * scores['found_success']
-scores['ss_2'] = scores['room_success'] * scores['found_success']
-scores['ss_3'] = scores['oracle_success'] * scores['found_success']
-# scores['sspl'] = scores['spl'] * scores['found_success']
+scores['sspl'] = scores['spl'] * scores['found_success']
 scores['rgs'] = str(pred_objid) == str(gt_objid)
 scores['rgspl'] = scores['rgs'] * gt_lengths / max(scores['trajectory_lengths'], gt_lengths, 0.01)
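
Taken together, the updated lines of this hunk gate found_success on stopping success and derive sspl from spl, dropping the room-level and oracle variants. A self-contained sketch of the new per-episode scoring, with hypothetical inputs in place of the dataset values:

    # Hypothetical episode outcome; goal_viewpoints, gt_lengths, etc. are made up.
    goal_viewpoints = {'vp_goal', 'vp_near_goal'}
    path = ['vp_start', 'vp_a', 'vp_goal']
    pred_found, gt_found = True, True
    gt_lengths = 4.0
    trajectory_lengths = 5.2

    scores = {}
    scores['success'] = float(path[-1] in goal_viewpoints)
    if scores['success'] == 1.0:
        scores['found_success'] = float(pred_found == gt_found)
    else:
        scores['found_success'] = 0.0
    scores['oracle_success'] = float(any(x in goal_viewpoints for x in path))
    scores['spl'] = scores['success'] * gt_lengths / max(trajectory_lengths, gt_lengths, 0.01)
    scores['sspl'] = scores['spl'] * scores['found_success']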
@@ -441,14 +419,8 @@ class ReverieObjectNavBatch(object):
 'spl': np.mean(metrics['spl']) * 100,
 'rgs': np.mean(metrics['rgs']) * 100,
 'rgspl': np.mean(metrics['rgspl']) * 100,
-'sspl_1': np.mean(metrics['sspl_1']) * 100,
-'sspl_2': np.mean(metrics['sspl_2']) * 100,
-'sspl_3': np.mean(metrics['sspl_3']) * 100,
-'ss_1': np.mean(metrics['ss_1']) * 100,
-'ss_2': np.mean(metrics['ss_2']) * 100,
-'ss_3': np.mean(metrics['ss_3']) * 100,
+'sspl': np.mean(metrics['sspl']) * 100,
 'found_sr': np.mean(metrics['found_success']) * 100,
-'room_sr': np.mean(metrics['room_success']) * 100,
 }
 return avg_metrics, metrics
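
After this change the aggregation averages only the surviving metrics on a 0-100 scale. A reduced sketch, assuming the metrics container is a defaultdict of per-episode lists (only a subset of keys shown):

    import numpy as np
    from collections import defaultdict

    metrics = defaultdict(list)
    # Pretend two episodes were scored already.
    for ep_scores in ({'spl': 0.8, 'sspl': 0.8, 'found_success': 1.0},
                      {'spl': 0.0, 'sspl': 0.0, 'found_success': 0.0}):
        for k, v in ep_scores.items():
            metrics[k].append(v)

    avg_metrics = {
        'spl': np.mean(metrics['spl']) * 100,
        'sspl': np.mean(metrics['sspl']) * 100,
        'found_sr': np.mean(metrics['found_success']) * 100,
    }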

View File

@@ -138,7 +138,7 @@ def train(args, train_env, val_envs, aug_env=None, rank=-1):
     '\nListener training starts, start iteration: %s' % str(start_iter), record_file
 )
-best_val = {'val_unseen': {"spl": 0., "sr": 0., "room_sr": 0., "state":"", "sspl": 0., 'found_sr': 0.}}
+best_val = {'val_unseen': {"spl": 0., "sr": 0., "state":"", "sspl": 0., 'found_sr': 0.}}
 for idx in range(start_iter, start_iter+args.iters, args.log_every):
     listner.logs = defaultdict(list)
@@ -203,12 +203,11 @@ def train(args, train_env, val_envs, aug_env=None, rank=-1):
 # select model by spl
 if env_name in best_val:
-    if score_summary['room_sr'] >= best_val[env_name]['room_sr']:
+    if score_summary['sspl'] >= best_val[env_name]['sspl']:
         best_val[env_name]['spl'] = score_summary['spl']
         best_val[env_name]['sspl'] = score_summary['sspl']
         best_val[env_name]['sr'] = score_summary['sr']
         best_val[env_name]['found_sr'] = score_summary['found_sr']
-        best_val[env_name]['room_sr'] = score_summary['room_sr']
         best_val[env_name]['state'] = 'Iter %d %s' % (iter, loss_str)
         listner.save(idx, os.path.join(args.ckpt_dir, "best_%s" % (env_name)))
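
With this change the best checkpoint per split is selected by sspl instead of room_sr. A hedged sketch of that selection rule, with listner.save replaced by a hypothetical save_fn callback:

    best_val = {'val_unseen': {'spl': 0., 'sr': 0., 'state': '', 'sspl': 0., 'found_sr': 0.}}

    def maybe_update_best(env_name, score_summary, iter_, loss_str, save_fn):
        # Mirrors the updated selection rule: keep the checkpoint whose sspl is best so far.
        if env_name in best_val and score_summary['sspl'] >= best_val[env_name]['sspl']:
            for key in ('spl', 'sspl', 'sr', 'found_sr'):
                best_val[env_name][key] = score_summary[key]
            best_val[env_name]['state'] = 'Iter %d %s' % (iter_, loss_str)
            save_fn()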