diff --git a/README.md b/README.md
index 90dea61..8e1fd58 100644
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ python ./exps/basic_main.py []
 The argument list is loaded by `./lib/config_utils/basic_args.py`.
 An example script is `./scripts/300W-DET.sh`, which you can simply run to train the base detector on the `300-W` dataset.
 ```
-sh scripts/300W-DET.sh
+bash scripts/300W-DET.sh
 ```
 
 ### Improving the Detector by SBR
@@ -64,7 +64,7 @@ The argument list is loaded by `./lib/config_utils/lk_args.py`.
 #### An example to train SBR on the unlabeled sequences
 The `init_model` parameter is the path to the detector trained in the `Basic Training` section.
 ```
-sh scripts/demo_sbr.sh
+bash scripts/demo_sbr.sh
 ```
 
 To see visualization results, use the commands in `Visualization`.
@@ -104,7 +104,7 @@ ffmpeg -start_number 3 -i cache_data/cache/demo-sbr-vis/image%04d.png -b:v 30000
 
 supervision-by-registration is released under the [CC-BY-NC license](https://github.com/facebookresearch/supervision-by-registration/blob/master/LICENSE).
 
-## Useful information
+## Useful Information
 
 ### 1. train on your own video data
 You should look at `./lib/datasets/VideoDataset.py` and `./lib/datasets/parse_utils.py`, and add logic to find the neighbouring frames given one image path.
diff --git a/exps/basic_main.py b/exps/basic_main.py
index 30285d2..789f826 100755
--- a/exps/basic_main.py
+++ b/exps/basic_main.py
@@ -159,6 +159,7 @@ def main(args):
         'args'      : deepcopy(args),
         'arch'      : model_config.arch,
         'state_dict': net.state_dict(),
+        'detector'  : net.state_dict(),
         'scheduler' : scheduler.state_dict(),
         'optimizer' : optimizer.state_dict(),
         }, logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger)
@@ -169,6 +170,7 @@ def main(args):
         }, logger.last_info(), logger)
 
     eval_results = eval_all(args, eval_loaders, net, criterion, epoch_str, logger, opt_config)
+    logger.log('NME Results : {:}'.format( eval_results ))
 
     # measure elapsed time
     epoch_time.update(time.time() - start_time)
diff --git a/exps/eval.py b/exps/eval.py
index a34daa4..7bea010 100755
--- a/exps/eval.py
+++ b/exps/eval.py
@@ -20,6 +20,7 @@ from models import obtain_model, remove_module_dict
 from config_utils import load_configure
 
 
+
 def evaluate(args):
   assert torch.cuda.is_available(), 'CUDA is not available.'
   torch.backends.cudnn.enabled = True
@@ -39,14 +40,14 @@ def evaluate(args):
                                      std=[0.229, 0.224, 0.225])
 
   param = snapshot['args']
   eval_transform = transforms.Compose([transforms.PreCrop(param.pre_crop_expand), transforms.TrainScale2WH((param.crop_width, param.crop_height)), transforms.ToTensor(), normalize])
   model_config = load_configure(param.model_config, None)
   dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, param.data_indicator)
   dataset.reset(param.num_pts)
 
   net = obtain_model(model_config, param.num_pts + 1)
   net = net.cuda()
-  weights = remove_module_dict(snapshot['state_dict'])
+  weights = remove_module_dict(snapshot['detector'])
   net.load_state_dict(weights)
   print ('Prepare input data')
   [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face)
diff --git a/exps/lk_main.py b/exps/lk_main.py
index 57a0fdc..c394c3c 100755
--- a/exps/lk_main.py
+++ b/exps/lk_main.py
@@ -169,6 +169,7 @@ def main(args):
         'args'      : deepcopy(args),
         'arch'      : model_config.arch,
         'state_dict': net.state_dict(),
+        'detector'  : detector.state_dict(),
         'scheduler' : scheduler.state_dict(),
         'optimizer' : optimizer.state_dict(),
         }, logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger)
diff --git a/lib/procedure/basic_eval.py b/lib/procedure/basic_eval.py
index d26cbad..90fbb64 100644
--- a/lib/procedure/basic_eval.py
+++ b/lib/procedure/basic_eval.py
@@ -23,8 +23,8 @@ def basic_eval_all(args, loaders, net, criterion, epoch_str, logger, opt_config)
     nme, _, _ = eval_meta.compute_mse(logger)
     meta_path = logger.path('meta') / 'eval-{:}-{:02d}-{:02d}.pth'.format(epoch_str, i, len(loaders))
     eval_meta.save(meta_path)
-    nmes.append(nme)
-  return ', '.join(['{:.1f}'.format(x) for x in nmes])
+    nmes.append(nme*100)
+  return ', '.join(['{:.2f}'.format(x) for x in nmes])
 
 
 def basic_eval(args, loader, net, criterion, epoch_str, logger, opt_config):
diff --git a/lib/xvision/common_eval.py b/lib/xvision/common_eval.py
index 529623d..2951076 100644
--- a/lib/xvision/common_eval.py
+++ b/lib/xvision/common_eval.py
@@ -62,7 +62,7 @@ def evaluate_normalized_mean_error(predictions, groundtruth, log, extra_faces):
   accuracy_under_007 = np.sum(error_per_image<0.07) * 100. / error_per_image.size
   accuracy_under_008 = np.sum(error_per_image<0.08) * 100. / error_per_image.size
 
-  print_log('Compute NME and AUC for {:} images with {:} points :: [(nms): mean={:.3f}, std={:.3f}], auc@0.07={:.3f}, auc@0.08-{:.3f}, acc@0.07={:.3f}, acc@0.08={:.3f}'.format(num_images, num_points, normalise_mean_error*100, error_per_image.std()*100, area_under_curve07*100, area_under_curve08*100, accuracy_under_007, accuracy_under_008), log)
+  print_log('Compute NME and AUC for {:} images with {:} points :: [(NME): mean={:.3f}, std={:.3f}], auc@0.07={:.3f}, auc@0.08={:.3f}, acc@0.07={:.3f}, acc@0.08={:.3f}'.format(num_images, num_points, normalise_mean_error*100, error_per_image.std()*100, area_under_curve07*100, area_under_curve08*100, accuracy_under_007, accuracy_under_008), log)
 
   for_pck_curve = []
   for x in range(0, 3501, 1):
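
The checkpoints written by `basic_main.py` and `lk_main.py` above carry a `detector` entry alongside `state_dict` (in `lk_main.py` it stores the detector's weights rather than the full SBR model), and `eval.py` now reads `snapshot['detector']`. Snapshots saved before this change only contain `state_dict`, so loading code may want a fallback. A minimal sketch, assuming the checkpoint layout shown above; `load_detector_weights` is a hypothetical helper, not part of the repo:
```
import torch
from models import remove_module_dict  # repo helper, as imported in exps/eval.py

def load_detector_weights(snapshot_path):
  # Hypothetical fallback loader: newer snapshots carry a 'detector' entry,
  # older ones only 'state_dict'; accept both so old checkpoints stay usable.
  snapshot = torch.load(snapshot_path)
  key = 'detector' if 'detector' in snapshot else 'state_dict'
  return remove_module_dict(snapshot[key])
```
`net.load_state_dict(load_detector_weights(path))` would then accept checkpoints saved before or after this change.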
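
For the README note on training with your own video data, `VideoDataset` needs to resolve each image path to its temporal neighbours. A minimal sketch of such a lookup, assuming frames carry a fixed-width index as in the ffmpeg example (`image0003.png`); `find_neighbour_frames` is a hypothetical illustration, not the actual `parse_utils` API:
```
import os

def find_neighbour_frames(image_path, offsets=(-1, 1)):
  # Hypothetical sketch: assumes file names end with a fixed-width frame
  # index, e.g. 'image0003.png' -> 'image0002.png' and 'image0004.png'.
  folder, name = os.path.split(image_path)
  stem, ext = os.path.splitext(name)
  prefix = stem.rstrip('0123456789')
  digits = stem[len(prefix):]
  index, width = int(digits), len(digits)
  neighbours = []
  for offset in offsets:
    candidate = os.path.join(folder, prefix + str(index + offset).zfill(width) + ext)
    # At sequence boundaries a neighbour may be missing; fall back to the
    # centre frame so every returned path exists.
    neighbours.append(candidate if os.path.exists(candidate) else image_path)
  return neighbours
```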