
Commit

update places configuration files, unfreeze weights for the first stage, and reduce the learning rates.
zhmiao committed Feb 11, 2020
1 parent 4dd065b commit d904d35
Showing 2 changed files with 14 additions and 14 deletions.
16 changes: 8 additions & 8 deletions config/Places_LT/stage_1.py
@@ -5,27 +5,27 @@
 training_opt['dataset'] = 'Places_LT'
 training_opt['log_dir'] = './logs/Places_LT/stage1'
 training_opt['num_classes'] = 365
-training_opt['batch_size'] = 256
+training_opt['batch_size'] = 128
 training_opt['num_workers'] = 4
 training_opt['num_epochs'] = 30
 training_opt['display_step'] = 10
 training_opt['feature_dim'] = 512
 training_opt['open_threshold'] = 0.1
 training_opt['sampler'] = None
-training_opt['scheduler_params'] = {'step_size':10, 'gamma':0.1}
+training_opt['scheduler_params'] = {'step_size': 10, 'gamma': 0.1}
 config['training_opt'] = training_opt
 
 networks = {}
-feature_param = {'use_modulatedatt':False, 'use_fc': True, 'dropout': None,
-                 'stage1_weights': False, 'dataset': training_opt['dataset'], 'caffe': True}
-feature_optim_param = {'lr': 0.01, 'momentum':0.9, 'weight_decay':0.0005}
+feature_param = {'use_modulatedatt': False, 'use_fc': True, 'dropout': None,
+                 'stage1_weights': False, 'dataset': training_opt['dataset'], 'caffe': True}
+feature_optim_param = {'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0005}
 networks['feat_model'] = {'def_file': './models/ResNet152Feature.py',
                           'params': feature_param,
                           'optim_params': feature_optim_param,
-                          'fix': True}
+                          'fix': False}
 classifier_param = {'in_dim': training_opt['feature_dim'], 'num_classes': training_opt['num_classes'],
-                    'stage1_weights': False, 'dataset': training_opt['dataset']}
-classifier_optim_param = {'lr': 0.1, 'momentum':0.9, 'weight_decay':0.0005}
+                    'stage1_weights': False, 'dataset': training_opt['dataset']}
+classifier_optim_param = {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0005}
 networks['classifier'] = {'def_file': './models/DotProductClassifier.py',
                           'params': classifier_param,
                           'optim_params': classifier_optim_param}
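Note on this change: stage 1 now trains the ResNet-152 backbone ('fix': False) at a reduced learning rate of 0.001, while the DotProductClassifier keeps lr 0.1. The sketch below is a minimal, assumed illustration of how a PyTorch training script could turn the 'optim_params' and 'fix' entries into SGD parameter groups; build_optimizer and the modules mapping are hypothetical names, not the repository's actual API.

import torch

def build_optimizer(networks_cfg, modules):
    # Sketch (assumption): one SGD parameter group per network, honoring 'fix'.
    # `modules` maps a config key such as 'feat_model' to its nn.Module instance.
    param_groups = []
    for name, cfg in networks_cfg.items():
        module = modules[name]
        trainable = not cfg.get('fix', False)   # 'fix': True freezes the network
        for p in module.parameters():
            p.requires_grad = trainable
        if trainable:
            op = cfg['optim_params']
            param_groups.append({'params': module.parameters(),
                                 'lr': op['lr'],
                                 'momentum': op['momentum'],
                                 'weight_decay': op['weight_decay']})
    return torch.optim.SGD(param_groups)

# With the stage-1 config above, both networks are trainable: the backbone
# at lr 0.001 and the classifier at lr 0.1.
# optimizer = build_optimizer(networks, {'feat_model': feat_model, 'classifier': classifier})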
12 changes: 6 additions & 6 deletions config/Places_LT/stage_2_meta_embedding.py
@@ -5,7 +5,7 @@
 training_opt['dataset'] = 'Places_LT'
 training_opt['log_dir'] = './logs/Places_LT/meta_embedding'
 training_opt['num_classes'] = 365
-training_opt['batch_size'] = 256
+training_opt['batch_size'] = 128
 training_opt['num_workers'] = 4
 training_opt['num_epochs'] = 30
 training_opt['display_step'] = 10
@@ -18,15 +18,15 @@
 
 networks = {}
 feature_param = {'use_modulatedatt': True, 'use_fc': True, 'dropout': None,
-                 'stage1_weights': True, 'dataset': training_opt['dataset'], 'caffe': False}
-feature_optim_param = {'lr': 0.01, 'momentum':0.9, 'weight_decay':0.0005}
+                 'stage1_weights': True, 'dataset': training_opt['dataset'], 'caffe': False}
+feature_optim_param = {'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0005}
 networks['feat_model'] = {'def_file': './models/ResNet152Feature.py',
                           'params': feature_param,
                           'optim_params': feature_optim_param,
                           'fix': True}
-classifier_param = {'in_dim': training_opt['feature_dim'], 'num_classes': training_opt['num_classes'],
-                    'stage1_weights': True, 'dataset': training_opt['dataset']}
-classifier_optim_param = {'lr': 0.1, 'momentum':0.9, 'weight_decay':0.0005}
+classifier_param = {'in_dim': training_opt['feature_dim'], 'num_classes': training_opt['num_classes'],
+                    'stage1_weights': True, 'dataset': training_opt['dataset']}
+classifier_optim_param = {'lr': 0.01, 'momentum': 0.9, 'weight_decay': 0.0005}
 networks['classifier'] = {'def_file': './models/MetaEmbeddingClassifier.py',
                           'params': classifier_param,
                           'optim_params': classifier_optim_param}
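Note on the learning-rate schedule: both stages run for 30 epochs, and the stage-1 file sets scheduler_params to {'step_size': 10, 'gamma': 0.1}. Assuming the same StepLR-style decay applies here (the stage-2 scheduler settings are not shown in this diff, and build_scheduler is a hypothetical helper, not the repository's API), every learning rate is divided by 10 at epochs 10 and 20, so the meta-embedding classifier would decay 0.01 -> 0.001 -> 0.0001.

import torch

def build_scheduler(optimizer, scheduler_params):
    # Sketch (assumption): map the config's 'scheduler_params' onto StepLR.
    return torch.optim.lr_scheduler.StepLR(optimizer,
                                           step_size=scheduler_params['step_size'],
                                           gamma=scheduler_params['gamma'])

# Illustrative epoch loop: step the schedule once per epoch.
# scheduler = build_scheduler(optimizer, training_opt['scheduler_params'])
# for epoch in range(training_opt['num_epochs']):
#     train_one_epoch(...)   # hypothetical training step
#     scheduler.step()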
