This repository has been archived by the owner on Mar 22, 2021. It is now read-only.

added FineTuningStep #81

Merged: 1 commit, Sep 17, 2018

76 changes: 75 additions & 1 deletion common_blocks/utils.py
@@ -392,4 +392,78 @@ def clean_object_from_memory(obj):
    del obj
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


class FineTuneStep(Step):
    """Step that can resume training from a persisted transformer.

    With ``fine_tuning=True`` and a cached transformer available, the
    transformer is loaded and ``fit_transform`` continues training from
    its saved state instead of fitting from scratch.
    """

    def __init__(self,
                 name,
                 transformer,
                 experiment_directory,
                 input_data=None,
                 input_steps=None,
                 adapter=None,
                 is_trainable=False,
                 cache_output=False,
                 persist_output=False,
                 load_persisted_output=False,
                 force_fitting=False,
                 fine_tuning=False,
                 persist_upstream_pipeline_structure=False):
        super().__init__(name,
                         transformer,
                         experiment_directory,
                         input_data=input_data,
                         input_steps=input_steps,
                         adapter=adapter,
                         is_trainable=is_trainable,
                         cache_output=cache_output,
                         persist_output=persist_output,
                         load_persisted_output=load_persisted_output,
                         force_fitting=force_fitting,
                         persist_upstream_pipeline_structure=persist_upstream_pipeline_structure)
        self.fine_tuning = fine_tuning

    def _cached_fit_transform(self, step_inputs):
        if self.is_trainable:
            if self.transformer_is_cached:
                if self.force_fitting and self.fine_tuning:
                    raise ValueError('only one of force_fitting or fine_tuning can be True')
                elif self.force_fitting:
                    # Ignore the cached transformer and refit from scratch.
                    logger.info('Step {}, fitting and transforming...'.format(self.name))
                    step_output_data = self.transformer.fit_transform(**step_inputs)
                    logger.info('Step {}, persisting transformer to the {}'
                                .format(self.name, self.exp_dir_transformers_step))
                    self.transformer.persist(self.exp_dir_transformers_step)
                elif self.fine_tuning:
                    # Load the cached transformer and continue training from its weights.
                    logger.info('Step {}, loading transformer from the {}'
                                .format(self.name, self.exp_dir_transformers_step))
                    self.transformer.load(self.exp_dir_transformers_step)
                    logger.info('Step {}, transforming...'.format(self.name))
                    step_output_data = self.transformer.fit_transform(**step_inputs)
                    self.transformer.persist(self.exp_dir_transformers_step)
                else:
                    # Reuse the cached transformer as-is; transform only.
                    logger.info('Step {}, loading transformer from the {}'
                                .format(self.name, self.exp_dir_transformers_step))
                    self.transformer.load(self.exp_dir_transformers_step)
                    logger.info('Step {}, transforming...'.format(self.name))
                    step_output_data = self.transformer.transform(**step_inputs)
            else:
                # No cached transformer yet: fit from scratch and persist.
                logger.info('Step {}, fitting and transforming...'.format(self.name))
                step_output_data = self.transformer.fit_transform(**step_inputs)
                logger.info('Step {}, persisting transformer to the {}'
                            .format(self.name, self.exp_dir_transformers_step))
                self.transformer.persist(self.exp_dir_transformers_step)
        else:
            logger.info('Step {}, transforming...'.format(self.name))
            step_output_data = self.transformer.transform(**step_inputs)

        if self.cache_output:
            logger.info('Step {}, caching output to the {}'
                        .format(self.name, self.exp_dir_cache_step))
            self._persist_output(step_output_data, self.exp_dir_cache_step)
        if self.persist_output:
            logger.info('Step {}, persisting output to the {}'
                        .format(self.name, self.exp_dir_outputs_step))
            self._persist_output(step_output_data, self.exp_dir_outputs_step)
        return step_output_data
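
For reference, a minimal sketch of how the new step behaves across runs. Everything below is illustrative and not part of this PR: DummyTransformer, the /tmp path, and the input dict are assumptions, and the Step/BaseTransformer interface is taken from the project's steppy dependency.

from steppy.base import BaseTransformer

from common_blocks.utils import FineTuneStep


class DummyTransformer(BaseTransformer):
    """Hypothetical stand-in; any object with fit_transform/transform/persist/load works."""

    def fit_transform(self, numbers):
        return {'numbers': numbers}

    def transform(self, numbers):
        return {'numbers': numbers}

    def persist(self, filepath):
        pass  # a real transformer would save its weights here

    def load(self, filepath):
        return self  # a real transformer would restore its weights here


step = FineTuneStep(name='dummy',
                    transformer=DummyTransformer(),
                    experiment_directory='/tmp/experiment',  # hypothetical path
                    input_data=['input'],
                    is_trainable=True,
                    fine_tuning=True)

# Run 1: nothing is cached yet, so the step fits from scratch and persists.
# Run 2 (same experiment_directory): the cached transformer is loaded and
# fit_transform is called again, i.e. training resumes from saved weights.
step.fit_transform({'input': {'numbers': [1, 2, 3]}})

Once a transformer is cached, the modes are mutually exclusive: force_fitting refits from scratch, fine_tuning resumes from the cache, and requesting both raises ValueError.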
22 changes: 12 additions & 10 deletions main.py
@@ -202,6 +202,7 @@
    'training_config': {'epochs': PARAMS.epochs_nr,
                        'shuffle': True,
                        'batch_size': PARAMS.batch_size_train,
                        'fine_tuning': PARAMS.fine_tuning,
                        },
    'callbacks_config': {'model_checkpoint': {
        'filepath': os.path.join(EXPERIMENT_DIR, 'checkpoints', 'unet', 'best.torch'),
@@ -253,16 +254,17 @@ def unet(config, suffix='', train_mode=True):
    else:
        preprocessing = pipelines.preprocessing_inference(config, suffix=suffix)

    # removed (plain Step, no fine-tuning support):
    unet = Step(name='unet{}'.format(suffix),
                transformer=models.PyTorchUNet(**config.model['unet']),
                input_data=['callback_input'],
                input_steps=[preprocessing],
                adapter=Adapter({'datagen': E(preprocessing.name, 'datagen'),
                                 'validation_datagen': E(preprocessing.name, 'validation_datagen'),
                                 'meta_valid': E('callback_input', 'meta_valid'),
                                 }),
                is_trainable=True,
                experiment_directory=config.execution.experiment_dir)
    # added (FineTuneStep, with the flag wired from the config):
    unet = utils.FineTuneStep(name='unet{}'.format(suffix),
                              transformer=models.PyTorchUNet(**config.model['unet']),
                              input_data=['callback_input'],
                              input_steps=[preprocessing],
                              adapter=Adapter({'datagen': E(preprocessing.name, 'datagen'),
                                               'validation_datagen': E(preprocessing.name, 'validation_datagen'),
                                               'meta_valid': E('callback_input', 'meta_valid'),
                                               }),
                              is_trainable=True,
                              fine_tuning=config.model.unet.training_config.fine_tuning,
                              experiment_directory=config.execution.experiment_dir)

    if config.general.loader_mode == 'resize_and_pad':
        size_adjustment_function = partial(postprocessing.crop_image, target_size=config.general.original_size)
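
With this wiring, resuming training becomes a configuration-only change: the flag travels from PARAMS through training_config into the step. A rough sketch of the intended two-phase workflow, assuming main.py's entry points (the data dict and the in-place config toggle are illustrative, not code from this PR):

# Illustrative two-phase run; not part of this PR.

# Phase 1: PARAMS.fine_tuning is falsy. The 'unet' FineTuneStep finds no
# cached transformer, fits from scratch, and persists its weights.
pipeline = unet(config, train_mode=True)
pipeline.fit_transform(data)  # the shape of 'data' depends on the pipeline

# Phase 2: flip the flag and rerun. The step now loads the persisted
# transformer and continues training via fit_transform before persisting
# the updated weights.
config.model.unet.training_config.fine_tuning = True  # illustrative toggle
pipeline = unet(config, train_mode=True)
pipeline.fit_transform(data)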