From af570c96fe438f3eb86cd53307a086e3480553fe Mon Sep 17 00:00:00 2001
From: AndrewBeers
Date: Fri, 10 Aug 2018 19:01:39 +0200
Subject: [PATCH] prog: progressive GAN progress

---
 .gitignore | 1 +
 .travis.yml | 0
 Dockerfile | 0
 LICENSE | 0
 README.md | 0
 coverage_wrapper.py | 0
 deepneuro/__init__.py | 0
 deepneuro/augmentation/__init__.py | 0
 deepneuro/augmentation/augment.py | 0
 deepneuro/data/__init__.py | 0
 deepneuro/data/data_collection.py | 30 +-
 deepneuro/data/data_group.py | 0
 deepneuro/data/data_load.py | 31 +-
 deepneuro/data/data_utilities.py | 0
 deepneuro/docker/__init__.py | 0
 deepneuro/docker/docker_cli.py | 0
 deepneuro/external/__init__.py | 0
 deepneuro/interface/__init__.py | 0
 deepneuro/interface/master_cli.py | 0
 deepneuro/interface/web_wrapper.py | 0
 deepneuro/load/__init__.py | 0
 deepneuro/load/load.py | 0
 deepneuro/models/__init__.py | 0
 deepneuro/models/blocks.py | 126 ++++-
 deepneuro/models/callbacks.py | 107 ++++-
 deepneuro/models/cost_functions.py | 27 ++
 deepneuro/models/cyclegan.py | 0
 deepneuro/models/dn_ops.py | 30 +-
 deepneuro/models/gan.py | 97 ++--
 deepneuro/models/interp_gan.py | 442 +++++------------
 deepneuro/models/minimal.py | 0
 deepneuro/models/model.py | 120 ++++-
 deepneuro/models/ops.py | 14 +-
 deepneuro/models/progressive_growing_gan.py | 367 +++++++++++++++
 deepneuro/models/timenet.py | 0
 deepneuro/models/unet.py | 45 +-
 deepneuro/outputs/__init__.py | 0
 deepneuro/outputs/gan.py | 129 +++++
 deepneuro/outputs/inference.py | 0
 deepneuro/outputs/measure.py | 0
 deepneuro/outputs/output.py | 0
 deepneuro/outputs/radiomics.py | 0
 deepneuro/outputs/statistics.py | 0
 deepneuro/outputs/visualization.py | 0
 deepneuro/package_test/__init__.py | 0
 deepneuro/package_test/ci_test.py | 0
 deepneuro/package_test/package_test.py | 0
 deepneuro/pipelines/Segment_GBM/Dockerfile | 0
 deepneuro/pipelines/Segment_GBM/README.md | 0
 deepneuro/pipelines/Segment_GBM/__init__.py | 0
 deepneuro/pipelines/Segment_GBM/cli.py | 0
 .../pipelines/Segment_GBM/edema_train.py | 0
 deepneuro/pipelines/Segment_GBM/predict.py | 0
 .../pipelines/Segment_GBM/resources/icon.png | Bin
 deepneuro/pipelines/Segment_GBM/template.py | 0
 deepneuro/pipelines/Segment_GBM/train.py | 0
 .../pipelines/Skull_Stripping/Dockerfile | 0
 deepneuro/pipelines/Skull_Stripping/README.md | 0
 .../pipelines/Skull_Stripping/__init__.py | 0
 deepneuro/pipelines/Skull_Stripping/cli.py | 0
 .../pipelines/Skull_Stripping/predict.py | 0
 .../Skull_Stripping/resources/icon.png | Bin
 deepneuro/pipelines/__init__.py | 0
 deepneuro/pipelines/shared.py | 0
 deepneuro/pipelines/template.py | 0
 deepneuro/postprocessing/__init__.py | 0
 deepneuro/postprocessing/label.py | 0
 deepneuro/postprocessing/postprocessor.py | 0
 deepneuro/postprocessing/transform.py | 0
 deepneuro/preprocessing/__init__.py | 0
 deepneuro/preprocessing/preprocessor.py | 0
 deepneuro/preprocessing/signal.py | 8 +-
 deepneuro/preprocessing/skullstrip.py | 0
 deepneuro/preprocessing/transform.py | 0
 deepneuro/utilities/__init__.py | 0
 deepneuro/utilities/conversion.py | 10 -
 deepneuro/utilities/util.py | 0
 deepneuro/utilities/visualize.py | 4 +-
 docs/Makefile | 0
 docs/make.bat | 0
 docs/source/.doctrees/deepneuro.doctree | Bin
 .../source/.doctrees/deepneuro.models.doctree | Bin
 .../.doctrees/deepneuro.testing.doctree | Bin
 docs/source/.doctrees/deepneuro.train.doctree | Bin
 docs/source/.doctrees/environment.pickle | Bin
 docs/source/.doctrees/index.doctree | Bin
 docs/source/.doctrees/modules.doctree | Bin
 docs/source/conf.py | 0
 docs/source/deepneuro.models.rst | 0
 docs/source/deepneuro.rst | 0
 docs/source/deepneuro.testing.rst | 0
 docs/source/deepneuro.train.rst | 0
 docs/source/index.rst | 0
 docs/source/modules.rst | 0
 entrypoint.sh | 0
 .../Segment_GBM/DeepNeuro_Glioblastoma.json | 0
 misc/DeepInfer/Segment_GBM/Dockerfile | 0
 misc/DeepInfer/Segment_GBM/entrypoint.py | 0
 misc/DeepInfer/Segment_GBM/entrypoint.sh | 0
 package_resources/logos/DeepNeuro.PNG | Bin
 package_resources/logos/DeepNeuro_alt.PNG | Bin
 setup.cfg | 0
 setup.py | 1 -
 tox.ini | 0
 104 files changed, 1102 insertions(+), 487 deletions(-)
 mode change 100644 => 100755 .gitignore
 mode change 100644 => 100755 .travis.yml
 mode change 100644 => 100755 Dockerfile
 mode change 100644 => 100755 LICENSE
 mode change 100644 => 100755 README.md
 mode change 100644 => 100755 coverage_wrapper.py
 mode change 100644 => 100755 deepneuro/__init__.py
 mode change 100644 => 100755 deepneuro/augmentation/__init__.py
 mode change 100644 => 100755 deepneuro/augmentation/augment.py
 mode change 100644 => 100755 deepneuro/data/__init__.py
 mode change 100644 => 100755 deepneuro/data/data_collection.py
 mode change 100644 => 100755 deepneuro/data/data_group.py
 mode change 100644 => 100755 deepneuro/data/data_load.py
 mode change 100644 => 100755 deepneuro/data/data_utilities.py
 mode change 100644 => 100755 deepneuro/docker/__init__.py
 mode change 100644 => 100755 deepneuro/docker/docker_cli.py
 mode change 100644 => 100755 deepneuro/external/__init__.py
 mode change 100644 => 100755 deepneuro/interface/__init__.py
 mode change 100644 => 100755 deepneuro/interface/master_cli.py
 mode change 100644 => 100755 deepneuro/interface/web_wrapper.py
 mode change 100644 => 100755 deepneuro/load/__init__.py
 mode change 100644 => 100755 deepneuro/load/load.py
 mode change 100644 => 100755 deepneuro/models/__init__.py
 mode change 100644 => 100755 deepneuro/models/blocks.py
 mode change 100644 => 100755 deepneuro/models/callbacks.py
 mode change 100644 => 100755 deepneuro/models/cost_functions.py
 mode change 100644 => 100755 deepneuro/models/cyclegan.py
 mode change 100644 => 100755 deepneuro/models/dn_ops.py
 mode change 100644 => 100755 deepneuro/models/gan.py
 mode change 100644 => 100755 deepneuro/models/interp_gan.py
 mode change 100644 => 100755 deepneuro/models/minimal.py
 mode change 100644 => 100755 deepneuro/models/model.py
 mode change 100644 => 100755 deepneuro/models/ops.py
 create mode 100755 deepneuro/models/progressive_growing_gan.py
 mode change 100644 => 100755 deepneuro/models/timenet.py
 mode change 100644 => 100755 deepneuro/models/unet.py
 mode change 100644 => 100755 deepneuro/outputs/__init__.py
 create mode 100755 deepneuro/outputs/gan.py
 mode change 100644 => 100755 deepneuro/outputs/inference.py
 mode change 100644 => 100755 deepneuro/outputs/measure.py
 mode change 100644 => 100755 deepneuro/outputs/output.py
 mode change 100644 => 100755 deepneuro/outputs/radiomics.py
 mode change 100644 => 100755 deepneuro/outputs/statistics.py
 mode change 100644 => 100755 deepneuro/outputs/visualization.py
 mode change 100644 => 100755 deepneuro/package_test/__init__.py
 mode change 100644 => 100755 deepneuro/package_test/ci_test.py
 mode change 100644 => 100755 deepneuro/package_test/package_test.py
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/Dockerfile
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/README.md
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/__init__.py
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/cli.py
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/edema_train.py
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/predict.py
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/resources/icon.png
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/template.py
 mode change 100644 => 100755 deepneuro/pipelines/Segment_GBM/train.py
 mode change 100644 => 100755 deepneuro/pipelines/Skull_Stripping/Dockerfile
 mode change 100644 => 100755 deepneuro/pipelines/Skull_Stripping/README.md
 mode change 100644 => 100755 deepneuro/pipelines/Skull_Stripping/__init__.py
 mode change 100644 => 100755 deepneuro/pipelines/Skull_Stripping/cli.py
 mode change 100644 => 100755 deepneuro/pipelines/Skull_Stripping/predict.py
 mode change 100644 => 100755 deepneuro/pipelines/Skull_Stripping/resources/icon.png
 mode change 100644 => 100755 deepneuro/pipelines/__init__.py
 mode change 100644 => 100755 deepneuro/pipelines/shared.py
 mode change 100644 => 100755 deepneuro/pipelines/template.py
 mode change 100644 => 100755 deepneuro/postprocessing/__init__.py
 mode change 100644 => 100755 deepneuro/postprocessing/label.py
 mode change 100644 => 100755 deepneuro/postprocessing/postprocessor.py
 mode change 100644 => 100755 deepneuro/postprocessing/transform.py
 mode change 100644 => 100755 deepneuro/preprocessing/__init__.py
 mode change 100644 => 100755 deepneuro/preprocessing/preprocessor.py
 mode change 100644 => 100755 deepneuro/preprocessing/signal.py
 mode change 100644 => 100755 deepneuro/preprocessing/skullstrip.py
 mode change 100644 => 100755 deepneuro/preprocessing/transform.py
 mode change 100644 => 100755 deepneuro/utilities/__init__.py
 mode change 100644 => 100755 deepneuro/utilities/conversion.py
 mode change 100644 => 100755 deepneuro/utilities/util.py
 mode change 100644 => 100755 deepneuro/utilities/visualize.py
 mode change 100644 => 100755 docs/Makefile
 mode change 100644 => 100755 docs/make.bat
 mode change 100644 => 100755 docs/source/.doctrees/deepneuro.doctree
 mode change 100644 => 100755 docs/source/.doctrees/deepneuro.models.doctree
 mode change 100644 => 100755 docs/source/.doctrees/deepneuro.testing.doctree
 mode change 100644 => 100755 docs/source/.doctrees/deepneuro.train.doctree
 mode change 100644 => 100755 docs/source/.doctrees/environment.pickle
 mode change 100644 => 100755 docs/source/.doctrees/index.doctree
 mode change 100644 => 100755 docs/source/.doctrees/modules.doctree
 mode change 100644 => 100755 docs/source/conf.py
 mode change 100644 => 100755 docs/source/deepneuro.models.rst
 mode change 100644 => 100755 docs/source/deepneuro.rst
 mode change 100644 => 100755 docs/source/deepneuro.testing.rst
 mode change 100644 => 100755 docs/source/deepneuro.train.rst
 mode change 100644 => 100755 docs/source/index.rst
 mode change 100644 => 100755 docs/source/modules.rst
 mode change 100644 => 100755 entrypoint.sh
 mode change 100644 => 100755 misc/DeepInfer/Segment_GBM/DeepNeuro_Glioblastoma.json
 mode change 100644 => 100755 misc/DeepInfer/Segment_GBM/Dockerfile
 mode change 100644 => 100755 misc/DeepInfer/Segment_GBM/entrypoint.py
 mode change 100644 => 100755 misc/DeepInfer/Segment_GBM/entrypoint.sh
 mode change 100644 => 100755 package_resources/logos/DeepNeuro.PNG
 mode change 100644 => 100755 package_resources/logos/DeepNeuro_alt.PNG
 mode change 100644 => 100755 setup.cfg
 mode change 100644 => 100755 setup.py
 mode change 100644 => 100755 tox.ini

diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
index 6524089..3a36f20
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ checkpoint
 *.DS_Store
 *.gz
 *.bak
+deepneuro/local
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
diff --git a/.travis.yml b/.travis.yml
old mode 100644
new mode 100755
diff --git a/Dockerfile b/Dockerfile
old mode 100644
new mode 100755
diff --git a/LICENSE b/LICENSE
old mode 100644
new mode 100755
diff --git a/README.md b/README.md
old mode 100644
new mode 100755
diff --git a/coverage_wrapper.py b/coverage_wrapper.py
old mode 100644
new mode 100755
diff --git a/deepneuro/__init__.py b/deepneuro/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/augmentation/__init__.py b/deepneuro/augmentation/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/augmentation/augment.py b/deepneuro/augmentation/augment.py
old mode 100644
new mode 100755
diff --git a/deepneuro/data/__init__.py b/deepneuro/data/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/data/data_collection.py b/deepneuro/data/data_collection.py
old mode 100644
new mode 100755
index 48d7d12..ef69925
--- a/deepneuro/data/data_collection.py
+++ b/deepneuro/data/data_collection.py
@@ -13,17 +13,24 @@
 from deepneuro.utilities.conversion import read_image_files
 from deepneuro.data.data_group import DataGroup
 from deepneuro.data.data_load import parse_modality_directories, parse_subject_directory
+from deepneuro.utilities.util import add_parameter
 
 
 class DataCollection(object):
 
-    def __init__(self, data_directory=None, data_storage=None, modality_dict=None, spreadsheet_dict=None, value_dict=None, case_list=None, verbose=False):
+    def __init__(self, data_directory=None, data_storage=None, data_group_dict=None, spreadsheet_dict=None, value_dict=None, case_list=None, verbose=False, **kwargs):
 
         # Input vars
         self.data_directory = data_directory
         self.data_storage = data_storage
-        self.modality_dict = modality_dict
+        self.data_group_dict = data_group_dict
         self.spreadsheet_dict = spreadsheet_dict
+
+        # File location variables
+        add_parameter(self, kwargs, 'source', 'directories')
+        add_parameter(self, kwargs, 'recursive', False)
+        add_parameter(self, kwargs, 'file_identifying_chars', None)
+
         self.value_dict = value_dict
         self.case_list = case_list
         self.verbose = verbose
@@ -42,6 +49,9 @@ def __init__(self, data_directory=None, data_storage=None, modality_dict=None, s
         # Data group variables
         self.data_groups = {}
 
+        if data_group_dict is not None or data_storage is not None:
+            self.fill_data_groups()
+
     def add_case(self, case_dict, case_name=None):
 
         # Currently only works for filepaths. Maybe add functionality for python data types, hdf5s?
@@ -61,21 +71,21 @@ def add_case(self, case_dict, case_name=None):
             self.preprocessed_cases[case_name] = {}
         self.total_cases = len(self.cases)
 
-    def fill_data_groups(self, source='direcotries', recursive=False, identifying_chars=None):
+    def fill_data_groups(self):
 
         """ Populates data collection variables from either a directory structure
             or an hdf5 file. Repeated usage may have unexpected results.
         """
 
-        if source == 'files':
+        if self.source == 'files':
 
            # Create DataGroups for this DataCollection.
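+            # Illustrative only -- a hypothetical data_group_dict maps group names to
+            # lists of file identifiers, e.g. {'input_data': ['FLAIR', 'T1post'], 'ground_truth': ['edema_label']}.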
-            for modality_group in self.modality_dict:
+            for modality_group in self.data_group_dict:
                 if modality_group not in list(self.data_groups.keys()):
                     self.data_groups[modality_group] = DataGroup(modality_group)
                     self.data_groups[modality_group].source = 'file'
 
-            parse_modality_directories(self, self.modality_dict, case_list=self.case_list, recursive=recursive, identifying_chars=identifying_chars)
+            parse_modality_directories(self, self.data_group_dict, case_list=self.case_list, recursive=self.recursive, file_identifying_chars=self.file_identifying_chars)
 
             self.total_cases = len(self.cases)
@@ -85,20 +95,20 @@ def fill_data_groups(self, source='direcotries', recursive=False, identifying_ch
             else:
                 print('Found', self.total_cases, 'number of cases..')
 
-        elif self.data_directory is not None and source == 'directories':
+        elif self.data_directory is not None and self.source == 'directories':
 
             if self.verbose:
                 print('Gathering image data from...', self.data_directory, '\n')
 
             # Create DataGroups for this DataCollection.
-            for modality_group in self.modality_dict:
+            for modality_group in self.data_group_dict:
                 if modality_group not in list(self.data_groups.keys()):
                     self.data_groups[modality_group] = DataGroup(modality_group)
                     self.data_groups[modality_group].source = 'directory'
 
             # Iterate through directories.. Always looking for a better way to check optional list typing.
             if isinstance(self.data_directory, str):
-                if not os.path.exist(self.data_directory):
+                if not os.path.exists(self.data_directory):
                     print('The data directory you have input does not exist!')
                     exit(1)
                 directory_list = sorted(glob.glob(os.path.join(self.data_directory, "*/")))
@@ -112,7 +122,7 @@ def fill_data_groups(self, source='direcotries', recursive=False, identifying_ch
 
         for subject_dir in directory_list:
 
-            parse_subject_directory(subject_dir, case_list=self.case_list)
+            parse_subject_directory(self, subject_dir, case_list=self.case_list)
 
         self.total_cases = len(self.cases)
diff --git a/deepneuro/data/data_group.py b/deepneuro/data/data_group.py
old mode 100644
new mode 100755
diff --git a/deepneuro/data/data_load.py b/deepneuro/data/data_load.py
old mode 100644
new mode 100755
index 0113198..0eaec43
--- a/deepneuro/data/data_load.py
+++ b/deepneuro/data/data_load.py
@@ -16,7 +16,7 @@ def parse_subject_directory(data_collection, subject_dir, case_list=None):
         return
 
     # Search for modality files, and skip those missing with files modalities.
-    for data_group, modality_labels in data_collection.modality_dict.items():
+    for data_group, modality_labels in data_collection.data_group_dict.items():
         modality_group_files = []
 
         for modality in modality_labels:
@@ -47,7 +47,7 @@
     data_collection.preprocessed_cases[case_name] = defaultdict(list)
 
 
-def parse_modality_directories(data_collection, modality_dict, case_list=None, recursive=True, verbose=True, identifying_chars=None):
+def parse_modality_directories(data_collection, data_group_dict, case_list=None, recursive=True, verbose=True, file_identifying_chars=None):
 
     """ Recursive functionality not yet available
     """
@@ -55,8 +55,13 @@ def parse_modality_directories(data_collection, modality_dict, case_list=None, r
 
     # Cases not yet implemented.
    # Pulling from multiple directories not yet implemented.
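+    # The first data group's first directory acts as the 'lead': the files found
+    # there define the case list that every other data group is matched against.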
-    lead_group = modality_dict[modality_dict.keys()[0]]
-    lead_directory = os.path.abspath(lead_group[0])
+    lead_group = data_group_dict[list(data_group_dict.keys())[0]]
+
+    if type(lead_group[0]) is list:
+        lead_directory = os.path.abspath(lead_group[0][0])
+    else:
+        lead_directory = os.path.abspath(lead_group[0])
+
     lead_files = []
 
     for directory in lead_group:
 
@@ -73,11 +78,11 @@ def parse_modality_directories(data_collection, modality_dict, case_list=None, r
 
         base_filedir = os.path.dirname(lead_filepath).split(lead_directory, 1)[1]
         base_filepath = nifti_splitext(lead_filepath)[0]
 
-        if identifying_chars is not None:
-            base_filepath = os.path.basename(os.path.join(os.path.dirname(base_filepath), os.path.basename(base_filepath)[:identifying_chars]))
+        if file_identifying_chars is not None:
+            base_filepath = os.path.basename(os.path.join(os.path.dirname(base_filepath), os.path.basename(base_filepath)[:file_identifying_chars]))
 
         # Search for modality files, and skip those missing with files modalities.
-        for data_group, modality_labels in data_collection.modality_dict.items():
+        for data_group, modality_labels in data_collection.data_group_dict.items():
 
             modality_group_files = []
 
@@ -104,4 +109,14 @@ def parse_modality_directories(data_collection, modality_dict, case_list=None, r
     if lead_filepath is not None:
         case_name = lead_filepath
         data_collection.cases.append(case_name)
-        data_collection.preprocessed_cases[case_name] = defaultdict(list)
\ No newline at end of file
+        data_collection.preprocessed_cases[case_name] = defaultdict(list)
+
+
+def parse_csv_file():
+
+    return
+
+
+if __name__ == '__main__':
+
+    pass
\ No newline at end of file
diff --git a/deepneuro/data/data_utilities.py b/deepneuro/data/data_utilities.py
old mode 100644
new mode 100755
diff --git a/deepneuro/docker/__init__.py b/deepneuro/docker/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/docker/docker_cli.py b/deepneuro/docker/docker_cli.py
old mode 100644
new mode 100755
diff --git a/deepneuro/external/__init__.py b/deepneuro/external/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/interface/__init__.py b/deepneuro/interface/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/interface/master_cli.py b/deepneuro/interface/master_cli.py
old mode 100644
new mode 100755
diff --git a/deepneuro/interface/web_wrapper.py b/deepneuro/interface/web_wrapper.py
old mode 100644
new mode 100755
diff --git a/deepneuro/load/__init__.py b/deepneuro/load/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/load/load.py b/deepneuro/load/load.py
old mode 100644
new mode 100755
diff --git a/deepneuro/models/__init__.py b/deepneuro/models/__init__.py
old mode 100644
new mode 100755
diff --git a/deepneuro/models/blocks.py b/deepneuro/models/blocks.py
old mode 100644
new mode 100755
index a127ab0..dd74ed4
--- a/deepneuro/models/blocks.py
+++ b/deepneuro/models/blocks.py
@@ -1,40 +1,142 @@
 import tensorflow as tf
 
-from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling
-from deepneuro.models.ops import leaky_relu
+from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling
+from deepneuro.models.ops import leaky_relu, minibatch_state_concat
+from keras.layers import concatenate
 
 
-def generator(model, latent_var, depth=1, initial_size=4, reuse=False, name=None):
+def generator(model, latent_var, depth=1, initial_size=4, reuse=False, transition=False, alpha_transition=0, name=None):
 
-    convs = []
+    """
+    """
 
     with tf.variable_scope(name) as scope:
 
+        convs = []
+
         if reuse:
             scope.reuse_variables()
 
-        convs += [tf.reshape(latent_var, [model.training_batch_size] + [1] * model.dim + [model.latent_size])]
+        convs += [tf.reshape(latent_var, [tf.shape(latent_var)[0]] + [1] * model.dim + [model.latent_size])]
 
-        # TODO: refactor the padding on this step.
-        convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(4,) * model.dim, stride_size=(1,) * model.dim, padding='Other', name='generator_conv_1_latent', dim=model.dim)), model.dim)
+        # TODO: refactor the padding on this step. Or replace with a dense layer?
+        convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(4,) * model.dim, stride_size=(1,) * model.dim, padding='Other', name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim)), model.dim)
 
-        convs += [tf.reshape(convs[-1], [model.training_batch_size] + [initial_size] * model.dim + [model.get_filter_num(0)])]
+        convs += [tf.reshape(convs[-1], [tf.shape(latent_var)[0]] + [initial_size] * model.dim + [model.get_filter_num(0)])]
 
-        convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), stride_size=(1,) * model.dim, name='generator_conv_2_latent', dim=model.dim)), dim=model.dim)
+        convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)
 
         for i in range(depth):
 
+            if i == depth - 1 and transition:
+                #To RGB
+                transition_conv = DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)
+                transition_conv = DnUpsampling(transition_conv, (2,) * model.dim, dim=model.dim)
+
             convs += [DnUpsampling(convs[-1], (2,) * model.dim, dim=model.dim)]
-            convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), stride_size=(1,) * model.dim, name='generator_conv_1_depth_{}_{}'.format(i, convs[-1].shape[1]), dim=model.dim)), dim=model.dim)
+            convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)
 
-            convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), stride_size=(1,) * model.dim, name='generator_conv_2_depth_{}_{}'.format(i, convs[-1].shape[1]), dim=model.dim)), dim=model.dim)]
+            convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)]
 
         #To RGB
-        convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_final_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]
+        convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]
+
+        if transition:
+            convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1]
 
         return convs[-1]
 
 
+def discriminator(model, input_image, reuse=False, name=None, depth=1, transition=False, **kwargs):
+
+    """
+    """
+
+    with tf.variable_scope(name) as scope:
+
+        if reuse:
+            scope.reuse_variables()
+
+        if transition:
+            transition_conv = DnAveragePooling(input_image, (2,) * model.dim, dim=model.dim)
+            transition_conv = leaky_relu(DnConv(transition_conv, output_dim=model.get_filter_num(depth - 1), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(transition_conv.shape[1]), dim=model.dim))
+
+        convs = []
+
+        # fromRGB
+        convs += [leaky_relu(DnConv(input_image, output_dim=model.get_filter_num(depth), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(input_image.shape[1]), dim=model.dim))]
+
+        for i in range(depth):
+
+            convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))]
+
+            convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - 1 - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim))]
+            convs[-1] = DnAveragePooling(convs[-1], dim=model.dim)
+
+        convs += [minibatch_state_concat(convs[-1])]
+        convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(3,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))
+
+        #for D -- what's going on with the channel number here?
+        output = tf.reshape(convs[-1], [tf.shape(convs[-1])[0], 4 * 4 * model.get_filter_num(0)])
+
+        # Currently erroring
+        # discriminate_output = dense(output, output_size=1, name='discriminator_n_fully')
+
+        discriminate_output = tf.layers.dense(output, model.get_filter_num(0), name='discriminator_n_1_fully')
+        discriminate_output = tf.layers.dense(discriminate_output, 1, name='discriminator_n_2_fully')
+
+        return tf.nn.sigmoid(discriminate_output), discriminate_output
+
+
+def unet(model, input_tensor, backend='tensorflow'):
+
+    left_outputs = []
+
+    for level in range(model.depth):
+
+        filter_num = int(model.max_filter / (2 ** (model.depth - level)) / model.downsize_filters_factor)
+
+        if level == 0:
+            left_outputs += [DnConv(input_tensor, filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)]
+            left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
+        else:
+            left_outputs += [DnMaxPooling(left_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
+            left_outputs[level] = DnConv(left_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)
+            left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
+
+        if model.dropout is not None and model.dropout != 0:
+            left_outputs[level] = DnDropout(left_outputs[level], model.dropout)
+
+        if model.batch_norm:
+            left_outputs[level] = DnBatchNormalization(left_outputs[level])
+
+    right_outputs = [left_outputs[model.depth - 1]]
+
+    for level in range(model.depth):
+
+        filter_num = int(model.max_filter / (2 ** (level)) / model.downsize_filters_factor)
+
+        if level > 0:
+            right_outputs += [DnUpsampling(right_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
+            right_outputs[level] = concatenate([right_outputs[level], left_outputs[model.depth - level - 1]], axis=model.dim + 1)
+            right_outputs[level] = DnConv(right_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_1'.format(level), backend=backend)
+            right_outputs[level] = DnConv(right_outputs[level], int(filter_num / 2), model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_2'.format(level), backend=backend)
+        else:
+            continue
+
+        if model.dropout is not None and model.dropout != 0:
+            right_outputs[level] = DnDropout(right_outputs[level], model.dropout)
+
+        if model.batch_norm:
+            right_outputs[level] = DnBatchNormalization(right_outputs[level])
+
+    output_layer = DnConv(right_outputs[level], 1, (1, ) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='end_conv', backend=backend)
+
+    # TODO: Brainstorm better way to specify outputs
+    if model.input_tensor is not None:
+        return output_layer
+
+    return model.model
+
 # def progressive_generator(model, latent_var, progressive_depth=1, name=None, transition=False, alpha_transition=0.0):
 
 #     with tf.variable_scope(name) as scope:
diff --git a/deepneuro/models/callbacks.py b/deepneuro/models/callbacks.py
old mode 100644
new mode 100755
index 673660f..1794cbc
--- a/deepneuro/models/callbacks.py
+++ b/deepneuro/models/callbacks.py
@@ -1,6 +1,7 @@
 import keras
 import os
 import imageio
+import numpy as np
 
 from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger
 
@@ -16,65 +17,123 @@ def __init__(self, **kwargs):
         add_parameter(self, kwargs, 'epoch_prediction_data_collection', self.data_collection)
         add_parameter(self, kwargs, 'epoch_prediction_object', None)
         add_parameter(self, kwargs, 'deepneuro_model', None)
-        add_parameter(self, kwargs, 'output_folder', None)
+        add_parameter(self, kwargs, 'epoch_prediction_dir', None)
         add_parameter(self, kwargs, 'output_gif', None)
         add_parameter(self, kwargs, 'batch_size', 1)
         add_parameter(self, kwargs, 'epoch_prediction_batch_size', self.batch_size)
 
-        if not os.path.exists(self.output_folder):
-            os.mkdir(self.output_folder)
-
-        if self.epoch_prediction_object is not None:
-            self.epoch_prediction_object.model = self.deepneuro_model
-        else:
-            self.epoch_prediction_object = self.deepneuro_model
+        if not os.path.exists(self.epoch_prediction_dir):
+            os.mkdir(self.epoch_prediction_dir)
 
         self.predictions = []
 
        # There's a more concise way to do this..
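+        # Pull a single fixed batch up front (just_one_batch=True) so every epoch's
+        # prediction is rendered from the same examples and the saved GIF animates consistently.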
         self.predict_data = next(self.epoch_prediction_data_collection.data_generator(perpetual=True, verbose=False, just_one_batch=True, batch_size=self.epoch_prediction_batch_size))
 
-    def on_train_begin(self, logs={}):
-        return
-
     def on_train_end(self, logs={}):
-        imageio.mimsave(os.path.join(self.output_folder, 'epoch_prediction.gif'), self.predictions)
+        imageio.mimsave(os.path.join(self.epoch_prediction_dir, 'epoch_prediction.gif'), self.predictions)
         return
 
-    def on_epoch_begin(self, epoch, logs={}):
+    def on_epoch_end(self, epoch, logs={}):
+
+        if self.epoch_prediction_object is None:
+            prediction = self.deepneuro_model.predict(self.predict_data[self.deepneuro_model.input_data])
+        else:
+            prediction = self.epoch_prediction_object.process_case(self.predict_data[self.deepneuro_model.input_data], model=self.deepneuro_model)
+
+        output_filepaths, output_images = check_data({'prediction': prediction}, output_filepath=os.path.join(self.epoch_prediction_dir, 'epoch_{}.png'.format(epoch)), show_output=False, batch_size=self.epoch_prediction_batch_size)
+
+        self.predictions += [output_images['prediction'].astype('uint8')]
+
+        return
+
+
+class GANPredict(keras.callbacks.Callback):
+
+    def __init__(self, **kwargs):
+
+        add_parameter(self, kwargs, 'data_collection', None)
+        add_parameter(self, kwargs, 'epoch_prediction_data_collection', self.data_collection)
+        add_parameter(self, kwargs, 'epoch_prediction_object', None)
+        add_parameter(self, kwargs, 'deepneuro_model', None)
+        add_parameter(self, kwargs, 'epoch_prediction_dir', None)
+        add_parameter(self, kwargs, 'output_gif', None)
+        add_parameter(self, kwargs, 'batch_size', 1)
+        add_parameter(self, kwargs, 'epoch_prediction_batch_size', self.batch_size)
+        add_parameter(self, kwargs, 'latent_size', 128)
+        add_parameter(self, kwargs, 'sample_latent', np.random.normal(size=[self.epoch_prediction_batch_size, self.latent_size]))
+
+        if not os.path.exists(self.epoch_prediction_dir):
+            os.mkdir(self.epoch_prediction_dir)
+
+        self.predictions = []
+
+    def on_train_end(self, logs={}):
+        imageio.mimsave(os.path.join(self.epoch_prediction_dir, 'epoch_prediction.gif'), self.predictions)
         return
 
     def on_epoch_end(self, epoch, logs={}):
 
-        prediction = self.epoch_prediction_object.process_case(self.predict_data[self.deepneuro_model.input_data], model=self.deepneuro_model)
+        if self.epoch_prediction_object is None:
+            prediction = self.deepneuro_model.predict(sample_latent=self.sample_latent)
+        else:
+            prediction = self.epoch_prediction_object.process_case(self.predict_data[self.deepneuro_model.input_data], model=self.deepneuro_model)
 
-        output_filepaths, output_images = check_data({'prediction': prediction}, output_filepath=os.path.join(self.output_folder, 'epoch_{}.png'.format(epoch)), show_output=False, batch_size=self.epoch_prediction_batch_size)
+        output_filepaths, output_images = check_data({'prediction': prediction}, output_filepath=os.path.join(self.epoch_prediction_dir, 'epoch_{}.png'.format(epoch)), show_output=False, batch_size=self.epoch_prediction_batch_size)
 
         self.predictions += [output_images['prediction'].astype('uint8')]
 
         return
-
-    def on_batch_begin(self, batch, logs={}):
+
+
+class SaveModel(keras.callbacks.Callback):
+
+    def __init__(self, **kwargs):
+
+        # Add save best only.
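+        # A minimal TensorFlow-backend stand-in for Keras's ModelCheckpoint: it saves
+        # via deepneuro_model.save_model() at train start, after each epoch, and at train end.
+        # Hypothetical usage: get_callbacks(callbacks=['save_model'], model=my_model, backend='tensorflow').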
+
+        add_parameter(self, kwargs, 'deepneuro_model', None)
+
+    def on_train_begin(self, logs={}):
+        self.deepneuro_model.save_model(self.deepneuro_model.output_model_filepath)
+        return
+
+    def on_train_end(self, logs={}):
+        self.deepneuro_model.save_model(self.deepneuro_model.output_model_filepath)
         return
 
-    def on_batch_end(self, batch, logs={}):
+    def on_epoch_end(self, epoch, logs={}):
+        self.deepneuro_model.save_model(self.deepneuro_model.output_model_filepath)
         return
 
 
-def get_callbacks(model_file, callbacks=['save_model', 'early_stopping', 'log'], monitor='val_loss', model=None, data_collection=None, save_best_only=False, epoch_prediction_dir=None, batch_size=1, epoch_prediction_object=None, epoch_prediction_data_collection=None, epoch_prediction_batch_size=None):
+def get_callbacks(callbacks=['save_model', 'early_stopping', 'log'], output_model_filepath=None, monitor='val_loss', model=None, data_collection=None, save_best_only=False, epoch_prediction_dir=None, batch_size=1, epoch_prediction_object=None, epoch_prediction_data_collection=None, epoch_prediction_batch_size=None, latent_size=128, backend='tensorflow', **kwargs):
 
-    """ Temporary function; callbacks will be dealt with in more detail in the future.
-        Very disorganized currently. Do with dictionary.
+    """ Very disorganized currently. Replace with dictionary? Also address never-ending parameters
     """
 
+    print(kwargs)
+    print(model)
+
     return_callbacks = []
+
     for callback in callbacks:
+
         if callback == 'save_model':
-            return_callbacks += [ModelCheckpoint(model_file, monitor=monitor, save_best_only=save_best_only)]
+            if backend == 'keras':
+                return_callbacks += [ModelCheckpoint(output_model_filepath, monitor=monitor, save_best_only=save_best_only)]
+            else:
+                return_callbacks += [SaveModel(deepneuro_model=model)]
+
         if callback == 'early_stopping':
             return_callbacks += [EarlyStopping(monitor=monitor, patience=10)]
+
         if callback == 'log':
-            return_callbacks += [CSVLogger(model_file.replace('.h5', '.log'))]
+            return_callbacks += [CSVLogger(output_model_filepath.replace('.h5', '.log'))]
+
         if callback == 'predict_epoch':
-            return_callbacks += [EpochPredict(deepneuro_model=model, data_collection=data_collection, output_folder=epoch_prediction_dir, batch_size=batch_size, epoch_prediction_object=epoch_prediction_object, epoch_prediction_data_collection=epoch_prediction_data_collection, epoch_prediction_batch_size=epoch_prediction_batch_size)]
+            return_callbacks += [EpochPredict(deepneuro_model=model, data_collection=data_collection, epoch_prediction_dir=epoch_prediction_dir, batch_size=batch_size, epoch_prediction_object=epoch_prediction_object, epoch_prediction_data_collection=epoch_prediction_data_collection, epoch_prediction_batch_size=epoch_prediction_batch_size)]
+
+        if callback == 'predict_gan':
+            return_callbacks += [GANPredict(deepneuro_model=model, data_collection=data_collection, epoch_prediction_dir=epoch_prediction_dir, batch_size=batch_size, epoch_prediction_object=epoch_prediction_object, epoch_prediction_data_collection=epoch_prediction_data_collection, epoch_prediction_batch_size=epoch_prediction_batch_size, latent_size=latent_size)]
+
     return return_callbacks
\ No newline at end of file
diff --git a/deepneuro/models/cost_functions.py b/deepneuro/models/cost_functions.py
old mode 100644
new mode 100755
index 627cdf1..ae4a1a3
--- a/deepneuro/models/cost_functions.py
+++ b/deepneuro/models/cost_functions.py
@@ -1,3 +1,5 @@
+import tensorflow as tf
+
 from keras import backend as K
 
 
@@ -19,3 +21,28 @@ def dice_coef(y_true, y_pred, smooth=1.):
     y_pred_f = K.flatten(y_pred)
     intersection = K.sum(y_true_f * y_pred_f)
     return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
+
+
+def wasserstein_loss(model, discriminator, discriminator_fake_logits, discriminator_real_logits, synthetic_images, reference_images, gradient_penalty_weight=10, name='discriminator', dim=2, depth=None, transition=False, alpha_transition=0):
+
+    if depth is None:
+        depth = model.depth
+
+    D_loss = tf.reduce_mean(discriminator_fake_logits) - tf.reduce_mean(discriminator_real_logits)
+    G_loss = -tf.reduce_mean(discriminator_fake_logits)
+
+    differences = synthetic_images - reference_images
+    alpha = tf.random_uniform(shape=[tf.shape(differences)[0]] + [1] * (dim + 1), minval=0., maxval=1.)
+    interpolates = reference_images + (alpha * differences)
+    _, interpolates_logits = discriminator(model, interpolates, reuse=True, depth=depth, name=name, transition=transition, alpha_transition=alpha_transition)
+    gradients = tf.gradients(interpolates_logits, [interpolates])[0]
+
+    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=list(range(1, 2 + model.dim))))
+    gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
+    tf.summary.scalar("gp_loss", gradient_penalty)
+
+    D_origin_loss = D_loss
+    D_loss += gradient_penalty_weight * gradient_penalty
+    D_loss += 0.001 * tf.reduce_mean(tf.square(discriminator_real_logits - 0.0))
+
+    return [D_loss], [G_loss], [D_origin_loss]
\ No newline at end of file
diff --git a/deepneuro/models/cyclegan.py b/deepneuro/models/cyclegan.py
old mode 100644
new mode 100755
diff --git a/deepneuro/models/dn_ops.py b/deepneuro/models/dn_ops.py
old mode 100644
new mode 100755
index b6de5a4..1ddc86a
--- a/deepneuro/models/dn_ops.py
+++ b/deepneuro/models/dn_ops.py
@@ -1,7 +1,7 @@
 import tensorflow as tf
 
-from keras.layers import UpSampling2D, UpSampling3D, Conv3D, MaxPooling3D, Conv2D, MaxPooling2D, Activation
+from keras.layers import UpSampling2D, UpSampling3D, Conv3D, MaxPooling3D, Conv2D, MaxPooling2D, Activation, BatchNormalization, Dropout
 
 from deepneuro.models.ops import pixel_norm_2d, pixel_norm_3d, conv2d, conv3d, deconv2d, deconv3d, upscale2d
 
@@ -13,6 +13,32 @@ def __init__(self):
         return
 
 
+def DnDropout(input_, ratio=.5, backend='tensorflow'):
+
+    if backend == 'keras':
+
+        return Dropout(ratio)(input_)
+
+    if backend == 'tensorflow':
+
+        return tf.nn.dropout(input_, ratio)
+
+    return
+
+
+def DnBatchNormalization(input_, backend='tensorflow'):
+
+    if backend == 'keras' or True:
+
+        return BatchNormalization()(input_)
+
+    if backend == 'tensorflow':
+
+        return tf.contrib.layers.batch_norm(input_)
+
+    return
+
+
 def DnMaxPooling(input_, pool_size=(2, 2), dim=2, padding='SAME', backend='tensorflow'):
 
     op = None
@@ -32,7 +58,7 @@ def DnMaxPooling(input_, pool_size=(2, 2), dim=2, padding='SAME', backend='tenso
         op = tf.nn.max_pool3d(input_, ksize=[1] + list(pool_size) + [1], strides=[1] + list(pool_size) + [1], padding='SAME')
 
     if op is None:
-        print 'Option Not Implemented'
+        raise NotImplementedError
 
     return op
diff --git a/deepneuro/models/gan.py b/deepneuro/models/gan.py
old mode 100644
new mode 100755
index 49c4e02..399ed89
--- a/deepneuro/models/gan.py
+++ b/deepneuro/models/gan.py
@@ -1,16 +1,15 @@
-""" unet.py includes different implementations of the popular U-Net model.
-    See more at https://arxiv.org/abs/1505.04597
+""" This is a vanilla implementation of a generative adversarial network. It includes
+    the Wasserstein Gradient-Penalty by default.
 """
 
 import numpy as np
 import tensorflow as tf
 import os
 
-from tqdm import tqdm
-
 from deepneuro.models.model import TensorFlowModel
 from deepneuro.utilities.util import add_parameter
-from deepneuro.models.blocks import generator
+from deepneuro.models.blocks import generator, discriminator
+from deepneuro.models.cost_functions import wasserstein_loss
 
 
 class GAN(TensorFlowModel):
@@ -33,6 +32,7 @@ def load(self, kwargs):
         # Generator Parameters
         add_parameter(self, kwargs, 'latent_size', 128)
         add_parameter(self, kwargs, 'depth', 4)
+        add_parameter(self, kwargs, 'generator_updates', 1)
 
         # Model Parameters
         add_parameter(self, kwargs, 'filter_cap', 128)
@@ -43,13 +43,10 @@ def load(self, kwargs):
         # Discriminator Parameters
         add_parameter(self, kwargs, 'discriminator_depth', 4)
         add_parameter(self, kwargs, 'discriminator_max_filter', 128)
+        add_parameter(self, kwargs, 'discriminator_updates', 1)
 
-        # Training Parameters
-        add_parameter(self, kwargs, 'train_with_GAN', True)
-        add_parameter(self, kwargs, 'train_separately', False)
-
-        add_parameter(self, kwargs, 'consistency_weight', 10)  # AKA lambda
-        add_parameter(self, kwargs, 'gradient_penalty_weight', 10)
+        # Loss Parameters
+        add_parameter(self, kwargs, 'gradient_penalty_weight', 10)  # For WP
 
         self.sess = None
         self.init = None
@@ -62,40 +59,20 @@ def get_filter_num(self, depth):
         else:
             return min(self.max_filter / (2 ** (depth)), self.filter_cap)
 
-    def train(self, training_data_collection, **kwargs):
-
-        # Outputs
-        add_parameter(self, kwargs, 'output_model_filepath')
-
-        # Training Parameters
-        add_parameter(self, kwargs, 'num_epochs', 100)
-        add_parameter(self, kwargs, 'training_steps_per_epoch', 10)
-        add_parameter(self, kwargs, 'training_batch_size', 16)
-
-        self.build_tensorflow_model(self.training_batch_size)
-        self.create_data_generators(training_data_collection, training_batch_size=self.training_batch_size, training_steps_per_epoch=self.training_steps_per_epoch)
-        self.init_sess()
-
-        step = 0
-
-        for epoch in range(self.num_epochs):
-
-            step_counter = tqdm(list(range(self.training_steps_per_epoch)), total=self.training_steps_per_epoch, unit="step", desc="Generator Loss:", miniters=1)
-
-            for step in step_counter:
+    def process_step(self, step_counter):
 
-                # Replace with GPU function?
-                sample_latent = np.random.normal(size=[self.training_batch_size, self.latent_size])
-                reference_data = next(self.training_data_generator)[self.input_data]
+        # Replace with GPU function?
+        sample_latent = np.random.normal(size=[self.training_batch_size, self.latent_size])
+        reference_data = next(self.training_data_generator)[self.input_data]
 
-                # Optimize!
+        # Optimize!
 
-                _, g_loss = self.sess.run([self.basic_optimizer, self.basic_loss], feed_dict={self.reference_images: reference_data, self.latent: sample_latent})
+        _, g_loss = self.sess.run([self.opti_G, self.G_loss], feed_dict={self.reference_images: reference_data, self.latent: sample_latent})
+        _, d_loss, d_origin_loss = self.sess.run([self.opti_D, self.D_loss, self.D_origin_loss], feed_dict={self.reference_images: reference_data, self.latent: sample_latent})
 
-                self.log([g_loss], headers=['Basic Loss'], verbose=self.hyperverbose)
-                step_counter.set_description("Generator Loss: {0:.5f}".format(g_loss))
-
-        self.save_model(self.output_model_filepath)
+        # This is a little weird -- it only records loss on discriminator steps.
+        self.log([g_loss, d_loss, d_origin_loss], headers=['Generator Loss', 'WP Discriminator Loss', 'Discriminator Loss'], verbose=self.hyperverbose)
+        step_counter.set_description("Generator Loss: {0:.5f}".format(g_loss) + " Discriminator Loss: {0:.5f}".format(d_loss))
 
         return
 
@@ -108,22 +85,36 @@ def build_tensorflow_model(self, batch_size):
 
         self.model_input_shape = tuple([batch_size] + list(self.input_shape))
         self.model_output_shape = tuple([batch_size] + list(self.input_shape))
 
-        self.latent = tf.placeholder(tf.float32, [self.training_batch_size, self.latent_size])
-        self.reference_images = tf.placeholder(tf.float32, list(self.model_input_shape))
-
+        self.latent = tf.placeholder(tf.float32, [None, self.latent_size])
+        self.reference_images = tf.placeholder(tf.float32, [None] + list(self.model_input_shape)[1:])
 
         self.synthetic_images = generator(self, self.latent, depth=self.depth, name='generator')
 
-        self.basic_loss = tf.reduce_mean(tf.square(self.reference_images - self.synthetic_images))
+        self.discriminator_real, self.discriminator_real_logits = discriminator(self, self.reference_images, depth=self.depth + 1, name='discriminator')
+        self.discriminator_fake, self.discriminator_fake_logits = discriminator(self, self.synthetic_images, depth=self.depth + 1, name='discriminator', reuse=True)
 
-        # Hmmm.. better way to do this? Or at least move to function.
         t_vars = tf.trainable_variables()
         self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
         self.g_vars = [var for var in t_vars if 'generator' in var.name]
-
-        # Create save/load operation
         self.saver = tf.train.Saver(self.g_vars + self.d_vars)
 
-        self.basic_optimizer = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.basic_loss, var_list=self.g_vars)
+        self.calculate_losses()
+
+        if self.hyperverbose:
+            self.model_summary()
+
+    def calculate_losses(self):
+
+        self.D_loss, self.G_loss, self.D_origin_loss = wasserstein_loss(self, discriminator, self.discriminator_fake_logits, self.discriminator_real_logits, self.synthetic_images, self.reference_images, gradient_penalty_weight=self.gradient_penalty_weight, name='discriminator', dim=self.dim)
+
+        # A little sketchy. Attempting to make variable loss functions extensible later.
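+        # wasserstein_loss returns each term wrapped in a single-element list so extra
+        # loss components can be appended later; unpack the scalars here.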
+        self.D_loss = self.D_loss[0]
+        self.G_loss = self.G_loss[0]
+        self.D_origin_loss = self.D_origin_loss[0]
+
+        # Create Optimizers
+        self.opti_D = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.D_loss, var_list=self.d_vars)
+        self.opti_G = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.G_loss, var_list=self.g_vars)
 
     def load_model(self, input_model_path, batch_size=1):
 
@@ -131,11 +122,11 @@
         self.init_sess()
         self.saver.restore(self.sess, os.path.join(input_model_path, 'model.ckpt'))
 
-    def predict(self, input_latent=None):
+    def predict(self, sample_latent=None, batch_size=1):
 
         self.init_sess()
 
-        if input_latent is None:
-            input_latent = np.random.normal(size=[self.training_batch_size, self.latent_size])
+        if sample_latent is None:
+            sample_latent = np.random.normal(size=[batch_size, self.latent_size])
 
-        return self.sess.run(self.fake_images, feed_dict={self.latent: input_latent})
\ No newline at end of file
+        return self.sess.run(self.synthetic_images, feed_dict={self.latent: sample_latent})
\ No newline at end of file
diff --git a/deepneuro/models/interp_gan.py b/deepneuro/models/interp_gan.py
old mode 100644
new mode 100755
index e8dba53..0b63690
--- a/deepneuro/models/interp_gan.py
+++ b/deepneuro/models/interp_gan.py
@@ -2,16 +2,18 @@
     See more at https://arxiv.org/abs/1505.04597
 """
 
+import numpy as np
 import tensorflow as tf
 import os
+import keras
 
-from deepneuro.models.model import TensorFlowModel
-from deepneuro.models.cost_functions import wasserstein_loss
-from deepneuro.models.dn_ops import DnConv, DnAveragePooling, pixel_norm, dense, minibatch_state_concat, leaky_relu, upscale, downscale
+from deepneuro.models.model import TensorFlowModel, load_old_model
+from deepneuro.models.unet import UNet
 from deepneuro.utilities.util import add_parameter
+from deepneuro.models.blocks import generator, discriminator
 
 
-class ProgressiveGAN(TensorFlowModel):
+class InterpGAN(TensorFlowModel):
 
     def load(self, kwargs):
 
@@ -26,211 +28,93 @@
         """
 
-        super(ProgressiveGAN, self).load(kwargs)
-
-        add_parameter(self, kwargs, 'dim', 2)
+        super(InterpGAN, self).load(kwargs)
 
         # Generator Parameters
-        add_parameter(self, kwargs, 'generator_depth', 4)
-        add_parameter(self, kwargs, 'generator_max_filter', 4096)
-
-        # Discriminator Parameters
-        add_parameter(self, kwargs, 'discriminator_depth', 4)
-        add_parameter(self, kwargs, 'discriminator_max_filter', 4096)
-
-        # GAN Parameters
         add_parameter(self, kwargs, 'latent_size', 128)
+        add_parameter(self, kwargs, 'depth', 4)
 
-        # PGGAN Parameters
-        add_parameter(self, kwargs, 'progressive_depth', 9)
-        add_parameter(self, kwargs, 'transition', True)
+        # Model Parameters
         add_parameter(self, kwargs, 'filter_cap', 128)
         add_parameter(self, kwargs, 'filter_floor', 16)
 
-        # Wasserstein Parameters
-        add_parameter(self, kwargs, 'gradient_penalty_weight', 10)
+        add_parameter(self, kwargs, 'generator_max_filter', 128)
 
-        # Conditional Parameters
-        add_parameter(self, kwargs, 'classify', None)
+        # Discriminator Parameters
+        add_parameter(self, kwargs, 'discriminator_depth', 4)
+        add_parameter(self, kwargs, 'discriminator_max_filter', 128)
 
         # Training Parameters
-        add_parameter(self, kwargs, 'training_batch_size', 16)
+        add_parameter(self, kwargs, 'train_with_GAN', True)
+        add_parameter(self, kwargs, 'train_separately', False)
+
+        add_parameter(self, kwargs, 'consistency_weight', 10)  # AKA lambda
+        add_parameter(self, kwargs, 'gradient_penalty_weight', 10)
 
-        # Derived Parameters
-        self.channels = self.input_shape[-1]
+        add_parameter(self, kwargs, 'input_tensor_name', 'input_1')
+        add_parameter(self, kwargs, 'activated_tensor_name', 'downsampling_conv_1_1/BiasAdd')
+        add_parameter(self, kwargs, 'filter_num', 0)
 
         self.sess = None
         self.init = None
 
-    def get_filter_num(self, depth, max_filter):
+    def get_filter_num(self, depth):
 
         # This will need to be a bit more complicated; see PGGAN paper.
-        if max_filter / (2 ** (depth)) <= self.filter_floor:
+        if self.max_filter / (2 ** (depth)) <= self.filter_floor:
             return self.filter_floor
         else:
-            return min(max_filter / (2 ** (depth)), self.filter_cap)
+            return min(self.max_filter / (2 ** (depth)), self.filter_cap)
 
-    def train(self, training_data_collection, validation_data_collection=None, **kwargs):
+    def process_step(self, step_counter):
 
-        # Outputs
-        add_parameter(self, kwargs, 'output_model_filepath', 'pgan_model')
+        # Replace with GPU function?
+        sample_latent = np.random.normal(size=[self.training_batch_size, self.latent_size])
+        reference_data = next(self.training_data_generator)[self.input_data]
 
-        # Training Parameters
-        add_parameter(self, kwargs, 'num_epochs', 100)
-        add_parameter(self, kwargs, 'training_steps_per_epoch', 10)
-        add_parameter(self, kwargs, 'training_batch_size', 16)
-
-        self.build_tensorflow_model(self.training_batch_size)
-        self.create_data_generators(training_data_collection, validation_data_collection, training_batch_size=self.training_batch_size, training_steps_per_epoch=self.training_steps_per_epoch)
-        self.init_sess()
+        # Optimize!
 
-        # step = 0
+        # _, b_loss, a_loss = self.sess.run([self.combined_optimizer, self.basic_loss, self.activation_loss], feed_dict={self.latent: sample_latent, self.reference_images: reference_data})
 
-        # for epoch in range(self.num_epochs):
+        _, g_loss, a_loss, total_g_loss = self.sess.run([self.opti_G, self.G_activation_loss, self.activation_loss, self.G_loss], feed_dict={self.latent: sample_latent, self.reference_images: reference_data})
 
-        #     for step in range(self.training_steps_per_epoch):
-
-        #         print epoch, step
-        #         input_modality_1, input_modality_2 = next(self.training_data_generator)
+        _, d_loss = self.sess.run([self.opti_D, self.D_loss], feed_dict={self.latent: sample_latent, self.reference_images: reference_data})
 
-        #         # Optimize!
-
-        #         if self.train_with_GAN:
-
-        #             _, _, discrim_1_loss, discrim_2_loss, d_loss, generator_1_loss, generator_2_loss, cons_1_loss, cons_2_loss, g_loss = self.sess.run([self.generator_optimizer, self.discriminator_optimizer, self.D_loss_wgan_2, self.D_loss_wgan_1, self.total_D_loss, self.G_loss_1_2, self.G_loss_2_1, self.generator_1_consistency_loss, self.generator_2_consistency_loss, self.total_G_loss], feed_dict={self.generator_input_images_1: input_modality_1, self.generator_input_images_2: input_modality_2})
-
-        #             self.log([discrim_1_loss, discrim_2_loss, d_loss, generator_1_loss, generator_2_loss, cons_1_loss, cons_2_loss, g_loss], headers=['Dis 1 Loss', 'Dis 2 Loss', 'Total D Loss', 'Gen 1 Loss', 'Gen 2 Loss', 'Consistency 12 Loss', 'Consistency 21 Loss', 'Total G Loss'], verbose=self.hyperverbose)
-
-        #         else:
-
-        #             _, cons_1_loss, cons_2_loss, g_loss = self.sess.run([self.consistency_optimizer, self.generator_2_consistency_loss, self.generator_1_consistency_loss, self.total_consistency_loss], feed_dict={self.generator_input_images_1: input_modality_1, self.generator_input_images_2: input_modality_2})
-
-        #             self.log([cons_1_loss, cons_2_loss, g_loss], headers=['Consistency Loss 12', 'Consistency Loss 21', 'Total G Loss'], verbose=self.hyperverbose)
-
-        #     self.save_model(self.output_model_filepath)
-
-        # Create fade-in (transition) parameters.
-        step_pl = tf.placeholder(tf.float32, shape=None)
-        alpha_transition_assign = self.alpha_transition.assign(step_pl / self.max_iterations)
-
-        # Create Optimizers
-        opti_D = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.0, beta2=0.99).minimize(
-            self.D_loss, var_list=self.d_vars)
-        opti_G = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.0, beta2=0.99).minimize(
-            self.G_loss, var_list=self.g_vars)
-
-        init = tf.global_variables_initializer()
-        config = tf.ConfigProto()
-        config.gpu_options.allow_growth = True
-
-        with tf.Session(config=config) as sess:
-
-            # Personally have no idea what is being logged in this thing --andrew
-            sess.run(init)
-            summary_op = tf.summary.merge_all()
-            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)
-
-            # No idea what the saving systems is like. TODO investigate --andrew.
-            # I don't think you need to save and reload models if you create a crazy
-            # system where you're only optimizing certain outputs/cost functions at
-            # any one time.
-            if self.progressive_depth != 1:
-
-                if self.transition:
-                    self.r_saver.restore(sess, self.input_model_path)
-                    self.rgb_saver.restore(sess, self.input_model_path)
-                else:
-                    self.saver.restore(sess, self.input_model_path)
-
-            step = 0
-            batch_num = 0
-            while step <= self.max_iterations:
-
-                n_critic = 1
-
-                # Update Discriminator
-                for i in range(n_critic):
-
-                    sample_latent = np.random.normal(size=[self.batch_size, self.latent_size])
-
-                    if self.classes:
-                        realbatch_array, _ = self.training_data.get_next_batch_classed(batch_num=batch_num, zoom_level=self.zoom_level, batch_size=self.batch_size)
-                    else:
-                        realbatch_array, _ = self.training_data.get_next_batch(batch_num=batch_num, zoom_level=self.zoom_level, batch_size=self.batch_size)
-
-                    realbatch_array = realbatch_array[...,0:4]
-
-                    if self.transition:
-
-                        realbatch_array = sess.run(self.real_images, feed_dict={self.images: realbatch_array})
-
-                    sess.run(opti_D, feed_dict={self.images: realbatch_array, self.latent: sample_latent})
-                    batch_num += 1
-
-                # Update Generator
-                sess.run(opti_G, feed_dict={self.latent: sample_latent})
-
-                summary_str = sess.run(summary_op, feed_dict={self.images: realbatch_array, self.latent: sample_latent})
-                summary_writer.add_summary(summary_str, step)
-
-                # the alpha of fake_in process
-                sess.run(alpha_transition_assign, feed_dict={step_pl: step})
-
-                if step % 40 == 0:
-                    D_loss, G_loss, D_origin_loss, alpha_tra = sess.run([self.D_loss, self.G_loss, self.D_origin_loss, self.alpha_transition], feed_dict={self.images: realbatch_array, self.latent: sample_latent})
-                    print("PG %d, step %d: D loss=%.7f G loss=%.7f, D_or loss=%.7f, opt_alpha_tra=%.7f" % (self.progressive_depth, step, D_loss, G_loss, D_origin_loss, alpha_tra))
-
-                if step % 400 == 0:
-
-                    realbatch_array = np.clip(realbatch_array, -1, 1)
-                    save_images(realbatch_array[0:self.batch_size], [2, self.batch_size/2], '{}/{:02d}_real.png'.format(self.samples_dir, step))
-
-                    fake_image = sess.run(self.fake_images, feed_dict={self.images: realbatch_array, self.latent: sample_latent})
-                    fake_image = np.clip(fake_image, -1, 1)
-                    save_images(fake_image[0:self.batch_size], [2, self.batch_size/2], '{}/{:02d}_train.png'.format(self.samples_dir, step))
-
-                if np.mod(step, 4000) == 0 and step != 0:
-                    self.saver.save(sess, self.output_model_path)
-                step += 1
-
-            save_path = self.saver.save(sess, self.output_model_path)
-            print "Model saved in file: %s" % save_path
-
-        tf.reset_default_graph()
+        # self.log([b_loss], headers=['Basic Loss'], verbose=self.hyperverbose)
+        # step_counter.set_description("Activation Loss: {0:.5f}".format(a_loss) + " Basic Loss: {0:.5f}".format(b_loss))
+        step_counter.set_description("Activation Loss: {0:.5f}".format(a_loss) + " G Loss: {0:.5f}".format(g_loss) + " D Loss: {0:.5f}".format(d_loss) + " Big_G Loss: {0:.5f}".format(total_g_loss))
 
         return
 
     def build_tensorflow_model(self, batch_size):
 
+        """ Break it out into functions?
+        """
+
+        # Set input/output shapes for reference during inference.
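+        # The placeholders below leave the batch dimension as None, so the same graph
+        # can serve training batches and arbitrary-size inference batches.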
self.model_input_shape = tuple([batch_size] + list(self.input_shape)) self.model_output_shape = tuple([batch_size] + list(self.input_shape)) - self.latent = tf.placeholder(tf.float32, [self.training_batch_size, self.latent_size]) + self.latent = tf.placeholder(tf.float32, [None, self.latent_size]) + self.reference_images = tf.placeholder(tf.float32, [None] + list(self.model_input_shape)[1:]) - # Derived Parameters - self.output_size = pow(2, self.progressive_depth + 1) - self.zoom_level = self.progressive_depth - self.images = tf.placeholder(tf.float32, [self.training_batch_size, self.output_size, self.output_size, self.channels]) - # self.seg_images = tf.placeholder(tf.float32, [self.training_batch_size, self.output_size, self.output_size, 1]) - self.alpha_transition = tf.Variable(initial_value=0.0, trainable=False, name='alpha_transition') + self.synthetic_images = generator(self, self.latent, depth=self.depth, name='generator') - self.fake_images = self.generator(self.latent, progressive_depth=self.progressive_depth, transition=self.transition, alpha_transition=self.alpha_transition, name='generator') + _, _, _, self.discriminator_real_logits = discriminator(self, self.reference_images, depth=self.depth + 1, name='discriminator') + _, _, _, self.discriminator_fake_logits = discriminator(self, self.synthetic_images, depth=self.depth + 1, name='discriminator', reuse=True) - self.Q_generated_real, _, _, self.D_pro_logits = self.discriminator(self.images, reuse=False, progressive_depth=self.progressive_depth, transition=self.transition, alpha_transition=self.alpha_transition, name='discriminator') - self.Q_generated_fake, _, _, self.G_pro_logits = self.discriminator(self.fake_images, reuse=True, progressive_depth=self.progressive_depth, transition=self.transition, alpha_transition=self.alpha_transition, name='discriminator') + self.basic_loss = tf.reduce_mean(tf.square(self.reference_images - self.synthetic_images)) # Loss functions - self.D_loss = tf.reduce_mean(self.G_pro_logits) - tf.reduce_mean(self.D_pro_logits) - self.G_loss = -tf.reduce_mean(self.G_pro_logits) + self.D_loss = tf.reduce_mean(self.discriminator_fake_logits) - tf.reduce_mean(self.discriminator_real_logits) + self.G_loss = -tf.reduce_mean(self.discriminator_fake_logits) # Gradient Penalty from Wasserstein GAN GP, I believe? Check on it --andrew # Also investigate more what's happening here --andrew - self.differences = self.fake_images - self.images - self.alpha = tf.random_uniform(shape=[self.training_batch_size, 1, 1, 1], minval=0., maxval=1.) - interpolates = self.images + (self.alpha * self.differences) - _, _, _, discri_logits = self.discriminator(interpolates, reuse=True, progressive_depth=self.progressive_depth, transition=self.transition, alpha_transition=self.alpha_transition, name='discriminator') + self.differences = self.synthetic_images - self.reference_images + self.alpha = tf.random_uniform(shape=[tf.shape(self.differences)[0], 1, 1, 1], minval=0., maxval=1.) + interpolates = self.reference_images + (self.alpha * self.differences) + _, _, _, discri_logits = discriminator(self, interpolates, reuse=True, depth=self.depth + 1, name='discriminator') gradients = tf.gradients(discri_logits, [interpolates])[0] # Some sort of norm from papers, check up on it. --andrew @@ -241,125 +125,88 @@ def build_tensorflow_model(self, batch_size): # Update Loss functions.. 
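# --- Editorial sketch (not part of the patch) -----------------------------------
# The "norm from papers" above is the WGAN-GP gradient penalty (Gulrajani et
# al., 2017): the critic's gradient is taken at random interpolates between the
# real and synthetic batches, and its per-sample L2 norm is pushed toward 1.
# The same computation in plain NumPy, with illustrative names:
import numpy as np

def gradient_penalty(gradients):
    # gradients: (batch, height, width, channels) array of
    # d(critic output) / d(interpolated images)
    slopes = np.sqrt(np.sum(np.square(gradients), axis=(1, 2, 3)))
    return np.mean((slopes - 1.0) ** 2)

grads = np.random.normal(size=(4, 8, 8, 3))
print(gradient_penalty(grads))  # weighted by 10 and added to D_loss below
# ---------------------------------------------------------------------------------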
self.D_origin_loss = self.D_loss self.D_loss += 10 * self.gradient_penalty - self.D_loss += 0.001 * tf.reduce_mean(tf.square(self.D_pro_logits - 0.0)) - - # Data Loading Tools - self.low_images = upscale(downscale(self.images, 2), 2) - self.real_images = self.alpha_transition * self.images + (1 - self.alpha_transition) * self.low_images - - self.log_vars = [] - self.log_vars.append(("generator_loss", self.G_loss)) - self.log_vars.append(("discriminator_loss", self.D_loss)) + self.D_loss += 0.001 * tf.reduce_mean(tf.square(self.discriminator_real_logits - 0.0)) + + # vgg_model = tf.keras.applications.VGG19(include_top=False, + # weights='imagenet', + # input_tensor=self.synthetic_images, + # input_shape=(64, 64, 3), + # pooling=None, + # classes=1000) + # print(vgg_model) + + # self.load_reference_model() + + input_tensor = keras.layers.Input(tensor=self.synthetic_images, shape=self.input_shape) + + model_parameters = {'input_shape': self.input_shape, + 'downsize_filters_factor': 1, + 'pool_size': (2, 2), + 'kernel_size': (3, 3), + 'dropout': 0, + 'batch_norm': True, + 'initial_learning_rate': 0.00001, + 'output_type': 'binary_label', + 'num_outputs': 1, + 'activation': 'relu', + 'padding': 'same', + 'implementation': 'keras', + 'depth': 3, + 'max_filter': 128, + 'stride_size': (1, 1), + 'input_tensor': input_tensor} + + unet_output = UNet(**model_parameters) + unet_model = keras.models.Model(input_tensor, unet_output.output_layer) + unet_model.load_weights('retinal_seg_weights.h5') + + if self.hyperverbose: + self.model_summary() + + # self.find_layers(['sampling']) + + self.activated_tensor = self.grab_tensor(self.activated_tensor_name) + print(self.activated_tensor) + self.activated_tensor = tf.stack([self.activated_tensor[..., self.filter_num]], axis=-1) + print(self.activated_tensor) + # self.input_tensor = self.grab_tensor(self.input_tensor_name) + + self.activation_loss = -1 * tf.reduce_mean(self.activated_tensor) + self.activation_gradients = tf.gradients(self.activation_loss, self.synthetic_images) + print(self.activation_gradients)
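# --- Editorial sketch (not part of the patch) -----------------------------------
# activation_loss above is standard activation maximization: minimizing
# -mean(feature_map) performs gradient ascent on one filter of the frozen U-Net,
# taken with respect to the generator's output images. A toy, runnable version
# of one ascent step on a linear "filter" (all names illustrative):
import numpy as np

def activation_ascent_step(image, filter_weights, learning_rate=0.1):
    # activation = mean(image * w), so d(activation)/d(image) = w / image.size
    gradient = filter_weights / image.size
    return image + learning_rate * gradient

image = np.zeros((8, 8))
weights = np.random.normal(size=(8, 8))
stepped = activation_ascent_step(image, weights)
assert np.mean(stepped * weights) >= np.mean(image * weights)  # activation rose
# ---------------------------------------------------------------------------------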
# Hmmm.. better way to do this? Or at least move to function. t_vars = tf.trainable_variables() self.d_vars = [var for var in t_vars if 'discriminator' in var.name] self.g_vars = [var for var in t_vars if 'generator' in var.name] - # save the variables , which remain unchanged - self.d_vars_n = [var for var in self.d_vars if 'discriminator_n' in var.name] - self.g_vars_n = [var for var in self.g_vars if 'generator_n' in var.name] - - # remove the new variables for the new model - self.d_vars_n_read = [var for var in self.d_vars_n if '{}'.format(self.output_size) not in var.name] - self.g_vars_n_read = [var for var in self.g_vars_n if '{}'.format(self.output_size) not in var.name] - - # save the rgb variables, which remain unchanged - self.d_vars_n_2 = [var for var in self.d_vars if 'discriminator_y_rgb_conv' in var.name] - self.g_vars_n_2 = [var for var in self.g_vars if 'generator_y_rgb_conv' in var.name] - - self.d_vars_n_2_rgb = [var for var in self.d_vars_n_2 if '{}'.format(self.output_size) not in var.name] - self.g_vars_n_2_rgb = [var for var in self.g_vars_n_2 if '{}'.format(self.output_size) not in var.name] - - self.saver = tf.train.Saver(self.d_vars + self.g_vars) - self.r_saver = tf.train.Saver(self.d_vars_n_read + self.g_vars_n_read) - if len(self.d_vars_n_2_rgb + self.g_vars_n_2_rgb): - self.rgb_saver = tf.train.Saver(self.d_vars_n_2_rgb + self.g_vars_n_2_rgb) - - for layer in self.d_vars + self.g_vars: - print layer - - for k, v in self.log_vars: - tf.summary.scalar(k, v) - - def discriminator(self, input_image, reuse=False, name=None, progressive_depth=1, transition=False, alpha_transition=0.01): - - with tf.variable_scope(name) as scope: - - if reuse: - scope.reuse_variables() - - if transition: - transition_conv = DnAveragePooling(input_image, dim=self.dim) - transition_conv = leaky_relu(DnConv(transition_conv, output_dim=self.get_filter_num(progressive_depth - 2, self.discriminator_max_filter), kernel_size=(1, 1), stride_size=(1, 1), name='discriminator_y_rgb_conv_{}'.format(transition_conv.shape[1]), dim=self.dim)) - - convs = [] - - # fromRGB - convs += [leaky_relu(DnConv(input_image, output_dim=self.get_filter_num(progressive_depth - 1, self.discriminator_max_filter), kernel_size=(1, 1), stride_size=(1, 1), name='discriminator_y_rgb_conv_{}'.format(input_image.shape[1]), dim=self.dim))] - - for i in range(progressive_depth - 1): - - convs += [leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(progressive_depth - 1 - i, self.discriminator_max_filter), stride_size=(1, 1), name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=self.dim))] - - convs += [leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(progressive_depth - 2 - i, self.discriminator_max_filter), stride_size=(1, 1), name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=self.dim))] - convs[-1] = DnAveragePooling(convs[-1], dim=self.dim) - - if i == 0 and transition: - convs[-1] = alpha_transition * convs[-1] + (1 - alpha_transition) * transition_conv - - convs += [minibatch_state_concat(convs[-1])] - convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(1, self.discriminator_max_filter), kernel_size=(3, 3), stride_size=(1, 1), name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=self.dim)) - - if False: - convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(1, self.discriminator_max_filter), kernel_size=(4, 4), stride_size=(1, 1), padding='VALID', name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=self.dim)) - - for conv in convs: - print conv - - #for D - output = 
tf.reshape(convs[-1], [self.training_batch_size, -1]) - - if self.classify is None: - discriminate_output = dense(output, output_size=1, scope='discriminator_n_fully') - return None, None, tf.nn.sigmoid(discriminate_output), discriminate_output - - def generator(self, latent_var, progressive_depth=1, name=None, transition=False, alpha_transition=0.0): - - with tf.variable_scope(name) as scope: + # Create save/load operation + self.saver = tf.train.Saver(self.g_vars + self.d_vars) - convs = [] + self.G_activation_loss = self.G_loss + .000 * self.activation_loss - convs += [tf.reshape(latent_var, [self.training_batch_size, 1, 1, self.latent_size])] - - convs[-1] = pixel_norm(leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(1, self.generator_max_filter), kernel_size=(4, 4), stride_size=(1, 1), padding='Other', name='generator_n_1_conv', dim=self.dim))) - - convs += [tf.reshape(convs[-1], [self.training_batch_size, 4, 4, self.get_filter_num(1, self.generator_max_filter)])] # why necessary? --andrew - convs[-1] = pixel_norm(leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(1, self.generator_max_filter), stride_size=(1, 1), name='generator_n_2_conv', dim=self.dim))) + # Create Optimizers + self.opti_D = tf.train.AdamOptimizer(learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize( + self.D_loss, var_list=self.d_vars) + self.opti_G = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.G_activation_loss, var_list=self.g_vars) - for i in range(progressive_depth - 1): + self.combined_loss = 1 * self.activation_loss + 1 * self.basic_loss - if i == progressive_depth - 2 and transition: # redundant conditions? --andrew - #To RGB - # Don't totally understand this yet, diagram out --andrew - transition_conv = DnConv(convs[-1], output_dim=self.channels, kernel_size=(1, 1), stride_size=(1, 1), name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=self.dim) - transition_conv = upscale(transition_conv, 2) + self.combined_optimizer = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.combined_loss, var_list=self.g_vars) - convs += [upscale(convs[-1], 2)] - convs[-1] = pixel_norm(leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(i + 1, self.generator_max_filter), stride_size=(1, 1), name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=self.dim))) + self.basic_optimizer = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.basic_loss, var_list=self.g_vars) - convs += [pixel_norm(leaky_relu(DnConv(convs[-1], output_dim=self.get_filter_num(i + 1, self.generator_max_filter), stride_size=(1, 1), name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=self.dim)))] + self.activation_optimizer = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.activation_loss, var_list=self.g_vars) - #To RGB - convs += [DnConv(convs[-1], output_dim=self.channels, kernel_size=(1, 1), stride_size=(1, 1), name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=self.dim)] + def load_reference_model(self, input_model_path=None): - if transition: - convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1] + # from deepneuro.local.basic_cnn import cnn_baseline + # model = cnn_baseline() + # 
model.load_weights('../Interp_GAN/classification_model/cnn/best_cnn_weights_5Classes_RGB_Flip_Rot_0.001.hdf5') - for conv in convs: - print conv + load_old_model('DRIVE_segmentation_unet.h5') - return convs[-1] + return def load_model(self, input_model_path, batch_size=1): @@ -367,60 +214,11 @@ def load_model(self, input_model_path, batch_size=1): self.init_sess() self.saver.restore(self.sess, os.path.join(input_model_path, 'model.ckpt')) - def predict(self, input_data): + def predict(self, sample_latent=None, batch_size=1): self.init_sess() - return self.sess.run(self.generator_1_2_real, feed_dict={self.generator_input_images_1: input_data}) - - def wasserstein_loss(self, discriminator_loss, real_data, fake_data, batch_size, gradient_penalty_weight=10, name=''): - - # Implementation fo Wasserstein loss with gradient penalty. - - # Gradient Penalty from Wasserstein GAN GP, I believe? Check on it --andrew - # Also investigate more of what's happening here --andrew - differences = fake_data - real_data - alpha = tf.random_uniform(shape=[batch_size, 1, 1, 1, 1], minval=0., maxval=1.) - interpolates = real_data + (alpha * differences) - _, discri_logits = self.discriminator(interpolates, name=name, reuse=True) - gradients = tf.gradients(discri_logits, [interpolates])[0] - - # Some sort of norm from papers, check up on it. --andrew - slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3, 4])) - gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2) - - # Update Loss functions.. - discriminator_loss += gradient_penalty_weight * gradient_penalty - # discriminator_loss += 0.001 * tf.reduce_mean(tf.square(self.discriminator_2_real_logits - 0.0)) - - return discriminator_loss - - def discriminator(self, input_image, reuse=False, name=None, scope=None, transition=False, alpha_transition=0.01): - - with tf.variable_scope(name) as scope: - - if reuse: - scope.reuse_variables() - - convs = [] - - convs += [lrelu(DnConv(input_image, output_dim=self.discriminator_max_filter / self.discriminator_depth, kernel_size=(1, 1, 1), name='dis_y_rgb_conv_{}'.format(input_image.shape[1])))] - - for i in range(self.discriminator_depth - 1): - - convs += [lrelu(DnConv(convs[-1], output_dim=self.discriminator_max_filter / (self.discriminator_depth - i - 1), stride_size=(1, 1, 1), name='dis_n_conv_1_{}'.format(convs[-1].shape[1])))] - - convs += [lrelu(DnConv(convs[-1], output_dim=self.discriminator_max_filter / (self.discriminator_depth - 1 - i), stride_size=(1, 1, 1), name='dis_n_conv_2_{}'.format(convs[-1].shape[1])))] - convs[-1] = DnAveragePooling(convs[-1], ratio=(2, 2, 2)) - - # convs += [minibatch_state_concat(convs[-1])] - convs[-1] = lrelu(DnConv(convs[-1], output_dim=self.discriminator_max_filter, kernel_size=(3, 3, 3), stride_size=(1, 1, 1), name='dis_n_conv_1_{}'.format(convs[-1].shape[1]))) - - # conv = lrelu(DnConv(convs[-1], output_dim=self.discriminator_max_filter, kernel_size=(2, 2, 2), stride_size=(1, 1, 1), padding='VALID', name='dis_n_conv_2_{}'.format(convs[-1].shape[1]))) - - #for D - output = tf.layers.Flatten()(convs[-1]) - output = fully_connect(output, output_size=1, scope='dis_n_fully') - # fd = dg + if sample_latent is None: + sample_latent = np.random.normal(size=[batch_size, self.latent_size]) - return tf.nn.sigmoid(output), output \ No newline at end of file + return self.sess.run(self.synthetic_images, feed_dict={self.latent: sample_latent}) \ No newline at end of file diff --git a/deepneuro/models/minimal.py b/deepneuro/models/minimal.py old mode 100644 new mode 
100755 diff --git a/deepneuro/models/model.py b/deepneuro/models/model.py old mode 100644 new mode 100755 index 330546b..ce78c53 --- a/deepneuro/models/model.py +++ b/deepneuro/models/model.py @@ -7,6 +7,7 @@ import csv from shutil import rmtree +from tqdm import tqdm from keras.engine import Input from keras.models import load_model @@ -88,6 +89,7 @@ def __init__(self, model=None, downsize_filters_factor=1, pool_size=(2, 2, 2), f add_parameter(self, kwargs, 'stride_size', (1, 1, 1)) add_parameter(self, kwargs, 'activation', 'relu') add_parameter(self, kwargs, 'optimizer', 'Adam') + add_parameter(self, kwargs, 'cost_function', 'mean_squared_error') self.dropout = dropout self.batch_norm = batch_norm @@ -253,10 +255,10 @@ def train(self, training_data_collection, validation_data_collection=None, outpu self.create_data_generators(training_data_collection, validation_data_collection, input_groups, training_batch_size, validation_batch_size, training_steps_per_epoch, validation_steps_per_epoch) if validation_data_collection is None: - self.model.fit_generator(generator=self.training_data_generator, steps_per_epoch=self.training_steps_per_epoch, epochs=num_epochs, pickle_safe=True, callbacks=get_callbacks(output_model_filepath, callbacks=callbacks, data_collection=training_data_collection, batch_size=training_batch_size, model=self, **kwargs)) + self.model.fit_generator(generator=self.training_data_generator, steps_per_epoch=self.training_steps_per_epoch, epochs=num_epochs, pickle_safe=True, callbacks=get_callbacks(callbacks=callbacks, output_model_filepath=output_model_filepath, data_collection=training_data_collection, batch_size=training_batch_size, model=self, backend='keras', **kwargs)) else: - self.model.fit_generator(generator=self.training_data_generator, steps_per_epoch=self.training_steps_per_epoch, epochs=num_epochs, pickle_safe=True, validation_data=self.validation_data_generator, validation_steps=self.validation_steps_per_epoch, callbacks=get_callbacks(output_model_filepath, callbacks=callbacks, data_collection=training_data_collection, model=self, batch_size=training_batch_size, **kwargs)) + self.model.fit_generator(generator=self.training_data_generator, steps_per_epoch=self.training_steps_per_epoch, epochs=num_epochs, pickle_safe=True, validation_data=self.validation_data_generator, validation_steps=self.validation_steps_per_epoch, callbacks=get_callbacks(callbacks, output_model_filepath=output_model_filepath, data_collection=training_data_collection, model=self, batch_size=training_batch_size, backend='keras', **kwargs)) return @@ -268,7 +270,7 @@ def fit_one_batch(self, training_data_collection, output_model_filepath=None, in training_steps_per_epoch = training_data_collection.total_cases // training_batch_size + 1 try: - self.model.fit_generator(generator=one_batch_generator, steps_per_epoch=training_steps_per_epoch, epochs=num_epochs, pickle_safe=True, callbacks=get_callbacks(output_model_filepath, callbacks=callbacks, data_collection=training_data_collection, model=self, batch_size=training_batch_size, **kwargs)) + self.model.fit_generator(generator=one_batch_generator, steps_per_epoch=training_steps_per_epoch, epochs=num_epochs, pickle_safe=True, callbacks=get_callbacks(callbacks=callbacks, output_model_filepath=output_model_filepath, data_collection=training_data_collection, model=self, batch_size=training_batch_size, backend='keras', **kwargs)) except KeyboardInterrupt: pass except: @@ -293,7 +295,7 @@ def create_data_generators(self, training_data_collection, 
validation_data_colle return - def keras_generator(self, data_generator, input_data='input_modalities', targets='ground_truth'): + def keras_generator(self, data_generator, input_data='input_data', targets='ground_truth'): while True: data = next(data_generator) @@ -331,27 +333,91 @@ def load(self, kwargs): self.tensorflow_optimizer_dict = {'Adam': tf.train.AdamOptimizer} - def train(self, training_data_collection, validation_data_collection=None, output_model_filepath=None, input_groups=None, training_batch_size=32, validation_batch_size=32, training_steps_per_epoch=None, validation_steps_per_epoch=None, initial_learning_rate=.0001, learning_rate_drop=None, learning_rate_epochs=None, num_epochs=None, callbacks=['save_model'], **kwargs): + def init_training(self, training_data_collection, kwargs): - self.create_data_generators(training_data_collection, validation_data_collection, input_groups, training_batch_size, validation_batch_size, training_steps_per_epoch, validation_steps_per_epoch) + # Outputs + add_parameter(self, kwargs, 'output_model_filepath') + + # Training Parameters + add_parameter(self, kwargs, 'num_epochs', 100) + add_parameter(self, kwargs, 'training_steps_per_epoch', 10) + add_parameter(self, kwargs, 'training_batch_size', 16) + add_parameter(self, kwargs, 'callbacks') + + self.callbacks = get_callbacks(backend='tensorflow', model=self, batch_size=self.training_batch_size, **kwargs) - def get_optimizer(self): + self.init_sess() + self.build_tensorflow_model(self.training_batch_size) + self.create_data_generators(training_data_collection, training_batch_size=self.training_batch_size, training_steps_per_epoch=self.training_steps_per_epoch) + + return + + def train(self, training_data_collection, validation_data_collection=None, **kwargs): + + self.init_training(training_data_collection, kwargs) + + self.init = tf.global_variables_initializer() + self.sess.run(self.init) + + self.callback_process('on_train_begin') + + for epoch in range(self.num_epochs): + + print('Epoch {}/{}'.format(epoch, self.num_epochs)) + self.callback_process('on_epoch_begin', epoch) + + step_counter = tqdm(list(range(self.training_steps_per_epoch)), total=self.training_steps_per_epoch, unit="step", desc="Generator Loss:", miniters=1) + + for step in step_counter: + + self.callback_process('on_batch_begin', step) + + self.process_step(step_counter) + + self.callback_process('on_batch_end', step) + + self.callback_process('on_epoch_end', epoch) + + self.save_model(self.output_model_filepath) + + self.callback_process('on_train_end') + + def process_step(self, step_counter): + + # Replace with GPU function? + sample_latent = np.random.normal(size=[self.training_batch_size, self.latent_size]) + reference_data = next(self.training_data_generator)[self.input_data] + + # Optimize!
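# --- Editorial note (not part of the patch) -------------------------------------
# The step below runs basic_optimizer, i.e. the plain mean-squared-error
# reconstruction objective from build_tensorflow_model, rather than the
# adversarial losses; the adversarial (opti_D/opti_G), activation, and combined
# optimizers are built alongside it so a modified step can swap them in.
# ---------------------------------------------------------------------------------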
+ + _, g_loss = self.sess.run([self.basic_optimizer, self.basic_loss], feed_dict={self.reference_images: reference_data, self.latent: sample_latent}) + + self.log([g_loss], headers=['Basic Loss'], verbose=self.hyperverbose) + step_counter.set_description("Generator Loss: {0:.5f}".format(g_loss)) return def init_sess(self): if self.sess is None: - self.init = tf.global_variables_initializer() + self.graph = tf.Graph() + config = tf.ConfigProto() config.gpu_options.allow_growth = True - - self.sess = tf.Session(config=config) - - self.sess.run(self.init) + self.sess = tf.InteractiveSession(config=config, graph=self.graph) elif self.sess._closed: - self.sess.run(self.init) + self.graph = tf.Graph() + + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + self.sess = tf.InteractiveSession(config=config, graph=self.graph) def save_model(self, output_model_filepath, overwrite=True): @@ -374,6 +440,34 @@ def save_model(self, output_model_filepath, overwrite=True): return save_path + def model_summary(self): + + for layer in tf.trainable_variables(): + print(layer) + + def callback_process(self, command='', idx=None): + + for callback in self.callbacks: + if type(callback) is str: + continue + method = getattr(callback, command) + method(idx) + + return + + def grab_tensor(self, layer): + return self.graph.get_tensor_by_name(layer + ':0') + + def find_layers(self, contains=['discriminator/']): + + for layer in self.graph.get_operations(): + if any(op_type in layer.name for op_type in contains): + try: + if self.graph.get_tensor_by_name(layer.name + ':0').get_shape() != (): + print(layer.name, self.graph.get_tensor_by_name(layer.name + ':0').get_shape()) + except: + continue + def load_old_model(model_file, backend='keras'): diff --git a/deepneuro/models/ops.py b/deepneuro/models/ops.py old mode 100644 new mode 100755 index 9a1d9ce..dbb12fd --- a/deepneuro/models/ops.py +++ b/deepneuro/models/ops.py @@ -119,7 +119,7 @@ def adjusted_std(x, **kwargs): return tf.sqrt(tf.reduce_mean((x - tf.reduce_mean(x, **kwargs)) ** 2, **kwargs) + 1e-8) -def minibatch_state_concat(_input, averaging='all'): +def minibatch_state_concat(_input, averaging='all', dim=2): # Rewrite this later, and understand it --andrew @@ -130,7 +130,8 @@ def minibatch_state_concat(_input, averaging='all'): else: print "nothing" - multiples = tuple([int(_input.shape[0]), 4, 4, 1]) + multiples = (tf.shape(_input)[0], 4, 4, 1) + vals = tf.tile(vals, multiples=multiples) # Be aware, need updated TF for this to work. 
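# --- Editorial sketch (not part of the patch) -----------------------------------
# minibatch_state_concat is the PGGAN minibatch-standard-deviation trick: one
# summary statistic of across-batch feature variation is tiled to the critic's
# final spatial size (the hard-coded 4x4 above) and concatenated as an extra
# channel, so the critic can penalize low-diversity, mode-collapsed batches.
# The appended statistic, roughly, in NumPy:
import numpy as np

def minibatch_stddev_stat(features):
    # features: (batch, height, width, channels)
    return np.mean(np.sqrt(np.var(features, axis=0) + 1e-8))

batch = np.random.normal(size=(8, 4, 4, 16))
print(minibatch_stddev_stat(batch))  # tiled to (batch, 4, 4, 1) by the op above
# ---------------------------------------------------------------------------------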
return tf.concat([_input, vals], axis=3) @@ -159,7 +160,7 @@ def upscale2d(x, scale): return resize_nearest_neighbor(x, (h * scale, w * scale)) -def downscale(x, scale): +def downscale2d(x, scale): _, h, w, _ = get_conv_shape(x) return resize_nearest_neighbor(x, (int(h / scale), int(w / scale))) @@ -202,15 +203,16 @@ def sigmoid(backend='tf'): return tf.nn.sigmoid -def dense(tensor, output_size, stddev=0.02, bias_start=0.0, with_w=False, backend='tf', scope=False): +def dense(tensor, output_size, stddev=0.02, bias_start=0.0, with_w=False, backend='tensorflow', name="dense"): - if backend == 'tf': + if backend == 'tensorflow': - with tf.variable_scope(scope or "Linear"): + with tf.variable_scope(name): shape = tensor.get_shape().as_list() matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, tf.contrib.layers.xavier_initializer()) + bias = tf.get_variable("bias", [output_size], initializer=tf.zeros_initializer()) if with_w: diff --git a/deepneuro/models/progressive_growing_gan.py b/deepneuro/models/progressive_growing_gan.py new file mode 100755 index 0000000..233ec61 --- /dev/null +++ b/deepneuro/models/progressive_growing_gan.py @@ -0,0 +1,367 @@ +"""This is an implementation of a progressively growing GAN (PGGAN), after +Karras et al. It includes the Wasserstein gradient penalty by default. +""" + +import math +import numpy as np +import tensorflow as tf +import os +import imageio +import keras +import scipy + +from tqdm import tqdm + + +from deepneuro.utilities.util import add_parameter +from deepneuro.models.blocks import generator, discriminator +from deepneuro.models.cost_functions import wasserstein_loss +from deepneuro.models.gan import GAN +from deepneuro.models.ops import upscale2d, downscale2d +from deepneuro.utilities.visualize import check_data + + +class PGGAN(GAN): + + def load(self, kwargs): + + """ Parameters + ---------- + depth : int, optional + Specifies how many resolution levels the generator and + discriminator grow through during progressive training; + each level doubles the output resolution. + max_filter: int, optional + Specifies the number of filters at the lowest resolution; + counts are halved as the resolution grows. + + """ + + super(PGGAN, self).load(kwargs) + + # PGGAN Parameters + self.starting_depth = 0 + + if self.dim == 3: + raise NotImplementedError + + def get_filter_num(self, depth): + + # This will need to be a bit more complicated; see PGGAN paper. 
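# --- Editorial sketch (not part of the patch) -----------------------------------
# The schedule below halves the filter count each time resolution doubles,
# clamped between filter_floor and filter_cap, as in the PGGAN paper. A
# standalone equivalent with illustrative defaults (max_filter=128,
# filter_floor=16, filter_cap=128):
def filters_at(depth, max_filter=128, filter_floor=16, filter_cap=128):
    return max(filter_floor, min(max_filter // (2 ** depth), filter_cap))

assert [filters_at(d) for d in range(5)] == [128, 64, 32, 16, 16]
# ---------------------------------------------------------------------------------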
+ if self.max_filter / (2 ** (depth)) <= self.filter_floor: + return self.filter_floor + else: + return min(self.max_filter / (2 ** (depth)), self.filter_cap) + + def init_training(self, training_data_collection, kwargs): + + # Outputs + add_parameter(self, kwargs, 'output_model_filepath') + + # Training Parameters + add_parameter(self, kwargs, 'num_epochs', 100) + add_parameter(self, kwargs, 'training_steps_per_epoch', 10) + add_parameter(self, kwargs, 'training_batch_size', 16) + add_parameter(self, kwargs, 'callbacks') + + self.callbacks = self.get_callbacks(backend='tensorflow', model=self, batch_size=self.training_batch_size, **kwargs) + + self.create_data_generators(training_data_collection, training_batch_size=self.training_batch_size, training_steps_per_epoch=self.training_steps_per_epoch) + + return + + def train(self, training_data_collection, validation_data_collection=None, **kwargs): + + self.init_training(training_data_collection, kwargs) + + if not os.path.exists(self.output_model_filepath): + os.mkdir(self.output_model_filepath) + + self.callback_process('on_train_begin') + + # Some explanation on training stages: The progressive gan trains each resolution + # in two stages. One interpolates from the previous resolution, while one trains + # solely on the current resolution. The loop below looks odd because the lowest + # resolution only has one stage. + + for training_stage in range(int(np.ceil((self.starting_depth - 1) / 2.)), (self.depth * 2) - 1): + + if (training_stage % 2 == 0): + self.transition = False + else: + self.transition = True + + current_depth = np.ceil((training_stage + 2) / 2.) + previous_depth = np.ceil((training_stage + 1) / 2.) + self.progressive_depth = int(current_depth - 1) + + current_model_path = os.path.join(self.output_model_filepath, '{}_{}'.format(str(current_depth), str(self.transition)), 'model.ckpt') + if not os.path.exists(os.path.dirname(current_model_path)): + os.mkdir(os.path.dirname(current_model_path)) + + previous_model_path = os.path.join(self.output_model_filepath, '{}_{}'.format(str(previous_depth), str(not self.transition)), 'model.ckpt') + + self.callback_process('on_depth_begin', [current_depth, self.transition]) + + self.init_sess() + self.build_tensorflow_model(self.training_batch_size) + self.init = tf.global_variables_initializer() + self.sess.run(self.init) + + self.graph = tf.get_default_graph() + for layer in self.graph.get_operations(): + if any(op_type in layer.name for op_type in ['']): + try: + if self.graph.get_tensor_by_name(layer.name + ':0').get_shape() != (): + print(layer.name, self.graph.get_tensor_by_name(layer.name + ':0').get_shape()) + except: + continue + + if self.transition: + # for i in self.d_vars_n_2_rgb + self.g_vars_n_2_rgb: + # print i + # print '' + # for i in self.d_vars_n_read + self.g_vars_n_read: + # print i + self.r_saver.restore(self.sess, previous_model_path) + self.rgb_saver.restore(self.sess, previous_model_path) + elif self.progressive_depth > 0: + # for i in self.d_vars + self.g_vars: + # print i + self.saver.restore(self.sess, previous_model_path) + + for epoch in range(self.num_epochs): + + print('Depth {}/{}, Epoch {}/{}'.format(self.progressive_depth + 1, self.depth, epoch, self.num_epochs)) + self.callback_process('on_epoch_begin', '_'.join([str(current_depth), str(epoch)])) + + step_counter = tqdm(list(range(self.training_steps_per_epoch)), total=self.training_steps_per_epoch, unit="step", desc="Generator Loss:", miniters=1) + + for step in step_counter: + + 
self.callback_process('on_batch_begin', step) + + reference_data = self.process_step(step_counter, step, epoch) + + self.callback_process('on_batch_end', step) + + self.callback_process('on_epoch_end', [str(epoch), reference_data]) + + self.saver.save(self.sess, current_model_path) + + self.callback_process('on_depth_end', [current_depth, self.transition]) + + self.sess.close() + tf.reset_default_graph() + + # Should this be called after each progression, or after all training? + self.callback_process('on_train_end') + + return + + def process_step(self, step_counter, step, epoch): + + for i in range(self.discriminator_updates): + + sample_latent = np.random.normal(size=[self.training_batch_size, self.latent_size]) + + reference_data = next(self.training_data_generator)[self.input_data] + reference_data = self.sess.run(self.input_volumes, feed_dict={self.raw_volumes: reference_data}) + + if self.transition: + reference_data = self.sess.run(self.real_images, feed_dict={self.reference_images: reference_data}) + + self.sess.run(self.opti_D, feed_dict={self.reference_images: reference_data, self.latent: sample_latent}) + + # Update Generator + for i in range(self.generator_updates): + + self.sess.run(self.opti_G, feed_dict={self.latent: sample_latent}) + + # Update the fade-in (transition) alpha. + if self.transition: + self.sess.run(self.alpha_transition_assign, feed_dict={self.step_pl: (epoch * float(self.training_steps_per_epoch)) + (step + 1)}) + + d_loss, d_loss_origin, g_loss, transition = self.sess.run([self.D_loss, self.D_origin_loss, self.G_loss, self.alpha_transition], feed_dict={self.reference_images: reference_data, self.latent: sample_latent}) + + self.log([d_loss, d_loss_origin, g_loss, transition], headers=['Dis-WP Loss', 'Dis Loss', 'Gen Loss', 'Alpha'], verbose=self.hyperverbose) + + if self.transition: + step_counter.set_description("Generator Loss: {0:.5f}".format(g_loss) + " Discriminator Loss: {0:.5f}".format(d_loss) + " Alpha: {0:.2f}".format(transition)) + else: + step_counter.set_description("Generator Loss: {0:.5f}".format(g_loss) + " Discriminator Loss: {0:.5f}".format(d_loss)) + + return reference_data + + def build_tensorflow_model(self, batch_size): + + """ Break it out into functions? + """ + 
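# --- Editorial note (not part of the patch) -------------------------------------
# This graph is rebuilt from scratch for every training stage: train() closes
# the session and calls tf.reset_default_graph() between stages, so every
# placeholder and layer below is re-created at the stage's resolution,
# output_size = 2 ** (progressive_depth + 2). For starting_depth=0 and depth=3
# the stage schedule works out to:
#   stage 0 -> 4x4    stabilize            (progressive_depth 0)
#   stage 1 -> 8x8    fade-in transition   (progressive_depth 1)
#   stage 2 -> 8x8    stabilize
#   stage 3 -> 16x16  fade-in transition   (progressive_depth 2)
#   stage 4 -> 16x16  stabilize
# with even stages stabilizing and odd stages fading in the new block.
# ---------------------------------------------------------------------------------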
+ # Set input/output shapes for reference during inference. + self.model_input_shape = tuple([batch_size] + list(self.input_shape)) + self.model_output_shape = tuple([batch_size] + list(self.input_shape)) + + self.alpha_transition = tf.Variable(initial_value=0.0, trainable=False, name='alpha_transition') + self.step_pl = tf.placeholder(tf.float32, shape=None) + self.alpha_transition_assign = self.alpha_transition.assign(self.step_pl / (self.num_epochs * self.training_steps_per_epoch)) + + self.latent = tf.placeholder(tf.float32, [None, self.latent_size]) + self.synthetic_images = generator(self, self.latent, depth=self.progressive_depth, transition=self.transition, alpha_transition=self.alpha_transition, name='generator') + + # Derived Parameters + self.output_size = pow(2, self.progressive_depth + 2) + self.zoom_level = self.progressive_depth + 1 + self.reference_images = tf.placeholder(tf.float32, [None] + [self.output_size] * self.dim + [self.channels]) + + max_downscale = np.floor(math.log(self.model_input_shape[1], 2)) + downscale_factor = 2 ** max_downscale / (2 ** (self.progressive_depth + 2)) + self.raw_volumes = tf.placeholder(tf.float32, self.model_input_shape) + self.input_volumes = downscale2d(self.raw_volumes, downscale_factor) + + # Data Loading Tools + self.low_images = upscale2d(downscale2d(self.reference_images, 2), 2) + self.real_images = self.alpha_transition * self.reference_images + (1 - self.alpha_transition) * self.low_images + + self.discriminator_real, self.discriminator_real_logits = discriminator(self, self.reference_images, depth=self.progressive_depth, name='discriminator', transition=self.transition, alpha_transition=self.alpha_transition) + self.discriminator_fake, self.discriminator_fake_logits = discriminator(self, self.synthetic_images, depth=self.progressive_depth, name='discriminator', transition=self.transition, alpha_transition=self.alpha_transition, reuse=True) + + # Hmmm.. better way to do this? Or at least move to function. 
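# --- Editorial note (not part of the patch) -------------------------------------
# Three savers are assembled from the variable lists below: `saver` covers all
# generator/discriminator variables and checkpoints the current stage;
# `r_saver` restores only the convolutional layers shared with the previous
# resolution; `rgb_saver` restores the previous stage's to/from-RGB 1x1
# convolutions. Filtering names against the current output_size excludes
# variables that are new at this resolution and have no checkpointed values.
# ---------------------------------------------------------------------------------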
+ t_vars = tf.trainable_variables() + self.d_vars = [var for var in t_vars if 'discriminator' in var.name] + self.g_vars = [var for var in t_vars if 'generator' in var.name] + + # save the variables, which remain unchanged + self.d_vars_n = [var for var in self.d_vars if 'discriminator_n' in var.name] + self.g_vars_n = [var for var in self.g_vars if 'generator_n' in var.name] + + # remove the new variables for the new model + self.d_vars_n_read = [var for var in self.d_vars_n if '{}'.format(self.output_size) not in var.name] + self.g_vars_n_read = [var for var in self.g_vars_n if '{}'.format(self.output_size) not in var.name] + + # save the rgb variables, which remain unchanged + self.d_vars_n_2 = [var for var in self.d_vars if 'discriminator_y_rgb_conv' in var.name] + self.g_vars_n_2 = [var for var in self.g_vars if 'generator_y_rgb_conv' in var.name] + + self.d_vars_n_2_rgb = [var for var in self.d_vars_n_2 if '{}'.format(self.output_size) not in var.name] + self.g_vars_n_2_rgb = [var for var in self.g_vars_n_2 if '{}'.format(self.output_size) not in var.name] + + self.saver = tf.train.Saver(self.d_vars + self.g_vars) + self.r_saver = tf.train.Saver(self.d_vars_n_read + self.g_vars_n_read) + if len(self.d_vars_n_2_rgb + self.g_vars_n_2_rgb): + self.rgb_saver = tf.train.Saver(self.d_vars_n_2_rgb + self.g_vars_n_2_rgb) + + self.calculate_losses() + + if self.hyperverbose: + self.model_summary() + + def calculate_losses(self): + + self.D_loss, self.G_loss, self.D_origin_loss = wasserstein_loss(self, discriminator, self.discriminator_fake_logits, self.discriminator_real_logits, self.synthetic_images, self.real_images, gradient_penalty_weight=self.gradient_penalty_weight, name='discriminator', depth=self.progressive_depth, transition=self.transition, alpha_transition=self.alpha_transition, dim=self.dim) + + # A little sketchy. Attempting to make variable loss functions extensible later. + self.D_loss = self.D_loss[0] + self.G_loss = self.G_loss[0] + self.D_origin_loss = self.D_origin_loss[0] + + # Create Optimizers + self.opti_D = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize( + self.D_loss, var_list=self.d_vars) + self.opti_G = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize( + self.G_loss, var_list=self.g_vars) + + def load_model(self, input_model_path, batch_size=1): + + self.build_tensorflow_model(batch_size) + self.init_sess() + self.saver.restore(self.sess, os.path.join(input_model_path, 'model.ckpt')) + + def get_callbacks(self, callbacks=[], output_model_filepath=None, monitor='val_loss', model=None, data_collection=None, save_best_only=False, epoch_prediction_dir=None, batch_size=1, epoch_prediction_object=None, epoch_prediction_data_collection=None, epoch_prediction_batch_size=None, latent_size=128, backend='tensorflow', **kwargs): + + """ Very disorganized currently. Replace with dictionary? 
Also address never-ending parameters + """ + + return_callbacks = [] + + for callback in callbacks: + + if callback == 'predict_gan': + return_callbacks += [PGGANPredict(deepneuro_model=model, data_collection=data_collection, epoch_prediction_dir=epoch_prediction_dir, batch_size=batch_size, epoch_prediction_object=epoch_prediction_object, epoch_prediction_data_collection=epoch_prediction_data_collection, epoch_prediction_batch_size=epoch_prediction_batch_size, latent_size=latent_size)] + + return return_callbacks + + +class PGGANPredict(keras.callbacks.Callback): + + def __init__(self, **kwargs): + + add_parameter(self, kwargs, 'data_collection', None) + add_parameter(self, kwargs, 'epoch_prediction_data_collection', self.data_collection) + add_parameter(self, kwargs, 'epoch_prediction_object', None) + add_parameter(self, kwargs, 'deepneuro_model', None) + add_parameter(self, kwargs, 'epoch_prediction_dir', None) + add_parameter(self, kwargs, 'output_gif', None) + add_parameter(self, kwargs, 'batch_size', 1) + add_parameter(self, kwargs, 'epoch_prediction_batch_size', self.batch_size) + add_parameter(self, kwargs, 'latent_size', 128) + add_parameter(self, kwargs, 'sample_latent', np.random.normal(size=[self.epoch_prediction_batch_size, self.latent_size])) + + if not os.path.exists(self.epoch_prediction_dir): + os.mkdir(self.epoch_prediction_dir) + + self.predictions = [] + self.depth_directories = [] + + def on_train_end(self, logs={}): + + max_size = self.predictions[-1][0].shape[0] + final_predictions = [] + + for predictions in self.predictions: + + if predictions[0].shape[0] != max_size: + upsample_ratio = max_size / predictions[0].shape[0] + predictions = [scipy.misc.imresize(prediction, upsample_ratio * 100, interp='nearest') for prediction in predictions] + + final_predictions += predictions + + imageio.mimsave(os.path.join(self.epoch_prediction_dir, 'pggan_training.gif'), final_predictions) + + return + + def on_epoch_end(self, data, logs={}): + + # Hacky, revise later. 
+ epoch = data[0] + reference_data = data[1] + + if self.epoch_prediction_object is None: + prediction = self.deepneuro_model.predict(sample_latent=self.sample_latent) + else: + prediction = self.epoch_prediction_object.process_case(self.predict_data[self.deepneuro_model.input_data], model=self.deepneuro_model) + + output_filepaths, output_images = check_data({'prediction': prediction, 'real_data': reference_data}, output_filepath=os.path.join(self.depth_dir, 'epoch_{}.png'.format(epoch)), show_output=False, batch_size=self.epoch_prediction_batch_size) + + self.predictions[-1] += [output_images['prediction'].astype('uint8')] + + return + + def on_depth_begin(self, depth_transition, logs={}): + + self.depth_dir = os.path.join(self.epoch_prediction_dir, '{}_{}'.format(str(depth_transition[0]), str(depth_transition[1]))) + + if not os.path.exists(self.depth_dir): + os.mkdir(self.depth_dir) + + self.predictions += [[]] + + return + + def on_depth_end(self, depth_transition, logs={}): + + imageio.mimsave(os.path.join(self.depth_dir, 'epoch_prediction.gif'), self.predictions[-1]) + + return \ No newline at end of file diff --git a/deepneuro/models/timenet.py b/deepneuro/models/timenet.py old mode 100644 new mode 100755 diff --git a/deepneuro/models/unet.py b/deepneuro/models/unet.py old mode 100644 new mode 100755 index 5b93e05..8118eb9 --- a/deepneuro/models/unet.py +++ b/deepneuro/models/unet.py @@ -85,32 +85,35 @@ def build_model(self): if self.batch_norm: right_outputs[level] = BatchNormalization()(right_outputs[level]) - output_layer = DnConv(right_outputs[level], 1, (1, ) * self.dim, stride_size=(1,) * self.dim, dim=self.dim, name='end_conv', backend='keras') + self.output_layer = DnConv(right_outputs[level], 1, (1, ) * self.dim, stride_size=(1,) * self.dim, dim=self.dim, name='end_conv', backend='keras') # TODO: Brainstorm better way to specify outputs - if self.input_tensor is not None: - return output_layer + if self.input_tensor is None: - if self.output_type == 'regression': - self.model = Model(inputs=self.inputs, outputs=output_layer) - self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss='mean_squared_error', metrics=['mean_squared_error']) + if self.output_type == 'regression': + self.model = Model(inputs=self.inputs, outputs=self.output_layer) + self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss='mean_squared_error', metrics=['mean_squared_error']) - if self.output_type == 'dice': - act = Activation('sigmoid')(output_layer) - self.model = Model(inputs=self.inputs, outputs=act) - self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss=dice_coef_loss, metrics=[dice_coef]) + if self.output_type == 'dice': + act = Activation('sigmoid')(self.output_layer) + self.model = Model(inputs=self.inputs, outputs=act) + self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss=dice_coef_loss, metrics=[dice_coef]) - if self.output_type == 'binary_label': - act = Activation('sigmoid')(output_layer) - self.model = Model(inputs=self.inputs, outputs=act) - self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss='binary_crossentropy', metrics=['binary_accuracy']) + if self.output_type == 'binary_label': + act = Activation('sigmoid')(self.output_layer) + self.model = Model(inputs=self.inputs, outputs=act) + self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss='binary_crossentropy', metrics=['binary_accuracy']) - if self.output_type == 'categorical_label': - act = Activation('softmax')(output_layer) 
- self.model = Model(inputs=self.inputs, outputs=act) - self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss='categorical_crossentropy', - metrics=['categorical_accuracy']) + if self.output_type == 'categorical_label': + act = Activation('softmax')(self.output_layer) + self.model = Model(inputs=self.inputs, outputs=act) + self.model.compile(optimizer=Nadam(lr=self.initial_learning_rate), loss='categorical_crossentropy', + metrics=['categorical_accuracy']) - super(UNet, self).build() + super(UNet, self).build() - return self.model \ No newline at end of file + return self.model + + else: + + return \ No newline at end of file diff --git a/deepneuro/outputs/__init__.py b/deepneuro/outputs/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/outputs/gan.py b/deepneuro/outputs/gan.py new file mode 100755 index 0000000..33f9f96 --- /dev/null +++ b/deepneuro/outputs/gan.py @@ -0,0 +1,129 @@ +import numpy as np + +from deepneuro.outputs.output import Output +from deepneuro.utilities.util import add_parameter, docker_print + +class GanSLERP(Output): + + def load(self, kwargs): + + """ Parameters + ---------- + patch_overlaps : int, optional + Specifies how many offset grids of patches are predicted and + averaged together to produce the output. + output_patch_shape: tuple, optional + Specifies the shape of the predicted patches this output + stitches back together. + + """ + + super(GanSLERP, self).load(kwargs) + + # Patching Parameters + add_parameter(self, kwargs, 'patch_overlaps', 1) + add_parameter(self, kwargs, 'output_patch_shape', None) + add_parameter(self, kwargs, 'check_empty_patch', True) + add_parameter(self, kwargs, 'pad_borders', True) + + add_parameter(self, kwargs, 'patch_dimensions', None) + + add_parameter(self, kwargs, 'output_patch_dimensions', self.patch_dimensions) + + def process_case(self, input_data, model=None): + + # A little bit strange to access casename this way. Maybe make it an optional + # return of the generator. + + # Note that input_modalities as the first input is hard-coded here. Very fragile. + + # If an image is being repatched, its output shape is not certain. We attempt to infer it from + # the input data. This is wonky. Move this to PatchInference, maybe. + + if model is not None: + self.model = model + + output_data = self.predict(input_data, model) + + # Will fail for time-data. + if self.channels_first: + output_data = np.swapaxes(output_data, 1, -1) + + self.return_objects.append(output_data) + + return output_data + + def predict(self, input_data, model=None): + + repetition_offsets = [np.linspace(0, self.input_patch_shape[axis] - 1, self.patch_overlaps, dtype=int) for axis in self.patch_dimensions] + + if self.pad_borders: + # TODO -- Clean up this border-padding code and make it more readable. + input_pad_dimensions = [(0, 0)] * input_data.ndim + repatched_shape = self.output_shape + new_input_shape = list(input_data.shape) + for idx, dim in enumerate(self.patch_dimensions): + # Might not work for odd-shaped patches; check. 
+ input_pad_dimensions[dim] = (int(self.input_patch_shape[dim] / 2), int(self.input_patch_shape[dim] / 2)) + new_input_shape[dim] += self.input_patch_shape[dim] + for idx, dim in enumerate(self.output_patch_dimensions): + repatched_shape[dim] += self.input_patch_shape[dim] + + padded_input_data = np.zeros(new_input_shape) + if self.channels_first: + input_slice = [slice(None)] * 2 + [slice(self.input_patch_shape[dim] / 2, -self.input_patch_shape[dim] / 2, None) for dim in self.patch_dimensions] + else: + input_slice = [slice(None)] + [slice(self.input_patch_shape[dim] / 2, -self.input_patch_shape[dim] / 2, None) for dim in self.patch_dimensions] + [slice(None)] + padded_input_data[tuple(input_slice)] = input_data + input_data = padded_input_data + + repatched_image = np.zeros(repatched_shape) + + corner_data_dims = [input_data.shape[axis] for axis in self.patch_dimensions] + corner_patch_dims = [self.output_patch_shape[axis] for axis in self.patch_dimensions] + + all_corners = np.indices(corner_data_dims) + + # There must be a better way to round up to an integer.. + possible_corners_slice = [slice(None)] + [slice(self.input_patch_shape[dim] / 2, -self.input_patch_shape[dim] / 2, None) for dim in self.patch_dimensions] + all_corners = all_corners[tuple(possible_corners_slice)] + + for rep_idx in range(self.patch_overlaps): + + if self.verbose: + docker_print('Predicting patch set', str(rep_idx + 1) + '/' + str(self.patch_overlaps) + '...') + + corners_grid_shape = [slice(None)] + for dim in range(all_corners.ndim - 1): + corners_grid_shape += [slice(repetition_offsets[dim][rep_idx], corner_data_dims[dim], corner_patch_dims[dim])] + + corners_list = all_corners[tuple(corners_grid_shape)] + corners_list = np.reshape(corners_list, (corners_list.shape[0], -1)).T + + if self.check_empty_patch: + corners_list = self.remove_empty_patches(input_data, corners_list) + + for corner_list_idx in range(0, corners_list.shape[0], self.batch_size): + + corner_batch = corners_list[corner_list_idx:corner_list_idx + self.batch_size] + input_patches = self.grab_patch(input_data, corner_batch) + + prediction = self.model.predict(input_patches) + + self.insert_patch(repatched_image, prediction, corner_batch) + + if rep_idx == 0: + output_data = np.copy(repatched_image) + else: + output_data = output_data + (1.0 / (rep_idx + 1)) * (repatched_image - output_data) # Running average over overlap passes; the divisor is the pass count. + + if self.pad_borders: + + output_slice = [slice(None)] * output_data.ndim # Weird + for idx, dim in enumerate(self.output_patch_dimensions): + # Might not work for odd-shaped patches; check. 
+ output_slice[dim] = slice(self.input_patch_shape[dim] / 2, -self.input_patch_shape[dim] / 2, 1) + output_data = output_data[tuple(output_slice)] + + return output_data \ No newline at end of file diff --git a/deepneuro/outputs/inference.py b/deepneuro/outputs/inference.py old mode 100644 new mode 100755 diff --git a/deepneuro/outputs/measure.py b/deepneuro/outputs/measure.py old mode 100644 new mode 100755 diff --git a/deepneuro/outputs/output.py b/deepneuro/outputs/output.py old mode 100644 new mode 100755 diff --git a/deepneuro/outputs/radiomics.py b/deepneuro/outputs/radiomics.py old mode 100644 new mode 100755 diff --git a/deepneuro/outputs/statistics.py b/deepneuro/outputs/statistics.py old mode 100644 new mode 100755 diff --git a/deepneuro/outputs/visualization.py b/deepneuro/outputs/visualization.py old mode 100644 new mode 100755 diff --git a/deepneuro/package_test/__init__.py b/deepneuro/package_test/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/package_test/ci_test.py b/deepneuro/package_test/ci_test.py old mode 100644 new mode 100755 diff --git a/deepneuro/package_test/package_test.py b/deepneuro/package_test/package_test.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/Dockerfile b/deepneuro/pipelines/Segment_GBM/Dockerfile old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/README.md b/deepneuro/pipelines/Segment_GBM/README.md old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/__init__.py b/deepneuro/pipelines/Segment_GBM/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/cli.py b/deepneuro/pipelines/Segment_GBM/cli.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/edema_train.py b/deepneuro/pipelines/Segment_GBM/edema_train.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/predict.py b/deepneuro/pipelines/Segment_GBM/predict.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/resources/icon.png b/deepneuro/pipelines/Segment_GBM/resources/icon.png old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/template.py b/deepneuro/pipelines/Segment_GBM/template.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Segment_GBM/train.py b/deepneuro/pipelines/Segment_GBM/train.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Skull_Stripping/Dockerfile b/deepneuro/pipelines/Skull_Stripping/Dockerfile old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Skull_Stripping/README.md b/deepneuro/pipelines/Skull_Stripping/README.md old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Skull_Stripping/__init__.py b/deepneuro/pipelines/Skull_Stripping/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Skull_Stripping/cli.py b/deepneuro/pipelines/Skull_Stripping/cli.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Skull_Stripping/predict.py b/deepneuro/pipelines/Skull_Stripping/predict.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/Skull_Stripping/resources/icon.png b/deepneuro/pipelines/Skull_Stripping/resources/icon.png old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/__init__.py b/deepneuro/pipelines/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/shared.py b/deepneuro/pipelines/shared.py old mode 100644 new mode 100755 diff --git a/deepneuro/pipelines/template.py 
b/deepneuro/pipelines/template.py old mode 100644 new mode 100755 diff --git a/deepneuro/postprocessing/__init__.py b/deepneuro/postprocessing/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/postprocessing/label.py b/deepneuro/postprocessing/label.py old mode 100644 new mode 100755 diff --git a/deepneuro/postprocessing/postprocessor.py b/deepneuro/postprocessing/postprocessor.py old mode 100644 new mode 100755 diff --git a/deepneuro/postprocessing/transform.py b/deepneuro/postprocessing/transform.py old mode 100644 new mode 100755 diff --git a/deepneuro/preprocessing/__init__.py b/deepneuro/preprocessing/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/preprocessing/preprocessor.py b/deepneuro/preprocessing/preprocessor.py old mode 100644 new mode 100755 diff --git a/deepneuro/preprocessing/signal.py b/deepneuro/preprocessing/signal.py old mode 100644 new mode 100755 index 6c0c046..1379a7d --- a/deepneuro/preprocessing/signal.py +++ b/deepneuro/preprocessing/signal.py @@ -48,9 +48,9 @@ def load(self, kwargs): add_parameter(self, kwargs, 'mask_value', None) # Normalization Parameters - add_parameter(self, kwargs, 'intensity_range', [-1,1]) + add_parameter(self, kwargs, 'intensity_range', [-1, 1]) add_parameter(self, kwargs, 'normalize_by_channel', True) - + add_parameter(self, kwargs, 'channels', None) self.array_input = True @@ -85,7 +85,7 @@ def load(self, kwargs): super(RangeNormalization, self).load(kwargs) - add_parameter(self, kwargs, 'intensity_range', [-1,1]) + add_parameter(self, kwargs, 'intensity_range', [-1, 1]) add_parameter(self, kwargs, 'input_intensity_range', None) add_parameter(self, kwargs, 'outlier_percent', None) # Not Implemented @@ -131,7 +131,7 @@ def load(self, kwargs): super(BinaryNormalization, self).load(kwargs) - add_parameter(self, kwargs, 'intensity_range', [-1,1]) + add_parameter(self, kwargs, 'intensity_range', [-1, 1]) # Not Implemented add_parameter(self, kwargs, 'threshold', 0) diff --git a/deepneuro/preprocessing/skullstrip.py b/deepneuro/preprocessing/skullstrip.py old mode 100644 new mode 100755 diff --git a/deepneuro/preprocessing/transform.py b/deepneuro/preprocessing/transform.py old mode 100644 new mode 100755 diff --git a/deepneuro/utilities/__init__.py b/deepneuro/utilities/__init__.py old mode 100644 new mode 100755 diff --git a/deepneuro/utilities/conversion.py b/deepneuro/utilities/conversion.py old mode 100644 new mode 100755 index 99dc65b..682de54 --- a/deepneuro/utilities/conversion.py +++ b/deepneuro/utilities/conversion.py @@ -310,9 +310,6 @@ def nifti_2_numpy(input_filepath, return_all=False): def save_numpy_2_nifti(image_numpy, reference_nifti_filepath=None, output_filepath=None, metadata=None, **kwargs): """ This is a bit convoluted. - - TODO: Documentation, rearrange reference_nifti and output_filepath, and - propagate changes to the rest of qtim_tools. """ if reference_nifti_filepath is not None: @@ -382,7 +379,6 @@ def check_format(filepath): if format_type is None: raise ValueError - # print 'Error! Input file extension is not supported by qtim_tools. Returning None.' else: return format_type @@ -436,12 +432,6 @@ def convert_input_2_numpy(input_data, input_format=None, return_all=False): def save_data(input_data, output_filename, reference_data=None, metadata=None, affine=None, output_format=None, **kwargs): - """ This is a bit convoluted. - - TODO: Documentation, rearrange reference_nifti and output_filepath, and - propagate changes to the rest of qtim_tools. 
- """ - if output_format is None: output_format = check_format(output_filename) diff --git a/deepneuro/utilities/util.py b/deepneuro/utilities/util.py old mode 100644 new mode 100755 diff --git a/deepneuro/utilities/visualize.py b/deepneuro/utilities/visualize.py old mode 100644 new mode 100755 index 434d39f..db70d54 --- a/deepneuro/utilities/visualize.py +++ b/deepneuro/utilities/visualize.py @@ -6,7 +6,7 @@ from deepneuro.utilities.util import replace_suffix -def check_data(output_data=None, data_collection=None, batch_size=4, merge_batch=True, show_output=True, output_filepath=None, viz_rows=2, viz_mode_3d='2d_center', color_range=None, output_groups=None, combine_outputs=False): +def check_data(output_data=None, data_collection=None, batch_size=4, merge_batch=True, show_output=True, output_filepath=None, viz_rows=6, viz_mode_3d='2d_center', color_range=None, output_groups=None, combine_outputs=False): if data_collection is not None: generator = data_collection.data_generator(perpetual=True, verbose=False, batch_size=batch_size) @@ -41,6 +41,8 @@ def check_data(output_data=None, data_collection=None, batch_size=4, merge_batch if show_output: fig, axarr = plt.subplots(len(output_images.keys())) + if type(axarr) is not list: + axarr = [axarr] for plot_idx, (label, data) in enumerate(output_images.items()): diff --git a/docs/Makefile b/docs/Makefile old mode 100644 new mode 100755 diff --git a/docs/make.bat b/docs/make.bat old mode 100644 new mode 100755 diff --git a/docs/source/.doctrees/deepneuro.doctree b/docs/source/.doctrees/deepneuro.doctree old mode 100644 new mode 100755 diff --git a/docs/source/.doctrees/deepneuro.models.doctree b/docs/source/.doctrees/deepneuro.models.doctree old mode 100644 new mode 100755 diff --git a/docs/source/.doctrees/deepneuro.testing.doctree b/docs/source/.doctrees/deepneuro.testing.doctree old mode 100644 new mode 100755 diff --git a/docs/source/.doctrees/deepneuro.train.doctree b/docs/source/.doctrees/deepneuro.train.doctree old mode 100644 new mode 100755 diff --git a/docs/source/.doctrees/environment.pickle b/docs/source/.doctrees/environment.pickle old mode 100644 new mode 100755 diff --git a/docs/source/.doctrees/index.doctree b/docs/source/.doctrees/index.doctree old mode 100644 new mode 100755 diff --git a/docs/source/.doctrees/modules.doctree b/docs/source/.doctrees/modules.doctree old mode 100644 new mode 100755 diff --git a/docs/source/conf.py b/docs/source/conf.py old mode 100644 new mode 100755 diff --git a/docs/source/deepneuro.models.rst b/docs/source/deepneuro.models.rst old mode 100644 new mode 100755 diff --git a/docs/source/deepneuro.rst b/docs/source/deepneuro.rst old mode 100644 new mode 100755 diff --git a/docs/source/deepneuro.testing.rst b/docs/source/deepneuro.testing.rst old mode 100644 new mode 100755 diff --git a/docs/source/deepneuro.train.rst b/docs/source/deepneuro.train.rst old mode 100644 new mode 100755 diff --git a/docs/source/index.rst b/docs/source/index.rst old mode 100644 new mode 100755 diff --git a/docs/source/modules.rst b/docs/source/modules.rst old mode 100644 new mode 100755 diff --git a/entrypoint.sh b/entrypoint.sh old mode 100644 new mode 100755 diff --git a/misc/DeepInfer/Segment_GBM/DeepNeuro_Glioblastoma.json b/misc/DeepInfer/Segment_GBM/DeepNeuro_Glioblastoma.json old mode 100644 new mode 100755 diff --git a/misc/DeepInfer/Segment_GBM/Dockerfile b/misc/DeepInfer/Segment_GBM/Dockerfile old mode 100644 new mode 100755 diff --git a/misc/DeepInfer/Segment_GBM/entrypoint.py 
b/misc/DeepInfer/Segment_GBM/entrypoint.py old mode 100644 new mode 100755 diff --git a/misc/DeepInfer/Segment_GBM/entrypoint.sh b/misc/DeepInfer/Segment_GBM/entrypoint.sh old mode 100644 new mode 100755 diff --git a/package_resources/logos/DeepNeuro.PNG b/package_resources/logos/DeepNeuro.PNG old mode 100644 new mode 100755 diff --git a/package_resources/logos/DeepNeuro_alt.PNG b/package_resources/logos/DeepNeuro_alt.PNG old mode 100644 new mode 100755 diff --git a/setup.cfg b/setup.cfg old mode 100644 new mode 100755 diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 index ec75b89..4c23651 --- a/setup.py +++ b/setup.py @@ -18,7 +18,6 @@ setup( name = 'deepneuro', - # packages = ['qtim_tools'], # this must be the same as the name above version = '0.1.1', description = DOCLINES[0], packages = find_packages(), diff --git a/tox.ini b/tox.ini old mode 100644 new mode 100755