prog: progressive GAN progress
AndrewBeers committed Aug 10, 2018
1 parent ff85edc commit af570c9
Showing 104 changed files with 1,102 additions and 487 deletions.
1 change: 1 addition & 0 deletions .gitignore 100644 → 100755
@@ -12,6 +12,7 @@ checkpoint
*.DS_Store
*.gz
*.bak
deepneuro/local

# Byte-compiled / optimized / DLL files
__pycache__/
Empty file modified .travis.yml 100644 → 100755
Empty file.
Empty file modified Dockerfile 100644 → 100755
Empty file.
Empty file modified LICENSE 100644 → 100755
Empty file.
Empty file modified README.md 100644 → 100755
Empty file.
Empty file modified coverage_wrapper.py 100644 → 100755
Empty file.
Empty file modified deepneuro/__init__.py 100644 → 100755
Empty file.
Empty file modified deepneuro/augmentation/__init__.py 100644 → 100755
Empty file.
Empty file modified deepneuro/augmentation/augment.py 100644 → 100755
Empty file.
Empty file modified deepneuro/data/__init__.py 100644 → 100755
Empty file.
30 changes: 20 additions & 10 deletions deepneuro/data/data_collection.py 100644 → 100755
@@ -13,17 +13,24 @@
from deepneuro.utilities.conversion import read_image_files
from deepneuro.data.data_group import DataGroup
from deepneuro.data.data_load import parse_modality_directories, parse_subject_directory
from deepneuro.utilities.util import add_parameter


class DataCollection(object):

def __init__(self, data_directory=None, data_storage=None, modality_dict=None, spreadsheet_dict=None, value_dict=None, case_list=None, verbose=False):
def __init__(self, data_directory=None, data_storage=None, data_group_dict=None, spreadsheet_dict=None, value_dict=None, case_list=None, verbose=False, **kwargs):

# Input vars
self.data_directory = data_directory
self.data_storage = data_storage
self.modality_dict = modality_dict
self.data_group_dict = data_group_dict
self.spreadsheet_dict = spreadsheet_dict

# File location variables
add_parameter(self, kwargs, 'source', 'directories')
add_parameter(self, kwargs, 'recursive', False)
add_parameter(self, kwargs, 'file_identifying_chars', None)

self.value_dict = value_dict
self.case_list = case_list
self.verbose = verbose
@@ -42,6 +49,9 @@ def __init__(self, data_directory=None, data_storage=None, modality_dict=None, s
# Data group variables
self.data_groups = {}

if data_group_dict is not None or data_storage is not None:
self.fill_data_groups()

def add_case(self, case_dict, case_name=None):

# Currently only works for filepaths. Maybe add functionality for python data types, hdf5s?
@@ -61,21 +71,21 @@ def add_case(self, case_dict, case_name=None):
self.preprocessed_cases[case_name] = {}
self.total_cases = len(self.cases)

def fill_data_groups(self, source='direcotries', recursive=False, identifying_chars=None):
def fill_data_groups(self):

""" Populates data collection variables from either a directory structure or an hdf5 file.
Repeated usage may have unexpected results.
"""

if source == 'files':
if self.source == 'files':

# Create DataGroups for this DataCollection.
for modality_group in self.modality_dict:
for modality_group in self.data_group_dict:
if modality_group not in list(self.data_groups.keys()):
self.data_groups[modality_group] = DataGroup(modality_group)
self.data_groups[modality_group].source = 'file'

parse_modality_directories(self, self.modality_dict, case_list=self.case_list, recursive=recursive, identifying_chars=identifying_chars)
parse_modality_directories(self, self.data_group_dict, case_list=self.case_list, recursive=self.recursive, file_identifying_chars=self.file_identifying_chars)

self.total_cases = len(self.cases)

@@ -85,20 +95,20 @@ def fill_data_groups(self, source='direcotries', recursive=False, identifying_ch
else:
print('Found', self.total_cases, 'number of cases..')

elif self.data_directory is not None and source == 'directories':
elif self.data_directory is not None and self.source == 'directories':

if self.verbose:
print('Gathering image data from...', self.data_directory, '\n')

# Create DataGroups for this DataCollection.
for modality_group in self.modality_dict:
for modality_group in self.data_group_dict:
if modality_group not in list(self.data_groups.keys()):
self.data_groups[modality_group] = DataGroup(modality_group)
self.data_groups[modality_group].source = 'directory'

# Iterate through directories.. Always looking for a better way to check optional list typing.
if isinstance(self.data_directory, str):
if not os.path.exist(self.data_directory):
if not os.path.exists(self.data_directory):
print('The data directory you have input does not exist!')
exit(1)
directory_list = sorted(glob.glob(os.path.join(self.data_directory, "*/")))
@@ -112,7 +122,7 @@ def fill_data_groups(self, source='direcotries', recursive=False, identifying_ch

for subject_dir in directory_list:

parse_subject_directory(subject_dir, case_list=self.case_list)
parse_subject_directory(self, subject_dir, case_list=self.case_list)

self.total_cases = len(self.cases)

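Usage sketch, not part of the diff: the reworked DataCollection constructor could be called along these lines, assuming a directory-per-case layout; the paths and modality filenames below are placeholders.

    from deepneuro.data.data_collection import DataCollection

    # Hypothetical layout: one folder per case under /data/cases, each holding
    # FLAIR.nii.gz, T1POST.nii.gz, and a label map named groundtruth.nii.gz.
    collection = DataCollection(
        data_directory='/data/cases',
        data_group_dict={'input_data': ['FLAIR.nii.gz', 'T1POST.nii.gz'],
                         'ground_truth': ['groundtruth.nii.gz']},
        verbose=True)

    # Because data_group_dict is supplied, fill_data_groups() now runs inside
    # __init__; 'source' defaults to 'directories', so the directory parser is used.
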
Empty file modified deepneuro/data/data_group.py 100644 → 100755
Empty file.
31 changes: 23 additions & 8 deletions deepneuro/data/data_load.py 100644 → 100755
@@ -16,7 +16,7 @@ def parse_subject_directory(data_collection, subject_dir, case_list=None):
return

# Search for modality files, and skip those missing with files modalities.
for data_group, modality_labels in data_collection.modality_dict.items():
for data_group, modality_labels in data_collection.data_group_dict.items():

modality_group_files = []
for modality in modality_labels:
@@ -47,16 +47,21 @@ def parse_subject_directory(data_collection, subject_dir, case_list=None):
data_collection.preprocessed_cases[case_name] = defaultdict(list)


def parse_modality_directories(data_collection, modality_dict, case_list=None, recursive=True, verbose=True, identifying_chars=None):
def parse_modality_directories(data_collection, data_group_dict, case_list=None, recursive=True, verbose=True, file_identifying_chars=None):

""" Recursive functionality not yet available
"""

# Cases not yet implemented.

# Pulling from multiple directories not yet implemented.
lead_group = modality_dict[modality_dict.keys()[0]]
lead_directory = os.path.abspath(lead_group[0])
lead_group = data_group_dict[data_group_dict.keys()[0]]

if type(lead_group[0]) is list:
lead_directory = os.path.abspath(lead_group[0][0])
else:
lead_directory = os.path.abspath(lead_group[0])

lead_files = []

for directory in lead_group:
@@ -73,11 +78,11 @@ def parse_modality_directories(data_collection, modality_dict, case_list=None, r
base_filedir = os.path.dirname(lead_filepath).split(lead_directory, 1)[1]
base_filepath = nifti_splitext(lead_filepath)[0]

if identifying_chars is not None:
base_filepath = os.path.basename(os.path.join(os.path.dirname(base_filepath), os.path.basename(base_filepath)[:identifying_chars]))
if file_identifying_chars is not None:
base_filepath = os.path.basename(os.path.join(os.path.dirname(base_filepath), os.path.basename(base_filepath)[:file_identifying_chars]))

# Search for modality files, and skip those missing with files modalities.
for data_group, modality_labels in data_collection.modality_dict.items():
for data_group, modality_labels in data_collection.data_group_dict.items():

modality_group_files = []

@@ -104,4 +109,14 @@ def parse_modality_directories(data_collection, modality_dict, case_list=None, r
if lead_filepath is not None:
case_name = lead_filepath
data_collection.cases.append(case_name)
data_collection.preprocessed_cases[case_name] = defaultdict(list)
data_collection.preprocessed_cases[case_name] = defaultdict(list)


def parse_csv_file():

return


if __name__ == '__main__':

pass
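
Usage sketch, not part of the diff: the 'files' source handled by parse_modality_directories, with the renamed file_identifying_chars option. The directory names are placeholders, and it is assumed here that each data group maps to a list of directories containing individual image files rather than case folders.

    # Match cases across data groups by the first 8 characters of each filename,
    # pulling every data group from its own flat directory of image files.
    collection = DataCollection(
        data_group_dict={'input_data': ['/data/slices'],
                         'ground_truth': ['/data/labels']},
        source='files',
        file_identifying_chars=8)
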
Empty file modified deepneuro/data/data_utilities.py 100644 → 100755
Empty file.
Empty file modified deepneuro/docker/__init__.py 100644 → 100755
Empty file.
Empty file modified deepneuro/docker/docker_cli.py 100644 → 100755
Empty file.
Empty file modified deepneuro/external/__init__.py 100644 → 100755
Empty file.
Empty file modified deepneuro/interface/__init__.py 100644 → 100755
Empty file.
Empty file modified deepneuro/interface/master_cli.py 100644 → 100755
Empty file.
Empty file modified deepneuro/interface/web_wrapper.py 100644 → 100755
Empty file.
Empty file modified deepneuro/load/__init__.py 100644 → 100755
Empty file.
Empty file modified deepneuro/load/load.py 100644 → 100755
Empty file.
Empty file modified deepneuro/models/__init__.py 100644 → 100755
Empty file.
126 changes: 114 additions & 12 deletions deepneuro/models/blocks.py 100644 → 100755
@@ -1,40 +1,142 @@
import tensorflow as tf

from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling
from deepneuro.models.ops import leaky_relu
from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling
from deepneuro.models.ops import leaky_relu, minibatch_state_concat


def generator(model, latent_var, depth=1, initial_size=4, reuse=False, name=None):
def generator(model, latent_var, depth=1, initial_size=4, reuse=False, transition=False, alpha_transition=0, name=None):

convs = []
"""
"""

with tf.variable_scope(name) as scope:

convs = []

if reuse:
scope.reuse_variables()

convs += [tf.reshape(latent_var, [model.training_batch_size] + [1] * model.dim + [model.latent_size])]
convs += [tf.reshape(latent_var, [tf.shape(latent_var)[0]] + [1] * model.dim + [model.latent_size])]

# TODO: refactor the padding on this step.
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(4,) * model.dim, stride_size=(1,) * model.dim, padding='Other', name='generator_conv_1_latent', dim=model.dim)), model.dim)
# TODO: refactor the padding on this step. Or replace with a dense layer?
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(4,) * model.dim, stride_size=(1,) * model.dim, padding='Other', name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim)), model.dim)

convs += [tf.reshape(convs[-1], [model.training_batch_size] + [initial_size] * model.dim + [model.get_filter_num(0)])]
convs += [tf.reshape(convs[-1], [tf.shape(latent_var)[0]] + [initial_size] * model.dim + [model.get_filter_num(0)])]

convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), stride_size=(1,) * model.dim, name='generator_conv_2_latent', dim=model.dim)), dim=model.dim)
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)

for i in range(depth):

if i == depth - 1 and transition:
#To RGB
transition_conv = DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)
transition_conv = DnUpsampling(transition_conv, (2,) * model.dim, dim=model.dim)

convs += [DnUpsampling(convs[-1], (2,) * model.dim, dim=model.dim)]
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), stride_size=(1,) * model.dim, name='generator_conv_1_depth_{}_{}'.format(i, convs[-1].shape[1]), dim=model.dim)), dim=model.dim)
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)

convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), stride_size=(1,) * model.dim, name='generator_conv_2_depth_{}_{}'.format(i, convs[-1].shape[1]), dim=model.dim)), dim=model.dim)]
convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='generator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim)), dim=model.dim)]

#To RGB
convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_final_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]
convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]

if transition:
convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1]

return convs[-1]


def discriminator(model, input_image, reuse=False, name=None, depth=1, transition=False, **kwargs):

"""
"""

with tf.variable_scope(name) as scope:

if reuse:
scope.reuse_variables()

if transition:
transition_conv = DnAveragePooling(input_image, (2,) * model.dim, dim=model.dim)
transition_conv = leaky_relu(DnConv(transition_conv, output_dim=model.get_filter_num(depth - 1), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(transition_conv.shape[1]), dim=model.dim))

convs = []

# fromRGB
convs += [leaky_relu(DnConv(input_image, output_dim=model.get_filter_num(depth), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(input_image.shape[1]), dim=model.dim))]

for i in range(depth):

convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))]

convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - 1 - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim))]
convs[-1] = DnAveragePooling(convs[-1], dim=model.dim)

convs += [minibatch_state_concat(convs[-1])]
convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(3,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))

#for D -- what's going on with the channel number here?
output = tf.reshape(convs[-1], [tf.shape(convs[-1])[0], 4 * 4 * model.get_filter_num(0)])

# Currently erroring
# discriminate_output = dense(output, output_size=1, name='discriminator_n_fully')

discriminate_output = tf.layers.dense(output, model.get_filter_num(0), name='discriminator_n_1_fully')
discriminate_output = tf.layers.dense(discriminate_output, 1, name='discriminator_n_2_fully')

return tf.nn.sigmoid(discriminate_output), discriminate_output


def unet(model, input_tensor, backend='tensorflow'):

left_outputs = []

for level in range(model.depth):

filter_num = int(model.max_filter / (2 ** (model.depth - level)) / model.downsize_filters_factor)

if level == 0:
left_outputs += [DnConv(input_tensor, filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)]
left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
else:
left_outputs += [DnMaxPooling(left_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
left_outputs[level] = DnConv(left_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)
left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)

if model.dropout is not None and model.dropout != 0:
left_outputs[level] = DnDropout(model.dropout)(left_outputs[level])

if model.batch_norm:
left_outputs[level] = DnBatchNormalization(left_outputs[level])

right_outputs = [left_outputs[model.depth - 1]]

for level in range(model.depth):

filter_num = int(model.max_filter / (2 ** (level)) / model.downsize_filters_factor)

if level > 0:
right_outputs += [DnUpsampling(right_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
right_outputs[level] = concatenate([right_outputs[level], left_outputs[model.depth - level - 1]], axis=model.dim + 1)
right_outputs[level] = DnConv(right_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_1'.format(level), backend=backend)
right_outputs[level] = DnConv(right_outputs[level], int(filter_num / 2), model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_2'.format(level), backend=backend)
else:
continue

if model.dropout is not None and model.dropout != 0:
right_outputs[level] = DnDropout(model.dropout)(right_outputs[level])

if model.batch_norm:
right_outputs[level] = DnBatchNormalization()(right_outputs[level])

output_layer = DnConv(right_outputs[level], 1, (1, ) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='end_conv', backend=backend)

# TODO: Brainstorm better way to specify outputs
if model.input_tensor is not None:
return output_layer

return model.model

# def progressive_generator(model, latent_var, progressive_depth=1, name=None, transition=False, alpha_transition=0.0):

# with tf.variable_scope(name) as scope:
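
Context, not part of the diff: the transition branches added to generator() and discriminator() follow the progressive-growing fade-in, blending the resized output of the previous resolution with the newly added block while alpha_transition ramps from 0 to 1. A minimal restatement of the generator-side blend:

    # alpha_transition == 0 keeps only the upsampled lower-resolution output;
    # alpha_transition == 1 keeps only the newly added higher-resolution block.
    def faded_output(transition_conv, new_conv, alpha_transition):
        return (1 - alpha_transition) * transition_conv + alpha_transition * new_conv

The discriminator presumably applies the symmetric blend on its downsampled input path, though that part of the hunk is truncated above. The new unet() block reuses the same DnConv, DnMaxPooling, and DnUpsampling wrappers, applying dropout and batch normalization per level when configured.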
