From 45d2d0dd241a14bc4564eb5c5edeebd0b0f1bbe4 Mon Sep 17 00:00:00 2001 From: nhiggs Date: Tue, 5 Jun 2018 15:05:57 -0600 Subject: [PATCH] Revert "Network compression" --- deepplantphenomics/__init__.py | 2 +- deepplantphenomics/deepplantpheno.py | 279 ++++++------------ deepplantphenomics/layers.py | 5 - deepplantphenomics/loaders.py | 6 +- deepplantphenomics/tools.py | 2 +- docs/Compression.md | 72 ----- .../arabidopsis_strain_classifier_test.py | 4 +- examples/rosette_leaf_count_test.py | 4 +- mkdocs.yml | 1 - 9 files changed, 98 insertions(+), 277 deletions(-) delete mode 100644 docs/Compression.md diff --git a/deepplantphenomics/__init__.py b/deepplantphenomics/__init__.py index d4ece9e..f767026 100644 --- a/deepplantphenomics/__init__.py +++ b/deepplantphenomics/__init__.py @@ -5,4 +5,4 @@ from .deepplantpheno import * from .tools import * -from .networks import * +from .networks import * \ No newline at end of file diff --git a/deepplantphenomics/deepplantpheno.py b/deepplantphenomics/deepplantpheno.py index ce46da9..f7a6251 100644 --- a/deepplantphenomics/deepplantpheno.py +++ b/deepplantphenomics/deepplantpheno.py @@ -1,7 +1,6 @@ -from __future__ import print_function - from . import layers from . import loaders +from . import preprocessing from . import definitions from . import networks import numpy as np @@ -13,13 +12,6 @@ import warnings import copy import matplotlib.pyplot as plt -import math - -try: - from . import preprocessing -except ModuleNotFoundError: - print("PlantCV not found, preprocessing will be unavailable") - class DPPModel(object): # Operation settings @@ -479,7 +471,7 @@ def __add_layers_to_graph(self): if callable(getattr(layer, 'add_to_graph', None)): layer.add_to_graph() - def __assemble_graph(self, iteration=None): + def __assemble_graph(self): with self.__graph.as_default(): self.__log('Parsing dataset...') @@ -608,13 +600,13 @@ def __assemble_graph(self, iteration=None): # Calculate test accuracy if self.__has_moderation: - self.x_test, self.__graph_ops['y_test'], mod_w_test = tf.train.batch( + x_test, self.__graph_ops['y_test'], mod_w_test = tf.train.batch( [self.__test_images, self.__test_labels, self.__test_moderation_features], batch_size=self.__batch_size, num_threads=self.__num_threads, capacity=self.__queue_capacity) else: - self.x_test, self.__graph_ops['y_test'] = tf.train.batch([self.__test_images, self.__test_labels], + x_test, self.__graph_ops['y_test'] = tf.train.batch([self.__test_images, self.__test_labels], batch_size=self.__batch_size, num_threads=self.__num_threads, capacity=self.__queue_capacity) @@ -623,7 +615,7 @@ def __assemble_graph(self, iteration=None): if self.__problem_type == definitions.ProblemType.REGRESSION: self.__graph_ops['y_test'] = loaders.label_string_to_tensor(self.__graph_ops['y_test'], self.__batch_size, self.__num_regression_outputs) - self.x_test = tf.reshape(self.x_test, shape=[-1, self.__image_height, self.__image_width, self.__image_depth]) + x_test = tf.reshape(x_test, shape=[-1, self.__image_height, self.__image_width, self.__image_depth]) if self.__problem_type == definitions.ProblemType.SEMANTICSEGMETNATION: self.__graph_ops['y_test'] = tf.reshape(self.__graph_ops['y_test'], shape=[-1, self.__image_height, self.__image_width, 1]) @@ -631,7 +623,7 @@ def __assemble_graph(self, iteration=None): # Take a slice of image. Same size and location as the slice from training. 
patch_width = self.__patch_width patch_height = self.__patch_height - self.x_test = tf.image.extract_glimpse(self.x_test, [patch_height, patch_width], offsets, + x_test = tf.image.extract_glimpse(x_test, [patch_height, patch_width], offsets, normalized=False, centered=False) if self.__problem_type == definitions.ProblemType.SEMANTICSEGMETNATION: self.__graph_ops['y_test'] = tf.image.extract_glimpse(self.__graph_ops['y_test'], @@ -639,9 +631,9 @@ def __assemble_graph(self, iteration=None): normalized=False, centered=False) if self.__has_moderation: - self.__graph_ops['x_test_predicted'] = self.forward_pass(self.x_test, deterministic=True, moderation_features=mod_w_test) + self.__graph_ops['x_test_predicted'] = self.forward_pass(x_test, deterministic=True, moderation_features=mod_w_test) else: - self.__graph_ops['x_test_predicted'] = self.forward_pass(self.x_test, deterministic=True) + self.__graph_ops['x_test_predicted'] = self.forward_pass(x_test, deterministic=True) if self.__problem_type == definitions.ProblemType.CLASSIFICATION: test_class_predictions = tf.argmax(tf.nn.softmax(self.__graph_ops['x_test_predicted']), 1) @@ -665,18 +657,18 @@ def __assemble_graph(self, iteration=None): if self.__tb_dir is not None: self.__log('Creating Tensorboard summaries...') # Summaries for any problem type - tf.summary.scalar('train/loss%s' % iteration, self.__graph_ops['cost'], collections=['custom_summaries']) - tf.summary.scalar('train/learning_rate%s' % iteration, self.__learning_rate, collections=['custom_summaries']) - tf.summary.scalar('train/l2_loss%s' % iteration, l2_cost, collections=['custom_summaries']) + tf.summary.scalar('train/loss', self.__graph_ops['cost'], collections=['custom_summaries']) + tf.summary.scalar('train/learning_rate', self.__learning_rate, collections=['custom_summaries']) + tf.summary.scalar('train/l2_loss', l2_cost, collections=['custom_summaries']) filter_summary = self.__get_weights_as_image(self.__first_layer().weights) - tf.summary.image('filters/first%s' % iteration, filter_summary, collections=['custom_summaries']) + tf.summary.image('filters/first', filter_summary, collections=['custom_summaries']) # Summaries for classification problems if self.__problem_type == definitions.ProblemType.CLASSIFICATION: - tf.summary.scalar('train/accuracy%s' % iteration, self.__graph_ops['accuracy'], collections=['custom_summaries']) - tf.summary.scalar('test/accuracy%s' % iteration, self.__graph_ops['test_accuracy'], collections=['custom_summaries']) - tf.summary.histogram('train/class_predictions%s' % iteration, class_predictions, collections=['custom_summaries']) - tf.summary.histogram('test/class_predictions%s' % iteration, test_class_predictions, + tf.summary.scalar('train/accuracy', self.__graph_ops['accuracy'], collections=['custom_summaries']) + tf.summary.scalar('test/accuracy', self.__graph_ops['test_accuracy'], collections=['custom_summaries']) + tf.summary.histogram('train/class_predictions', class_predictions, collections=['custom_summaries']) + tf.summary.histogram('test/class_predictions', test_class_predictions, collections=['custom_summaries']) # Summaries for regression @@ -699,20 +691,20 @@ def __assemble_graph(self, iteration=None): # Summaries for each layer for layer in self.__layers: if hasattr(layer, 'name') and not isinstance(layer, layers.batchNormLayer): - tf.summary.histogram('weights%s/' %iteration + layer.name, layer.weights, collections=['custom_summaries']) - tf.summary.histogram('biases%s/' %iteration + layer.name, layer.biases, 
collections=['custom_summaries']) - tf.summary.histogram('activations%s/' %iteration + layer.name, layer.activations, + tf.summary.histogram('weights/' + layer.name, layer.weights, collections=['custom_summaries']) + tf.summary.histogram('biases/' + layer.name, layer.biases, collections=['custom_summaries']) + tf.summary.histogram('activations/' + layer.name, layer.activations, collections=['custom_summaries']) # Summaries for gradients for index, grad in enumerate(gradients): - tf.summary.histogram("gradients%s/" %iteration + variables[index].name, gradients[index], + tf.summary.histogram("gradients/" + variables[index].name, gradients[index], collections=['custom_summaries']) - tf.summary.histogram("gradient_global_norm%s/" %iteration, global_grad_norm, collections=['custom_summaries']) + tf.summary.histogram("gradient_global_norm/", global_grad_norm, collections=['custom_summaries']) self.__graph_ops['merged'] = tf.summary.merge_all(key='custom_summaries') - def begin_training(self, return_test_loss=False, shut_down=True, iteration=None): + def begin_training(self, return_test_loss=False): """ Initialize the network and either run training to the specified max epoch, or load trainable variables. The full test accuracy is calculated immediately afterward. Finally, the trainable parameters are saved and @@ -730,184 +722,98 @@ def begin_training(self, return_test_loss=False, shut_down=True, iteration=None) # "'DPPModel.add_convolutional_layer()'. See documentation for a complete list of layers.") with self.__graph.as_default(): - with tf.variable_scope('dpp', reuse=tf.AUTO_REUSE): - self.__assemble_graph(iteration=iteration) - - # Either load the network parameters from a checkpoint file or start training - if self.__load_from_saved is not False: - self.load_state() - - self.__initialize_queue_runners() + self.__assemble_graph() - self.compute_full_test_accuracy() - - if shut_down: - self.shut_down() - else: - if self.__tb_dir is not None: - train_writer = tf.summary.FileWriter(self.__tb_dir, self.__session.graph) - - self.__log('Initializing parameters...') - tf.contrib.quantize.create_training_graph() - init_op = tf.global_variables_initializer() - self.__session.run(init_op) - - self.__initialize_queue_runners() - - self.__log('Beginning training...') + # Either load the network parameters from a checkpoint file or start training + if self.__load_from_saved is not False: + self.load_state() - self.__set_learning_rate() + self.__initialize_queue_runners() - for i in range(self.__maximum_training_batches): - start_time = time.time() - self.__global_epoch = i + self.compute_full_test_accuracy() - self.__session.run(self.__graph_ops['optimizer']) + self.shut_down() + else: + if self.__tb_dir is not None: + train_writer = tf.summary.FileWriter(self.__tb_dir, self.__session.graph) - if self.__global_epoch > 0 and self.__global_epoch % self.__report_rate == 0: - elapsed = time.time() - start_time + self.__log('Initializing parameters...') + init_op = tf.global_variables_initializer() + self.__session.run(init_op) - if self.__tb_dir is not None: - summary = self.__session.run(self.__graph_ops['merged']) - train_writer.add_summary(summary, i) + self.__initialize_queue_runners() - if self.__problem_type == definitions.ProblemType.CLASSIFICATION: - loss, epoch_accuracy, epoch_test_accuracy = self.__session.run( - [self.__graph_ops['cost'], - self.__graph_ops['accuracy'], - self.__graph_ops['test_accuracy']]) + self.__log('Beginning training...') - samples_per_sec = self.__batch_size / elapsed + 
self.__set_learning_rate() - self.__log( - 'Results for batch {} (epoch {}) - Loss: {:.5f}, Training Accuracy: {:.4f}, samples/sec: {:.2f}' - .format(i, - i / (self.__total_training_samples / self.__batch_size), - loss, - epoch_accuracy, - samples_per_sec)) - elif self.__problem_type == definitions.ProblemType.REGRESSION or \ - self.__problem_type == definitions.ProblemType.SEMANTICSEGMETNATION: - loss, epoch_test_loss = self.__session.run([self.__graph_ops['cost'], - self.__graph_ops['test_cost']]) + for i in range(self.__maximum_training_batches): + start_time = time.time() + self.__global_epoch = i - samples_per_sec = self.__batch_size / elapsed + self.__session.run(self.__graph_ops['optimizer']) - self.__log( - 'Results for batch {} (epoch {}) - Loss: {}, samples/sec: {:.2f}' - .format(i, - i / (self.__total_training_samples / self.__batch_size), - loss, - samples_per_sec)) + if self.__global_epoch > 0 and self.__global_epoch % self.__report_rate == 0: + elapsed = time.time() - start_time - if self.__save_checkpoints and self.__global_epoch % (self.__report_rate * 100) == 0: - self.save_state() - else: - loss = self.__session.run([self.__graph_ops['cost']]) + if self.__tb_dir is not None: + summary = self.__session.run(self.__graph_ops['merged']) + train_writer.add_summary(summary, i) - if loss == 0.0: - self.__log('Stopping due to zero loss') - break + if self.__problem_type == definitions.ProblemType.CLASSIFICATION: + loss, epoch_accuracy, epoch_test_accuracy = self.__session.run( + [self.__graph_ops['cost'], + self.__graph_ops['accuracy'], + self.__graph_ops['test_accuracy']]) - if i == self.__maximum_training_batches - 1: - self.__log('Stopping due to maximum epochs') + samples_per_sec = self.__batch_size / elapsed - self.save_state() + self.__log( + 'Results for batch {} (epoch {}) - Loss: {:.5f}, Training Accuracy: {:.4f}, samples/sec: {:.2f}' + .format(i, + i / (self.__total_training_samples / self.__batch_size), + loss, + epoch_accuracy, + samples_per_sec)) + elif self.__problem_type == definitions.ProblemType.REGRESSION or \ + self.__problem_type == definitions.ProblemType.SEMANTICSEGMETNATION: + loss, epoch_test_loss = self.__session.run([self.__graph_ops['cost'], + self.__graph_ops['test_cost']]) - self.compute_full_test_accuracy() + samples_per_sec = self.__batch_size / elapsed - if shut_down: - self.shut_down() + self.__log( + 'Results for batch {} (epoch {}) - Loss: {}, samples/sec: {:.2f}' + .format(i, + i / (self.__total_training_samples / self.__batch_size), + loss, + samples_per_sec)) - if return_test_loss: - return final_test_loss + if self.__save_checkpoints and self.__global_epoch % (self.__report_rate * 100) == 0: + self.save_state() else: - return - - def compress(self, times=1, threshold=0.00005, quantize=False, debug=False): - self.set_learning_rate(self.__learning_rate * 0.1) # Use a lower learning rate for re-training - with self.__graph.as_default(): - tf.GraphKeys.PRUNING_MASKS = "pruning_masks" # This prevents pruning variables from being stored with the model - # Iterate through layers and add compression layers - compression_layers = [] - for layer in self.__layers: - self.__log('Looking for layers to compress...') - self.__log('Layer: {} '.format(getattr(layer, 'name', None))) - if isinstance(layer, (layers.convLayer, layers.fullyConnectedLayer)): - # Create pruning mask for low weight connections and prune weight layer - prune_mask = tf.get_variable(layer.name + '_prune', initializer=tf.ones_like(layer.weights), trainable=False, - 
collections=[tf.GraphKeys.PRUNING_MASKS]) - pruned_weights = tf.multiply(layer.weights, prune_mask) - layer.unpruned_weights = layer.weights - - t = tf.sqrt(tf.nn.l2_loss(layer.weights)) * threshold - indicator_matrix = tf.multiply(tf.to_float( - tf.greater_equal(tf.abs(layer.weights), tf.ones_like(layer.weights) * t)), prune_mask) - - layer.update_mask = prune_mask.assign(indicator_matrix) - layer.prune_layer = layer.weights.assign(pruned_weights) - - compression_layers.append(layer) - - # Keep track of the number of connections - nonzero_indicator = tf.to_float(tf.not_equal(layer.weights, tf.zeros_like(layer.weights))) - layer.parameter_count = tf.reduce_sum(nonzero_indicator) - layer.mask_count = tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix, tf.zeros_like(indicator_matrix)))) - - layer.weights = pruned_weights - - dropout_layers = [(idx, layer) for (idx, layer) in enumerate(self.__layers) if isinstance(layer, layers.dropoutLayer)] - for idx, dropout_layer in dropout_layers: - weight_layer = self.__layers[idx-1] - parameter_count = self.__session.run(weight_layer.parameter_count) - - dropout_layer.original_parameter_count = parameter_count - - if quantize: - tf.contrib.quantize.create_training_graph() - self.__session.run(tf.initialize_variables(tf.get_collection(tf.GraphKeys.PRUNING_MASKS))) - for i in range(times): - self.__log("Compression run {}".format(i+1)) - - for layer in compression_layers: - self.__session.run(layer.update_mask) - if debug: - self.__log('Num parameters for prune_mask {} pre-training: {}/{}'.format( - layer.name, self.__session.run(layer.mask_count), self.__session.run(layer.parameter_count))) - self.__log('Loss: {}'.format(self.__session.run(tf.nn.l2_loss(layer.weights)))) - self.__session.run(layer.prune_layer) - - - self.__log("Accuracy after pruning") - self.compute_full_test_accuracy() + loss = self.__session.run([self.__graph_ops['cost']]) - for idx, dropout_layer in dropout_layers: - weight_layer = self.__layers[idx-1] - parameter_count = self.__session.run(weight_layer.parameter_count) - dropout_layer.set_p(1.0 - ((1.0 - dropout_layer.p) * math.sqrt( - parameter_count / dropout_layer.original_parameter_count))) + if loss == 0.0: + self.__log('Stopping due to zero loss') + break - if i < times-1: # do not retrain on the last time - self.begin_training(shut_down=False, iteration=i) - if debug: - for layer in compression_layers: - self.__log('Num parameters for layer {} post-training: {}'.format( - layer.name, self.__session.run(layer.parameter_count))) + if i == self.__maximum_training_batches - 1: + self.__log('Stopping due to maximum epochs') + self.save_state() - self.save_state(step=1) + final_test_loss = self.compute_full_test_accuracy() - if quantize: - tf.contrib.quantize.create_eval_graph() - with open('quantized.pb', 'w') as f: - f.write(str(self.__graph.as_graph_def())) + # Commented out because I wanted to test on the model directly after training on another dataset. 
+ # self.shut_down() - self.__log("Accuracy after quantization") - self.compute_full_test_accuracy() - - self.shut_down() + if return_test_loss: + return final_test_loss + else: + return def begin_training_with_hyperparameter_search(self, l2_reg_limits=None, lr_limits=None, num_steps=3): """ @@ -1115,7 +1021,7 @@ def __get_weights_as_image(self, kernel): return x8 - def save_state(self, directory=None, step=0): + def save_state(self, directory=None): """Save all trainable variables as a checkpoint in the current working path""" self.__log('Saving parameters...') @@ -1129,7 +1035,7 @@ def save_state(self, directory=None, step=0): with self.__graph.as_default(): saver = tf.train.Saver(tf.trainable_variables()) - saver.save(self.__session, dir + '/tfhSaved', global_step=step) + saver.save(self.__session, dir + '/tfhSaved') self.__has_trained = True @@ -1619,7 +1525,7 @@ def add_fully_connected_layer(self, output_size, activation_function, regulariza self.__batch_size, activation_function, self.__weight_initializer, - regularization_coefficient,) + regularization_coefficient) self.__log('Inputs: {0} Outputs: {1}'.format(layer.input_size, layer.output_size)) @@ -1683,7 +1589,7 @@ def add_output_layer(self, regularization_coefficient=None, output_size=None): regularization_coefficient) else: layer = layers.fullyConnectedLayer('output', - copy.deepcopy(self.__last_layer().output_size), + # copy.deepcopy(self.__last_layer().output_size), num_out, reshape, self.__batch_size, @@ -1722,9 +1628,6 @@ def load_dataset_from_directory_with_csv_labels(self, dirname, labels_file, colu self.__log('Total raw examples is %d' % self.__total_raw_samples) self.__log('Total classes is %d' % self.__total_classes) - self.__raw_image_files = image_files - self.__raw_labels = labels - def load_dataset_from_directory_with_segmentation_masks(self, dirname, seg_dirname): """ Loads the png images in the given directory into an internal representation, using binary segmentation diff --git a/deepplantphenomics/layers.py b/deepplantphenomics/layers.py index aa4a3f8..14528bf 100644 --- a/deepplantphenomics/layers.py +++ b/deepplantphenomics/layers.py @@ -165,7 +165,6 @@ def add_to_graph(self): if self.__initializer == 'xavier': self.weights = tf.get_variable(self.name + '_weights', shape=[vec_size, self.output_size], initializer=tf.contrib.layers.xavier_initializer()) - else: self.weights = tf.get_variable(self.name + '_weights', shape=[vec_size, self.output_size], @@ -230,10 +229,6 @@ def forward_pass(self, x, deterministic): else: return tf.nn.dropout(x, self.p) - def set_p(self, p): - print("dropout: %f -> %f" % (self.p, p)) - self.p = p - class moderationLayer(object): """Layer for fusing moderating data into the input vector""" diff --git a/deepplantphenomics/loaders.py b/deepplantphenomics/loaders.py index 2332f7b..37cbde7 100644 --- a/deepplantphenomics/loaders.py +++ b/deepplantphenomics/loaders.py @@ -10,10 +10,10 @@ def split_raw_data(images, labels, ratio, moderation_features=None, augmentation if isinstance(labels, list): if split_labels: labels = [' '.join(map(str, label)) for label in labels] - total_samples = len(labels) - else: - total_samples = labels.get_shape().as_list()[0] + #else: + #total_samples = labels.get_shape().as_list()[0] + total_samples = len(labels) # calculate and perform random split diff --git a/deepplantphenomics/tools.py b/deepplantphenomics/tools.py index bd516c9..afc6430 100644 --- a/deepplantphenomics/tools.py +++ b/deepplantphenomics/tools.py @@ -1,4 +1,4 @@ -from deepplantphenomics 
import networks +from . import networks import numpy as np import cv2 diff --git a/docs/Compression.md b/docs/Compression.md deleted file mode 100644 index 3ab0874..0000000 --- a/docs/Compression.md +++ /dev/null @@ -1,72 +0,0 @@ -## Compression - -DeepPlantPhenomics offers compression similar to [DeepCompression](https://arxiv.org/abs/1510.00149). - -The compression pipeline consists of 2 steps: pruning and quantization, and is to be combined with `gzip` -or a similar compression tool in order to achieve the highest level of compression possible. Pruning is -iterative, so the more compression runs that are made the longer the compression takes to run. - -**NOTE** Due to a bug in tensorflow, quantization is not yet possible. - -The following will build a fully compressed model. - -``` -import deepplantphenomics as dpp - -model = dpp.DPPModel(debug=True, save_checkpoints=True, tensorboard_dir='./tensorlogs', report_rate=1000) -# 3 channels for colour, 1 channel for greyscale -channels = 3 - -# Setup and hyperparameters -model.set_batch_size(4) -model.set_number_of_threads(8) -model.set_image_dimensions(128, 128, channels) -model.set_resize_images(True) -model.set_problem_type('classification') -model.set_train_test_split(0.8) -model.set_learning_rate(0.0001) -model.set_weight_initializer('xavier') -model.set_maximum_training_epochs(200) - -# Augmentation options -# model.set_augmentation_brightness_and_contrast(True) -# model.set_augmentation_flip_horizontal(True) -# model.set_augmentation_flip_vertical(True) -# model.set_augmentation_crop(True) - - -# Load all data for IPPN leaf counting dataset -model.load_ippn_dataset_from_directory('./data/Plant_Phenotyping_Datasets/Plant/Ara2013-Canon/') - -# Define a model architecture -model.add_input_layer() - -model.add_convolutional_layer(filter_dimension=[5, 5, channels, 32], stride_length=1, activation_function='tanh') -model.add_pooling_layer(kernel_size=3, stride_length=2) - -model.add_convolutional_layer(filter_dimension=[5, 5, 32, 64], stride_length=1, activation_function='tanh') -model.add_pooling_layer(kernel_size=3, stride_length=2) - -model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh') -model.add_pooling_layer(kernel_size=3, stride_length=2) - -model.add_fully_connected_layer(output_size=512, activation_function='relu') -model.add_dropout_layer(0.6) -model.add_fully_connected_layer(output_size=256, activation_function='relu') -model.add_output_layer() -# Begin training the regression model -model.begin_training(shut_down=False) - -model.compress(5, quantize=True, debug=True) - -``` - -This will create a protobuf file, `quantized.pb`, that then must be converted into a tensorflow lite quantized model - -``` -bazel-bin/tensorflow/contrib/lite/toco/toco --input_file=../quantized.pb --output_file=model.tflite --input_format TENSORFLOW_GRAPHDEF --output_format=TFLITE --inferfence_type=QUANTIZED_UINT8 --input_shape="1, 128, 128, 3" --input_array=conv1 --output_array=output_weights --std_value=127.5 --mean=127.5 -``` -More information about tensorflow lite and compression can be found [here](https://www.tensorflow.org/performance/quantizationa). - -If the quantization step is skipped (`quantize=False`) the model will still be pruned, and will need to be gzipped in order -to observe any actual compression. 
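For reference, the pruning step that the reverted `compress()` method (and the deleted `docs/Compression.md` above) implemented is magnitude-based: connections whose absolute weight falls below a threshold scaled by the layer's L2 norm are zeroed out through a non-trainable mask, and the mask can only shrink across pruning runs. The snippet below is a minimal standalone sketch of that idea, assuming TensorFlow 1.x; the names (`weights`, `prune_mask`, `THRESHOLD`) are illustrative only and not part of the DPP API.

```
# Standalone sketch of magnitude-based pruning with a non-trainable mask.
# Assumes TensorFlow 1.x (tf.contrib, tf.to_float); names are illustrative only.
import tensorflow as tf

THRESHOLD = 0.00005  # same default threshold the removed compress() used

weights = tf.get_variable('fc1_weights', shape=[256, 128],
                          initializer=tf.contrib.layers.xavier_initializer())
prune_mask = tf.get_variable('fc1_prune', initializer=tf.ones_like(weights),
                             trainable=False)

# Keep only connections whose magnitude is at least THRESHOLD * sqrt(l2_loss(W))
t = tf.sqrt(tf.nn.l2_loss(weights)) * THRESHOLD
keep = tf.to_float(tf.greater_equal(tf.abs(weights), t))
update_mask = tf.assign(prune_mask, keep * prune_mask)  # mask can only shrink
apply_prune = tf.assign(weights, weights * prune_mask)  # zero out pruned weights

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_mask)
    sess.run(apply_prune)
    remaining = sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(weights, 0.0))))
    print('non-zero connections after pruning: %d' % remaining)
```

Because the pruned weights are stored as explicit zeros rather than in a sparse format, the saved model only shrinks once an external compressor such as `gzip` is applied, which is why the deleted documentation pairs pruning with `gzip`.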
diff --git a/examples/arabidopsis_strain_classifier_test.py b/examples/arabidopsis_strain_classifier_test.py index bc6df30..0afd312 100644 --- a/examples/arabidopsis_strain_classifier_test.py +++ b/examples/arabidopsis_strain_classifier_test.py @@ -1,5 +1,3 @@ -from __future__ import print_function - # # Demonstrates the use of tools.classify_arabidopsis_strain on images of rosettes. # Loads filenames from the IPPN dataset by default. @@ -21,6 +19,6 @@ y = dpp.tools.classify_arabidopsis_strain(images) for k,v in zip(images, y): - print('%s: %s' % (os.path.basename(k), v)) + print '%s: %s' % (os.path.basename(k), v) print('Done') diff --git a/examples/rosette_leaf_count_test.py b/examples/rosette_leaf_count_test.py index 36ab3cc..0999393 100644 --- a/examples/rosette_leaf_count_test.py +++ b/examples/rosette_leaf_count_test.py @@ -1,5 +1,3 @@ -from __future__ import print_function - # # Demonstrates the use of tools.predict_rosette_leaf_count on images of rosettes. # Loads filenames from the IPPN dataset by default. @@ -23,4 +21,4 @@ for k,v in zip(images, y): print('%s: %d' % (os.path.basename(k), v)) -print('Done') +print('Done') \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 3b922c8..9d18851 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -10,7 +10,6 @@ pages: - Model Options: Model-Options.md - Data Fusion Options: Data-Fusion-Options.md - Semantic Segmentation: Semantic-Segmentation.md - - Compression: Compression.md - Tools: Tools.md - Hyperparameter Optimization: Hyperparameter-Optimization.md - Tutorials: