Commit
silenced SOME (a few) warnings thrown about by pylint. I won't fix any more now.
lene committed Feb 27, 2016
1 parent 2092e13 commit c46f82e
Showing 10 changed files with 70 additions and 59 deletions.
74 changes: 37 additions & 37 deletions nn_wtf/input_data.py
@@ -30,35 +30,35 @@


def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath


def _read32(bytestream):
-  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
-  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
+    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
+    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
return images_from_bytestream(bytestream, rows, cols, num_images)
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
return images_from_bytestream(bytestream, rows, cols, num_images)


def images_from_bytestream(bytestream, rows, cols, num_images):
@@ -92,20 +92,20 @@ def dense_to_one_hot(labels_dense, num_classes=10):


def extract_labels(filename, one_hot=False):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels


TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
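extract_images() and extract_labels() above read the MNIST IDX header as big-endian 32-bit integers and validate the magic number (2051 for image files, 2049 for label files). The following is an illustrative sketch, not part of this commit, showing the same header parsing against an in-memory stream:

import io
import struct

import numpy


def read_be_uint32(stream):
    # Same idea as _read32() above: read one big-endian unsigned 32-bit integer.
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(stream.read(4), dtype=dt)[0]


# Fake IDX label header: magic number 2049 and an item count of 5,
# followed by 5 uint8 labels.
stream = io.BytesIO(struct.pack('>II', 2049, 5) + bytes([0, 1, 2, 3, 4]))

assert read_be_uint32(stream) == 2049        # magic number for label files
num_items = int(read_be_uint32(stream))      # 5
labels = numpy.frombuffer(stream.read(num_items), dtype=numpy.uint8)
print(labels)                                # [0 1 2 3 4]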
16 changes: 12 additions & 4 deletions nn_wtf/mnist_graph.py
@@ -1,4 +1,4 @@
-from nn_wtf.neural_network_graph import NeuralNetworkGraph
+from nn_wtf.neural_network_graph import NeuralNetworkGraph, CHANGE_THIS_LEARNING_RATE
from nn_wtf.neural_network_graph_mixins import SaverMixin, DEFAULT_TRAIN_DIR, SummaryWriterMixin

__author__ = 'Lene Preuss <lp@sinnwerkstatt.com>'
@@ -14,11 +14,19 @@ class MNISTGraph(NeuralNetworkGraph, SaverMixin, SummaryWriterMixin):
    IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE

    def __init__(
-            self, verbose=True,
-            learning_rate=0.01, layer_sizes=(128, 32, None),
+            self, input_size=None, layer_sizes=(128, 32, None), output_size=None, learning_rate=CHANGE_THIS_LEARNING_RATE,
+            verbose=True,
            train_dir=DEFAULT_TRAIN_DIR,
-            input_size=None, output_size=None
    ):
+        """The MNISTGraph constructor takes no positional args, in contrast with NeuralNetworkGraph.
+
+        :param input_size: ignored, present for client compatibility
+        :param layer_sizes: tuple of sizes of the neural network hidden layers
+        :param output_size: ignored, present for client compatibility
+        :param learning_rate: learning rate for gradient descent optimizer
+        :param verbose: whether to print some info about the training progress
+        :param train_dir: where to write savepoints and summaries
+        """
        NeuralNetworkGraph.__init__(
            self, self.IMAGE_PIXELS, layer_sizes, self.NUM_CLASSES, learning_rate
        )
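With the reordered keyword arguments above, MNISTGraph is meant to be constructed with keyword arguments only; input_size and output_size are accepted but ignored. A hypothetical usage sketch (argument values chosen for illustration, not taken from the repository):

from nn_wtf.mnist_graph import MNISTGraph

graph = MNISTGraph(
    layer_sizes=(128, 32, None),  # default hidden layer sizes from the signature above
    learning_rate=0.1,            # explicit value instead of the CHANGE_THIS_LEARNING_RATE placeholder
    verbose=True,
    train_dir='.nn_wtf-data',     # hypothetical directory; DEFAULT_TRAIN_DIR is the actual default
)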
21 changes: 9 additions & 12 deletions nn_wtf/neural_network_graph.py
@@ -14,11 +14,12 @@
class NeuralNetworkGraph:

    def __init__(self, input_size, layer_sizes, output_size, learning_rate=CHANGE_THIS_LEARNING_RATE):
-        """
-        Args:
-          input_size: number of input channels
-          layer_sizes: Sizes of hidden layers in a tuple or list.
-          output_size: Number of output channels.
+        """Initialize a neural network given its geometry.
+
+        :param input_size: number of input channels
+        :param layer_sizes: tuple of sizes of the neural network hidden layers
+        :param output_size: number of output classes
+        :param learning_rate: learning rate for gradient descent optimizer
        """
        self._setup_geometry(input_size, layer_sizes, output_size)
        self.learning_rate = learning_rate
@@ -59,11 +60,8 @@ def fill_feed_dict(self, data_set, batch_size):
            ....
        }
-        Args:
-          data_set: The set of images and labels, from input_data.read_data_sets()
-        Returns:
-          feed_dict: The feed dictionary mapping from placeholders to values.
+        :param data_set: The set of images and labels, from input_data.read_data_sets()
+        :return The feed dictionary mapping from placeholders to values.
        """
# Create the feed_dict for the placeholders filled with the next `batch size ` examples.
input_feed, labels_feed = data_set.next_batch(batch_size)
@@ -94,8 +92,7 @@ def _set_layer_sizes(self, layer_sizes):
    def _build_neural_network(self):
        """Builds a neural network with the given layers and output size.
-        Returns:
-          logits: Output tensor with the computed logits.
+        :return Output tensor with the computed logits.
        """

assert self.layers == [], 'build_neural_network() has been called before'
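The docstring edits above move from Google-style Args:/Returns: sections to Sphinx/reStructuredText field lists (Sphinx itself expects the return field written as :return: or :returns:, with a trailing colon). A generic example of the field-list format, not taken from the repository:

def scale(values, factor=1.0):
    """Scale each value by a constant factor.

    :param values: iterable of numbers to scale
    :param factor: multiplier applied to every element
    :return: list of scaled values
    """
    return [value * factor for value in values]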
9 changes: 5 additions & 4 deletions nn_wtf/neural_network_optimizer.py
@@ -8,6 +8,7 @@


class NeuralNetworkOptimizer:
"""Attempts to find the best parameters to a neural network to be trained in the fastest way."""

DEFAULT_LEARNING_RATE = 0.1
DEFAULT_LAYER_SIZES = (
@@ -75,7 +76,7 @@ def get_network_geometries(self):
for l3 in self.layer_sizes[2] if l3 is None or l3 <= l2)

def brute_force_optimize_learning_rate(self):
-        raise NotImplemented()
+        raise NotImplementedError()

def timed_run_training(self, data_sets, geometry, max_steps=10000):
graph, cpu, wall = timed_run(self.run_training_once, data_sets, geometry, max_steps)
@@ -99,6 +100,6 @@ def run_training_once(self, data_sets, geometry, max_steps):


def timed_run(function, *args, **kwargs):
-  start_cpu_time, start_wall_time = time.process_time(), time.time()
-  returned = function(*args, **kwargs)
-  return returned, time.process_time()-start_cpu_time, time.time()-start_wall_time
+    start_cpu_time, start_wall_time = time.process_time(), time.time()
+    returned = function(*args, **kwargs)
+    return returned, time.process_time()-start_cpu_time, time.time()-start_wall_time
2 changes: 2 additions & 0 deletions nn_wtf/tests/__init__.py
@@ -1 +1,3 @@
__author__ = 'Lene Preuss <lp@sinnwerkstatt.com>'
+
+"""unit tests for nn_wtf"""
1 change: 1 addition & 0 deletions nn_wtf/tests/images_labels_data_set_test.py
@@ -6,6 +6,7 @@
import unittest

__author__ = 'Lene Preuss <lp@sinnwerkstatt.com>'
+# pylint: disable=missing-docstring

NUM_TRAINING_SAMPLES = 20
IMAGE_WIDTH = 10
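The comment added above silences pylint's missing-docstring check for the whole test module, since a comment-style disable placed at module level applies to the entire file. A generic sketch of the two scopes, not taken from the repository:

# pylint: disable=missing-docstring
# The module-level disable above applies to this whole file. A disable can also
# be scoped to a single statement by placing it on that line:
import os, sys  # pylint: disable=multiple-imports
import unittest


class ExampleTest(unittest.TestCase):

    def test_paths(self):
        self.assertTrue(os.sep)
        self.assertGreater(sys.maxsize, 0)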
1 change: 1 addition & 0 deletions nn_wtf/tests/input_data_test.py
@@ -10,6 +10,7 @@
import unittest

__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
+# pylint: disable=missing-docstring


class InputDataTest(unittest.TestCase):
1 change: 1 addition & 0 deletions nn_wtf/tests/neural_network_graph_test.py
@@ -7,6 +7,7 @@
import unittest

__author__ = 'Lene Preuss <lp@sinnwerkstatt.com>'
+# pylint: disable=missing-docstring


class NeuralNetworkGraphTest(unittest.TestCase):
1 change: 1 addition & 0 deletions nn_wtf/tests/neural_network_optimizer_test.py
@@ -7,6 +7,7 @@
import unittest

__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
+# pylint: disable=missing-docstring


class NeuralNetworkOptimizerTest(unittest.TestCase):
3 changes: 1 addition & 2 deletions nn_wtf/tests/predictor_test.py
@@ -4,9 +4,8 @@

import unittest

-from nn_wtf.trainer import Trainer
-
__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
+# pylint: disable=missing-docstring


class PredictorTest(unittest.TestCase):
