This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

removes python path insert of tests folder for examples #9151

Merged
19 commits merged on Jan 15, 2018
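Every file in this PR makes the same substitution: instead of inserting tests/python/common into sys.path to borrow the test suite's get_data helpers, the examples now import equivalent helpers from mxnet.test_utils. A minimal before/after sketch of the pattern, with names taken from the diffs below:

# Before: reach into the repository's tests folder for the data helper.
import os
import sys
sys.path.append(os.path.join(os.getcwd(), "../../tests/python/common"))
from get_data import MNISTIterator

# After: the equivalent helper ships with the mxnet package itself.
from mxnet.test_utils import get_mnist_iterator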
7 changes: 2 additions & 5 deletions example/adversary/adversary_generation.ipynb
@@ -28,10 +28,7 @@
 "import matplotlib.pyplot as plt\n",
 "import matplotlib.cm as cm\n",
 "\n",
-"import os\n",
-"import sys\n",
-"sys.path.append(os.path.join(os.getcwd(), \"../../tests/python/common\"))\n",
-"from get_data import MNISTIterator"
+"from mxnet.test_utils import get_mnist_iterator"
 ]
 },
 {
@@ -53,7 +50,7 @@
 "source": [
 "dev = mx.cpu()\n",
 "batch_size = 100\n",
-"train_iter, val_iter = mnist_iterator(batch_size=batch_size, input_shape = (1,28,28))"
+"train_iter, val_iter = get_mnist_iterator(batch_size=batch_size, input_shape = (1,28,28))"
 ]
 },
 {
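As a usage note: the new helper returns a (train, val) pair of standard mx.io.DataIter objects, so the rest of the notebook iterates batches exactly as before. A minimal sketch, assuming the helper downloads MNIST on first use as the removed get_data code did:

import mxnet as mx
from mxnet.test_utils import get_mnist_iterator

train_iter, val_iter = get_mnist_iterator(batch_size=100, input_shape=(1, 28, 28))
batch = train_iter.next()                    # one mini-batch
data, label = batch.data[0], batch.label[0]
print(data.shape)                            # (100, 1, 28, 28)
train_iter.reset()                           # rewind before the next epoch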
9 changes: 2 additions & 7 deletions example/caffe/data.py
@@ -15,19 +15,14 @@
 # specific language governing permissions and limitations
 # under the License.

-import sys
-import os
-# code to automatically download dataset
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
-import get_data
 import mxnet as mx
+from mxnet.test_utils import get_mnist_ubyte

 def get_iterator(data_shape, use_caffe_data):
     def get_iterator_impl_mnist(args, kv):
         """return train and val iterators for mnist"""
         # download data
-        get_data.GetMNIST_ubyte()
+        get_mnist_ubyte()
         flat = False if len(data_shape) != 1 else True

         train = mx.io.MNISTIter(
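Here only the download step changes: get_mnist_ubyte() fetches the raw ubyte files that the unchanged mx.io.MNISTIter construction below it consumes. A sketch of that pairing, with file paths taken from the MNISTIter calls removed elsewhere in this PR:

import mxnet as mx
from mxnet.test_utils import get_mnist_ubyte

get_mnist_ubyte()  # downloads the MNIST ubyte files into ./data if missing

train = mx.io.MNISTIter(
    image="data/train-images-idx3-ubyte",
    label="data/train-labels-idx1-ubyte",
    input_shape=(784,),
    batch_size=100,
    shuffle=True,
    flat=True)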
53 changes: 17 additions & 36 deletions example/gluon/data.py
@@ -19,39 +19,11 @@
 """ data iterator for mnist """
 import os
 import random
-import sys
-# code to automatically download dataset
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
-import get_data
 import mxnet as mx
+from mxnet.test_utils import get_cifar10

-def mnist_iterator(batch_size, input_shape):
-    """return train and val iterators for mnist"""
-    # download data
-    get_data.GetMNIST_ubyte()
-    flat = False if len(input_shape) == 3 else True
-
-    train_dataiter = mx.io.MNISTIter(
-        image="data/train-images-idx3-ubyte",
-        label="data/train-labels-idx1-ubyte",
-        input_shape=input_shape,
-        batch_size=batch_size,
-        shuffle=True,
-        flat=flat)
-
-    val_dataiter = mx.io.MNISTIter(
-        image="data/t10k-images-idx3-ubyte",
-        label="data/t10k-labels-idx1-ubyte",
-        input_shape=input_shape,
-        batch_size=batch_size,
-        flat=flat)
-
-    return (train_dataiter, val_dataiter)
-
-
-def cifar10_iterator(batch_size, data_shape, resize=-1):
-    get_data.GetCifar10()
+def get_cifar10_iterator(batch_size, data_shape, resize=-1, num_parts=1, part_index=0):
+    get_cifar10()

     train = mx.io.ImageRecordIter(
         path_imgrec = "data/cifar/train.rec",
@@ -60,7 +32,9 @@ def cifar10_iterator(batch_size, data_shape, resize=-1):
         data_shape = data_shape,
         batch_size = batch_size,
         rand_crop = True,
-        rand_mirror = True)
+        rand_mirror = True,
+        num_parts=num_parts,
+        part_index=part_index)

     val = mx.io.ImageRecordIter(
         path_imgrec = "data/cifar/test.rec",
@@ -69,11 +43,14 @@ def cifar10_iterator(batch_size, data_shape, resize=-1):
         rand_crop = False,
         rand_mirror = False,
         data_shape = data_shape,
-        batch_size = batch_size)
+        batch_size = batch_size,
+        num_parts=num_parts,
+        part_index=part_index)

     return train, val

-def imagenet_iterator(train_data, val_data, batch_size, data_shape, resize=-1):
+
+def get_imagenet_iterator(train_data, val_data, batch_size, data_shape, resize=-1, num_parts=1, part_index=0):
     train = mx.io.ImageRecordIter(
         path_imgrec = train_data,
         data_shape = data_shape,
@@ -96,7 +73,9 @@ def imagenet_iterator(train_data, val_data, batch_size, data_shape, resize=-1):
         max_random_shear_ratio = 0.1,
         max_random_aspect_ratio = 0.25,
         fill_value = 127,
-        min_random_scale = 0.533)
+        min_random_scale = 0.533,
+        num_parts = num_parts,
+        part_index = part_index)

     val = mx.io.ImageRecordIter(
         path_imgrec = val_data,
@@ -109,7 +88,9 @@
         std_b = 57.375,
         preprocess_threads = 32,
         batch_size = batch_size,
-        resize = resize)
+        resize = resize,
+        num_parts = num_parts,
+        part_index = part_index)

     return train, val

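Besides the renames, the substantive change here is the new num_parts/part_index arguments, which are forwarded to mx.io.ImageRecordIter so that each worker in a distributed job reads a disjoint shard of the record file. A sketch of two workers splitting CIFAR-10 under the new signature, assuming the data module from this diff is importable:

from data import get_cifar10_iterator

# Worker 0 of 2 reads one half of the records, worker 1 the other half.
train_0, val_0 = get_cifar10_iterator(batch_size=128, data_shape=(3, 32, 32),
                                      num_parts=2, part_index=0)
train_1, val_1 = get_cifar10_iterator(batch_size=128, data_shape=(3, 32, 32),
                                      num_parts=2, part_index=1)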
57 changes: 34 additions & 23 deletions example/gluon/image_classification.py
@@ -26,12 +26,13 @@
 from mxnet.gluon import nn
 from mxnet.gluon.model_zoo import vision as models
 from mxnet import autograd as ag
+from mxnet.test_utils import get_mnist_iterator

 from data import *

 # CLI
 parser = argparse.ArgumentParser(description='Train a model for image classification.')
-parser.add_argument('--dataset', type=str, default='mnist',
+parser.add_argument('--dataset', type=str, default='cifar10',
                     help='dataset to use. options are mnist, cifar10, and dummy.')
 parser.add_argument('--train-data', type=str, default='',
                     help='training record file to use, required for imagenet.')
@@ -92,25 +93,31 @@

 net = models.get_model(opt.model, **kwargs)

-# get dataset iterators
-if dataset == 'mnist':
-    train_data, val_data = mnist_iterator(batch_size, (1, 32, 32))
-elif dataset == 'cifar10':
-    train_data, val_data = cifar10_iterator(batch_size, (3, 32, 32))
-elif dataset == 'imagenet':
-    if model_name == 'inceptionv3':
-        train_data, val_data = imagenet_iterator(opt.train_data, opt.val_data,
-                                                 batch_size, (3, 299, 299))
-    else:
-        train_data, val_data = imagenet_iterator(opt.train_data, opt.val_data,
-                                                 batch_size, (3, 224, 224))
-elif dataset == 'dummy':
-    if model_name == 'inceptionv3':
-        train_data, val_data = dummy_iterator(batch_size, (3, 299, 299))
-    else:
-        train_data, val_data = dummy_iterator(batch_size, (3, 224, 224))
-
-def test(ctx):
+def get_data_iters(dataset, batch_size, num_workers=1, rank=0):
+    # get dataset iterators
+    if dataset == 'mnist':
+        train_data, val_data = get_mnist_iterator(batch_size, (1, 28, 28),
+                                                  num_parts=num_workers, part_index=rank)
+    elif dataset == 'cifar10':
+        train_data, val_data = get_cifar10_iterator(batch_size, (3, 32, 32),
+                                                    num_parts=num_workers, part_index=rank)
+    elif dataset == 'imagenet':
+        if model_name == 'inceptionv3':
+            train_data, val_data = get_imagenet_iterator(opt.train_data, opt.val_data,
+                                                         batch_size, (3, 299, 299),
+                                                         num_parts=num_workers, part_index=rank)
+        else:
+            train_data, val_data = get_imagenet_iterator(opt.train_data, opt.val_data,
+                                                         batch_size, (3, 224, 224),
+                                                         num_parts=num_workers, part_index=rank)
+    elif dataset == 'dummy':
+        if model_name == 'inceptionv3':
+            train_data, val_data = dummy_iterator(batch_size, (3, 299, 299))
+        else:
+            train_data, val_data = dummy_iterator(batch_size, (3, 224, 224))
+    return train_data, val_data

+def test(ctx, val_data):
     metric = mx.metric.Accuracy()
     val_data.reset()
     for batch in val_data:
@@ -127,9 +134,11 @@ def train(epochs, ctx):
     if isinstance(ctx, mx.Context):
         ctx = [ctx]
     net.initialize(mx.init.Xavier(magnitude=2), ctx=ctx)
+    kv = mx.kv.create(opt.kvstore)
+    train_data, val_data = get_data_iters(dataset, batch_size, kv.num_workers, kv.rank)
     trainer = gluon.Trainer(net.collect_params(), 'sgd',
                             {'learning_rate': opt.lr, 'wd': opt.wd, 'momentum': opt.momentum},
-                            kvstore = opt.kvstore)
+                            kvstore = kv)
     metric = mx.metric.Accuracy()
     loss = gluon.loss.SoftmaxCrossEntropyLoss()

@@ -164,7 +173,7 @@ def train(epochs, ctx):
         name, acc = metric.get()
         logging.info('[Epoch %d] training: %s=%f'%(epoch, name, acc))
         logging.info('[Epoch %d] time cost: %f'%(epoch, time.time()-tic))
-        name, val_acc = test(ctx)
+        name, val_acc = test(ctx, val_data)
         logging.info('[Epoch %d] validation: %s=%f'%(epoch, name, val_acc))

     net.save_params('image-classifier-%s-%d.params'%(opt.model, epochs))
@@ -175,10 +184,12 @@ def main():
         out = net(data)
         softmax = mx.sym.SoftmaxOutput(out, name='softmax')
         mod = mx.mod.Module(softmax, context=[mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()])
+        kv = mx.kv.create(opt.kvstore)
+        train_data, val_data = get_data_iters(dataset, batch_size, kv.num_workers, kv.rank)
         mod.fit(train_data,
                 eval_data = val_data,
                 num_epoch=opt.epochs,
-                kvstore=opt.kvstore,
+                kvstore=kv,
                 batch_end_callback = mx.callback.Speedometer(batch_size, max(1, opt.log_interval)),
                 epoch_end_callback = mx.callback.do_checkpoint('image-classifier-%s'% opt.model),
                 optimizer = 'sgd',
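The thread running through these changes: both train() and main() now create the kvstore before building the iterators, because kv.num_workers (total processes in the job) and kv.rank (this process's index) determine which shard of the data this worker reads. A sketch of the idea in isolation, assuming a distributed job using the 'dist_sync' kvstore type:

import mxnet as mx

kv = mx.kv.create('dist_sync')  # 'local' or 'device' for single-machine runs
# In a 4-worker job, kv.num_workers == 4 and kv.rank is 0..3 per worker, so
# get_data_iters (defined in this script) hands each worker its own shard.
train_data, val_data = get_data_iters('cifar10', batch_size=128,
                                      num_workers=kv.num_workers, rank=kv.rank)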
8 changes: 2 additions & 6 deletions example/multi-task/example_multi_task.py
@@ -16,12 +16,8 @@
 # under the License.

 # pylint: skip-file
-import sys
-import os
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
-from get_data import MNISTIterator
 import mxnet as mx
+from mxnet.test_utils import get_mnist_iterator
 import numpy as np
 import logging
 import time
@@ -142,7 +138,7 @@ def get_name_value(self):
 lr = 0.01

 network = build_network()
-train, val = MNISTIterator(batch_size=batch_size, input_shape = (784,))
+train, val = get_mnist_iterator(batch_size=batch_size, input_shape = (784,))
 train = Multi_mnist_iterator(train)
 val = Multi_mnist_iterator(val)

8 changes: 2 additions & 6 deletions example/numpy-ops/custom_softmax.py
@@ -16,12 +16,8 @@
 # under the License.

 # pylint: skip-file
-import sys
-import os
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
-from get_data import MNISTIterator
 import mxnet as mx
+from mxnet.test_utils import get_mnist_iterator
 import numpy as np
 import logging

@@ -75,7 +71,7 @@ def create_operator(self, ctx, shapes, dtypes):

 # data

-train, val = MNISTIterator(batch_size=100, input_shape = (784,))
+train, val = get_mnist_iterator(batch_size=100, input_shape = (784,))

 # train

9 changes: 2 additions & 7 deletions example/numpy-ops/ndarray_softmax.py
@@ -16,16 +16,11 @@
 # under the License.

 # pylint: skip-file
-import os
-import sys
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
-from get_data import MNISTIterator
 import mxnet as mx
+from mxnet.test_utils import get_mnist_iterator
 import numpy as np
 import logging

-
 class NDArraySoftmax(mx.operator.NDArrayOp):
     def __init__(self):
         super(NDArraySoftmax, self).__init__(False)
@@ -97,7 +92,7 @@ def backward(self, out_grad, in_data, out_data, in_grad):

 # data

-train, val = MNISTIterator(batch_size=100, input_shape = (784,))
+train, val = get_mnist_iterator(batch_size=100, input_shape = (784,))

 # train

8 changes: 2 additions & 6 deletions example/numpy-ops/numpy_softmax.py
@@ -16,12 +16,8 @@
 # under the License.

 # pylint: skip-file
-import sys
-import os
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
-from get_data import MNISTIterator
 import mxnet as mx
+from mxnet.test_utils import get_mnist_iterator
 import numpy as np
 import logging

@@ -70,7 +66,7 @@ def backward(self, out_grad, in_data, out_data, in_grad):

 # data

-train, val = MNISTIterator(batch_size=100, input_shape = (784,))
+train, val = get_mnist_iterator(batch_size=100, input_shape = (784,))

 # train

8 changes: 2 additions & 6 deletions example/python-howto/monitor_weights.py
@@ -16,12 +16,8 @@
 # under the License.

 # pylint: skip-file
-import sys
-import os
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
-from get_data import MNISTIterator
 import mxnet as mx
+from mxnet.test_utils import get_mnist_iterator
 import numpy as np
 import logging

@@ -35,7 +31,7 @@
 mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')

 # data
-train, val = MNISTIterator(batch_size=100, input_shape = (784,))
+train, val = get_mnist_iterator(batch_size=100, input_shape = (784,))

 # monitor
 def norm_stat(d):
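Only the data import changes in this file; its subject is the weight monitor that norm_stat feeds. For context, a sketch of how a stat function is attached in this era of the API, assuming the mx.mon.Monitor interface and a Module named mod built from the mlp symbol above (mod, train, and val are stand-ins here, not shown in the diff):

import numpy as np
import mxnet as mx

def norm_stat(d):
    # mean L2 norm of each monitored array
    return mx.nd.norm(d) / np.sqrt(d.size)

mon = mx.mon.Monitor(100, norm_stat)  # sample stats every 100 batches
mod.fit(train, eval_data=val, monitor=mon, num_epoch=10)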