Mark tests that should only be run nightly.
Set up Jenkins to ignore slow tests during PR builds.
Also mark one crashing test; a GitHub issue has been raised.
KellenSunderland committed Nov 18, 2017
1 parent 3107326 commit 6cb6e16
Showing 13 changed files with 94 additions and 5 deletions.
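
For context, the mechanism used throughout this commit is nose's attrib plugin: a test is tagged with a decorator, and the runner then includes or excludes it by attribute. A minimal sketch of the pattern — the test name and body here are hypothetical placeholders, not code from this commit:

    from nose.plugins.attrib import attr

    @attr('nightly')
    def test_something_slow():
        # PR builds exclude tests tagged this way via -a '!nightly,!crashing';
        # a nightly job can select only the tagged tests via -a 'nightly'.
        assert 1 + 1 == 2
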
8 changes: 4 additions & 4 deletions Jenkinsfile
@@ -86,7 +86,7 @@ echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
def python2_ut(docker_type) {
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose tests/python/unittest"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose -a '!nightly,!crashing' tests/python/unittest"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose tests/python/train"
}
}
@@ -95,7 +95,7 @@ def python2_ut(docker_type) {
def python3_ut(docker_type) {
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose tests/python/unittest"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose -a '!nightly,!crashing' tests/python/unittest"
}
}

@@ -105,15 +105,15 @@ def python3_ut(docker_type) {
def python2_gpu_ut(docker_type) {
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose tests/python/gpu"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose -a '!nightly,!crashing' tests/python/gpu"
}
}

// Python 3
def python3_gpu_ut(docker_type) {
timeout(time: max_time, unit: 'MINUTES') {
sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose tests/python/gpu"
sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose -a '!nightly,!crashing' tests/python/gpu"
}
}

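A note on the -a flag introduced above: nose's attrib plugin treats a comma-separated attribute list as a conjunction and a leading ! as negation, so -a '!nightly,!crashing' runs only tests that carry neither tag. The matching nightly invocation would presumably be the inverse filter, e.g. nosetests-2.7 --with-timer --verbose -a 'nightly' tests/python/unittest, though that job is not part of this diff.
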
3 changes: 3 additions & 0 deletions tests/ci_build/ci_build.sh
@@ -139,9 +139,12 @@ echo "Running '${COMMAND[@]}' inside ${DOCKER_IMG_NAME}..."
# By default we clean up - remove the container once it finishes running (--rm)
# and share the PID namespace (--pid=host) so the process inside does not have
# pid 1 and SIGKILL is propagated to the process inside (Jenkins can kill it).
# We're passing the cuda archs specifically for the SMs needed by the CI.
${DOCKER_BINARY} run --rm --pid=host \
-v ${WORKSPACE}:/workspace \
-w /workspace \
-e "CUDA_ARCH=-gencode arch=compute_30,code=sm_30 -gencode arch=compute_52,code=[sm_52,compute_52]" \
-e "MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0" \
-e "CI_BUILD_HOME=${WORKSPACE}" \
-e "CI_BUILD_USER=$(id -u -n)" \
-e "CI_BUILD_UID=$(id -u)" \
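For readers unfamiliar with nvcc's -gencode syntax: arch=compute_30,code=sm_30 emits native SASS for sm_30 (Kepler) GPUs, while arch=compute_52,code=[sm_52,compute_52] emits both sm_52 (Maxwell) SASS and compute_52 PTX that newer GPUs can JIT-compile. Limiting the list to the SMs actually present on the CI fleet keeps build times down; how the build consumes the CUDA_ARCH variable is outside the scope of this diff.
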
7 changes: 7 additions & 0 deletions tests/python/gpu/test_forward.py
@@ -19,12 +19,15 @@
import numpy as np
import mxnet as mx
from mxnet.test_utils import *
from nose.plugins.attrib import attr


def _get_model():
if not os.path.exists('model/Inception-7-symbol.json'):
download('http://data.mxnet.io/models/imagenet/inception-v3.tar.gz', dirname='model')
os.system("cd model; tar -xf inception-v3.tar.gz --strip-components 1")


def _dump_images(shape):
import skimage.io
import skimage.transform
@@ -40,10 +43,13 @@ def _dump_images(shape):
imgs = np.asarray(img_list, dtype=np.float32).transpose((0, 3, 1, 2)) - 128
np.save('data/test_images_%d_%d.npy'%shape, imgs)


def _get_data(shape):
download("http://data.mxnet.io/data/test_images_%d_%d.npy" % (shape), dirname='data')
download("http://data.mxnet.io/data/inception-v3-dump.npz", dirname="data")


@attr('nightly')
def test_consistency(dump=False):
shape = (299, 299)
_get_model()
@@ -64,5 +70,6 @@ def test_consistency(dump=False):
if dump:
np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()})


if __name__ == '__main__':
test_consistency(False)
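
Note that the nightly tag only takes effect under nose: the __main__ block above still calls test_consistency unconditionally when the file is run directly, because @attr merely sets an attribute on the function that the attrib plugin consults during test collection.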
29 changes: 29 additions & 0 deletions tests/python/gpu/test_operator_gpu.py
@@ -83,6 +83,7 @@ def check_countsketch(in_dim,out_dim,n):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
assert_almost_equal(a,arr_grad[0].asnumpy(),rtol=1e-3, atol=1e-12)


def test_countsketch():
np.random.seed(0)
nrepeat = 2
@@ -97,6 +98,7 @@ def test_countsketch():
n = np.random.randint(1,maxn)
check_countsketch(in_dim, out_dim, n)


def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
@@ -178,6 +180,7 @@ def test_ifft():
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)


def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
@@ -256,6 +259,7 @@ def check_fft(shape):
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)


def test_fft():
np.random.seed(0)
nrepeat = 2
@@ -265,6 +269,7 @@ def test_fft():
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)


def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
@@ -373,13 +378,15 @@ def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_globa

check_consistency(sym_list, ctx_list)


def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)


def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
@@ -388,6 +395,7 @@ def test_2d_batchnorm(fix_gamma, use_global_stats):
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)


def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
@@ -445,12 +453,14 @@ def test_convolution_with_type():
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)


# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list))


def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
@@ -516,6 +526,7 @@ def test_convolution_options():
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)


def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
@@ -541,6 +552,7 @@ def test_convolution_versions():
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)


def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
@@ -556,6 +568,7 @@ def test_pooling_with_type():
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list)


def test_deconvolution_with_type():
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
@@ -572,6 +585,7 @@ def test_deconvolution_with_type():
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")


def test_deconvolution_options():

# # 1D convolution (not yet enabled)
@@ -612,6 +626,7 @@ def test_deconvolution_options():
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)


# # 3D convolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
@@ -644,6 +659,7 @@ def test_bilinear_sampler_with_type():
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")


def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
@@ -657,6 +673,7 @@ def test_grid_generator_with_type():
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")


@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7645")
def test_spatial_transformer_with_type():
np.random.seed(1234)
@@ -672,6 +689,7 @@ def test_spatial_transformer_with_type():
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")


# Checking max pooling consistency over the data sets of different float types is problematic
# as one max value in a float32 data set may not be the max value in a float16 data set.
# This function will not be called.
@@ -746,6 +764,7 @@ def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, str
name='pool'))
check_consistency(sym_list, ctx_list)


def test_1d_pooling(pool_type):
data = (2, 3, 20)
kernel = (4,)
@@ -777,6 +796,7 @@ def test_1d_pooling(pool_type):
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)


def test_2d_pooling(pool_type):
data = (2, 3, 20, 20)
kernel = (4, 5)
@@ -1140,6 +1160,7 @@ def test_bidirectional():
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)


def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
@@ -1153,6 +1174,7 @@ def test_unfuse():
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)


def test_psroipooling_with_type():
np.random.seed(1234)
arg_params = {
@@ -1210,6 +1232,7 @@ def test_deformable_psroipooling_with_type():
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)


def test_deformable_convolution_with_type():
np.random.seed(1234)
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
@@ -1314,6 +1337,9 @@ def test_deformable_convolution_options():
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
name='deformable_conv')


# Test crashing, see: https://github.com/apache/incubator-mxnet/issues/8564
@attr('crashing')
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
@@ -1333,6 +1359,7 @@ def test_residual_fused():
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)


def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
@@ -1362,6 +1389,7 @@ def test_rnn_layer():
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))


@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8211")
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
@@ -1373,6 +1401,7 @@ def test_autograd_save_memory():
x.wait_to_read()
x.backward()


def test_gluon_ctc_consistency():
loss = mx.gluon.loss.CTCLoss()
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
6 changes: 6 additions & 0 deletions tests/python/unittest/test_gluon_data.py
@@ -21,6 +21,8 @@
import mxnet as mx
import numpy as np
from mxnet import gluon
from nose.plugins.attrib import attr


def test_array_dataset():
X = np.random.uniform(size=(10, 20))
@@ -63,6 +65,7 @@ def test_recordimage_dataset():
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i


def test_sampler():
seq_sampler = gluon.data.SequentialSampler(10)
assert list(seq_sampler) == list(range(10))
@@ -75,6 +78,8 @@ def test_sampler():
rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')
assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))


@attr('nightly')
def test_datasets():
assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000
assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000
@@ -86,6 +91,7 @@ def test_datasets():
assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 50000
assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000


def test_image_folder_dataset():
prepare_record()
dataset = gluon.data.vision.ImageFolderDataset('data/test_images')
2 changes: 2 additions & 0 deletions tests/python/unittest/test_gluon_model_zoo.py
@@ -20,6 +20,7 @@
from mxnet.gluon import nn
from mxnet.gluon.model_zoo.custom_layers import HybridConcurrent, Identity
from mxnet.gluon.model_zoo.vision import get_model
from nose.plugins.attrib import attr
import sys

def eprint(*args, **kwargs):
@@ -50,6 +51,7 @@ def test_identity():
x.asnumpy())


@attr('nightly')
def test_models():
all_models = ['resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
4 changes: 4 additions & 0 deletions tests/python/unittest/test_io.py
@@ -29,8 +29,10 @@
import sys
from common import get_data, assertRaises
import unittest
from nose.plugins.attrib import attr


@attr('nightly')
def test_MNISTIter():
# prepare data
get_data.GetMNIST_ubyte()
@@ -60,6 +62,8 @@ def test_MNISTIter():
label_1 = train_dataiter.getlabel().asnumpy().flatten()
assert(sum(label_0 - label_1) == 0)


@attr('nightly')
def test_Cifar10Rec():
get_data.GetCifar10()
dataiter = mx.io.ImageRecordIter(
2 changes: 2 additions & 0 deletions tests/python/unittest/test_module.py
@@ -23,6 +23,7 @@
from mxnet.module.executor_group import DataParallelExecutorGroup
from common import assertRaises
from collections import namedtuple
from nose.plugins.attrib import attr

import numpy.random as rnd

@@ -534,6 +535,7 @@ def check_shared_exec_group(sparse_embedding):
check_shared_exec_group(opt)


@attr('nightly')
def test_factorization_machine_module(verbose=False):
""" Test factorization machine model with sparse operators """
def check_factorization_machine_module(optimizer=None, num_epochs=None):
