From b34925cff368e5d5d7b8d57fe16bc4ac568225de Mon Sep 17 00:00:00 2001
From: eric-haibin-lin
Date: Mon, 29 May 2017 17:45:21 +0000
Subject: [PATCH 1/4] update Makefile

---
 Makefile | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index e9b3e25b30ec..c71cb1398963 100644
--- a/Makefile
+++ b/Makefile
@@ -44,9 +44,8 @@ ifeq ($(DEV), 1)
 endif
 
 # CFLAGS for debug
-# FIXME(haibin) temporarily turn on -DDMLC_LOG_FATAL_THROW for debug
 ifeq ($(DEBUG), 1)
-	CFLAGS += -g -O0 -DDMLC_LOG_FATAL_THROW=1
+	CFLAGS += -g -O0
 else
 	CFLAGS += -O3 -DNDEBUG=1
 endif

From e7201df7a19fd258263717fea582d08bc8b3ded9 Mon Sep 17 00:00:00 2001
From: eric-haibin-lin
Date: Mon, 29 May 2017 17:45:48 +0000
Subject: [PATCH 2/4] refactor test_sparse_operator

---
 python/mxnet/test_utils.py                    |  3 ++
 tests/python/unittest/test_sparse_ndarray.py  |  3 --
 tests/python/unittest/test_sparse_operator.py | 46 ++++++-------------
 3 files changed, 17 insertions(+), 35 deletions(-)

diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index 8f802d4882a9..9a088e5fee76 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -101,6 +101,9 @@ def rand_ndarray(shape, storage_type, density=None):
         arr, _ = rand_sparse_ndarray(shape, storage_type, density=density)
     return arr
 
+def rand_shape_2d():
+    return (rnd.randint(1, 10), rnd.randint(1, 10))
+
 def np_reduce(dat, axis, keepdims, numpy_reduce_func):
     """Compatible reduce for old version of NumPy.
 
diff --git a/tests/python/unittest/test_sparse_ndarray.py b/tests/python/unittest/test_sparse_ndarray.py
index 224a5e008b3b..fa61d962d35e 100644
--- a/tests/python/unittest/test_sparse_ndarray.py
+++ b/tests/python/unittest/test_sparse_ndarray.py
@@ -11,9 +11,6 @@ def assert_fcompex(f, *args, **kwargs):
     f(*args, **kwargs)
     mx.test_utils.set_env_var("MXNET_EXEC_STORAGE_FALLBACK", prev_val)
 
-def rand_shape_2d():
-    return (rnd.randint(1, 10), rnd.randint(1, 10))
-
 def sparse_nd_ones(shape, stype):
     return mx.nd.cast_storage(mx.nd.ones(shape), storage_type=stype)
 
diff --git a/tests/python/unittest/test_sparse_operator.py b/tests/python/unittest/test_sparse_operator.py
index 289a146a1507..4018b236665c 100644
--- a/tests/python/unittest/test_sparse_operator.py
+++ b/tests/python/unittest/test_sparse_operator.py
@@ -27,13 +27,12 @@ def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_
 
 
 def test_elemwise_add_ex():
-    shape = (rnd.randint(1, 10), rnd.randint(1, 10))
+    shape = rand_shape_2d()
     check_elemwise_add_ex('default_storage', 'default_storage', shape)
-    # TODO(haibin/jun) enable these tests when Dns -> Rsp (compact) is implemented.
-    #check_elemwise_add_ex('default_storage', 'row_sparse', shape)
-    #check_elemwise_add_ex('row_sparse', 'default_storage', shape)
-    #check_elemwise_add_ex('row_sparse', 'row_sparse', shape,
-    #                      lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse')
+    check_elemwise_add_ex('default_storage', 'row_sparse', shape)
+    check_elemwise_add_ex('row_sparse', 'default_storage', shape)
+    check_elemwise_add_ex('row_sparse', 'row_sparse', shape,
+                          lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse')
 
 
 # TODO(haibin) randomize this test
@@ -99,19 +98,12 @@ def test_dns_to_csr(dns_in):
         ret = mx.nd.cast_storage(csr_out, storage_type='default_storage')
         assert same(ret.asnumpy(), dns_in)
 
-    shape = (rnd.randint(1, 10), rnd.randint(1, 10))
+    shape = rand_shape_2d()
     test_rsp_to_dns(shape)
     test_dns_to_rsp(shape)
     test_csr_to_dns((4, 4))
     test_dns_to_csr([[0, 1, 0], [0, 2, 0], [3, 0, 0], [0, 0, 4], [5, 6, 0], [0, 0, 7]])
 
-
-# TODO(junwu): The backward of the operator dot cannot be tested for now
-# since the backend function CopyFromTo does not support taking two arguments
-# of the different storage types. Will add backward test after removing this
-# restriction on CopyFromTo(@haibin). Nevertheless, both backward and forward use
-# the same impl function of dot(csr, dns) = rsp and it has been tested
-# in the forward test cases as the following.
 def test_sparse_dot():
     def test_dot_csr_dns(csr_shape, dns_shape, trans_csr):
         dns1 = rand_ndarray(csr_shape, 'default_storage')
@@ -128,25 +120,19 @@ def test_dot_csr_dns(csr_shape, dns_shape, trans_csr):
         # test symbolic forward
         lhs = mx.symbol.Variable('lhs', storage_type='csr')
         rhs = mx.symbol.Variable('rhs', storage_type='default_storage')
-        # TODO(haibin) since backward op is not fully implemented, here we add a dense zero ndarray
-        # so that the output gradient is dense.
-        zeros = mx.symbol.Variable('zero', storage_type='default_storage')
-
-        sym_dot = mx.symbol.dot(lhs, rhs, transpose_a=trans_csr)
-        test = mx.symbol.elemwise_add(sym_dot, zeros)
-        location = {'lhs': csr, 'rhs': dns2, 'zero': mx.nd.zeros(out_expected.shape)}
-        expected = {'rhs': rhs_backward_grad, 'zero': out_np}
-        # dot(lhs, rhs) + zeros
+        test = mx.symbol.dot(lhs, rhs, transpose_a=trans_csr)
+        location = {'lhs': csr, 'rhs': dns2}
+        expected = {'rhs': rhs_backward_grad}
+        # dot(lhs, rhs)
         check_symbolic_forward(test, location, [out_expected.asnumpy()], rtol=1e-3, atol=1e-4)
         check_symbolic_backward(test, location, [out_np], expected,
-                                grad_req={'lhs': 'null', 'rhs': 'write', 'zero': 'write'},
+                                grad_req={'lhs': 'null', 'rhs': 'write'},
                                 rtol=1e-3, atol=1e-4)
 
-    lhs_shape = (rnd.randint(1, 10), rnd.randint(1, 10))
+    lhs_shape = rand_shape_2d()
     test_dot_csr_dns(lhs_shape, (lhs_shape[1], rnd.randint(1, 10)), False)
     test_dot_csr_dns(lhs_shape, (lhs_shape[0], rnd.randint(1, 10)), True)
 
-
 def test_sparse_embedding():
     in_dim = 10
     out_dim = 4
@@ -190,9 +176,5 @@ def check_csr_slice(shape, slice_input):
     check_csr_slice(shape, False)
 
 if __name__ == '__main__':
-    test_elemwise_add_ex()
-    test_elemwise_add_ex_multiple_stages()
-    test_cast_storage_ex()
-    test_sparse_dot()
-    test_sparse_embedding()
-    test_sparse_slice()
+    import nose
+    nose.runmodule()

From 9872df0d65fcc74f3c2022af96a2a48f97d5089c Mon Sep 17 00:00:00 2001
From: eric-haibin-lin
Date: Mon, 29 May 2017 17:59:23 +0000
Subject: [PATCH 3/4] change `default_storage` back to `default`

---
 python/mxnet/executor.py                      |  2 +-
 python/mxnet/ndarray.py                       |  4 ++--
 python/mxnet/sparse_ndarray.py                |  8 +++----
 python/mxnet/symbol.py                        |  2 +-
 python/mxnet/test_utils.py                    |  2 +-
 src/operator/tensor/elemwise_unary_op.h       |  4 +---
 tests/python/unittest/test_infer_shape.py     |  6 ++---
 tests/python/unittest/test_module.py          |  4 ++--
 .../python/unittest/test_multi_device_exec.py |  4 ++--
 tests/python/unittest/test_optimizer.py       |  8 +++----
 tests/python/unittest/test_sparse_ndarray.py  | 18 +++++++-------
 tests/python/unittest/test_sparse_operator.py | 24 +++++++++----------
 12 files changed, 42 insertions(+), 44 deletions(-)

diff --git a/python/mxnet/executor.py b/python/mxnet/executor.py
index b585c23121cd..fa97290e1241 100644
--- a/python/mxnet/executor.py
+++ b/python/mxnet/executor.py
@@ -99,7 +99,7 @@ def _get_outputs(self):
                                               ctypes.byref(storage_type)))
             assert(storage_type != _STORAGE_TYPE_STR_TO_ID['undefined'])
             output = NDArray(NDArrayHandle(handles[i])) \
-                     if storage_type.value == _STORAGE_TYPE_STR_TO_ID['default_storage'] \
+                     if storage_type.value == _STORAGE_TYPE_STR_TO_ID['default'] \
                      else SparseNDArray(NDArrayHandle(handles[i]))
             outputs.append(output)
         return outputs
diff --git a/python/mxnet/ndarray.py b/python/mxnet/ndarray.py
index 1d9aed6b42b0..cfefde71f0f3 100644
--- a/python/mxnet/ndarray.py
+++ b/python/mxnet/ndarray.py
@@ -60,13 +60,13 @@
 }
 _STORAGE_TYPE_ID_TO_STR = {
     -1 : 'undefined',
-    0  : 'default_storage',
+    0  : 'default',
     1  : 'row_sparse',
     2  : 'csr',
 }
 _STORAGE_TYPE_STR_TO_ID = {
     'undefined'       : -1,
-    'default_storage' : 0,
+    'default'         : 0,
     'row_sparse'      : 1,
     'csr'             : 2,
 }
diff --git a/python/mxnet/sparse_ndarray.py b/python/mxnet/sparse_ndarray.py
index 4c4fdfafa18b..0df558369c62 100644
--- a/python/mxnet/sparse_ndarray.py
+++ b/python/mxnet/sparse_ndarray.py
@@ -495,11 +495,11 @@ def row_sparse(values, indices, shape, ctx=None, dtype=None, indices_type=None):
     """Creates a row sparse array with a set of tensor slices at given indices.
 
     A SparseNDArray with `row_sparse` storage is typically used to represent a subset of a larger
-    NDArray with `default_storage` of shape [LARGE0, D1, .. , DN] where LARGE0 >> D0. The values
+    NDArray with `default` storage of shape [LARGE0, D1, .. , DN] where LARGE0 >> D0. The values
     in indices are the indices in the first dimension of the slices that have been extracted from
     the larger NDArray. The indices are expected to be sorted in ascending order.
 
-    The corresponding NDArray ``dense`` with `default_storage` represented by a ``rsp``
+    The corresponding NDArray ``dense`` with `default` storage represented by a ``rsp``
     SparseNDArray with `row_sparse` storage has
 
     ``dense[rsp.indices[i], :, :, :, ...] = rsp.values[i, :, :, :, ...]``
@@ -558,7 +558,7 @@ def to_dense(source):
     SparseNDArray
         The dense array with default storage
     """
-    return ndarray.cast_storage(source, storage_type='default_storage')
+    return ndarray.cast_storage(source, storage_type='default')
 
 def zeros(storage_type, shape, ctx=None, dtype=None, aux_types=None):
     """Return a new array of given shape and type, filled with zeros.
@@ -604,7 +604,7 @@ def _ndarray_cls(handle):
     stype = _storage_type(handle)
     # TODO(haibin) in the long run, we want to have CSRNDArray and RowSparseNDArray which
     # inherit from SparseNDArray
-    return NDArray(handle) if stype == 'default_storage' else SparseNDArray(handle)
+    return NDArray(handle) if stype == 'default' else SparseNDArray(handle)
 # pylint: enable=too-many-locals, invalid-name
 
 def _init_ndarray_module(ndarray_class, root_namespace):
diff --git a/python/mxnet/symbol.py b/python/mxnet/symbol.py
index cee08fdfb038..543c75a1e625 100644
--- a/python/mxnet/symbol.py
+++ b/python/mxnet/symbol.py
@@ -1416,7 +1416,7 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, storage_type_dict=N
         shared_buffer_names = []
         shared_buffer_handles = []
         for k, v in shared_buffer.items():
-            assert(v.storage_type == 'default_storage'), \
+            assert(v.storage_type == 'default'), \
                    "shared_buffer is expected to only contain NDArrays with default storage"
             shared_buffer_names.append(c_str(k))
             shared_buffer_handles.append(v.handle)
diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index 9a088e5fee76..37e346bdf638 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -95,7 +95,7 @@ def rand_sparse_ndarray(shape, storage_type, density=None):
         assert(False), "unknown storage type"
 
 def rand_ndarray(shape, storage_type, density=None):
-    if storage_type == 'default_storage':
+    if storage_type == 'default':
         arr = mx.nd.array(random_arrays(shape))
     else:
         arr, _ = rand_sparse_ndarray(shape, storage_type, density=density)
diff --git a/src/operator/tensor/elemwise_unary_op.h b/src/operator/tensor/elemwise_unary_op.h
index 47797b58f972..996a25d5a647 100644
--- a/src/operator/tensor/elemwise_unary_op.h
+++ b/src/operator/tensor/elemwise_unary_op.h
@@ -206,12 +206,10 @@ struct relu_grad {
 }  // namespace kernel_launch_op
 
 struct CastStorageParam : public dmlc::Parameter<CastStorageParam> {
-  // use int for enumeration
-  // TODO(haibin) add enum for storage_type. Probably also aux-types
   int storage_type;
   DMLC_DECLARE_PARAMETER(CastStorageParam) {
     DMLC_DECLARE_FIELD(storage_type)
-    .add_enum("default_storage", kDefaultStorage)
+    .add_enum("default", kDefaultStorage)
     .add_enum("row_sparse", kRowSparseStorage)
     .add_enum("csr", kCSRStorage)
     .describe("Output storage type.");
diff --git a/tests/python/unittest/test_infer_shape.py b/tests/python/unittest/test_infer_shape.py
index 6412aad50866..9188dd9d933f 100644
--- a/tests/python/unittest/test_infer_shape.py
+++ b/tests/python/unittest/test_infer_shape.py
@@ -138,9 +138,9 @@ def check_infer_storage(v1, v2, v1_storage, v2_storage, out_chunk):
 def test_elemwise_add_infer_storage_type():
     v1 = mx.symbol.Variable('v1')
     v2 = mx.symbol.Variable('v2')
-    check_infer_storage(v1, v2, 'default_storage', 'default_storage', 'default_storage')
-    check_infer_storage(v1, v2, 'default_storage', 'row_sparse', 'default_storage')
-    check_infer_storage(v1, v2, 'row_sparse', 'default_storage', 'default_storage')
+    check_infer_storage(v1, v2, 'default', 'default', 'default')
+    check_infer_storage(v1, v2, 'default', 'row_sparse', 'default')
+    check_infer_storage(v1, v2, 'row_sparse', 'default', 'default')
     check_infer_storage(v1, v2, 'row_sparse', 'row_sparse', 'row_sparse')
 
 if __name__ == "__main__":
diff --git a/tests/python/unittest/test_module.py b/tests/python/unittest/test_module.py
index 608cdabe4677..c3c7f12bbc57 100644
--- a/tests/python/unittest/test_module.py
+++ b/tests/python/unittest/test_module.py
@@ -259,7 +259,7 @@ def mean_abs(x):
     assert(mon_result_counts == [2, 2, 1, 6, 6, 4])
 
 def test_fm_module():
-    def fm_model(k, feature_dim, storage_type='default_storage'):
+    def fm_model(k, feature_dim, storage_type='default'):
         initializer = mx.initializer.Normal(sigma=0.01)
         x = mx.symbol.Variable("data", storage_type=storage_type)
         v = mx.symbol.Variable("v", shape=(feature_dim, k), init=initializer)
@@ -310,7 +310,7 @@ def fm_model(k, feature_dim, storage_type='default'):
     metric = mx.metric.create('MSE')
     # train 5 epoch, i.e. going over the data iter one pass
     # TODO(haibin) test with row_sparse instead
-    storage_type_dict = {'v' : 'default_storage'}
+    storage_type_dict = {'v' : 'default'}
 
     for epoch in range(10):
         train_iter.reset()
diff --git a/tests/python/unittest/test_multi_device_exec.py b/tests/python/unittest/test_multi_device_exec.py
index 37809bf8a3bc..3293ae2b0abc 100644
--- a/tests/python/unittest/test_multi_device_exec.py
+++ b/tests/python/unittest/test_multi_device_exec.py
@@ -58,8 +58,8 @@ def check_ctx_group_sparse(lhs_stype, rhs_stype):
             assert arr.context == group2ctx['stage2']
 
 def test_ctx_group_sparse():
-    check_ctx_group_sparse('default_storage', 'default_storage')
-    check_ctx_group_sparse('default_storage', 'row_sparse')
+    check_ctx_group_sparse('default', 'default')
+    check_ctx_group_sparse('default', 'row_sparse')
     check_ctx_group_sparse('row_sparse', 'row_sparse')
 
 if __name__ == '__main__':
diff --git a/tests/python/unittest/test_optimizer.py b/tests/python/unittest/test_optimizer.py
index ad0793405959..6f69828ed9b1 100644
--- a/tests/python/unittest/test_optimizer.py
+++ b/tests/python/unittest/test_optimizer.py
@@ -30,8 +30,8 @@ def test_lr_wd_mult():
     assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)
 
 
-def compare_optimizer(opt1, opt2, shape, w_stype='default_storage', g_stype='default_storage'):
-    if w_stype == 'default_storage':
+def compare_optimizer(opt1, opt2, shape, w_stype='default', g_stype='default'):
+    if w_stype == 'default':
         w2 = mx.random.uniform(shape=shape, ctx=default_context())
         w1 = w2.copyto(default_context())
     elif w_stype == 'row_sparse':
@@ -39,7 +39,7 @@ def compare_optimizer(opt1, opt2, shape, w_stype='default', g_stype='def
         w1 = rand_ndarray(shape, w_stype).to_dense()
     else:
         raise Exception("type not supported yet")
-    if g_stype == 'default_storage':
+    if g_stype == 'default':
         g2 = mx.random.uniform(shape=shape, ctx=default_context())
         g1 = g2.copyto(default_context())
     elif g_stype == 'row_sparse':
@@ -230,7 +230,7 @@ def test_sparse_sgd():
               {'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03, 'momentum': 0.9},
               {'rescale_grad': 0.8, 'wd': 0.05, 'momentum': 0.9}]
     for kwarg in kwargs:
-        compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, w_stype='default_storage', g_stype='row_sparse')
+        compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, w_stype='default', g_stype='row_sparse')
 
 
 # ADAM
diff --git a/tests/python/unittest/test_sparse_ndarray.py b/tests/python/unittest/test_sparse_ndarray.py
index fa61d962d35e..79f8321e562d 100644
--- a/tests/python/unittest/test_sparse_ndarray.py
+++ b/tests/python/unittest/test_sparse_ndarray.py
@@ -20,7 +20,7 @@ def check_sparse_nd_elemwise_binary(shapes, storage_types, f, g):
     for i, storage_type in enumerate(storage_types):
         if storage_type == 'row_sparse':
             nd, _ = rand_sparse_ndarray(shapes[i], storage_type)
-        elif storage_type == 'default_storage':
+        elif storage_type == 'default':
             nd = mx.nd.array(random_arrays(shapes[i]), dtype = np.float32)
         else:
             assert(False)
@@ -36,9 +36,9 @@ def test_sparse_nd_elemwise_add():
     for i in range(num_repeats):
         shape = [rand_shape_2d()] * 2
         assert_fcompex(check_sparse_nd_elemwise_binary,
-                       shape, ['default_storage'] * 2, op, g)
+                       shape, ['default'] * 2, op, g)
         assert_fcompex(check_sparse_nd_elemwise_binary,
-                       shape, ['default_storage', 'row_sparse'], op, g)
+                       shape, ['default', 'row_sparse'], op, g)
         assert_fcompex(check_sparse_nd_elemwise_binary,
                        shape, ['row_sparse', 'row_sparse'], op, g)
 
@@ -49,8 +49,8 @@ def test_sparse_nd_elementwise_fallback():
     op = mx.nd.add_n
     for i in range(num_repeats):
         shape = [rand_shape_2d()] * 2
-        check_sparse_nd_elemwise_binary(shape, ['default_storage'] * 2, op, g)
-        check_sparse_nd_elemwise_binary(shape, ['default_storage', 'row_sparse'], op, g)
+        check_sparse_nd_elemwise_binary(shape, ['default'] * 2, op, g)
+        check_sparse_nd_elemwise_binary(shape, ['default', 'row_sparse'], op, g)
         check_sparse_nd_elemwise_binary(shape, ['row_sparse', 'row_sparse'], op, g)
 
 def test_sparse_nd_zeros():
@@ -77,9 +77,9 @@ def check_sparse_nd_copy(from_stype, to_stype):
     assert np.sum(np.abs(from_nd.asnumpy() != to_nd.asnumpy())) == 0.0
 
     check_sparse_nd_copy('row_sparse', 'row_sparse')
-    check_sparse_nd_copy('row_sparse', 'default_storage')
-    check_sparse_nd_copy('default_storage', 'row_sparse')
-    check_sparse_nd_copy('default_storage', 'csr')
+    check_sparse_nd_copy('row_sparse', 'default')
+    check_sparse_nd_copy('default', 'row_sparse')
+    check_sparse_nd_copy('default', 'csr')
 
 def check_sparse_nd_prop_rsp():
     storage_type = 'row_sparse'
@@ -132,7 +132,7 @@ def check_sparse_nd_setitem(storage_type, shape, dst):
     shape = rand_shape_2d()
     for stype in ['row_sparse', 'csr']:
         # ndarray assignment
-        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default_storage'))
+        check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, 'default'))
         check_sparse_nd_setitem(stype, shape, rand_ndarray(shape, stype))
         # numpy assignment
         check_sparse_nd_setitem(stype, shape, np.ones(shape))
diff --git a/tests/python/unittest/test_sparse_operator.py b/tests/python/unittest/test_sparse_operator.py
index 4018b236665c..1f4e2e8cc2c7 100644
--- a/tests/python/unittest/test_sparse_operator.py
+++ b/tests/python/unittest/test_sparse_operator.py
@@ -28,9 +28,9 @@ def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_
 
 def test_elemwise_add_ex():
     shape = rand_shape_2d()
-    check_elemwise_add_ex('default_storage', 'default_storage', shape)
-    check_elemwise_add_ex('default_storage', 'row_sparse', shape)
-    check_elemwise_add_ex('row_sparse', 'default_storage', shape)
+    check_elemwise_add_ex('default', 'default', shape)
+    check_elemwise_add_ex('default', 'row_sparse', shape)
+    check_elemwise_add_ex('row_sparse', 'default', shape)
     check_elemwise_add_ex('row_sparse', 'row_sparse', shape,
                           lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse')
 
@@ -69,11 +69,11 @@ def test_elemwise_add_ex_multiple_stages():
     exec_test.backward(out_grads=exec_test.outputs)
     assert_almost_equal(arr_grads[0].asnumpy(), arr_grads[1].asnumpy())
 
-# TODO(haibin) also add test for backward pass
+# TODO(haibin) also add test for backward pass. Check if exception is thrown
 def test_cast_storage_ex():
     def test_rsp_to_dns(shape):
         rsp, (data, row_idx) = rand_sparse_ndarray(shape, 'row_sparse')
-        dns_out = mx.nd.cast_storage(rsp, storage_type='default_storage')
+        dns_out = mx.nd.cast_storage(rsp, storage_type='default')
         dns_expected = np.zeros(shape, dtype=default_dtype())
         if row_idx is not None:
             for k, v in enumerate(row_idx):
@@ -81,9 +81,9 @@ def test_rsp_to_dns(shape):
         assert same(dns_out.asnumpy(), dns_expected)
 
     def test_dns_to_rsp(shape):
-        dns_in = rand_ndarray(shape, 'default_storage')
+        dns_in = rand_ndarray(shape, 'default')
         rsp_out = mx.nd.cast_storage(mx.nd.array(dns_in, dtype=default_dtype()), storage_type='row_sparse')
-        ret = mx.nd.cast_storage(rsp_out, storage_type='default_storage')
+        ret = mx.nd.cast_storage(rsp_out, storage_type='default')
         assert same(ret.asnumpy(), dns_in.asnumpy())
 
     def test_csr_to_dns(shape):
@@ -95,7 +95,7 @@ def test_csr_to_dns(shape):
     def test_dns_to_csr(dns_in):
         dns_in = np.array(dns_in)
         csr_out = mx.nd.cast_storage(mx.nd.array(dns_in, dtype=default_dtype()), storage_type='csr')
-        ret = mx.nd.cast_storage(csr_out, storage_type='default_storage')
+        ret = mx.nd.cast_storage(csr_out, storage_type='default')
         assert same(ret.asnumpy(), dns_in)
 
     shape = rand_shape_2d()
@@ -106,11 +106,11 @@ def test_sparse_dot():
     def test_dot_csr_dns(csr_shape, dns_shape, trans_csr):
-        dns1 = rand_ndarray(csr_shape, 'default_storage')
-        dns2 = rand_ndarray(dns_shape, 'default_storage')
+        dns1 = rand_ndarray(csr_shape, 'default')
+        dns2 = rand_ndarray(dns_shape, 'default')
         csr = mx.nd.cast_storage(dns1, storage_type='csr')
         out = mx.nd.dot(csr, dns2, transpose_a=trans_csr)
-        assert out.storage_type == 'default_storage'
+        assert out.storage_type == 'default'
         out_expected = mx.nd.dot(dns1, dns2, transpose_a=trans_csr)
         out_np = out_expected.asnumpy()
         backward_trans = not trans_csr
@@ -119,7 +119,7 @@ def test_dot_csr_dns(csr_shape, dns_shape, trans_csr):
         # test symbolic forward
         lhs = mx.symbol.Variable('lhs', storage_type='csr')
-        rhs = mx.symbol.Variable('rhs', storage_type='default_storage')
+        rhs = mx.symbol.Variable('rhs', storage_type='default')
         test = mx.symbol.dot(lhs, rhs, transpose_a=trans_csr)
         location = {'lhs': csr, 'rhs': dns2}
         expected = {'rhs': rhs_backward_grad}

From 0026aad1a51add84bbd1c8390000ddb79cf4500a Mon Sep 17 00:00:00 2001
From: eric-haibin-lin
Date: Mon, 29 May 2017 17:59:44 +0000
Subject: [PATCH 4/4] remove unused cpp tests

---
 tests/cpp/engine/threaded_engine_test.cc |   8 +-
 tests/cpp/operator/ndarray_test.cc       | 244 -----------------------
 2 files changed, 4 insertions(+), 248 deletions(-)

diff --git a/tests/cpp/engine/threaded_engine_test.cc b/tests/cpp/engine/threaded_engine_test.cc
index 509f50bdef51..73dc53060b63 100644
--- a/tests/cpp/engine/threaded_engine_test.cc
+++ b/tests/cpp/engine/threaded_engine_test.cc
@@ -100,7 +100,7 @@ double EvaluateWorloads(const std::vector<Workload>& workloads,
   return dmlc::GetTime() - t;
 }
 
-/*TEST(Engine, RandSumExpr) {
+TEST(Engine, RandSumExpr) {
   std::vector<Workload> workloads;
   int num_repeat = 5;
   const int num_engine = 4;
@@ -134,11 +134,11 @@ double EvaluateWorloads(const std::vector<Workload>& workloads,
   LOG(INFO) << "NaiveEngine\t\t" << t[1] << " sec";
   LOG(INFO) << "ThreadedEnginePooled\t" << t[2] << " sec";
   LOG(INFO) << "ThreadedEnginePerDevice\t" << t[3] << " sec";
-}*/
+}
 
 void Foo(mxnet::RunContext, int i) { printf("The fox says %d\n", i); }
 
-/*TEST(Engine, basics) {
+TEST(Engine, basics) {
   auto&& engine = mxnet::Engine::Get();
   auto&& var = engine->NewVariable();
   std::vector<mxnet::Engine::OprHandle> oprs;
@@ -235,4 +235,4 @@ void Foo(mxnet::RunContext, int i) { printf("The fox says %d\n", i); }
   var = nullptr;
   oprs.clear();
   LOG(INFO) << "All pass";
-}*/
+}
diff --git a/tests/cpp/operator/ndarray_test.cc b/tests/cpp/operator/ndarray_test.cc
index d8658efc1aa1..f2ed30793881 100644
--- a/tests/cpp/operator/ndarray_test.cc
+++ b/tests/cpp/operator/ndarray_test.cc
@@ -4,247 +4,3 @@
  * \brief ndarray unit test utility functions
  * \author Haibin Lin
  */
-/*#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include "../src/executor/graph_executor.h"
-#include "../src/operator/tensor/elemwise_binary_op.h"
-#include "../src/operator/tensor/elemwise_unary_op.h"
-#include "../src/operator/tensor/indexing_op.h"
-#include "../src/operator/optimizer_op-inl.h"
-#include "../src/operator/tensor/init_op.h"
-#include "test_ndarray_utils.h"
-
-using namespace mxnet;
-// Conversion Tests
-void CastDnsDnsTest() {
-  Context ctx;
-  TShape shape({2, 2});
-  NDArray nd = DnsND(shape, ctx, {});
-  auto nd_copy = Convert(kDefaultStorage, nd);
-  CheckDataRegion(nd_copy.data(), nd.data());
-}
-
-void CastRspDnsTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({2, 2});
-  float v1 = RandFloat();
-  float v2 = RandFloat();
-  NDArray nd = RspND(shape, ctx, {0}, {v1, v2});
-  // Dense ndarray
-  NDArray dense_nd = DnsND(shape, ctx, {v1, v2, 0, 0});
-  NDArray converted = Convert(kDefaultStorage, nd);
-  CheckDataRegion(converted.data(), dense_nd.data());
-}
-
-// NDArray function tests
-void SetValueTest() {
-  Context ctx = Context::CPU();
-  TShape data_shape({2, 2});
-  float v = RandFloat();
-  NDArray nd0 = DnsND(data_shape, ctx, {v, v, v, v});
-  NDArray nd1(data_shape, ctx, false);
-  nd1 = v;
-  nd1.WaitToRead();
-  CheckDataRegion(nd0.data(), nd1.data());
-}
-
-// InferStorage
-void InferElemwiseStorageTest() {
-  nnvm::NodeAttrs attrs;
-  attrs.name = "test_op";
-  std::vector<int> in_attrs({kRowSparseStorage, kDefaultStorage});
-  std::vector<int> out_attrs({kUndefinedStorage});
-  // rsp, default -> default
-  op::ElemwiseStorageType<2, 1>(attrs, &in_attrs, &out_attrs);
-  EXPECT_EQ(out_attrs[0], kDefaultStorage);
-  // default, rsp -> default
-  in_attrs = {kDefaultStorage, kRowSparseStorage};
-  out_attrs = {kUndefinedStorage};
-  op::ElemwiseStorageType<2, 1>(attrs, &in_attrs, &out_attrs);
-  EXPECT_EQ(out_attrs[0], kDefaultStorage);
-  // rsp, rsp -> rsp
-  in_attrs = {kRowSparseStorage};
-  out_attrs = {kUndefinedStorage, kUndefinedStorage};
-  op::ElemwiseStorageType<1, 2>(attrs, &in_attrs, &out_attrs);
-  EXPECT_EQ(out_attrs[0], kRowSparseStorage);
-  EXPECT_EQ(out_attrs[1], kRowSparseStorage);
-}
-
-// Optimizer
-void SGDDnsRspTest() {
-  TShape shape({4, 2});
-  Context ctx = Context::CPU();
-  NDArray weight = DnsND(shape, ctx, {1, 2, 3, 4, 5, 6, 7, 8});
-  NDArray rsp_grad = RspND(shape, ctx, {0, 3}, {1, 2, 3, 4});
-  NDArray output = weight;
-  float lr = RandFloat();
-  float wd = RandFloat();
-  float rescale = RandFloat();
-  op::SGDParam param;
-  param.lr = lr;
-  param.wd = wd;
-  param.rescale_grad = rescale;
-  param.clip_gradient = -1.0f;
-  Engine::Get()->PushSync([weight, rsp_grad, output, param](RunContext ctx) {
-      std::vector<NDArray> inputs{weight, rsp_grad}, outputs{output};
-      std::vector<OpReqType> req({kAddTo});
-      op::SparseSGDUpdateDnsRspImpl(param, {}, inputs, req, outputs);
-    }, weight.ctx(), {rsp_grad.var()}, {output.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-
-  auto sgd = [lr, wd, rescale] (TEST_DTYPE weight, TEST_DTYPE grad) {
-      return (1.f-lr*wd)*weight - (lr*rescale)*grad;
-    };
-
-  NDArray expected = DnsND(shape, ctx,
-                           {1 + sgd(1, 1), 2 + sgd(2, 2), 3, 4, 5, 6,
-                            7 + sgd(7, 3), 8 + sgd(8, 4)});
-  output.WaitToRead();
-  CheckDataRegion(output.data(), expected.data());
-}
-
-void CopyFromToRspDnsTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({2, 2});
-  NDArray nd = RspND(shape, ctx, {0}, {1, 1});
-  // Dense ndarray
-  NDArray dns_nd = DnsND(shape, ctx, {});
-  CopyFromTo(nd, &dns_nd);
-  dns_nd.WaitToRead();
-  CheckDataRegion(nd.data(), dns_nd.data());
-}
-
-void CopyFromToRspRspReuseTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({3, 2});
-  NDArray nd = RspND(shape, ctx, {0}, {1,2});
-  // Sparse ndarray with enough memory. It's expected to reuse the memory
-  NDArray dst_nd = RspND(shape, ctx, {0, 1, 2}, {6,6,6,6,6,6});
-  nd.WaitToRead();
-  CopyFromTo(nd, &dst_nd);
-  dst_nd.WaitToRead();
-  CheckDataRegion(nd.data(), dst_nd.data());
-  CHECK_EQ(dst_nd.aux_shape(rowsparse::kIdx)[0], 1);
-  CHECK_EQ(dst_nd.storage_shape()[0], 1);
-  CHECK_EQ(dst_nd.storage_shape()[1], 2);
-}
-
-
-void CopyFromToRspRspFreeTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({3, 2});
-  NDArray nd = RspND(shape, ctx, {0, 1}, {1,1,1,1});
-  // Sparse ndarray with enough memory. It's expected to reuse the memory
-  NDArray dst_nd = RspND(shape, ctx, {0}, {2,2});
-  nd.WaitToRead();
-  CopyFromTo(nd, &dst_nd);
-  dst_nd.WaitToRead();
-  CheckDataRegion(nd.data(), dst_nd.data());
-}
-
-void BinaryAddRspRsp() {
-  Context ctx = Context::CPU();
-
-  TShape output_shape({4, 2});
-  NDArray input_nd0 = RspND(output_shape, ctx, {0, 1}, {10,10,10,10});
-  NDArray input_nd1 = RspND(output_shape, ctx, {0, 2}, {5,5,5,5});
-
-  NDArray output(kRowSparseStorage, output_shape, ctx);
-  std::vector<Engine::VarHandle> const_vars;
-  const_vars.push_back(input_nd0.var());
-  const_vars.push_back(input_nd1.var());
-
-  Engine::Get()->PushSync([input_nd0, input_nd1, output](RunContext ctx) {
-      OpContext op_ctx;
-      std::vector<NDArray> inputs, outputs;
-      std::vector<OpReqType> req;
-      inputs.push_back(input_nd0);
-      inputs.push_back(input_nd1);
-      outputs.push_back(output);
-      op::BinaryComputeRspRsp({}, op_ctx, inputs, req, outputs);
-    }, input_nd0.ctx(), const_vars, {output.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-
-  // Check the data region of output ndarray
-  NDArray dense_output = DnsND(output_shape, ctx, {15, 15, 10, 10, 5, 5, 0, 0});
-  NDArray copy = Convert(kDefaultStorage, output);
-  CheckDataRegion(dense_output.data(), copy.data());
-}
-
-void SparseEmbeddingBackwardTest() {
-  Context ctx = Context::CPU();
-  // d1 .. dk
-  // idx shape : (2, 3)
-  // input dim 4, output dim 2
-  int input_dim = 4;
-  int output_dim = 2;
-  TShape idx_shape({2, 3});
-  NDArray idx = RspIdxND(idx_shape, ctx, {1, 2, 3, 1, 2, 3});
-  TShape grad_shape({2, 3, 2});
-  NDArray grad = DnsND(grad_shape, ctx, {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2});
-  TShape out_shape({4, 2});
-  NDArray output = NDArray(kRowSparseStorage, out_shape, ctx);
-  op::EmbeddingParam param;
-  param.input_dim = input_dim;
-  param.output_dim = output_dim;
-  param.dtype = 0;
-
-  Engine::Get()->PushSync([idx, grad, output, param](RunContext ctx) {
-      std::vector<NDArray> inputs{grad, idx}, outputs{output, output};
-      // this is a hack
-      std::vector<OpReqType> req({kNullOp, kAddTo});
-      op::SparseEmbeddingOpBackwardEx({}, {}, inputs, req, outputs);
-    }, output.ctx(), {grad.var(), idx.var()}, {output.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-
-  NDArray expected = DnsND(out_shape, ctx, {0,0,0,0,0,0,0,0});
-  Engine::Get()->PushSync([idx, grad, expected, param](RunContext ctx) {
-      std::vector<TBlob> inputs{grad.data(), idx.data()}, outputs{expected.data(), expected.data()};
-      std::vector<OpReqType> req({kNullOp, kWriteTo});
-      op::EmbeddingOpBackward({}, {}, inputs, req, outputs);
-    }, expected.ctx(), {grad.var(), idx.var()}, {expected.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-  NDArray converted = Convert(kDefaultStorage, output);
-  expected.WaitToRead();
-  CheckDataRegion(converted.data(), expected.data());
-}
-
-TEST(NDArray, binary_add) {
-  BinaryAddRspRsp();
-}
-
-TEST(NDArray, conversion) {
-  CastDnsDnsTest();
-  CastRspDnsTest();
-}
-
-TEST(NDArray, functions) {
-  SetValueTest();
-}
-
-TEST(NDArray, optimizer) {
-  SGDDnsRspTest();
-}
-
-TEST(NDArray, copy) {
-  CopyFromToRspDnsTest();
-  CopyFromToRspRspReuseTest();
-  CopyFromToRspRspFreeTest();
-}
-
-TEST(NDArray, infer_storage) {
-  InferElemwiseStorageTest();
-}
-
-TEST(NDArray, sparse_embedding) {
-  SparseEmbeddingBackwardTest();
-}*/
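
--
Note (not part of the patch series): after PATCH 3 the Python frontend spells storage
types 'default', 'row_sparse' and 'csr'. The snippet below is a minimal, illustrative
round-trip written against this development branch's API exactly as it appears in the
tests above (mx.nd.cast_storage with a storage_type keyword); the shape and values are
arbitrary, and the names are not guaranteed to match any released MXNet version.

    import mxnet as mx

    shape = (3, 4)
    dense = mx.nd.ones(shape)                                   # 'default' storage
    rsp = mx.nd.cast_storage(dense, storage_type='row_sparse')  # dense -> row_sparse
    back = mx.nd.cast_storage(rsp, storage_type='default')      # row_sparse -> dense
    assert back.asnumpy().sum() == 3 * 4

The same names are used on the symbolic side (mx.symbol.Variable('lhs', storage_type='csr'))
and in simple_bind's storage_type_dict, as exercised by the diffs above.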