cleanup some testing code and enums #57

Merged · 4 commits · May 29, 2017
Changes from all commits
3 changes: 1 addition & 2 deletions Makefile
@@ -44,9 +44,8 @@ ifeq ($(DEV), 1)
 endif
 
 # CFLAGS for debug
-# FIXME(haibin) temporarily turn on -DDMLC_LOG_FATAL_THROW for debug
 ifeq ($(DEBUG), 1)
-CFLAGS += -g -O0 -DDMLC_LOG_FATAL_THROW=1
+CFLAGS += -g -O0
 else
 CFLAGS += -O3 -DNDEBUG=1
 endif
2 changes: 1 addition & 1 deletion python/mxnet/executor.py
@@ -99,7 +99,7 @@ def _get_outputs(self):
                 ctypes.byref(storage_type)))
             assert(storage_type != _STORAGE_TYPE_STR_TO_ID['undefined'])
             output = NDArray(NDArrayHandle(handles[i])) \
-                if storage_type.value == _STORAGE_TYPE_STR_TO_ID['default_storage'] \
+                if storage_type.value == _STORAGE_TYPE_STR_TO_ID['default'] \
                 else SparseNDArray(NDArrayHandle(handles[i]))
             outputs.append(output)
         return outputs
4 changes: 2 additions & 2 deletions python/mxnet/ndarray.py
@@ -60,13 +60,13 @@
 }
 _STORAGE_TYPE_ID_TO_STR = {
     -1 : 'undefined',
-    0 : 'default_storage',
+    0 : 'default',
     1 : 'row_sparse',
     2 : 'csr',
 }
 _STORAGE_TYPE_STR_TO_ID = {
     'undefined' : -1,
-    'default_storage' : 0,
+    'default' : 0,
     'row_sparse' : 1,
     'csr' : 2,
 }
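As a quick sanity check on the rename (a plain-Python sketch mirroring the dicts above, not part of the diff), the two tables must remain exact inverses of each other:

```python
# Mirrors the renamed mappings in python/mxnet/ndarray.py.
_STORAGE_TYPE_ID_TO_STR = {-1: 'undefined', 0: 'default', 1: 'row_sparse', 2: 'csr'}
_STORAGE_TYPE_STR_TO_ID = {v: k for k, v in _STORAGE_TYPE_ID_TO_STR.items()}

assert _STORAGE_TYPE_STR_TO_ID['default'] == 0  # was 'default_storage'
assert all(_STORAGE_TYPE_ID_TO_STR[i] == s for s, i in _STORAGE_TYPE_STR_TO_ID.items())
```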
8 changes: 4 additions & 4 deletions python/mxnet/sparse_ndarray.py
@@ -495,11 +495,11 @@ def row_sparse(values, indices, shape, ctx=None, dtype=None, indices_type=None):
     """Creates a row sparse array with a set of tensor slices at given indices.
 
     A SparseNDArray with `row_sparse` storage is typically used to represent a subset of a larger
-    NDArray with `default_storage` of shape [LARGE0, D1, .. , DN] where LARGE0 >> D0. The values
+    NDArray with `default` of shape [LARGE0, D1, .. , DN] where LARGE0 >> D0. The values
     in indices are the indices in the first dimension of the slices that have been extracted from
     the larger NDArray. The indices are expected to be sorted in ascending order.
 
-    The corresponding NDArray ``dense`` with `default_storage` represented by a ``rsp``
+    The corresponding NDArray ``dense`` with `default` represented by a ``rsp``
     SparseNDArray with `row_sparse` storage has
 
     ``dense[rsp.indices[i], :, :, :, ...] = rsp.values[i, :, :, :, ...]``
@@ -558,7 +558,7 @@ def to_dense(source):
     SparseNDArray
         The dense array with default storage
     """
-    return ndarray.cast_storage(source, storage_type='default_storage')
+    return ndarray.cast_storage(source, storage_type='default')
 
 def zeros(storage_type, shape, ctx=None, dtype=None, aux_types=None):
     """Return a new array of given shape and type, filled with zeros.
@@ -604,7 +604,7 @@ def _ndarray_cls(handle):
     stype = _storage_type(handle)
     # TODO(haibin) in the long run, we want to have CSRNDArray and RowSparseNDArray which
     # inherit from SparseNDArray
-    return NDArray(handle) if stype == 'default_storage' else SparseNDArray(handle)
+    return NDArray(handle) if stype == 'default' else SparseNDArray(handle)
 
 # pylint: enable=too-many-locals, invalid-name
 def _init_ndarray_module(ndarray_class, root_namespace):
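The docstring identity is easiest to see on a tiny array. Below is a NumPy-only sketch of what `row_sparse` storage represents (an illustration, not the MXNet API):

```python
import numpy as np

indices = np.array([0, 2])     # rows of the dense array that are actually stored
values = np.array([[1., 2.],
                   [3., 4.]])  # one slice per stored row

# dense[rsp.indices[i], :] = rsp.values[i, :]
dense = np.zeros((3, 2))
dense[indices] = values
# dense -> [[1., 2.],
#           [0., 0.],
#           [3., 4.]]
```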
2 changes: 1 addition & 1 deletion python/mxnet/symbol.py
@@ -1416,7 +1416,7 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, storage_type_dict=N
         shared_buffer_names = []
         shared_buffer_handles = []
         for k, v in shared_buffer.items():
-            assert(v.storage_type == 'default_storage'), \
+            assert(v.storage_type == 'default'), \
                 "shared_buffer is expected to only contain NDArrays with default storage"
             shared_buffer_names.append(c_str(k))
             shared_buffer_handles.append(v.handle)
5 changes: 4 additions & 1 deletion python/mxnet/test_utils.py
@@ -95,12 +95,15 @@ def rand_sparse_ndarray(shape, storage_type, density=None):
     assert(False), "unknown storage type"
 
 def rand_ndarray(shape, storage_type, density=None):
-    if storage_type == 'default_storage':
+    if storage_type == 'default':
         arr = mx.nd.array(random_arrays(shape))
     else:
         arr, _ = rand_sparse_ndarray(shape, storage_type, density=density)
     return arr
 
+def rand_shape_2d():
+    return (rnd.randint(1, 10), rnd.randint(1, 10))
+
 def np_reduce(dat, axis, keepdims, numpy_reduce_func):
     """Compatible reduce for old version of NumPy.
 
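A hypothetical usage sketch for the helpers above (assumes an MXNet build from this branch; `density` is only meaningful for sparse storage types):

```python
from mxnet.test_utils import rand_ndarray, rand_shape_2d

shape = rand_shape_2d()                 # e.g. (3, 7); each dim drawn from [1, 10)
dense = rand_ndarray(shape, 'default')  # NDArray with default storage
sparse = rand_ndarray(shape, 'row_sparse', density=0.5)  # SparseNDArray
```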
4 changes: 1 addition & 3 deletions src/operator/tensor/elemwise_unary_op.h
@@ -206,12 +206,10 @@ struct relu_grad {
 }  // namespace kernel_launch_op
 
 struct CastStorageParam : public dmlc::Parameter<CastStorageParam> {
-  // use int for enumeration
-  // TODO(haibin) add enum for storage_type. Probably also aux-types
   int storage_type;
   DMLC_DECLARE_PARAMETER(CastStorageParam) {
     DMLC_DECLARE_FIELD(storage_type)
-    .add_enum("default_storage", kDefaultStorage)
+    .add_enum("default", kDefaultStorage)
     .add_enum("row_sparse", kRowSparseStorage)
     .add_enum("csr", kCSRStorage)
     .describe("Output storage type.");
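On the Python side, the renamed enum value is what `cast_storage` now accepts — a minimal sketch, assuming a build of this branch (the `storage_type` kwarg mirrors the `DMLC_DECLARE_FIELD` above and matches the `to_dense` change earlier in this diff):

```python
import mxnet as mx
from mxnet.test_utils import rand_ndarray

rsp = rand_ndarray((4, 2), 'row_sparse', density=0.5)
dns = mx.nd.cast_storage(rsp, storage_type='default')  # 'default_storage' is gone
```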
8 changes: 4 additions & 4 deletions tests/cpp/engine/threaded_engine_test.cc
@@ -100,7 +100,7 @@ double EvaluateWorloads(const std::vector<Workload>& workloads,
   return dmlc::GetTime() - t;
 }
 
-/*TEST(Engine, RandSumExpr) {
+TEST(Engine, RandSumExpr) {
   std::vector<Workload> workloads;
   int num_repeat = 5;
   const int num_engine = 4;
@@ -134,11 +134,11 @@ double EvaluateWorloads(const std::vector<Workload>& workloads,
   LOG(INFO) << "NaiveEngine\t\t" << t[1] << " sec";
   LOG(INFO) << "ThreadedEnginePooled\t" << t[2] << " sec";
   LOG(INFO) << "ThreadedEnginePerDevice\t" << t[3] << " sec";
-}*/
+}
 
 void Foo(mxnet::RunContext, int i) { printf("The fox says %d\n", i); }
 
-/*TEST(Engine, basics) {
+TEST(Engine, basics) {
   auto&& engine = mxnet::Engine::Get();
   auto&& var = engine->NewVariable();
   std::vector<mxnet::Engine::OprHandle> oprs;
@@ -235,4 +235,4 @@ void Foo(mxnet::RunContext, int i) { printf("The fox says %d\n", i); }
   var = nullptr;
   oprs.clear();
   LOG(INFO) << "All pass";
-}*/
+}
244 changes: 0 additions & 244 deletions tests/cpp/operator/ndarray_test.cc
@@ -4,247 +4,3 @@
  * \brief ndarray unit test utility functions
  * \author Haibin Lin
  */
-/*#include <unistd.h>
-#include <dmlc/logging.h>
-#include <cstdio>
-#include <gtest/gtest.h>
-#include <vector>
-
-#include <mxnet/engine.h>
-#include <mxnet/ndarray.h>
-#include "../src/executor/graph_executor.h"
-#include "../src/operator/tensor/elemwise_binary_op.h"
-#include "../src/operator/tensor/elemwise_unary_op.h"
-#include "../src/operator/tensor/indexing_op.h"
-#include "../src/operator/optimizer_op-inl.h"
-#include "../src/operator/tensor/init_op.h"
-#include "test_ndarray_utils.h"
-
-using namespace mxnet;
-// Conversion Tests
-void CastDnsDnsTest() {
-  Context ctx;
-  TShape shape({2, 2});
-  NDArray nd = DnsND(shape, ctx, {});
-  auto nd_copy = Convert(kDefaultStorage, nd);
-  CheckDataRegion(nd_copy.data(), nd.data());
-}
-
-void CastRspDnsTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({2, 2});
-  float v1 = RandFloat();
-  float v2 = RandFloat();
-  NDArray nd = RspND(shape, ctx, {0}, {v1, v2});
-  // Dense ndarray
-  NDArray dense_nd = DnsND(shape, ctx, {v1, v2, 0, 0});
-  NDArray converted = Convert(kDefaultStorage, nd);
-  CheckDataRegion(converted.data(), dense_nd.data());
-}
-
-// NDArray function tests
-void SetValueTest() {
-  Context ctx = Context::CPU();
-  TShape data_shape({2, 2});
-  float v = RandFloat();
-  NDArray nd0 = DnsND(data_shape, ctx, {v, v, v, v});
-  NDArray nd1(data_shape, ctx, false);
-  nd1 = v;
-  nd1.WaitToRead();
-  CheckDataRegion(nd0.data(), nd1.data());
-}
-
-// InferStorage
-void InferElemwiseStorageTest() {
-  nnvm::NodeAttrs attrs;
-  attrs.name = "test_op";
-  std::vector<int> in_attrs({kRowSparseStorage, kDefaultStorage});
-  std::vector<int> out_attrs({kUndefinedStorage});
-  // rsp, default -> default
-  op::ElemwiseStorageType<2, 1>(attrs, &in_attrs, &out_attrs);
-  EXPECT_EQ(out_attrs[0], kDefaultStorage);
-  // default, rsp -> default
-  in_attrs = {kDefaultStorage, kRowSparseStorage};
-  out_attrs = {kUndefinedStorage};
-  op::ElemwiseStorageType<2, 1>(attrs, &in_attrs, &out_attrs);
-  EXPECT_EQ(out_attrs[0], kDefaultStorage);
-  // rsp, rsp -> rsp
-  in_attrs = {kRowSparseStorage};
-  out_attrs = {kUndefinedStorage, kUndefinedStorage};
-  op::ElemwiseStorageType<1, 2>(attrs, &in_attrs, &out_attrs);
-  EXPECT_EQ(out_attrs[0], kRowSparseStorage);
-  EXPECT_EQ(out_attrs[1], kRowSparseStorage);
-}
-
-// Optimizer
-void SGDDnsRspTest() {
-  TShape shape({4, 2});
-  Context ctx = Context::CPU();
-  NDArray weight = DnsND(shape, ctx, {1, 2, 3, 4, 5, 6, 7, 8});
-  NDArray rsp_grad = RspND(shape, ctx, {0, 3}, {1, 2, 3, 4});
-  NDArray output = weight;
-  float lr = RandFloat();
-  float wd = RandFloat();
-  float rescale = RandFloat();
-  op::SGDParam param;
-  param.lr = lr;
-  param.wd = wd;
-  param.rescale_grad = rescale;
-  param.clip_gradient = -1.0f;
-  Engine::Get()->PushSync([weight, rsp_grad, output, param](RunContext ctx) {
-      std::vector<NDArray> inputs{weight, rsp_grad}, outputs{output};
-      std::vector<OpReqType> req({kAddTo});
-      op::SparseSGDUpdateDnsRspImpl<cpu>(param, {}, inputs, req, outputs);
-    }, weight.ctx(), {rsp_grad.var()}, {output.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-  auto sgd = [lr, wd, rescale] (TEST_DTYPE weight, TEST_DTYPE grad) {
-    return (1.f-lr*wd)*weight - (lr*rescale)*grad;
-  };
-
-  NDArray expected = DnsND(shape, ctx,
-                           {1 + sgd(1, 1), 2 + sgd(2, 2), 3, 4, 5, 6,
-                            7 + sgd(7, 3), 8 + sgd(8, 4)});
-  output.WaitToRead();
-  CheckDataRegion(output.data(), expected.data());
-}
-
-void CopyFromToRspDnsTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({2, 2});
-  NDArray nd = RspND(shape, ctx, {0}, {1, 1});
-  // Dense ndarray
-  NDArray dns_nd = DnsND(shape, ctx, {});
-  CopyFromTo(nd, &dns_nd);
-  dns_nd.WaitToRead();
-  CheckDataRegion(nd.data(), dns_nd.data());
-}
-
-void CopyFromToRspRspReuseTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({3, 2});
-  NDArray nd = RspND(shape, ctx, {0}, {1,2});
-  // Sparse ndarray with enough memory. It's expected to reuse the memory
-  NDArray dst_nd = RspND(shape, ctx, {0, 1, 2}, {6,6,6,6,6,6});
-  nd.WaitToRead();
-  CopyFromTo(nd, &dst_nd);
-  dst_nd.WaitToRead();
-  CheckDataRegion(nd.data(), dst_nd.data());
-  CHECK_EQ(dst_nd.aux_shape(rowsparse::kIdx)[0], 1);
-  CHECK_EQ(dst_nd.storage_shape()[0], 1);
-  CHECK_EQ(dst_nd.storage_shape()[1], 2);
-}
-
-
-void CopyFromToRspRspFreeTest() {
-  Context ctx;
-  // Sparse ndarray
-  TShape shape({3, 2});
-  NDArray nd = RspND(shape, ctx, {0, 1}, {1,1,1,1});
-  // Sparse ndarray with enough memory. It's expected to reuse the memory
-  NDArray dst_nd = RspND(shape, ctx, {0}, {2,2});
-  nd.WaitToRead();
-  CopyFromTo(nd, &dst_nd);
-  dst_nd.WaitToRead();
-  CheckDataRegion(nd.data(), dst_nd.data());
-}
-
-void BinaryAddRspRsp() {
-  Context ctx = Context::CPU();
-
-  TShape output_shape({4, 2});
-  NDArray input_nd0 = RspND(output_shape, ctx, {0, 1}, {10,10,10,10});
-  NDArray input_nd1 = RspND(output_shape, ctx, {0, 2}, {5,5,5,5});
-
-  NDArray output(kRowSparseStorage, output_shape, ctx);
-  std::vector<Engine::VarHandle> const_vars;
-  const_vars.push_back(input_nd0.var());
-  const_vars.push_back(input_nd1.var());
-
-  Engine::Get()->PushSync([input_nd0, input_nd1, output](RunContext ctx) {
-      OpContext op_ctx;
-      std::vector<NDArray> inputs, outputs;
-      std::vector<OpReqType> req;
-      inputs.push_back(input_nd0);
-      inputs.push_back(input_nd1);
-      outputs.push_back(output);
-      op::BinaryComputeRspRsp<cpu, cpu>({}, op_ctx, inputs, req, outputs);
-    }, input_nd0.ctx(), const_vars, {output.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-
-  // Check the data region of output ndarray
-  NDArray dense_output = DnsND(output_shape, ctx, {15, 15, 10, 10, 5, 5, 0, 0});
-  NDArray copy = Convert(kDefaultStorage, output);
-  CheckDataRegion(dense_output.data(), copy.data());
-}
-
-void SparseEmbeddingBackwardTest() {
-  Context ctx = Context::CPU();
-  // d1 .. dk
-  // idx shape : (2, 3)
-  // input dim 4, output dim 2
-  int input_dim = 4;
-  int output_dim = 2;
-  TShape idx_shape({2, 3});
-  NDArray idx = RspIdxND(idx_shape, ctx, {1, 2, 3, 1, 2, 3});
-  TShape grad_shape({2, 3, 2});
-  NDArray grad = DnsND(grad_shape, ctx, {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2});
-  TShape out_shape({4, 2});
-  NDArray output = NDArray(kRowSparseStorage, out_shape, ctx);
-  op::EmbeddingParam param;
-  param.input_dim = input_dim;
-  param.output_dim = output_dim;
-  param.dtype = 0;
-
-  Engine::Get()->PushSync([idx, grad, output, param](RunContext ctx) {
-      std::vector<NDArray> inputs{grad, idx}, outputs{output, output};
-      // this is a hack
-      std::vector<OpReqType> req({kNullOp, kAddTo});
-      op::SparseEmbeddingOpBackwardEx<cpu>({}, {}, inputs, req, outputs);
-    }, output.ctx(), {grad.var(), idx.var()}, {output.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-
-  NDArray expected = DnsND(out_shape, ctx, {0,0,0,0,0,0,0,0});
-  Engine::Get()->PushSync([idx, grad, expected, param](RunContext ctx) {
-      std::vector<TBlob> inputs{grad.data(), idx.data()}, outputs{expected.data(), expected.data()};
-      std::vector<OpReqType> req({kNullOp, kWriteTo});
-      op::EmbeddingOpBackward<cpu>({}, {}, inputs, req, outputs);
-    }, expected.ctx(), {grad.var(), idx.var()}, {expected.var()},
-    FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
-  NDArray converted = Convert(kDefaultStorage, output);
-  expected.WaitToRead();
-  CheckDataRegion(converted.data(), expected.data());
-}
-
-TEST(NDArray, binary_add) {
-  BinaryAddRspRsp();
-}
-
-TEST(NDArray, conversion) {
-  CastDnsDnsTest();
-  CastRspDnsTest();
-}
-
-TEST(NDArray, functions) {
-  SetValueTest();
-}
-
-TEST(NDArray, optimizer) {
-  SGDDnsRspTest();
-}
-
-TEST(NDArray, copy) {
-  CopyFromToRspDnsTest();
-  CopyFromToRspRspReuseTest();
-  CopyFromToRspRspFreeTest();
-}
-
-TEST(NDArray, infer_storage) {
-  InferElemwiseStorageTest();
-}
-
-TEST(NDArray, sparse_embedding) {
-  SparseEmbeddingBackwardTest();
-}*/
6 changes: 3 additions & 3 deletions tests/python/unittest/test_infer_shape.py
@@ -138,9 +138,9 @@ def check_infer_storage(v1, v2, v1_storage, v2_storage, out_chunk):
 def test_elemwise_add_infer_storage_type():
     v1 = mx.symbol.Variable('v1')
     v2 = mx.symbol.Variable('v2')
-    check_infer_storage(v1, v2, 'default_storage', 'default_storage', 'default_storage')
-    check_infer_storage(v1, v2, 'default_storage', 'row_sparse', 'default_storage')
-    check_infer_storage(v1, v2, 'row_sparse', 'default_storage', 'default_storage')
+    check_infer_storage(v1, v2, 'default', 'default', 'default')
+    check_infer_storage(v1, v2, 'default', 'row_sparse', 'default')
+    check_infer_storage(v1, v2, 'row_sparse', 'default', 'default')
     check_infer_storage(v1, v2, 'row_sparse', 'row_sparse', 'row_sparse')
 
 if __name__ == "__main__":
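Stated directly, the rule these cases pin down: elementwise add stays `row_sparse` only when every input is `row_sparse`; any `default` input forces a `default` output. A hypothetical helper capturing that rule (illustration only, not MXNet code):

```python
def infer_elemwise_storage(stypes):
    # Any dense ('default') input forces a dense output.
    return 'row_sparse' if all(s == 'row_sparse' for s in stypes) else 'default'

assert infer_elemwise_storage(['default', 'row_sparse']) == 'default'
assert infer_elemwise_storage(['row_sparse', 'row_sparse']) == 'row_sparse'
```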