Skip to content
This repository has been archived by the owner on Feb 1, 2020. It is now read-only.

merge existing nnvm/sparse into nnvm/master #119

Merged
merged 4 commits into from
Jun 7, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions include/nnvm/graph_attr_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,21 @@ using ShapeVector = std::vector<TShape>;
*/
using DTypeVector = std::vector<int>;

/*!
 * \brief The result holder of storage type of each NodeEntry in the graph.
 * \note Stored under graph.attrs["storage_type"], provided by Pass "InferStorageType"
 *
 * \code
 * Graph g = ApplyPass(src_graph, "InferStorageType");
 * const StorageTypeVector& stypes = g.GetAttr<StorageTypeVector>("storage_type");
 * // get storage type by entry id
 * int entry_type = stypes[g.indexed_graph().entry_id(my_entry)];
 * \endcode
 *
 * \sa FInferStorageType
 */
using StorageTypeVector = std::vector<int>;

/*!
* \brief The result holder of device of each operator in the graph.
* \note Stored under graph.attrs["device"], provided by Pass "PlaceDevice"
Expand Down
8 changes: 8 additions & 0 deletions include/nnvm/op_attr_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,14 @@ using FInferShape = FInferNodeEntryAttr<TShape>;
*/
using FInferType = FInferNodeEntryAttr<int>;

/*!
 * \brief Storage type inference function.
 * Update the storage type given the known type information.
 *
 * \note Register under "FInferStorageType".
 * When an operator provides no such function, the "InferStorageType" pass
 * falls back to assigning the default storage type (0) to every input and
 * output entry whose type is still unknown (-1).
 */
using FInferStorageType = FInferNodeEntryAttr<int>;
/*!
* \brief Whether this op is an explicit backward operator,
* If TIsBackward is true:
Expand Down
20 changes: 20 additions & 0 deletions include/nnvm/pass_functions.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,26 @@ inline Graph InferType(Graph graph,
return ApplyPass(std::move(graph), "InferType");
}

/*!
 * \brief Infer storage types in the graph given the information.
 * \param graph The input graph.
 * \param storage_type_inputs The storage types of input symbols to the graph.
 * \param storage_type_attr_key The key to the node attribute that can indicate storage types.
 *        This is the place where manual hint for types could be injected.
 * \return A graph with new attribute "storage_type" containing inferred type of each NodeEntry.
 *         The index of StorageTypeVector is given by graph.indexed_graph().entry_id.
 */
inline Graph InferStorageType(Graph graph,
                              StorageTypeVector storage_type_inputs,
                              std::string storage_type_attr_key = "") {
  // Only attach the optional hints when the caller actually supplied them,
  // mirroring the InferShape/InferType helpers above.
  if (!storage_type_inputs.empty()) {
    graph.attrs["storage_type_inputs"] = std::make_shared<any>(std::move(storage_type_inputs));
  }
  if (!storage_type_attr_key.empty()) {
    graph.attrs["storage_type_attr_key"] = std::make_shared<any>(std::move(storage_type_attr_key));
  }
  return ApplyPass(std::move(graph), "InferStorageType");
}
/*!
* \brief Place the devices for each operator in the graph.
*
Expand Down
1 change: 0 additions & 1 deletion src/core/symbolic.cc
Original file line number Diff line number Diff line change
Expand Up @@ -436,7 +436,6 @@ Symbol Symbol::GetInternals() const {
}

Symbol Symbol::GetChildren() const {
static auto& fnum_vis_output = Op::GetAttr<FNumVisibleOutputs>("FNumVisibleOutputs");
Symbol ret;
std::unordered_set<Node*> visited;
for (const auto& p : this->outputs) {
Expand Down
41 changes: 36 additions & 5 deletions src/pass/infer_shape_type.cc
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
namespace nnvm {
namespace pass {
namespace {

template<typename AttrType, typename IsNone, typename FDefault>
Graph InferAttr(Graph &&ret,
const AttrType empty_val,
Expand All @@ -20,7 +19,8 @@ Graph InferAttr(Graph &&ret,
const char* attr_name,
const char* unknown_name,
IsNone fis_none,
FDefault fdefault) {
FDefault fdefault,
bool backward_identity_assign) {
using AttrVector = std::vector<AttrType>;
const IndexedGraph& idx = ret.indexed_graph();
static auto& finfer_shape =
Expand Down Expand Up @@ -88,7 +88,8 @@ Graph InferAttr(Graph &&ret,
CHECK(is >> rshape[out_ent_id]) << "Invalid attribute";
}
}
} else if (is_backward.get(inode.source->op(), false) && inode.control_deps.size()) {
} else if (is_backward.get(inode.source->op(), false) &&
inode.control_deps.size() && backward_identity_assign) {
CHECK_GE(inode.control_deps.size(), 1U)
<< "BackwardOp need to have control_deps to its forward op";
const IndexedGraph::Node& fnode = idx[inode.control_deps[0]];
Expand Down Expand Up @@ -208,7 +209,7 @@ NNVM_REGISTER_PASS(InferShape)
"FInferShape", "shape_inputs", "shape_attr_key",
"shape", "shape_num_unknown_nodes",
[](const TShape& s) { return s.ndim() == 0 || s.Size() == 0; },
nullptr);
nullptr, true);
})
.set_change_graph(false)
.provide_graph_attr("shape");
Expand Down Expand Up @@ -240,6 +241,35 @@ inline bool SameType(const NodeAttrs& attrs,
return true;
}

// Fallback inference rule: overwrite every still-unknown entry (== none)
// in both the input and the output attribute vectors with default_val.
// Always reports success, since afterwards every entry is known.
template <int default_val, int none>
inline bool DefaultType(const NodeAttrs& attrs,
                        std::vector<int> *iattr,
                        std::vector<int> *oattr) {
  auto fill_unknown = [](std::vector<int> *vec) {
    for (int& stype : *vec) {
      if (stype == none) stype = default_val;
    }
  };
  fill_unknown(iattr);
  fill_unknown(oattr);
  return true;
}

// Register the InferStorageType pass: infers an int storage type for every
// NodeEntry and stores the result under graph.attrs["storage_type"].
NNVM_REGISTER_PASS(InferStorageType)
.describe("Infer the storage type of each node entries.")
.set_body([](Graph ret) {
    // For storage type, a backward op's attr is not necessarily the same as
    // its forward correspondence, so backward_identity_assign is false here
    // (unlike the InferShape/InferType registrations, which pass true).
    // Entries still unknown (-1) after inference fall back to kDefaultStorage
    // via DefaultType.
    const int kDefaultStorage = 0;
    return InferAttr<int>(
        std::move(ret), -1,
        "FInferStorageType", "storage_type_inputs", "storage_type_attr_key",
        "storage_type", "storage_type_num_unknown_nodes",
        [](const int t) { return t == -1; },
        DefaultType<kDefaultStorage, -1>, false);
  })
.set_change_graph(false)
.provide_graph_attr("storage_type");

NNVM_REGISTER_PASS(InferType)
.describe("Infer the dtype of each node entries.")
.set_body([](Graph ret) {
Expand All @@ -248,13 +278,14 @@ NNVM_REGISTER_PASS(InferType)
"FInferType", "dtype_inputs", "dtype_attr_key",
"dtype", "dtype_num_unknown_nodes",
[](const int t) { return t == -1; },
SameType);
SameType, true);
})
.set_change_graph(false)
.provide_graph_attr("dtype");

DMLC_JSON_ENABLE_ANY(ShapeVector, list_shape);
DMLC_JSON_ENABLE_ANY(DTypeVector, list_int);
DMLC_JSON_ENABLE_ANY(StorageTypeVector, list_int);
DMLC_JSON_ENABLE_ANY(size_t, size_t);

} // namespace
Expand Down
9 changes: 6 additions & 3 deletions src/pass/plan_memory.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@ class GraphAllocator {
static const StorageID kBadStorageID = -1;
// external storage id
static const StorageID kExternalStorageID = -2;
// dynamic storage id
static const StorageID kDynamicStorageID = -3;

// request a free storage
StorageID Request(int dev_id, int dtype, TShape shape, uint32_t node_id) {
Expand Down Expand Up @@ -153,7 +155,6 @@ size_t AllocMemory(const Graph& ret, const IndexedGraph& idx, StorageVector* sto
const DTypeVector& dtype_vec = ret.GetAttr<DTypeVector>("dtype");
const DeviceVector* device_vec = nullptr;
static auto& finplace_option = Op::GetAttr<FInplaceOption>("FInplaceOption");

if (ret.attrs.count("device") != 0) {
device_vec = &(ret.GetAttr<DeviceVector>("device"));
}
Expand Down Expand Up @@ -216,15 +217,17 @@ size_t AllocMemory(const Graph& ret, const IndexedGraph& idx, StorageVector* sto
if (ref_count[eid] == 0) continue;
// if we decrease it to zero, means we are ready to relase
--ref_count[eid];
if (ref_count[eid] == 0 && storage[eid] != GraphAllocator::kBadStorageID) {
if (ref_count[eid] == 0 && storage[eid] != GraphAllocator::kBadStorageID &&
storage[eid] != GraphAllocator::kDynamicStorageID) {
allocator->Release(storage[eid], nid);
}
}
// check if there are outputs that can be freeded immediately
// these output are not referenced by any operator.
for (uint32_t index = 0; index < inode.source->num_outputs(); ++index) {
uint32_t eid = idx.entry_id(nid, index);
if (ref_count[eid] == 0 && storage[eid] != GraphAllocator::kBadStorageID) {
if (ref_count[eid] == 0 && storage[eid] != GraphAllocator::kBadStorageID &&
storage[eid] != GraphAllocator::kDynamicStorageID) {
allocator->Release(storage[eid], nid);
// use -2 to indicate that the node was never touched.
storage_inplace_index[eid] = -2;
Expand Down
15 changes: 15 additions & 0 deletions tests/python/test_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,20 @@ def test_infer_type():
assert g.json_attr('dtype')[jnode_row_ptr[nindex["cast1"]]] == 1
assert g.json_attr('dtype')[jnode_row_ptr[nindex["add1"]]] == 0


def test_infer_storage_type():
    """Verify InferStorageType assigns the default storage type (0) to an op output."""
    var = sym.Variable('x')
    out = sym.add(var, var, name='add1')
    g = graph.create(out)
    g._set_json_attr("storage_type_attr_key", "storage_type")
    g = g.apply('InferStorageType')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    name_to_idx = {node['name']: idx for idx, node in enumerate(jgraph['nodes'])}
    row_ptr = jgraph['node_row_ptr']
    entry_id = row_ptr[name_to_idx['add1']]
    assert g.json_attr('storage_type')[entry_id] == 0

def test_place_device():
x = sym.Variable('x', device_group="stage1")
y = sym.add(x, x, name='add1')
Expand Down Expand Up @@ -149,6 +163,7 @@ def test_plan_memory():
test_infer_shape()
test_infer_shape_known_partial()
test_infer_type()
test_infer_storage_type()
test_place_device()
test_plan_memory()
test_list_args()