From 2df76e3a957a12f5540803d0004014965376464f Mon Sep 17 00:00:00 2001
From: Kimish Patel
Date: Mon, 19 Aug 2019 08:45:39 -0700
Subject: [PATCH] Fixes for the changes in PT API.

Summary:
PyTorch's JIT tensor-type API changed: device(), sizes() and
scalarType() now return optionals, since a profiled graph may not have
recorded each value. Switch the type casts accordingly and unwrap each
optional with TORCH_INTERNAL_ASSERT before use.

Test Plan:

Reviewers:

Subscribers:

Tasks:

Tags:
---
 torch_tvm/compiler.cpp  | 19 ++++++++++++++-----
 torch_tvm/operators.cpp |  6 ++++--
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/torch_tvm/compiler.cpp b/torch_tvm/compiler.cpp
index c89b37b..e774fc3 100644
--- a/torch_tvm/compiler.cpp
+++ b/torch_tvm/compiler.cpp
@@ -32,6 +32,7 @@ tvm::relay::Var TVMCompiler::convertToRelay(Value* val, TVMContext ctx) {
   auto optional_ivalue = toIValue(val);
   if (optional_ivalue.has_value()) {
     if (optional_ivalue.value().isTensor()) {
+      auto t = optional_ivalue.value().toTensor();
       val->inferTypeFrom(optional_ivalue.value().toTensor());
     } else {
       auto expr = convertToRelay(optional_ivalue.value(), ctx)
@@ -45,15 +46,23 @@ tvm::relay::Var TVMCompiler::convertToRelay(Value* val, TVMContext ctx) {
   if (val->isCompleteTensor()) {
     // Ensure if complete tensor has device type then it is CPU
     // otherwise it is assume to be CPU.
-    auto pt_t = val->type()->cast<CompleteTensorType>();
-    auto device_type = pt_t->device();
+    auto pt_t = val->type()->cast<ProfiledTensorType>();
+    TORCH_INTERNAL_ASSERT(pt_t);
+    auto optional_device_type = pt_t->device();
+    TORCH_INTERNAL_ASSERT(optional_device_type);
+    auto device_type = optional_device_type.value();
     AT_CHECK(device_type == at::DeviceType::CPU,
         "Expected CPU device type but got:", device_type);
     tvm::Array<tvm::relay::IndexExpr> sizes;
-    for (const auto& size : pt_t->sizes()) {
-      sizes.push_back(tvm::relay::IndexExpr(static_cast<int64_t>(size)));
+    const auto& varying_sizes = pt_t->sizes();
+    for (const auto& optional_size : varying_sizes.sizes()) {
+      TORCH_INTERNAL_ASSERT(optional_size);
+      sizes.push_back(tvm::relay::IndexExpr(
+          static_cast<int64_t>(optional_size.value())));
     }
-    at::ScalarType pt_type = pt_t->scalarType();
+    auto optional_dtype = pt_t->scalarType();
+    TORCH_INTERNAL_ASSERT(optional_dtype);
+    at::ScalarType pt_type = optional_dtype.value();
     auto t = tvm::relay::TensorTypeNode::make(sizes, scalarTypeToTVMType(pt_type));
     auto v = tvm::relay::VarNode::make(
         val->debugName() +
diff --git a/torch_tvm/operators.cpp b/torch_tvm/operators.cpp
index 3164495..cd4fe79 100644
--- a/torch_tvm/operators.cpp
+++ b/torch_tvm/operators.cpp
@@ -506,9 +506,11 @@ RegisterTVMOperator reg({
     {Symbol::fromQualString("aten::linear"),
      [](Node* node, tvm::Array<tvm::relay::Expr> inputs) {
        Value* input = node->input(0);
-       auto d_tensor = input->type()->cast<DimensionedTensorType>();
+       auto d_tensor = input->type()->cast<ProfiledTensorType>();
        if (d_tensor) {
-         int64_t n_dim = d_tensor->dim();
+         auto optional_n_dim = d_tensor->dim();
+         TORCH_INTERNAL_ASSERT(optional_n_dim);
+         int64_t n_dim = optional_n_dim.value();
          TORCH_CHECK(n_dim == 2,
              "WARNING: relay does not support dense operation on inputs more than 2 dim");
        }
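
Note for reviewers: below is a minimal, self-contained sketch of the
unwrap-and-assert pattern the patch applies. It is illustrative only:
queryDim() and expectDim2() are hypothetical stand-ins for the getters
that now return optionals (pt_t->device(), pt_t->scalarType(),
d_tensor->dim()); the only real API assumed is c10::optional and the
TORCH_INTERNAL_ASSERT / TORCH_CHECK macros from c10.

    // Illustrative only, not part of the patch.
    #include <c10/util/Exception.h>  // TORCH_INTERNAL_ASSERT, TORCH_CHECK
    #include <c10/util/Optional.h>   // c10::optional
    #include <cstdint>

    // Hypothetical stand-in for the new-style getters, which return an
    // optional because a profiled graph may not have recorded the value.
    c10::optional<int64_t> queryDim(bool recorded) {
      return recorded ? c10::optional<int64_t>(2) : c10::nullopt;
    }

    int64_t expectDim2() {
      auto optional_n_dim = queryDim(/*recorded=*/true);
      // Internal invariant: the value must be present at this point.
      TORCH_INTERNAL_ASSERT(optional_n_dim);
      int64_t n_dim = optional_n_dim.value();
      // User-facing constraint, mirroring the aten::linear check above:
      // relay dense only handles 2-d inputs.
      TORCH_CHECK(n_dim == 2, "expected a 2-d input, got ", n_dim, " dims");
      return n_dim;
    }

The split follows the patch's convention: TORCH_INTERNAL_ASSERT guards
the "profiling should have recorded this" invariant, while TORCH_CHECK
reports the user-visible shape constraint.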