feat(aten::size [static]): Implement an aten::size converter for static input size

Signed-off-by: Naren Dasan <naren@narendasan.com>
Signed-off-by: Naren Dasan <narens@nvidia.com>
narendasan committed May 3, 2020
1 parent c5b6202 commit 0548540
Showing 8 changed files with 45 additions and 7 deletions.
2 changes: 1 addition & 1 deletion core/conversion/conversion.cpp
@@ -78,7 +78,7 @@ void AddLayer(ConversionCtx* ctx, const torch::jit::Node* n) {
} else {
LOG_DEBUG(ctx->logger, "Found the value to be a tensor (shape " << eval.value().toTensor().sizes() << ')');
}
- ctx->evaluated_value_map[input] = std::move(eval.value());
+ ctx->AssociateValueAndIValue(input, eval.value());
node_args.push_back(&(ctx->evaluated_value_map[input]));
} else {
LOG_DEBUG(ctx->logger, "Found the value is None");;
6 changes: 6 additions & 0 deletions core/conversion/conversionctx/ConversionCtx.cpp
@@ -103,9 +103,15 @@ nvinfer1::ITensor* ConversionCtx::AssociateValueAndTensor(const torch::jit::Valu
return tensor;
}

+ torch::jit::IValue* ConversionCtx::AssociateValueAndIValue(const torch::jit::Value* value, torch::jit::IValue ivalue) {
+ this->evaluated_value_map[value] = std::move(ivalue);
+ return &this->evaluated_value_map[value];
+ }

std::string ConversionCtx::SerializeEngine() {
auto engine = builder->buildEngineWithConfig(*net, *cfg);
auto serialized_engine = engine->serialize();
engine->destroy();
return std::string((const char*)serialized_engine->data(), serialized_engine->size());
}

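The new ConversionCtx::AssociateValueAndIValue mirrors AssociateValueAndTensor: it moves the evaluated torch::jit::IValue into evaluated_value_map and returns a pointer to the stored copy, which is what AddLayer in conversion.cpp now relies on. A self-contained sketch of that store-and-return-pointer idiom, with std::string keys and int64_t values standing in for torch::jit::Value* and torch::jit::IValue (illustrative only, not the trtorch types):

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

// Stand-in for ConversionCtx::evaluated_value_map.
static std::unordered_map<std::string, int64_t> evaluated_value_map;

// Store the value, then hand back a pointer into the map; std::unordered_map
// guarantees element addresses stay stable across later insertions.
int64_t* associate(const std::string& key, int64_t value) {
  evaluated_value_map[key] = std::move(value);
  return &evaluated_value_map[key];
}

int main() {
  int64_t* stored = associate("x.size(0)", 32);
  std::cout << *stored << std::endl;  // 32, read back through the map's copy
}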
2 changes: 1 addition & 1 deletion core/conversion/conversionctx/ConversionCtx.h
@@ -10,7 +10,6 @@

#include "core/util/prelude.h"


namespace trtorch {
namespace core {
namespace conversion {
@@ -39,6 +38,7 @@ struct ConversionCtx {
ConversionCtx(BuilderSettings settings);
std::string SerializeEngine();
nvinfer1::ITensor* AssociateValueAndTensor(const torch::jit::Value* value, nvinfer1::ITensor* tensor);
+ torch::jit::IValue* AssociateValueAndIValue(const torch::jit::Value* value, torch::jit::IValue tensor);
bool CheckLayerAddition(const torch::jit::Node* n);

~ConversionCtx();
1 change: 1 addition & 0 deletions core/conversion/converters/BUILD
@@ -18,6 +18,7 @@ cc_library(
"impl/matrix_multiply.cpp",
"impl/pooling.cpp",
"impl/reduce.cpp",
"impl/shape.cpp",
"impl/shuffle.cpp",
"impl/softmax.cpp",
"impl/unary.cpp",
32 changes: 32 additions & 0 deletions core/conversion/converters/impl/shape.cpp
@@ -0,0 +1,32 @@
#include "core/conversion/converters/converters.h"

#include "torch/torch.h"

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {
namespace impl {
namespace {

static auto shape_registrations = RegisterNodeConversionPatterns()
.pattern({
// To use in static input size cases (explicit batch)
"aten::size.int(Tensor self, int dim) -> (Tensor)",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
auto in = args[0].ITensor();
auto in_shape = util::toVec(in->getDimensions());

auto size = in_shape[args[1].unwrapToInt()];

ctx->AssociateValueAndIValue(n->outputs()[0], size);
LOG_DEBUG("Output Value: " << size);
return true;
}
});
} // namespace
} // namespace impl
} // namespace converters
} // namespace conversion
} // namespace core
} // namespace trtorch
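For reference, the converter registered above only applies when the input tensor's dimensions are fully known at conversion time (explicit batch, static shapes): it reads the requested dimension directly off the ITensor and records it as an evaluated IValue instead of emitting a TensorRT layer. A standalone sketch of that lookup, approximating util::toVec with a direct copy of nvinfer1::Dims (illustrative only; static_size is not part of the codebase):

#include <cstdint>
#include <vector>

#include "NvInfer.h"

// What aten::size.int(self, dim) reduces to for a static shape.
int64_t static_size(const nvinfer1::Dims& dims, int64_t dim) {
  // Roughly util::toVec(in->getDimensions()).
  std::vector<int64_t> shape(dims.d, dims.d + dims.nbDims);
  // This is the value the converter stores via ctx->AssociateValueAndIValue(...).
  return shape[dim];
}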
5 changes: 3 additions & 2 deletions core/conversion/converters/impl/shuffle.cpp
@@ -32,11 +32,12 @@ static auto shuffle_registrations = RegisterNodeConversionPatterns()
"aten::reshape(Tensor self, int[] shape) -> (Tensor)",
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
auto in = args[0].ITensor();
- auto new_shape = util::toDimsPad(args[1].unwrapToIntList(), 2);
+ auto in_shape = util::toVec(in->getDimensions());
+ auto new_shape = torch::reshape(torch::rand(in_shape), args[1].unwrapToIntList().vec()).sizes();

auto shuffle = ctx->net->addShuffle(*in);
TRTORCH_CHECK(shuffle, "Unable to create shuffle layer from node: " << *n);
- shuffle->setReshapeDimensions(new_shape);
+ shuffle->setReshapeDimensions(util::toDims(new_shape));
shuffle->setName(util::node_info(n).c_str());

auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle->getOutput(0));
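The aten::reshape change above delegates shape resolution to ATen: rather than padding the requested shape with util::toDimsPad, it reshapes a throwaway tensor of the (static) input shape and reads back the concrete sizes, which also resolves any -1 wildcard entry before util::toDims builds the TensorRT dimensions. A minimal standalone illustration (the shapes here are made up for the example):

#include <cstdint>
#include <iostream>
#include <vector>

#include "torch/torch.h"

int main() {
  std::vector<int64_t> in_shape = {2, 3, 4};   // static input shape
  std::vector<int64_t> requested = {2, -1};    // aten::reshape's int[] shape argument
  // torch::reshape resolves the -1 against the 24 input elements.
  auto new_shape = torch::reshape(torch::rand(in_shape), requested).sizes();
  std::cout << new_shape << std::endl;         // prints [2, 12]
}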
2 changes: 0 additions & 2 deletions core/execution/TRTEngine.cpp
@@ -10,8 +10,6 @@ namespace trtorch {
namespace core {
namespace execution {

- TRTEngine::TRTEngine() {}

TRTEngine::TRTEngine(nvinfer1::ILogger& logger, std::string& serialized_engine) {
rt = nvinfer1::createInferRuntime(logger);

2 changes: 1 addition & 1 deletion core/execution/execution.h
@@ -17,7 +17,7 @@ struct TRTEngine {
std::pair<uint64_t, uint64_t> num_io;
EngineID id;

- TRTEngine();
+ TRTEngine() = default;
TRTEngine(nvinfer1::ILogger& logger, std::string& serialized_engine);
TRTEngine& operator=(const TRTEngine& other);
};
