Skip to content
Permalink
Browse files

Handroll make_unique impl for future-compat with LLVM (#3566)

Summary:
Newer versions of LLVM (10+) will use C++14, eliding the need for make_unique.
This diff removes it entirely: https://reviews.llvm.org/D66259

Documentation: Updated docs/Tracing.md and all examples + core code with `llvm::make_unique`.
Pull Request resolved: #3566

Test Plan: Build and run all tests

Reviewed By: rdzhabarov

Differential Revision: D17752195

Pulled By: bertmaher

fbshipit-source-id: 74e3d6dae6c9a86fff0f5b45914fe53df5849004
  • Loading branch information...
Bram Wasti authored and facebook-github-bot committed Nov 6, 2019
1 parent 3d1caf9 commit f299e9c6efb0ca675fb80a554f8f002d73502840
Showing with 249 additions and 209 deletions.
  1. +6 −6 examples/resnet-runtime.cpp
  2. +2 −2 examples/tracing-compare.cpp
  3. +4 −4 include/glow/ExecutionContext/ExecutionContext.h
  4. +3 −3 include/glow/Importer/CommonOperatorLoader.h
  5. +36 −0 include/glow/Support/Memory.h
  6. +1 −1 lib/Backends/CPU/CPUBackend.cpp
  7. +1 −1 lib/Backends/DeviceManagers.cpp
  8. +16 −16 lib/Backends/Habana/Habana.cpp
  9. +4 −4 lib/Backends/Habana/HabanaDeviceManager.cpp
  10. +4 −4 lib/Backends/Habana/HabanaFunction.cpp
  11. +1 −1 lib/Backends/Habana/HabanaFunction.h
  12. +1 −1 lib/Backends/Interpreter/Interpreter.cpp
  13. +1 −1 lib/Backends/NNPI/NNPI.cpp
  14. +2 −2 lib/Backends/OpenCL/OpenCL.cpp
  15. +1 −1 lib/Backends/OpenCL/OpenCLDeviceManager.cpp
  16. +2 −2 lib/ExecutionEngine/ExecutionEngine.cpp
  17. +4 −4 lib/Importer/Caffe2ModelLoader.cpp
  18. +1 −1 lib/LLVMIRCodeGen/DebugInfo.cpp
  19. +1 −1 lib/LLVMIRCodeGen/LLVMBackend.cpp
  20. +1 −1 lib/LLVMIRCodeGen/LLVMIRGen.cpp
  21. +2 −2 lib/Onnxifi/Base.cpp
  22. +3 −3 lib/Onnxifi/HostManagerOnnxifi.cpp
  23. +1 −1 lib/Optimizer/GraphOptimizer/PassManager.cpp
  24. +1 −1 lib/Optimizer/IROptimizer/IROptimizer.cpp
  25. +2 −2 lib/Partitioner/Partitioner.cpp
  26. +3 −3 lib/Partitioner/PartitionerBase.cpp
  27. +2 −2 lib/Runtime/Executor/ExecutionState.cpp
  28. +7 −7 lib/Runtime/HostManager/HostManager.cpp
  29. +2 −2 lib/Runtime/Provisioner/Provisioner.cpp
  30. +4 −3 lib/Support/ZipUtils.cpp
  31. +2 −2 tests/benchmark/AddBench.cpp
  32. +2 −2 tests/benchmark/BatchGemmBench.cpp
  33. +2 −2 tests/benchmark/GemmBench.cpp
  34. +2 −2 tests/benchmark/GemmParallelBench.cpp
  35. +10 −10 tests/benchmark/RuntimeBench.cpp
  36. +2 −2 tests/benchmark/SLSBench.cpp
  37. +2 −2 tests/benchmark/TransposeBench.cpp
  38. +2 −2 tests/unittests/BackendCorrectnessTest.cpp
  39. +3 −3 tests/unittests/BackendTest.cpp
  40. +2 −2 tests/unittests/BackendTestUtils.cpp
  41. +2 −2 tests/unittests/BackendTestUtils.h
  42. +13 −13 tests/unittests/DeviceManagerTest.cpp
  43. +2 −2 tests/unittests/HabanaGlowTest.cpp
  44. +15 −15 tests/unittests/HostManagerTest.cpp
  45. +2 −2 tests/unittests/NNPIBackendTestUtils.h
  46. +1 −1 tests/unittests/PartitionerTest.cpp
  47. +4 −4 tests/unittests/ProvisionerTest.cpp
  48. +1 −1 tests/unittests/RecommendationSystemTest.cpp
  49. +3 −3 tests/unittests/Repro.cpp
  50. +3 −3 tests/unittests/StatsExporterTest.cpp
  51. +23 −23 tests/unittests/ThreadPoolExecutorTest.cpp
  52. +4 −1 tests/unittests/ThreadPoolTest.cpp
  53. +18 −18 tests/unittests/TraceEventsTest.cpp
  54. +5 −5 tools/loader/ImageClassifier.cpp
  55. +2 −2 tools/loader/Loader.cpp
  56. +2 −2 torch_glow/src/CachingGraphRunner.cpp
  57. +1 −1 torch_glow/src/PyTorchModelLoader.cpp
@@ -132,17 +132,17 @@ int main(int argc, char **argv) {

std::vector<std::unique_ptr<DeviceConfig>> configs;
for (unsigned int i = 0; i < numDevices; ++i) {
auto config = llvm::make_unique<DeviceConfig>(backend);
auto config = glow::make_unique<DeviceConfig>(backend);
configs.push_back(std::move(config));
}

std::unique_ptr<HostManager> hostManager =
llvm::make_unique<HostManager>(std::move(configs));
glow::make_unique<HostManager>(std::move(configs));

// If tracing is enabled, create a TraceContext to merge each run's events
// into.
if (!tracePath.empty()) {
traceContext = llvm::make_unique<TraceContext>(TraceLevel::STANDARD);
traceContext = glow::make_unique<TraceContext>(TraceLevel::STANDARD);
}

// Load model, create a context, and add to HostManager.
@@ -152,7 +152,7 @@ int main(int argc, char **argv) {
Placeholder *input;
PlaceholderList phList;

std::unique_ptr<Module> module = llvm::make_unique<Module>();
std::unique_ptr<Module> module = glow::make_unique<Module>();
TypeRef inputType = module->uniqueType(ElemKind::FloatTy, inputShape);
input = loadResnet50Model(inputType, module.get(), 0);
phList = module->getPlaceholders();
@@ -194,9 +194,9 @@ int main(int argc, char **argv) {
path, ImageNormalizationMode::k0to1, ImageChannelOrder::BGR,
ImageLayout::NCHW, imagenetNormMean, imagenetNormStd);
std::unique_ptr<ExecutionContext> context =
llvm::make_unique<ExecutionContext>();
glow::make_unique<ExecutionContext>();
context->setTraceContext(
llvm::make_unique<TraceContext>(TraceLevel::STANDARD));
glow::make_unique<TraceContext>(TraceLevel::STANDARD));

context->getPlaceholderBindings()->allocate(phList);
Tensor batch = image.getUnowned(inputShape);
@@ -146,9 +146,9 @@ int main(int argc, char **argv) {
supportedBackends.size());

for (unsigned i = 0, e = supportedBackends.size(); i < e; ++i) {
auto context = llvm::make_unique<ExecutionContext>();
auto context = glow::make_unique<ExecutionContext>();
context->setTraceContext(
llvm::make_unique<TraceContext>(TraceLevel::STANDARD));
glow::make_unique<TraceContext>(TraceLevel::STANDARD));
context->getPlaceholderBindings()->allocate(module.getPlaceholders());
updateInputPlaceholders(*(context->getPlaceholderBindings()), {input},
{&batch});
@@ -33,7 +33,7 @@ class DeviceBindings {
virtual ~DeviceBindings() {}

virtual std::unique_ptr<DeviceBindings> clone() {
return llvm::make_unique<DeviceBindings>(backend_);
return glow::make_unique<DeviceBindings>(backend_);
}
};

@@ -51,7 +51,7 @@ class ExecutionContext {

public:
ExecutionContext()
: placeholderBindings_(llvm::make_unique<PlaceholderBindings>()) {}
: placeholderBindings_(glow::make_unique<PlaceholderBindings>()) {}

ExecutionContext(std::unique_ptr<PlaceholderBindings> bindings)
: placeholderBindings_(std::move(bindings)) {}
@@ -108,10 +108,10 @@ class ExecutionContext {
ExecutionContext clone() {
if (deviceBindings_) {
return ExecutionContext(
llvm::make_unique<PlaceholderBindings>(placeholderBindings_->clone()),
glow::make_unique<PlaceholderBindings>(placeholderBindings_->clone()),
deviceBindings_->clone());
} else {
return ExecutionContext(llvm::make_unique<PlaceholderBindings>(
return ExecutionContext(glow::make_unique<PlaceholderBindings>(
placeholderBindings_->clone()));
}
}
@@ -78,7 +78,7 @@ class CommonOperatorLoader : public ProtobufLoader {
}

LoadWeightResult result;
result.t = llvm::make_unique<Tensor>();
result.t = glow::make_unique<Tensor>();

std::vector<size_t> dims;
for (unsigned i = 0; i < in.dimensions; ++i) {
@@ -143,8 +143,8 @@ class CommonOperatorLoader : public ProtobufLoader {
} else {
Type scalesTy(ElemKind::FloatTy, llvm::makeArrayRef({qparams}));
Type offsetsTy(ElemKind::Int32ITy, llvm::makeArrayRef({qparams}));
result.scales = llvm::make_unique<Tensor>((void *)in.scales, &scalesTy);
result.offsets = llvm::make_unique<Tensor>((void *)in.biases, &offsetsTy);
result.scales = glow::make_unique<Tensor>((void *)in.scales, &scalesTy);
result.offsets = glow::make_unique<Tensor>((void *)in.biases, &offsetsTy);
}

if (in.dataType == ONNXIFI_DATATYPE_UINT8) {
@@ -21,6 +21,7 @@
#include <glog/logging.h>

#include <cstdlib>
#include <memory>

namespace glow {

@@ -48,6 +49,41 @@ inline size_t alignedSize(size_t size, size_t alignment) {
return mod ? size + alignment - mod : size;
}

// Hand-rolled make_unique, per proposal N3656 (avoids depending on
// llvm::make_unique, which newer LLVM removes, or on C++14).

/// \brief Allocates a `new T` on the heap, forwarding \p ctorArgs to its
/// constructor, and returns a `std::unique_ptr<T>` owning the result.
///
/// Only participates in overload resolution for non-array `T`.
///
/// Example:
///
///   auto p = make_unique<int>();
///   auto p = make_unique<std::tuple<int, int>>(0, 1);
template <class T, class... CtorArgs>
typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(CtorArgs &&... ctorArgs) {
  return std::unique_ptr<T>(new T(std::forward<CtorArgs>(ctorArgs)...));
}

/// \brief Allocates a value-initialized `new T[n]` on the heap and returns a
/// `std::unique_ptr<T[]>` owning the array.
///
/// Only participates in overload resolution for array types of unknown
/// bound (e.g. `T[]`).
///
/// \param n size of the new array.
///
/// Example:
///
///   auto p = make_unique<int[]>(2); // value-initializes the array with 0's.
template <class T>
typename std::enable_if<std::is_array<T>::value && std::extent<T>::value == 0,
                        std::unique_ptr<T>>::type
make_unique(size_t n) {
  using Elem = typename std::remove_extent<T>::type;
  // The trailing `()` requests value-initialization (zeroing for scalars).
  return std::unique_ptr<T>(new Elem[n]());
}

/// Overload for arrays of known bound (e.g. `T[5]`), which `unique_ptr`
/// cannot usefully own. Deleted (as in N3656 / std::make_unique) so that
/// misuse fails at compile time with a clear error instead of an obscure
/// overload-resolution failure.
template <class T, class... Args>
typename std::enable_if<std::extent<T>::value != 0>::type
make_unique(Args &&...) = delete;

} // end namespace glow

#endif // GLOW_SUPPORT_MEMORY_H
@@ -390,7 +390,7 @@ bool CPUBackend::shouldLower(const Node *N) const {
std::unique_ptr<CompiledFunction> CPUBackend::createCompiledFunction(
std::unique_ptr<llvm::orc::GlowJIT> JIT,
runtime::RuntimeBundle &&runtimeBundle) const {
return llvm::make_unique<CPUFunction>(std::move(JIT),
return glow::make_unique<CPUFunction>(std::move(JIT),
std::move(runtimeBundle));
}

@@ -130,7 +130,7 @@ DeviceManager::generateDeviceConfigs(llvm::StringRef backendName) {
std::vector<std::unique_ptr<runtime::DeviceConfig>> configs;
auto deviceCount = numDevices(backendName);
for (int i = 0; i < deviceCount; i++) {
auto config = llvm::make_unique<runtime::DeviceConfig>(backendName);
auto config = glow::make_unique<runtime::DeviceConfig>(backendName);
config->deviceID = i;
configs.push_back(std::move(config));
}
@@ -260,7 +260,7 @@ class TensorHandle final {
static std::unique_ptr<synConvolutionParams> makeSynConvolutionParams(
llvm::ArrayRef<unsigned_t> kernel, llvm::ArrayRef<unsigned_t> stride,
llvm::ArrayRef<unsigned_t> pad, unsigned_t groups, bool doRelu) {
auto params = llvm::make_unique<synConvolutionParams>();
auto params = glow::make_unique<synConvolutionParams>();

// Kernel
params->kH = kernel[0];
@@ -288,7 +288,7 @@ static std::unique_ptr<ns_SpatialReduction::Params>
makeSynPoolParams(llvm::ArrayRef<unsigned_t> kernel,
llvm::ArrayRef<unsigned_t> stride,
llvm::ArrayRef<unsigned_t> pad) {
auto params = llvm::make_unique<ns_SpatialReduction::Params>();
auto params = glow::make_unique<ns_SpatialReduction::Params>();

// Kernel
params->kernel_w = kernel[0];
@@ -310,7 +310,7 @@ makeSynPoolParams(llvm::ArrayRef<unsigned_t> kernel,

static std::unique_ptr<synTransposeParams>
makeSynTransposeParams(llvm::ArrayRef<unsigned_t> shuffle) {
auto params = llvm::make_unique<synTransposeParams>();
auto params = glow::make_unique<synTransposeParams>();

params->tensorDim = shuffle.size();

@@ -329,7 +329,7 @@ makeSynTransposeParams(llvm::ArrayRef<unsigned_t> shuffle) {
static std::unique_ptr<synSliceAxisParams>
makeSynSliceAxisParams(unsigned axis, unsigned axes, unsigned outputAxisSize,
unsigned axisOffset) {
auto params = llvm::make_unique<synSliceAxisParams>();
auto params = glow::make_unique<synSliceAxisParams>();

// The axis complement must be taken since Habana's axes in reverse order
// compared to Glow.
@@ -342,7 +342,7 @@ makeSynSliceAxisParams(unsigned axis, unsigned axes, unsigned outputAxisSize,

static std::unique_ptr<ns_LrnKernel::Params>
makeLrnParams(float alpha, float beta, float knorm, int halfWindowSize) {
auto params = llvm::make_unique<ns_LrnKernel::Params>();
auto params = glow::make_unique<ns_LrnKernel::Params>();
params->alpha = alpha;
params->beta = beta;
params->knorm = knorm;
@@ -352,20 +352,20 @@ makeLrnParams(float alpha, float beta, float knorm, int halfWindowSize) {

static std::unique_ptr<ns_ConstantKernel::Params>
makeConstantParams(float value) {
auto params = llvm::make_unique<ns_ConstantKernel::Params>();
auto params = glow::make_unique<ns_ConstantKernel::Params>();
params->constant.f = value;
return params;
}

static std::unique_ptr<ns_FullyConnected::Params> makeFCFPParams(bool isRelu) {
auto params = llvm::make_unique<ns_FullyConnected::Params>();
auto params = glow::make_unique<ns_FullyConnected::Params>();
params->is_relu = isRelu;
return params;
}

static std::unique_ptr<ns_TileKernel::Params> makeTileParams(unsigned count,
unsigned axis) {
auto params = llvm::make_unique<ns_TileKernel::Params>();
auto params = glow::make_unique<ns_TileKernel::Params>();

// The repeat member of ns_TileKernel::Params has an explicit size of 4.
for (size_t i = 0; i < 4; ++i) {
@@ -382,11 +382,11 @@ static std::unique_ptr<ns_TileKernel::Params> makeTileParams(unsigned count,

static std::unique_ptr<unsigned> makeConcatParams(unsigned axis,
unsigned axes) {
return llvm::make_unique<unsigned>(axes - axis - 1);
return glow::make_unique<unsigned>(axes - axis - 1);
}

static std::unique_ptr<ns_Softmax::Params> makeSoftmaxParams() {
auto params = llvm::make_unique<ns_Softmax::Params>();
auto params = glow::make_unique<ns_Softmax::Params>();
params->dim = 0;
return params;
}
@@ -488,7 +488,7 @@ HabanaBackend::compile(Function *F, const BackendOptions &opts) const {
auto *NI = llvm::cast<HabanaFullyConnectedNode>(&I);

if (NI->getInput().getType()->isQuantizedType()) {
auto params = llvm::make_unique<synFCParams>();
auto params = glow::make_unique<synFCParams>();
params->activation.reluEnable = NI->getDoRelu();
chk(synFullyConnected(
tensors[NI->getInput()].get(), tensors[NI->getWeights()].get(),
@@ -562,7 +562,7 @@ HabanaBackend::compile(Function *F, const BackendOptions &opts) const {
}
case Kinded::Kind::BatchedReduceAddNodeKind: {
auto *RA = llvm::cast<BatchedReduceAddNode>(&I);
auto params = llvm::make_unique<ns_Reduction::Params>();
auto params = glow::make_unique<ns_Reduction::Params>();
params->reductionDimension =
RA->getBatch().dims().size() - RA->getAxis() - 1;

@@ -713,7 +713,7 @@ HabanaBackend::compile(Function *F, const BackendOptions &opts) const {
// Let GEMM run on MME via FullyConnected node.
// MME only runs on quantized types, e.g., int8 or int16.
// The default params are OK - don't transpose A and B
auto params = llvm::make_unique<synGEMMParams>();
auto params = glow::make_unique<synGEMMParams>();
std::vector<synTensor> inputs;
inputs.push_back(tensors[MI->getLHS()].get());
inputs.push_back(tensors[MI->getRHS()].get());
@@ -1021,7 +1021,7 @@ HabanaBackend::compile(Function *F, const BackendOptions &opts) const {
std::vector<synTensor> inputs = {tensors[gather->getData()].get(),
tensors[gather->getIndices()].get()};

auto params = llvm::make_unique<ns_GatherKernel::Params>();
auto params = glow::make_unique<ns_GatherKernel::Params>();
params->axis =
gather->getData().dims().size() - gather->getBatchDims() - 1;

@@ -1041,7 +1041,7 @@ HabanaBackend::compile(Function *F, const BackendOptions &opts) const {
tensors[BBC->getLambda1()].get(),
tensors[BBC->getLambda2()].get(),
};
auto params = llvm::make_unique<float>(BBC->getEpsilon());
auto params = glow::make_unique<float>(BBC->getEpsilon());
chk(synCreateGenericNode(
inputs.data(), &tensors[BBC].get(), inputs.size(), 1, params.get(),
"batch_box_cox_f32", BBC->getName().data(), nullptr, nullptr));
@@ -1072,7 +1072,7 @@ HabanaBackend::compile(Function *F, const BackendOptions &opts) const {
chk(synDestroyGraph());

return Expected<std::unique_ptr<CompiledFunction>>(
llvm::make_unique<HabanaFunction>(runtime::RuntimeBundle::create(*F),
glow::make_unique<HabanaFunction>(runtime::RuntimeBundle::create(*F),
recipeName, F));
}

@@ -105,8 +105,8 @@ Error HabanaDeviceManager::init() {
RETURN_IF_ERR(updateMemoryUsage());

// Create thread pools for running functions and waiting on function results.
runPool_ = llvm::make_unique<ThreadPool>(numRunners_);
waitPool_ = llvm::make_unique<ThreadPool>(numWaiters_);
runPool_ = glow::make_unique<ThreadPool>(numRunners_);
waitPool_ = glow::make_unique<ThreadPool>(numWaiters_);

if (!runPool_ || !waitPool_) {
RETURN_ERR("Failed to create HabanaDeviceManager thread pools");
@@ -184,7 +184,7 @@ void HabanaDeviceManager::addNetwork(const Module *module,
std::tie(std::ignore, inserted) = functions_.insert(std::make_pair(
func.first,
HabanaFunctionMeta{topologyId, habanaFunction,
llvm::make_unique<HabanaIOBufferPool>(
glow::make_unique<HabanaIOBufferPool>(
deviceId_, habanaFunction->getInputs(),
habanaFunction->getOutputs())}));

@@ -334,7 +334,7 @@ void HabanaDeviceManager::runFunctionImpl(RunIdentifierTy runId,

// Execute the function.
auto deviceBindings =
llvm::make_unique<HabanaBindings>(deviceId_, topologyId);
glow::make_unique<HabanaBindings>(deviceId_, topologyId);
deviceBindings->setIOBuffer(ioBufferPool->get());
ctx->setDeviceBindings(std::move(deviceBindings));

@@ -66,7 +66,7 @@ HabanaIOBufferPool::HabanaIOBufferPool(uint32_t deviceId,
uint8_t *copyOffset = buffer_;
for (unsigned i = 0; i < numBuffers_; ++i) {
ioBuffers_.push(
llvm::make_unique<HabanaIOBuffer>(deviceId_, copyOffset, offsets_));
glow::make_unique<HabanaIOBuffer>(deviceId_, copyOffset, offsets_));
copyOffset += perBufferSize_;
}
}
@@ -214,10 +214,10 @@ static Error dumpTopologyInfo(uint32_t deviceId, uint64_t topologyId) {
numOfIntermediates));

using TensorNames = char[ENQUEUE_TENSOR_NAME_MAX_SIZE];
auto inputTensorNames = llvm::make_unique<TensorNames[]>(numOfInputs);
auto outputTensorNames = llvm::make_unique<TensorNames[]>(numOfOutputs);
auto inputTensorNames = glow::make_unique<TensorNames[]>(numOfInputs);
auto outputTensorNames = glow::make_unique<TensorNames[]>(numOfOutputs);
auto intermediateTensorNames =
llvm::make_unique<TensorNames[]>(numOfIntermediates);
glow::make_unique<TensorNames[]>(numOfIntermediates);

chk(synGetTensorsName(deviceId, topologyId, inputTensorNames.get(),
numOfInputs, outputTensorNames.get(), numOfOutputs,
@@ -164,7 +164,7 @@ class HabanaBindings : public DeviceBindings {
virtual ~HabanaBindings() {}

std::unique_ptr<DeviceBindings> clone() override {
return llvm::make_unique<HabanaBindings>(deviceId_, topologyId_);
return glow::make_unique<HabanaBindings>(deviceId_, topologyId_);
}

uint32_t getDeviceId() const { return deviceId_; }
@@ -64,7 +64,7 @@ Interpreter::compileIRWithoutConstants(std::unique_ptr<IRFunction> IR) const {
runtime::RuntimeBundle bundle = runtime::RuntimeBundle::create(
*IR, constantWeightsAllocator, placeholderWeightsAllocator,
activationsAllocator);
return llvm::make_unique<InterpreterFunction>(std::move(IR),
return glow::make_unique<InterpreterFunction>(std::move(IR),
std::move(bundle));
}

@@ -333,7 +333,7 @@ NNPIBackend::compile(Function *F, const BackendOptions &opts) const {
F->dumpDAG(fname);
}
std::unique_ptr<NNPICompiledFunction> compiledFunc =
llvm::make_unique<NNPICompiledFunction>(F);
glow::make_unique<NNPICompiledFunction>(F);
auto compileHasError = compiledFunc->compile(F, opts);
if (compileHasError) {
return std::move(compileHasError);

0 comments on commit f299e9c

Please sign in to comment.
You can’t perform that action at this time.