Skip to content

Commit

Permalink
Removed context from compile
Browse files Browse the repository at this point in the history
  • Loading branch information
gcatron committed Nov 13, 2018
1 parent a5814a4 commit c5b88ef
Show file tree
Hide file tree
Showing 18 changed files with 58 additions and 79 deletions.
8 changes: 3 additions & 5 deletions include/glow/Backends/Backend.h
Expand Up @@ -40,10 +40,8 @@ class Backend {
/// Dtor.
virtual ~Backend() = default;

/// Generate code for input function \param F. \p ctx is the context that maps
/// the graph to the concrete execution environment for a specific function.
virtual std::unique_ptr<CompiledFunction>
compile(Function *F, const Context &ctx) const = 0;
/// Generate code for input function \param F.
virtual std::unique_ptr<CompiledFunction> compile(Function *F) const = 0;

/// Save the bundle for \p F for a later standalone execution
/// in \p outputDir. Make \p networkName the function name for
Expand Down Expand Up @@ -93,7 +91,7 @@ class BackendUsingGlowIR : public Backend {
/// maps the graph to the concrete execution environment for a specific
/// function. This is used only for unit testing.
virtual std::unique_ptr<CompiledFunction>
compileIR(std::unique_ptr<IRFunction> IR, const Context &ctx) const = 0;
compileIR(std::unique_ptr<IRFunction> IR) const = 0;
};

} // namespace glow
Expand Down
2 changes: 1 addition & 1 deletion include/glow/Backends/CompiledFunction.h
Expand Up @@ -60,7 +60,7 @@ class CompiledFunction {
virtual ~CompiledFunction() = default;
/// Execute the network and allocate Placeholder memory with given
/// \p ctx providing mapping between Placeholder and populated tensor.
virtual void execute(Context &ctx) = 0;
virtual void execute() = 0;

/// Does any needed initialization work for the Backend.
/// This includes device init constant memory allocation and copying to
Expand Down
8 changes: 3 additions & 5 deletions lib/Backends/CPU/CPUBackend.cpp
Expand Up @@ -98,8 +98,7 @@ CPUBackend::createIRGen(IRFunction *IR,
}

std::unique_ptr<CompiledFunction>
CPUBackend::compileIR(std::unique_ptr<IRFunction> IR,
const Context &ctx) const {
CPUBackend::compileIR(std::unique_ptr<IRFunction> IR) const {
AllocationsInfo allocationsInfo;
std::unique_ptr<LLVMIRGen> irgen = createIRGen(IR.get(), allocationsInfo);
irgen->initTargetMachine(target.empty() ? "" : target.getValue(),
Expand All @@ -121,10 +120,9 @@ CPUBackend::compileIR(std::unique_ptr<IRFunction> IR,
return llvm::make_unique<CPUFunction>(std::move(JIT), runtimeInfo);
}

std::unique_ptr<CompiledFunction>
CPUBackend::compile(Function *F, const Context &ctx) const {
std::unique_ptr<CompiledFunction> CPUBackend::compile(Function *F) const {
auto IR = generateAndOptimizeIR(F, shouldShareBuffers());
return compileIR(std::move(IR), ctx);
return compileIR(std::move(IR));
}

void CPUBackend::save(Function *F, llvm::StringRef outputDir,
Expand Down
5 changes: 2 additions & 3 deletions lib/Backends/CPU/CPUBackend.h
Expand Up @@ -43,10 +43,9 @@ class CPUBackend : public BackendUsingGlowIR {
~CPUBackend() override = default;

std::unique_ptr<CompiledFunction>
compileIR(std::unique_ptr<IRFunction> IR, const Context &ctx) const override;
compileIR(std::unique_ptr<IRFunction> IR) const override;

std::unique_ptr<CompiledFunction> compile(Function *F,
const Context &ctx) const override;
std::unique_ptr<CompiledFunction> compile(Function *F) const override;

void save(Function *F, llvm::StringRef outputDir,
llvm::StringRef networkName) const override;
Expand Down
7 changes: 1 addition & 6 deletions lib/Backends/CPU/CPUFunction.cpp
Expand Up @@ -77,10 +77,7 @@ void CPUFunction::tearDownRuns() {
}
}

void CPUFunction::execute(Context &ctx) {
setupRuns();
beforeRun(ctx);

void CPUFunction::execute() {
auto sym = JIT_->findSymbol("jitmain");
assert(sym && "Unable to JIT the code!");
using JitFuncType =
Expand All @@ -91,9 +88,7 @@ void CPUFunction::execute(Context &ctx) {
JitFuncType funcPtr = reinterpret_cast<JitFuncType>(address.get());
funcPtr(runtimeBundle_.constants, baseMutableWeightVarsAddress_,
baseActivationsAddress_);
afterRun(ctx);
} else {
GLOW_ASSERT(false && "Error getting address.");
}
tearDownRuns();
}
2 changes: 1 addition & 1 deletion lib/Backends/CPU/CPUFunction.h
Expand Up @@ -50,7 +50,7 @@ class CPUFunction final : public CompiledFunction {
/// \name CompiledFunction interface
///@{
~CPUFunction() override;
void execute(Context &ctx) override;
void execute() override;
///@}
};
} // end namespace glow
Expand Down
10 changes: 4 additions & 6 deletions lib/Backends/Interpreter/Interpreter.cpp
Expand Up @@ -46,17 +46,15 @@ runtime::RuntimeBundle generateInterpreterRuntimeBundle(const IRFunction *F) {
return bundle;
}

std::unique_ptr<CompiledFunction>
Interpreter::compile(Function *F, const Context &ctx) const {
std::unique_ptr<CompiledFunction> Interpreter::compile(Function *F) const {
auto IR = generateAndOptimizeIR(F, shouldShareBuffers());
return compileIR(std::move(IR), ctx);
return compileIR(std::move(IR));
}

std::unique_ptr<CompiledFunction>
Interpreter::compileIR(std::unique_ptr<IRFunction> IR,
const Context &ctx) const {
Interpreter::compileIR(std::unique_ptr<IRFunction> IR) const {
runtime::RuntimeBundle bundle = generateInterpreterRuntimeBundle(IR.get());
return llvm::make_unique<InterpreterFunction>(std::move(IR), ctx, bundle);
return llvm::make_unique<InterpreterFunction>(std::move(IR), bundle);
}

bool Interpreter::isOpSupported(Kinded::Kind opKind, ElemKind elementTy) const {
Expand Down
5 changes: 2 additions & 3 deletions lib/Backends/Interpreter/Interpreter.h
Expand Up @@ -35,10 +35,9 @@ class Interpreter final : public BackendUsingGlowIR {
~Interpreter() override = default;

std::unique_ptr<CompiledFunction>
compileIR(std::unique_ptr<IRFunction> IR, const Context &ctx) const override;
compileIR(std::unique_ptr<IRFunction> IR) const override;

std::unique_ptr<CompiledFunction> compile(Function *F,
const Context &ctx) const override;
std::unique_ptr<CompiledFunction> compile(Function *F) const override;

bool isOpSupported(Kinded::Kind opKind, ElemKind elementTy) const override;

Expand Down
7 changes: 1 addition & 6 deletions lib/Backends/Interpreter/InterpreterFunction.cpp
Expand Up @@ -25,7 +25,6 @@
using namespace glow;

InterpreterFunction::InterpreterFunction(std::unique_ptr<IRFunction> F,
const Context &ctx,
const runtime::RuntimeBundle &bundle)
: F_(std::move(F)), bundle_(bundle) {}

Expand Down Expand Up @@ -135,9 +134,7 @@ void InterpreterFunction::deleteTensor(const Value *v) {
tensors_.erase(it);
}

void InterpreterFunction::execute(Context &ctx) {
setupRuns();
beforeRun(ctx);
void InterpreterFunction::execute() {
// Do the forward pass.
#define DEF_VALUE(CLASS, NAME)
#define DEF_INSTR(CLASS, NAME) \
Expand All @@ -155,6 +152,4 @@ void InterpreterFunction::execute(Context &ctx) {
llvm_unreachable("Invalid instruction.");
}
}
afterRun(ctx);
tearDownRuns();
}
4 changes: 2 additions & 2 deletions lib/Backends/Interpreter/InterpreterFunction.h
Expand Up @@ -53,7 +53,7 @@ class InterpreterFunction final : public CompiledFunction {
runtime::RuntimeBundle bundle_;

public:
InterpreterFunction(std::unique_ptr<IRFunction> F, const Context &ctx,
InterpreterFunction(std::unique_ptr<IRFunction> F,
const runtime::RuntimeBundle &bundle);

/// \name CompiledFunction interface
Expand All @@ -71,7 +71,7 @@ class InterpreterFunction final : public CompiledFunction {
/// Final cleanup, remove created constant Tensors.
void tearDownRuns() override;

void execute(Context &ctx) override;
void execute() override;
///@}

private:
Expand Down
12 changes: 4 additions & 8 deletions lib/Backends/OpenCL/OpenCL.cpp
Expand Up @@ -618,9 +618,7 @@ static void topK(Tensor &outW, Tensor &indW, Tensor &inW, size_t k) {
}
}
}
void OpenCLFunction::execute(Context &ctx) {
setupRuns();
beforeRun(ctx);
void OpenCLFunction::execute() {
for (const auto &I : F_->getInstrs()) {
// The kernels are named after the name of the instruction, plus the "W"
// suffix to prevent name collisions for functions like 'tanh' that are also
Expand Down Expand Up @@ -1386,7 +1384,6 @@ void OpenCLFunction::execute(Context &ctx) {
clReleaseKernel(kl.kernel_);
}
kernelLaunches_.clear();
afterRun(ctx);
}

uint64_t OpenCLFunction::copyValueToDevice(const Value *v, void *buf) {
Expand Down Expand Up @@ -1608,13 +1605,12 @@ cl_mem OpenCLFunction::allocDeviceBuffer(uint64_t size) {
void OpenCLFunction::freeDeviceBuffer(cl_mem buf) { clReleaseMemObject(buf); }

std::unique_ptr<CompiledFunction>
OCLBackend::compileIR(std::unique_ptr<IRFunction> IR, const Context &) const {
OCLBackend::compileIR(std::unique_ptr<IRFunction> IR) const {
runtime::RuntimeBundle bundle = generateRuntimeBundle(IR.get());
return llvm::make_unique<OpenCLFunction>(std::move(IR), bundle);
}

std::unique_ptr<CompiledFunction>
OCLBackend::compile(Function *F, const Context &ctx) const {
std::unique_ptr<CompiledFunction> OCLBackend::compile(Function *F) const {
auto IR = generateAndOptimizeIR(F, shouldShareBuffers());
return compileIR(std::move(IR), ctx);
return compileIR(std::move(IR));
}
7 changes: 3 additions & 4 deletions lib/Backends/OpenCL/OpenCL.h
Expand Up @@ -97,7 +97,7 @@ class OpenCLFunction final : public CompiledFunction {
///@{
~OpenCLFunction() override;

void execute(Context &ctx) override;
void execute() override;
///@}
/// Allocates on device buffer and copies Constant weights to device.
void setupRuns() override;
Expand Down Expand Up @@ -161,10 +161,9 @@ class OCLBackend final : public BackendUsingGlowIR {
~OCLBackend() override = default;

std::unique_ptr<CompiledFunction>
compileIR(std::unique_ptr<IRFunction> IR, const Context &ctx) const override;
compileIR(std::unique_ptr<IRFunction> IR) const override;

std::unique_ptr<CompiledFunction> compile(Function *F,
const Context &ctx) const override;
std::unique_ptr<CompiledFunction> compile(Function *F) const override;

bool transformPostLowering(Function *F, CompilationMode mode) const override;

Expand Down
24 changes: 9 additions & 15 deletions lib/ExecutionEngine/ExecutionEngine.cpp
Expand Up @@ -76,19 +76,13 @@ void glow::updateInputPlaceholdersByName(Context &ctx, Module *mod,

void ExecutionEngine::run(Context &ctx) {
assert(function_ && "No function has been compiled");
// TODO call runtime functions from EE instead of in the compiled function.
// copyFunctionToDevice()
// copyConstantsToDevice()
// allocateMutableBuffersOnDevice()
// copyInputsToDevice(ctx)
// copyOutputsFromDevice(ctx)
// freeAllocations()
// We are working toward moving memory allocation and initialization to
// runtime. As an intermediate the runtime functions are being called within
// execute to maintain the current API. Once all backends are ported the API
// will expose the runtime functions from the ExecutionEngine interface. This
// is related to Issue #1904.
function_->execute(ctx);
// Make sure that the context has backing tensors for all placeholders.
ctx.allocate(M_.getPlaceholders());
function_->setupRuns();
function_->beforeRun(ctx);
function_->execute();
function_->afterRun(ctx);
function_->tearDownRuns();
}

void glow::runBatch(ExecutionEngine &EE, Context &ctx, size_t iterations,
Expand Down Expand Up @@ -157,8 +151,8 @@ void ExecutionEngine::optimizeFunction(CompilationMode mode, Function *F) {
void ExecutionEngine::compile(CompilationMode mode, Function *F, Context &ctx) {
optimizeFunction(mode, F);
// Make sure that the context has backing tensors for all placeholders.
ctx.allocate(M_.getPlaceholders());
function_ = backend_->compile(F, ctx);
// ctx.allocate(M_.getPlaceholders());
function_ = backend_->compile(F);
}

void ExecutionEngine::save(CompilationMode mode, Function *F,
Expand Down
2 changes: 2 additions & 0 deletions lib/Onnxifi/Base.cpp
Expand Up @@ -109,6 +109,8 @@ void Graph::run(

// Run inference.
auto &EE = backendPtr_->getEE();
auto &mod = EE.getModule();
ctx_.allocate(mod.getPlaceholders());
updateInputPlaceholders(ctx_, phs, tensors);
EE.run(ctx_);

Expand Down
16 changes: 10 additions & 6 deletions tests/unittests/BackendCorrectnessTest.cpp
Expand Up @@ -236,13 +236,12 @@ class MockCPUBackend : public BackendUsingGlowIR {
backend_.reset(
static_cast<BackendUsingGlowIR *>(createBackend(BackendKind::CPU)));
}
std::unique_ptr<CompiledFunction> compile(Function *F,
const Context &ctx) const override {
return backend_->compile(F, ctx);
std::unique_ptr<CompiledFunction> compile(Function *F) const override {
return backend_->compile(F);
}
std::unique_ptr<CompiledFunction>
compileIR(std::unique_ptr<IRFunction> IR, const Context &ctx) const override {
return backend_->compileIR(std::move(IR), ctx);
compileIR(std::unique_ptr<IRFunction> IR) const override {
return backend_->compileIR(std::move(IR));
}
bool isOpSupported(Kinded::Kind opKind, ElemKind elementTy) const override {
return true;
Expand Down Expand Up @@ -309,7 +308,12 @@ TEST_P(CPUOnly, dataParallelStackingTest) {
}

MockCPUBackend backend;
backend.compileIR(std::move(M), ctx)->execute(ctx);
auto function = backend.compileIR(std::move(M));
function->setupRuns();
function->beforeRun(ctx);
function->execute();
function->afterRun(ctx);
function->tearDownRuns();
auto H = outputTensor->getHandle();
EXPECT_EQ(H.at(0), 3);
EXPECT_EQ(H.at(1), 4);
Expand Down
8 changes: 6 additions & 2 deletions tests/unittests/BackendTest.cpp
Expand Up @@ -160,8 +160,12 @@ TEST_P(BackendTest, debugPrint) {

std::unique_ptr<BackendUsingGlowIR> backend(
static_cast<BackendUsingGlowIR *>(createBackend(GetParam())));
auto function = backend->compileIR(std::move(IR), ctx);
function->execute(ctx);
auto function = backend->compileIR(std::move(IR));
function->setupRuns();
function->beforeRun(ctx);
function->execute();
function->afterRun(ctx);
function->tearDownRuns();
}

/// This test checks that we can compile a function without depending on the
Expand Down
5 changes: 2 additions & 3 deletions tests/unittests/BackendTestUtils.h
Expand Up @@ -23,14 +23,13 @@ namespace glow {
/// MockBackend used only for unit testing.
class MockBackend : public Backend {
class MockFunction : public CompiledFunction {
void execute(Context &ctx) override{};
void execute() override{};
void setupRuns() override{};
void beforeRun(const Context &ctx) override{};
void afterRun(const Context &ctx) override{};
void tearDownRuns() override{};
};
std::unique_ptr<CompiledFunction> compile(Function *F,
const Context &ctx) const override {
std::unique_ptr<CompiledFunction> compile(Function *F) const override {
return llvm::make_unique<MockFunction>();
}
bool isOpSupported(Kinded::Kind opKind, ElemKind elementTy) const override {
Expand Down
5 changes: 2 additions & 3 deletions tests/unittests/quantizationTest.cpp
Expand Up @@ -731,9 +731,8 @@ class MockQuantBackend : public Backend {
MockQuantBackend() {
backend_.reset(createBackend(BackendKind::Interpreter));
}
std::unique_ptr<CompiledFunction> compile(Function *F,
const Context &ctx) const override {
return backend_->compile(F, ctx);
std::unique_ptr<CompiledFunction> compile(Function *F) const override {
return backend_->compile(F);
}
bool isOpSupported(Kinded::Kind opKind, ElemKind elementTy) const override {
if (opKind == Kinded::Kind::SoftMaxNodeKind ||
Expand Down

0 comments on commit c5b88ef

Please sign in to comment.