Skip to content
Permalink
Browse files

Replace ExecutionEngine with ExecutionEngine2 [NFC] (#3388)

Summary:
This PR replaces the old EE with the new EE2: it removes the old EE and BackendTestUtils, and renames EE2 and BackendTestUtils2 to take their place. This PR also updates all instances of EE2 and BackendTestUtils2.

Documentation: Coming soon. There will be a follow-up PR to update the docs and close out the port.

Progress on #3239
Pull Request resolved: #3388

Test Plan: Verify everything builds, and unit tests pass. NFC just mechanical.

Reviewed By: bertmaher

Differential Revision: D16671350

Pulled By: gcatron

fbshipit-source-id: ff4f1ee0c9a6bb87764ad12cbb4414832a4f04d9
  • Loading branch information...
gcatron authored and facebook-github-bot committed Aug 6, 2019
1 parent 41a964d commit 3bd95b6ecf0a70e6c478cf1aa9a333776391bd62
Showing with 972 additions and 2,765 deletions.
  1. +9 −9 examples/CMakeLists.txt
  2. +6 −7 examples/char-rnn.cpp
  3. +5 −5 examples/cifar10.cpp
  4. +3 −4 examples/fr2en.cpp
  5. +3 −3 examples/lenet-loader.cpp
  6. +11 −11 examples/mnist.cpp
  7. +4 −4 examples/ptb.cpp
  8. +3 −3 examples/resnet-runtime.cpp
  9. +3 −3 examples/resnet-verify.cpp
  10. +3 −3 examples/tracing-compare.cpp
  11. +1 −1 examples/training/resnet50/CMakeLists.txt
  12. +2 −2 examples/training/resnet50/main.cpp
  13. +38 −50 include/glow/ExecutionEngine/ExecutionEngine.h
  14. +0 −157 include/glow/ExecutionEngine/ExecutionEngine2.h
  15. +1 −1 include/glow/Importer/ProtobufLoader.h
  16. +0 −12 lib/ExecutionEngine/CMakeLists.txt
  17. +47 −107 lib/ExecutionEngine/ExecutionEngine.cpp
  18. +0 −211 lib/ExecutionEngine/ExecutionEngine2.cpp
  19. +1 −1 lib/Onnxifi/Base.h
  20. +2 −2 lib/Onnxifi/CMakeLists.txt
  21. +1 −1 lib/Onnxifi/InlineOnnxifi.h
  22. +3 −3 tests/stress/CMakeLists.txt
  23. +6 −6 tests/stress/ParameterSweepTest.cpp
  24. +3 −3 tests/stress/SparseLengthsSumTest.cpp
  25. +2 −2 tests/unittests/BackendCorrectnessTest.cpp
  26. +10 −10 tests/unittests/BackendTest.cpp
  27. +301 −184 tests/unittests/BackendTestUtils.cpp
  28. +10 −2 tests/unittests/BackendTestUtils.h
  29. +0 −1,134 tests/unittests/BackendTestUtils2.cpp
  30. +0 −311 tests/unittests/BackendTestUtils2.h
  31. +27 −42 tests/unittests/CMakeLists.txt
  32. +77 −77 tests/unittests/Caffe2ImporterTest.cpp
  33. +31 −31 tests/unittests/DeviceManagerTest.cpp
  34. +3 −3 tests/unittests/GemmTest.cpp
  35. +21 −21 tests/unittests/GradCheckTest.cpp
  36. +11 −11 tests/unittests/GraphGradTest.cpp
  37. +18 −18 tests/unittests/GraphTest.cpp
  38. +2 −2 tests/unittests/HabanaBackendTest.cpp
  39. +8 −8 tests/unittests/HyphenTest.cpp
  40. +1 −1 tests/unittests/IROptTest.cpp
  41. +55 −57 tests/unittests/MLTest.cpp
  42. +2 −2 tests/unittests/OCLTest.cpp
  43. +2 −2 tests/unittests/OnnxExporterTest.cpp
  44. +75 −75 tests/unittests/OnnxImporterTest.cpp
  45. +2 −2 tests/unittests/OperatorGradTest.cpp
  46. +59 −60 tests/unittests/OperatorTest.cpp
  47. +27 −27 tests/unittests/PartitionerTest.cpp
  48. +31 −32 tests/unittests/QuantizationTest.cpp
  49. +5 −5 tests/unittests/RecommendationSystemTest.cpp
  50. +25 −25 tests/unittests/TraceEventsTest.cpp
  51. +2 −2 tests/unittests/TypeAToTypeBFunctionConverterTest.cpp
  52. +3 −3 tools/loader/CMakeLists.txt
  53. +1 −1 tools/loader/ImageClassifier.cpp
  54. +1 −1 tools/loader/Loader.h
  55. +1 −1 tools/loader/TextTranslator.cpp
  56. +1 −1 torch_glow/src/CMakeLists.txt
  57. +2 −2 torch_glow/src/CachingGraphRunner.h
  58. +1 −1 torch_glow/src/PyTorchLoaderTest.cpp
@@ -3,7 +3,7 @@ add_executable(cifar10
target_link_libraries(cifar10
PRIVATE
Backends
ExecutionEngine2
ExecutionEngine
Graph
IR
Support
@@ -14,7 +14,7 @@ add_executable(mnist
target_link_libraries(mnist
PRIVATE
Backends
ExecutionEngine2
ExecutionEngine
Graph
Importer
IR
@@ -27,7 +27,7 @@ add_executable(ptb
target_link_libraries(ptb
PRIVATE
Backends
ExecutionEngine2
ExecutionEngine
Graph
IR
Support
@@ -38,7 +38,7 @@ add_executable(char-rnn
target_link_libraries(char-rnn
PRIVATE
Backends
ExecutionEngine2
ExecutionEngine
Graph
IR
GraphOptimizer
@@ -49,7 +49,7 @@ add_executable(fr2en
target_link_libraries(fr2en
PRIVATE
Base
ExecutionEngine2
ExecutionEngine
IR
GraphOptimizer
Quantization
@@ -60,7 +60,7 @@ add_executable(lenet-loader
lenet-loader.cpp)
target_link_libraries(lenet-loader
PRIVATE
ExecutionEngine2
ExecutionEngine
Graph
Importer
Support)
@@ -70,7 +70,7 @@ if(GLOW_WITH_CPU)
resnet-verify.cpp)
target_link_libraries(resnet-verify
PRIVATE
ExecutionEngine2
ExecutionEngine
Graph
Importer)

@@ -80,7 +80,7 @@ if(GLOW_WITH_CPU)
PRIVATE
Backends
ExecutionContext
ExecutionEngine2
ExecutionEngine
HostManager
Partitioner
Graph
@@ -93,7 +93,7 @@ if(GLOW_WITH_CPU)
PRIVATE
Backend
Backends
ExecutionEngine2
ExecutionEngine
Graph
Importer
GraphOptimizer)
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"
#include "glow/IR/IR.h"
#include "glow/Optimizer/GraphOptimizer/GraphOptimizer.h"
@@ -220,8 +220,7 @@ int main(int argc, char **argv) {
CHECK_GT(text.size(), numSteps) << "Text is too short";
TrainingConfig TC;

// ExecutionEngine2 EEI(executionBackend);
ExecutionEngine2 EET(executionBackend);
ExecutionEngine EET(executionBackend);
TC.learningRate = 0.001;
TC.momentum = 0.9;
TC.batchSize = minibatchSize;
@@ -252,10 +251,10 @@ int main(int argc, char **argv) {

// Train the network on the whole input.
LOG(INFO) << "Iteration " << i + 1 << "/" << numEpochs;
runBatch2(EET, trainingBindings, batchSize / minibatchSize, sampleCounter,
{XT, YT}, {&thisCharTrain, &nextCharTrain});
runBatch(EET, trainingBindings, batchSize / minibatchSize, sampleCounter,
{XT, YT}, {&thisCharTrain, &nextCharTrain});

ExecutionEngine2 EEO(executionBackend);
ExecutionEngine EEO(executionBackend);
inferBindings.clear();
auto &mod = EEO.getModule();
auto OF =
@@ -285,7 +284,7 @@ int main(int argc, char **argv) {
// Generate a sentence by running inference over and over again.
for (unsigned i = 0; i < generateChars; i++) {
// Generate a char:
updateInputPlaceholders2(inferBindings, {X}, {&currCharInfer});
updateInputPlaceholders(inferBindings, {X}, {&currCharInfer});
EEO.run(inferBindings);

// Pick a char at random from the softmax distribution.
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"
#include "glow/Support/Support.h"

@@ -157,7 +157,7 @@ void testCIFAR10() {
// Construct the network:
TrainingConfig TC;

ExecutionEngine2 EE(executionBackend);
ExecutionEngine EE(executionBackend);
PlaceholderBindings bindings;

TC.learningRate = 0.001;
@@ -206,15 +206,15 @@ void testCIFAR10() {

// Bind the images tensor to the input array A, and the labels tensor
// to the softmax node SM.
runBatch2(EE, bindings, reportRate, sampleCounter, {A, E},
{&images, &labels}, tfName);
runBatch(EE, bindings, reportRate, sampleCounter, {A, E},
{&images, &labels}, tfName);

unsigned score = 0;

for (unsigned int i = 0; i < 100 / minibatchSize; i++) {
Tensor sample(ElemKind::FloatTy, {minibatchSize, 32, 32, 3});
sample.copyConsecutiveSlices(&images, minibatchSize * i);
updateInputPlaceholders2(bindings, {A}, {&sample});
updateInputPlaceholders(bindings, {A}, {&sample});
EE.run(bindings);

for (unsigned int iter = 0; iter < minibatchSize; iter++) {
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"
#include "glow/Optimizer/GraphOptimizer/GraphOptimizer.h"
#include "glow/Quantization/Quantization.h"
@@ -126,7 +126,7 @@ void loadMatrixFromFile(llvm::StringRef filename, Tensor &result) {
/// few references to input/output Variables.
struct Model {
unsigned batchSize_;
ExecutionEngine2 EE_{ExecutionBackend};
ExecutionEngine EE_{ExecutionBackend};
Function *F_;
Vocabulary en_, fr_;
Placeholder *input_;
@@ -376,8 +376,7 @@ void Model::translate(const std::vector<std::string> &batch) {
(words.size() - 1) + j * MAX_LENGTH;
}

updateInputPlaceholders2(bindings, {input_, seqLength_},
{&input, &seqLength});
updateInputPlaceholders(bindings, {input_, seqLength_}, {&input, &seqLength});
EE_.run(bindings);

auto OH = bindings.get(output_)->getHandle<int64_t>();
@@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "glow/Base/Image.h"
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Importer/Caffe2ModelLoader.h"
#include "glow/Support/Error.h"

@@ -24,7 +24,7 @@ using namespace glow;
/// inference.
int main() {
glow::PlaceholderBindings bindings;
glow::ExecutionEngine2 EE;
glow::ExecutionEngine EE;
auto &mod = EE.getModule();
auto *F = mod.createFunction("lenet_mnist");
auto *inputType = mod.uniqueType(glow::ElemKind::FloatTy, {1, 1, 28, 28});
@@ -50,7 +50,7 @@ int main() {

// Allocate memory for input and bind it to the placeholders.
bindings.allocate(mod.getPlaceholders());
glow::updateInputPlaceholders2(bindings, {input}, {&batch});
glow::updateInputPlaceholders(bindings, {input}, {&batch});

// Perform inference.
EE.run(bindings);
@@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "glow/Base/Image.h"
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"
#include "glow/Importer/Caffe2ModelLoader.h"
#include "glow/Optimizer/GraphOptimizer/GraphOptimizer.h"
@@ -82,7 +82,7 @@ unsigned loadMNIST(Tensor &imageInputs, Tensor &labelInputs) {
return numImages;
}

void createModel(ExecutionEngine2 &EE, Function *F,
void createModel(ExecutionEngine &EE, Function *F,
PlaceholderBindings &bindings, unsigned minibatchSize,
Placeholder *&inputPH, Placeholder *&outputPH,
Placeholder *&selectedPH) {
@@ -107,8 +107,8 @@ void createModel(ExecutionEngine2 &EE, Function *F,
outputPH = result->getPlaceholder();
}

void trainModel(ExecutionEngine2 &EE, PlaceholderBindings &bindings,
Function *F, unsigned minibatchSize, unsigned numIterations,
void trainModel(ExecutionEngine &EE, PlaceholderBindings &bindings, Function *F,
unsigned minibatchSize, unsigned numIterations,
Tensor &imageInputs, Tensor &labelInputs, Placeholder *inputPH,
Placeholder *selectedPH) {
llvm::Timer timer("Training", "Training");
@@ -141,14 +141,14 @@ void trainModel(ExecutionEngine2 &EE, PlaceholderBindings &bindings,
// On each training iteration take a slice of imageInputs and labelInputs
// and put them into variables A and B, then run forward and backward passes
// and update weights.
runBatch2(EE, bindings, numIterations, sampleCounter, {inputPH, selectedPH},
{&imageInputs, &labelInputs}, tfName);
runBatch(EE, bindings, numIterations, sampleCounter, {inputPH, selectedPH},
{&imageInputs, &labelInputs}, tfName);

timer.stopTimer();
}
}

void validateModel(ExecutionEngine2 &EE, PlaceholderBindings &bindings,
void validateModel(ExecutionEngine &EE, PlaceholderBindings &bindings,
Function *F, unsigned minibatchSize, unsigned numIterations,
Tensor &imageInputs, Tensor &labelInputs,
Placeholder *inputPH, Placeholder *outputPH,
@@ -215,13 +215,13 @@ void testMNIST() {
PlaceholderBindings trainingBindings, inferBindings;
Placeholder *A, *E, *selected;

ExecutionEngine2 EEI_(executionBackend);
ExecutionEngine EEI_(executionBackend);
auto &inferMod = EEI_.getModule();
Function *F = inferMod.createFunction("mnist");
createModel(EEI_, F, inferBindings, minibatchSize, A, E, selected);
inferBindings.allocate(inferMod.getPlaceholders());

ExecutionEngine2 EET_(executionBackend);
ExecutionEngine EET_(executionBackend);
auto &trainMod = EET_.getModule();
Function *TF = trainMod.createFunction("mnist");
createModel(EET_, TF, trainingBindings, minibatchSize, A, E, selected);
@@ -247,7 +247,7 @@ void testMNISTLoadAndTraining() {
imageInputsTransposed.transpose(&imageInputs, NHWC2NCHW);

PlaceholderBindings trainingBindings, inferBindings;
ExecutionEngine2 EEI_(executionBackend);
ExecutionEngine EEI_(executionBackend);
auto &inferMod = EEI_.getModule();
auto *F = inferMod.createFunction("lenet_mnist");
unsigned minibatchSize = 8;
@@ -280,7 +280,7 @@ void testMNISTLoadAndTraining() {

// Load the model a second time for training.
// TODO: remove once EE2 is able to compile in different modes.
ExecutionEngine2 EET_(executionBackend);
ExecutionEngine EET_(executionBackend);
auto &trainMod = EET_.getModule();
auto *TF = trainMod.createFunction("lenet_mnist_train");
glow::Caffe2ModelLoader trainingLoader("lenet_mnist/predict_net.pb",
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"
#include "glow/IR/IR.h"
#include "glow/Support/Support.h"
@@ -182,7 +182,7 @@ void testPTB() {
unsigned numWords = loadPTB(inputWords, targetWords, numSteps, vocabSize,
minibatchSize, maxNumWords);
LOG(INFO) << "Loaded " << numWords << " words.";
ExecutionEngine2 EE(executionBackend);
ExecutionEngine EE(executionBackend);
PlaceholderBindings bindings;

// Construct the network:
@@ -270,8 +270,8 @@ void testPTB() {
targetWordsBatch.copyConsecutiveSlices(&targetWords,
minibatchSize * batch);

runBatch2(EE, bindings, 1, sampleCounter, {X, Y},
{&inputWordsBatch, &targetWordsBatch}, tfName);
runBatch(EE, bindings, 1, sampleCounter, {X, Y},
{&inputWordsBatch, &targetWordsBatch}, tfName);
for (size_t step = 0; step < numSteps; step++) {
for (unsigned int i = 0; i < minibatchSize; i++) {
auto T =
@@ -15,7 +15,7 @@
*/

#include "glow/Base/Image.h"
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"
#include "glow/Importer/Caffe2ModelLoader.h"
#include "glow/Runtime/HostManager/HostManager.h"
@@ -200,8 +200,8 @@ int main(int argc, char **argv) {

context->getPlaceholderBindings()->allocate(phList);
Tensor batch = image.getUnowned(inputShape);
updateInputPlaceholders2(*(context->getPlaceholderBindings()), {input},
{&batch});
updateInputPlaceholders(*(context->getPlaceholderBindings()), {input},
{&batch});

dispatchClassify(0, hostManager.get(), std::move(path), std::move(context),
returned, finished);
@@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "glow/Base/Image.h"
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Hook.h"
#include "glow/Importer/Caffe2ModelLoader.h"

@@ -27,7 +27,7 @@ const char inputName[] = "gpu_0/data";

class Tester {
PlaceholderBindings bindings, inferBindings;
ExecutionEngine2 EEI;
ExecutionEngine EEI;
std::unique_ptr<Module> mod;
Function *F;
TypeRef inputType;
@@ -49,7 +49,7 @@ class Tester {
void bindInput(Tensor *batch) {
// Allocate memory for input and bind it to the placeholders.
bindings.allocate(mod->getPlaceholders());
updateInputPlaceholders2(bindings, {input}, {batch});
updateInputPlaceholders(bindings, {input}, {batch});
}

TypeRef getInputType() const { return inputType; }
@@ -16,7 +16,7 @@

#include "glow/Backends/DeviceManager.h"
#include "glow/Base/Image.h"
#include "glow/ExecutionEngine/ExecutionEngine2.h"
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"
#include "glow/Importer/Caffe2ModelLoader.h"
#include "glow/Optimizer/GraphOptimizer/GraphOptimizer.h"
@@ -152,8 +152,8 @@ int main(int argc, char **argv) {
context->setTraceContext(
llvm::make_unique<TraceContext>(TraceLevel::STANDARD));
context->getPlaceholderBindings()->allocate(module.getPlaceholders());
updateInputPlaceholders2(*(context->getPlaceholderBindings()), {input},
{&batch});
updateInputPlaceholders(*(context->getPlaceholderBindings()), {input},
{&batch});

devices[i]->runFunction(
"resnet50", std::move(context),

0 comments on commit 3bd95b6

Please sign in to comment.
You can’t perform that action at this time.