Skip to content

Commit

Permalink
[testing] Move numerical FC tests to OperatorTest (#2373)
Browse files Browse the repository at this point in the history
Description:
This commit moves the parts of Caffe2ImporterTest for FC operators that check numerical
correctness over to OperatorTest.

Testing:
All unit tests pass.
  • Loading branch information
gabagam authored and SplitInfinity committed Feb 15, 2019
1 parent eaf135b commit f22de71
Show file tree
Hide file tree
Showing 2 changed files with 171 additions and 38 deletions.
179 changes: 141 additions & 38 deletions tests/unittests/Caffe2ImporterTest.cpp
Expand Up @@ -605,16 +605,47 @@ TEST(caffe2, FC) {
updateInputPlaceholdersByName(ctx, &mod, {"inputs"}, {&inputs});
}

EE.compile(CompilationMode::Infer, F);
EE.run(ctx);
// High level check on the content of the graph. We have 1 FC node and 1 save.
EXPECT_EQ(F->getNodes().size(), 2);
auto *saveNode = getSaveNodeFromDest(output);
auto *fcNode =
llvm::dyn_cast<FullyConnectedNode>(saveNode->getInput().getNode());
EXPECT_TRUE(fcNode);

auto result = ctx.get(output)->getHandle();
std::vector<size_t> expectedDims = {2, 4};
std::vector<float> expectedValues = {14.1f, 32.2f, 50.3f, 68.4f,
32.1f, 77.2f, 122.3f, 167.4f};
EXPECT_TRUE(result.dims().vec() == expectedDims);
for (size_t i = 0; i < 2 * 4; i++)
EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
// Check the numerical values of the weights and biases.
{
const Constant *constant = mod.getConstantByName("weights");
ASSERT_TRUE(constant);
const Tensor &weights = constant->getPayload();
const std::vector<size_t> expectedDimensions = {3, 4};
const std::vector<float> expectedValues = {1.0f, 4.0f, 7.0f, 10.0f, //
2.0f, 5.0f, 8.0f, 11.0f, //
3.0f, 6.0f, 9.0f, 12.0f};
EXPECT_EQ(expectedDimensions, weights.dims().vec());
ASSERT_EQ(expectedValues.size(), weights.size());
const auto elements = weights.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}
{
const Constant *constant = mod.getConstantByName("biases");
ASSERT_TRUE(constant);
const Tensor &bias = constant->getPayload();
const std::vector<size_t> expectedDimensions = {4};
const std::vector<float> expectedValues = {0.1f, 0.2f, 0.3f, 0.4f};
EXPECT_EQ(expectedDimensions, bias.dims().vec());
ASSERT_EQ(expectedValues.size(), bias.size());
const auto elements = bias.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}

// We don't actually check that the output is correct, because this is
// already covered in the Operator.FC/* tests.
}

/// Test loading a FC node : I * transpose(W) + B, where I needs to be
Expand Down Expand Up @@ -656,16 +687,40 @@ TEST(caffe2, FCWithFlatten) {
auto *reshape = llvm::dyn_cast<ReshapeNode>(fcNode->getInput());
ASSERT_TRUE(reshape);

EE.compile(CompilationMode::Infer, F);
EE.run(ctx);
auto result = ctx.get(output)->getHandle();
std::vector<size_t> expectedDims = {2, 4};
std::vector<float> expectedValues = {14.1f, 32.2f, 50.3f, 68.4f,
32.1f, 77.2f, 122.3f, 167.4f};
result = ctx.get(output)->getHandle();
EXPECT_TRUE(result.dims().vec() == expectedDims);
for (size_t i = 0; i < 2 * 4; i++)
EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
// Check the numerical values of the weights and biases.
{
const Constant *constant = mod.getConstantByName("weights");
ASSERT_TRUE(constant);
const Tensor &weights = constant->getPayload();
const std::vector<size_t> expectedDimensions = {3, 4};
const std::vector<float> expectedValues = {1.0f, 4.0f, 7.0f, 10.0f, //
2.0f, 5.0f, 8.0f, 11.0f, //
3.0f, 6.0f, 9.0f, 12.0f};
EXPECT_EQ(expectedDimensions, weights.dims().vec());
ASSERT_EQ(expectedValues.size(), weights.size());
const auto elements = weights.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}
{
const Constant *constant = mod.getConstantByName("biases");
ASSERT_TRUE(constant);
const Tensor &bias = constant->getPayload();
const std::vector<size_t> expectedDimensions = {4};
const std::vector<float> expectedValues = {0.1f, 0.2f, 0.3f, 0.4f};
EXPECT_EQ(expectedDimensions, bias.dims().vec());
ASSERT_EQ(expectedValues.size(), bias.size());
const auto elements = bias.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}

// We don't actually check that the output is correct, because this is
// already covered in the Operator.FCWithFlatten/* tests.
}

/// Test loading a FCTransposed node: I * W + B
Expand Down Expand Up @@ -706,16 +761,40 @@ TEST(caffe2, FCTransposed) {
llvm::dyn_cast<FullyConnectedNode>(saveNode->getInput().getNode());
ASSERT_TRUE(fcNode);

EE.compile(CompilationMode::Infer, F);
EE.run(ctx);
// Check the numerical values of the weights and biases.
{
const Constant *constant = mod.getConstantByName("weights");
ASSERT_TRUE(constant);
const Tensor &weights = constant->getPayload();
const std::vector<size_t> expectedDimensions = {3, 4};
const std::vector<float> expectedValues = {1.0f, 4.0f, 7.0f, 10.0f, //
2.0f, 5.0f, 8.0f, 11.0f, //
3.0f, 6.0f, 9.0f, 12.0f};
EXPECT_EQ(expectedDimensions, weights.dims().vec());
ASSERT_EQ(expectedValues.size(), weights.size());
const auto elements = weights.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}
{
const Constant *constant = mod.getConstantByName("biases");
ASSERT_TRUE(constant);
const Tensor &bias = constant->getPayload();
const std::vector<size_t> expectedDimensions = {4};
const std::vector<float> expectedValues = {0.1f, 0.2f, 0.3f, 0.4f};
EXPECT_EQ(expectedDimensions, bias.dims().vec());
ASSERT_EQ(expectedValues.size(), bias.size());
const auto elements = bias.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}

auto result = ctx.get(output)->getHandle();
std::vector<size_t> expectedDims = {2, 4};
std::vector<float> expectedValues = {14.1f, 32.2f, 50.3f, 68.4f,
32.1f, 77.2f, 122.3f, 167.4f};
EXPECT_TRUE(result.dims().vec() == expectedDims);
for (size_t i = 0; i < 2 * 4; i++)
EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
// We don't actually check that the output is correct, because this is
// already covered in the Operator.FCWithFlatten/* tests.
}

/// Test loading a FCTransposed node: I * W + B, where I needs to be flattened.
Expand Down Expand Up @@ -757,16 +836,40 @@ TEST(caffe2, FCTransposedWithFlatten) {
auto *reshape = llvm::dyn_cast<ReshapeNode>(fcNode1->getInput());
ASSERT_TRUE(reshape);

EE.compile(CompilationMode::Infer, F);
EE.run(ctx);
auto result = ctx.get(output)->getHandle();
std::vector<size_t> expectedDims = {2, 4};
std::vector<float> expectedValues = {14.1f, 32.2f, 50.3f, 68.4f,
32.1f, 77.2f, 122.3f, 167.4f};
result = ctx.get(output)->getHandle();
EXPECT_TRUE(result.dims().vec() == expectedDims);
for (size_t i = 0; i < 2 * 4; i++)
EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
// Check the numerical values of the weights and biases.
{
const Constant *constant = mod.getConstantByName("weights");
ASSERT_TRUE(constant);
const Tensor &weights = constant->getPayload();
const std::vector<size_t> expectedDimensions = {3, 4};
const std::vector<float> expectedValues = {1.0f, 4.0f, 7.0f, 10.0f, //
2.0f, 5.0f, 8.0f, 11.0f, //
3.0f, 6.0f, 9.0f, 12.0f};
EXPECT_EQ(expectedDimensions, weights.dims().vec());
ASSERT_EQ(expectedValues.size(), weights.size());
const auto elements = weights.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}
{
const Constant *constant = mod.getConstantByName("biases");
ASSERT_TRUE(constant);
const Tensor &bias = constant->getPayload();
const std::vector<size_t> expectedDimensions = {4};
const std::vector<float> expectedValues = {0.1f, 0.2f, 0.3f, 0.4f};
EXPECT_EQ(expectedDimensions, bias.dims().vec());
ASSERT_EQ(expectedValues.size(), bias.size());
const auto elements = bias.getHandle();
for (size_t i = 0; i < expectedValues.size(); ++i) {
EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
<< "Where i = " << i;
}
}

// We don't actually check that the output is correct, because this is
// already covered in the Operator.FCWithFlatten/* tests.
}

/// Test loading clip op from a Caffe2 model.
Expand Down
30 changes: 30 additions & 0 deletions tests/unittests/OperatorTest.cpp
Expand Up @@ -1842,6 +1842,36 @@ TEST_P(InterpAndCPU, IntConcat) {
}
}

/// Verify that a FullyConnected node fed a 3D input implicitly flattens it
/// and produces the expected 2D result: output = flatten(input) * W + B.
TEST_P(Operator, FCWithFlatten) {
  // Build the graph: a {2, 1, 3} input feeding an FC with {3, 4} weights.
  auto *inputPH =
      mod_.createPlaceholder(ElemKind::FloatTy, {2, 1, 3}, "input", false);
  auto *weightsPH =
      mod_.createPlaceholder(ElemKind::FloatTy, {3, 4}, "weights", true);
  auto *biasPH = mod_.createPlaceholder(ElemKind::FloatTy, {4}, "bias", true);

  // Fill in concrete values for the input, weights, and bias.
  ctx_.allocate(inputPH)->getHandle() = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
  ctx_.allocate(weightsPH)->getHandle() = {1.0f, 4.0f, 7.0f, 10.0f, //
                                           2.0f, 5.0f, 8.0f, 11.0f, //
                                           3.0f, 6.0f, 9.0f, 12.0f};
  ctx_.allocate(biasPH)->getHandle() = {0.1f, 0.2f, 0.3f, 0.4f};

  auto *fcNode = F_->createFullyConnected("fc", inputPH, weightsPH, biasPH);
  auto *saveNode = F_->createSave("save", fcNode);
  ctx_.allocate(saveNode->getPlaceholder());

  // Compile and run the function in inference mode.
  EE_.compile(CompilationMode::Infer, F_);
  EE_.run(ctx_);

  // The FC output should be {2, 4}: two rows of flatten(input) * W + B.
  auto outHandle = ctx_.get(saveNode->getPlaceholder())->getHandle();
  const std::vector<size_t> expectedDims = {2, 4};
  const std::vector<float> expectedOutput = {14.1f, 32.2f, 50.3f, 68.4f,
                                             32.1f, 77.2f, 122.3f, 167.4f};
  EXPECT_TRUE(outHandle.dims().vec() == expectedDims);
  for (size_t idx = 0; idx < expectedOutput.size(); ++idx) {
    EXPECT_FLOAT_EQ(outHandle.raw(idx), expectedOutput[idx]);
  }
}

TEST_P(Operator, IntFC) {
// In this test we subtract the outputs of a quantized FC and a floating-point
// FC and ensure that the error is below some low value.
Expand Down

0 comments on commit f22de71

Please sign in to comment.