Skip to content

Commit

Permalink
glow: add an operator unit test for back-to-back SWLS inference (#3624)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: #3624

ATT.

Reviewed By: jfix71

Differential Revision: D17915657

fbshipit-source-id: ef438458e47803a6bf2659519ad7696fd2f6a5ed
  • Loading branch information
tracelogfb authored and facebook-github-bot committed Oct 15, 2019
1 parent 8439a0c commit ef39067
Show file tree
Hide file tree
Showing 4 changed files with 86 additions and 0 deletions.
2 changes: 2 additions & 0 deletions lib/Backends/CPU/tests/CPUOperatorTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,8 @@ std::set<std::string> glow::backendTestBlacklist = {
"FusedRowwiseQuantizedSparseLengthsWeightedSum_Float16_AccumFloat/0",
"FusedRowwiseQuantizedSparseLengthsWeightedSum_Float16_AccumFloat16/0",
"FusedRowwiseQuantizedSparseLengthsWeightedSum_ConvertedFloat16/0",
"FusedRowwiseQuantizedSparseLengthsWeightedSum_ConvertedFloat16_back_to_"
"back/0",
"FusedRowwiseQuantizedSparseLengthsSum_Float16_AccumFloat/0",
"FusedRowwiseQuantizedSparseLengthsSum_Float16_AccumFloat16/0",
"SparseToDenseMask1/0",
Expand Down
2 changes: 2 additions & 0 deletions lib/Backends/NNPI/tests/NNPIOperatorTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,8 @@ struct EmulatorOnlyTests {
"ConvertFrom_Int64ITy_To_FloatTy/0",
"ConvertFrom_Int64ITy_To_FloatTy_AndBack/0",
"Exp/0",
"FusedRowwiseQuantizedSparseLengthsWeightedSum_ConvertedFloat16_back_"
"to_back/0",
"MaxPool/0",
"NonSquareKernelAveragePool/0",
"NonSquareKernelConvolution/0",
Expand Down
2 changes: 2 additions & 0 deletions lib/Backends/OpenCL/tests/OpenCLOperatorTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,8 @@ std::set<std::string> glow::backendTestBlacklist = {
"FusedRowwiseQuantizedSparseLengthsWeightedSum_Float16_AccumFloat/0",
"FusedRowwiseQuantizedSparseLengthsWeightedSum_Float16_AccumFloat16/0",
"FusedRowwiseQuantizedSparseLengthsWeightedSum_ConvertedFloat16/0",
"FusedRowwiseQuantizedSparseLengthsWeightedSum_ConvertedFloat16_back_to_"
"back/0",
"FusedRowwiseQuantizedSparseLengthsSum_Float/0",
"FusedRowwiseQuantizedSparseLengthsSum_Float16_AccumFloat/0",
"FusedRowwiseQuantizedSparseLengthsSum_Float16_AccumFloat16/0",
Expand Down
80 changes: 80 additions & 0 deletions tests/unittests/OperatorTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7460,6 +7460,86 @@ TEST_P(OperatorTest,
EXPECT_TRUE(expected.isEqual(result, 0.02));
}

/// Verify that a FusedRowwiseQuantizedSparseLengthsWeightedSum node compiled
/// with FP16 conversion produces correct results across two back-to-back
/// inferences on the SAME compiled function. Only the "lengths" input changes
/// between the two runs, so any state incorrectly cached from the first run
/// would corrupt the second result.
TEST_P(
    OperatorTest,
    FusedRowwiseQuantizedSparseLengthsWeightedSum_ConvertedFloat16_back_to_back) {
  CHECK_IF_ENABLED();
  /*
    DATA = [[2.0, -0.5, 13]]
    WEIGHTS = [1]
    INDICES = [0]
    LENGTHS = [0, 0, 0, 1] and then [1, 0, 0, 0]
    OUTPUT = [[0, 0, 0, 2.0]] and then [[2.0, 0, 0, 0]]
    (Output has one row per LENGTHS segment; the single non-empty segment
    sums data row 0, i.e. 2.0, weighted by 1.)
  */
  // Embedding table: 3 rows of 1 element each.
  Tensor data(ElemKind::FloatTy, {3, 1});
  data.getHandle() = {
      2.0,
      -0.5,
      13,
  };

  // Weights are a compile-time Constant; a single weight of 1 so the lookup
  // result equals the data row itself.
  Constant *weights = mod_.createConstant(ElemKind::FloatTy, {1}, "weights");
  weights->getPayloadMutable().getHandle<float>() = {1.};

  // Indices and lengths are runtime Placeholders so they can be rebound
  // between the two inferences.
  Placeholder *indices =
      mod_.createPlaceholder(ElemKind::Int64ITy, {1}, "indices",
                             /* isTrainable */ false);
  Placeholder *lengths =
      mod_.createPlaceholder(ElemKind::Int32ITy, {4}, "lengths",
                             /* isTrainable */ false);

  bindings_.allocate(indices)->getHandle<int64_t>() = {
      0,
  };
  // First run: only the last of the 4 segments is non-empty.
  bindings_.allocate(lengths)->getHandle<int32_t>() = {
      0,
      0,
      0,
      1,
  };

  auto *R = F_->createFusedRowwiseQuantizedSparseLengthsWeightedSum(
      "RQSLWS", data, weights, indices, lengths);
  SaveNode *S = F_->createSave("save", R);
  bindings_.allocate(S->getPlaceholder());

  // Compile once with both whole-graph FP16 conversion and conversion of the
  // fused rowwise-quantized data to FP16 scales/offsets enabled.
  CompilationContext cctx;
  cctx.precisionConfig.convertToFP16 = true;
  cctx.precisionConfig.convertFusedToFP16 = true;
  EE_.compile(cctx);
  EE_.run(bindings_);

  Tensor &result = *bindings_.get(S->getPlaceholder());
  Tensor expected(ElemKind::FloatTy, {4, 1});
  expected.getHandle<float>() = {
      0,
      0,
      0,
      2.0,
  };

  // Loose tolerance (0.02) accounts for FP16 quantization error.
  EXPECT_TRUE(expected.isEqual(result, 0.02));

  // Send another inference: move the non-empty segment from the last slot to
  // the first, reusing the already-compiled function.
  bindings_.get(lengths)->getHandle<int32_t>() = {
      1,
      0,
      0,
      0,
  };
  EE_.run(bindings_);

  Tensor &result1 = *bindings_.get(S->getPlaceholder());
  Tensor expected1(ElemKind::FloatTy, {4, 1});
  expected1.getHandle<float>() = {
      2.0,
      0,
      0,
      0,
  };
  EXPECT_TRUE(expected1.isEqual(result1, 0.02));
}

/// Helper to test FusedRowwiseQuantizedSparseLengthsSum using \p DTy.
template <typename DataType>
static void testFusedRowwiseQuantizedSparseLengthsSum(
Expand Down

0 comments on commit ef39067

Please sign in to comment.