Skip to content
Permalink
Browse files

Add whitelisting option for FP16 conversion (#3386)

Summary:
Add option to whitelist node kinds for conversion instead of the default blacklist. This makes testing of networks easier.
Pull Request resolved: #3386

Test Plan: Added unit test.

Differential Revision: D16685611

Pulled By: jfix71

fbshipit-source-id: 17ec8c1a5a2ef691d927401eaab5c2c40bb90124
  • Loading branch information...
jfix71 authored and facebook-github-bot committed Aug 13, 2019
1 parent 2fc5da0 commit ec836047b23bc39f2697cdd79aa60b0508a53c28
@@ -20,6 +20,7 @@

#include "glow/Base/Traits.h" // For KindSet.
#include "glow/Base/Type.h"
#include "glow/Optimizer/GraphOptimizer/CompilationContext.h"

namespace glow {

@@ -38,8 +39,8 @@ class TypeAToTypeBFunctionConverter : public FunctionConverter {
/// Source type of the conversions. I.e., the values of this
/// element type are going to be converted.
ElemKind srcKind_;
/// Set of node kinds that should not be converted.
KindSet doNotConvertKinds_;
/// Precision configuration used during conversion.
const PrecisionConfiguration &precConfig_;

/// If the element type of \p out is srcKind_ returns a similarly shaped type
/// using dstKind_. Otherwise returns nullptr.
@@ -64,11 +65,10 @@ class TypeAToTypeBFunctionConverter : public FunctionConverter {
void convertTensor(Tensor &tensor, TypeRef destTy) override;

public:
/// Create a type converter from \p fromKind to \p toKind for \p F given
/// \p precConfig.
TypeAToTypeBFunctionConverter(Function &F, ElemKind fromKind, ElemKind toKind,
const KindSet *doNotConvertKinds = nullptr);
const PrecisionConfiguration &precConfig);
};
} // namespace glow
#endif
@@ -45,6 +45,10 @@ struct PrecisionConfiguration {
/// convolutions for profiling; see `-do-not-lower-nodes-for-profiling` in
/// docs/Quantization.md).
KindSet precisionModeKindSet;

/// Whether to use the precisionModeKindSet as a whitelist instead of the
/// default blacklist. Currently only supported for convertToFP16.
bool useSetAsWhitelist{false};
};

using QuantizationMode = PrecisionConfiguration::QuantizationMode;
@@ -87,6 +91,11 @@ struct CompilationContext {
/// \returns an error if the CompilationContext is malformed for whatever
/// configuration it is set up for, otherwise returns success.
llvm::Error verify() const {
RETURN_ERR_IF_NOT(!precisionConfig.useSetAsWhitelist ||
precisionConfig.convertToFP16,
"Can only use the precisionModeKindSet as a whitelist in "
"convertToFP16 mode.");

switch (precisionConfig.quantMode) {
case QuantizationMode::Profile:
RETURN_ERR_IF_NOT(bindings, GlowErr::ErrorCode::COMPILE_CONTEXT_MALFORMED,
@@ -23,16 +23,15 @@ using namespace glow;

/// Construct a converter that rewrites values of element kind \p fromKind to
/// \p toKind inside \p F. The \p precConfig reference is retained (not copied)
/// and consulted during conversion, so it must outlive this converter; its
/// precisionModeKindSet acts as a blacklist by default, or as a whitelist when
/// precConfig.useSetAsWhitelist is set.
TypeAToTypeBFunctionConverter::TypeAToTypeBFunctionConverter(
    Function &F, ElemKind fromKind, ElemKind toKind,
    const PrecisionConfiguration &precConfig)
    : FunctionConverter(F), mod_(*F.getParent()), dstKind_(toKind),
      srcKind_(fromKind), precConfig_(precConfig) {}

/// \returns true if \p node is eligible for conversion. The kind set in the
/// precision configuration is interpreted either as a whitelist (only kinds in
/// the set convert) or as a blacklist (kinds in the set are skipped), selected
/// by precConfig_.useSetAsWhitelist; eligible nodes are then subject to the
/// base class's own checks.
bool TypeAToTypeBFunctionConverter::canConvert(const Node &node) const {
  const bool inSet = precConfig_.precisionModeKindSet.count(node.getKind());
  const bool allowConversion = precConfig_.useSetAsWhitelist ? inSet : !inSet;

  if (!allowConversion) {
    return false;
  }
  return FunctionConverter::canConvert(node);
}
@@ -2933,8 +2933,7 @@ static void transformForPrecisionMode(const Backend &B, Function *F,
LOG_SCOPE(F->getLogContext(), "TypeAToTypeBFunctionConverter::convert()")

TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty,
&precConfig.precisionModeKindSet);
ElemKind::Float16Ty, precConfig);
converter.convert();
}
}
@@ -79,8 +79,9 @@ TEST_P(AllBackends, SimpleOneUseConversionFloatToFloat16) {

size_t origGraphSize = F->getNodes().size();

PrecisionConfiguration precConfig;
TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty);
ElemKind::Float16Ty, precConfig);
converter.convert();

// We should have 4 more nodes:
@@ -185,8 +186,9 @@ TEST_P(AllBackends, SimpleChainOfComputationConversionFloatToFloat16) {

size_t origGraphSize = F->getNodes().size();

PrecisionConfiguration precConfig;
TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty);
ElemKind::Float16Ty, precConfig);
converter.convert();

// We should have 6 more nodes:
@@ -250,8 +252,8 @@ TEST_P(AllBackends, SimpleChainOfComputationConversionFloatToFloat16) {
input);
}

/// Check that the conversion honors the precision configuration for
/// blacklisting a node kind (Relu here) for a graph with a simple chain of
/// computation.
/// Namely, check that:
/// \verbatim
/// Input: Placeholder(float)
@@ -308,10 +310,10 @@ TEST_P(AllBackends, DoNotConvertReLUConversionFloatToFloat16) {

size_t origGraphSize = F->getNodes().size();

KindSet doNotConvertKinds;
doNotConvertKinds.insert(Kinded::Kind::ReluNodeKind);
TypeAToTypeBFunctionConverter converter(
*F, ElemKind::FloatTy, ElemKind::Float16Ty, &doNotConvertKinds);
PrecisionConfiguration precConfig;
precConfig.precisionModeKindSet.insert(Kinded::Kind::ReluNodeKind);
TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty, precConfig);
converter.convert();

// We should have 4 more nodes:
@@ -361,6 +363,111 @@ TEST_P(AllBackends, DoNotConvertReLUConversionFloatToFloat16) {
input);
}

/// Check that the conversion honors the precision configuration for
/// whitelisting a node kind (Relu here) for a graph with a simple chain of
/// computation.
/// Namely, check that:
/// \verbatim
/// Input: Placeholder(float)
/// |
/// V
/// FC(float)
/// |
/// V
/// ReLU(float) Output: Placeholder(float)
/// | |
/// | +-------+
/// | /
/// V V
/// Save
/// \endverbatim
///
/// Gets converted into:
/// \verbatim
/// Input: Placeholder(float)
/// |
/// V
/// FC(float)
/// |
/// V
/// ConvertTo(float16)
/// |
/// V
/// ReLU(float16)
/// |
/// V
/// ConvertTo(float) Output: Placeholder(float)
/// | |
/// | +---------------+
/// | /
/// V V
/// Save
/// \endverbatim
///
/// In particular, the input and output of the network shouldn't be modified.
TEST_P(AllBackends, OnlyReluConversionFloatToFloat16) {
  Module mod;
  Function *F = mod.createFunction("test");
  PlaceholderBindings bindings;

  auto *input =
      mod.createPlaceholder(ElemKind::FloatTy, {20, 13}, "Input", false);
  auto *output =
      mod.createPlaceholder(ElemKind::FloatTy, {20, 10}, "Output", false);

  auto *FC = F->createFullyConnected(bindings, "FC", input, 10);
  auto *RN =
      F->createRELU("Relu", FC, FC->getType(FullyConnectedNode::ResultIdx));
  auto *result = F->createSave("save", RN, output);

  size_t origGraphSize = F->getNodes().size();

  // Whitelist only Relu: with useSetAsWhitelist set, the kind set selects the
  // only node kind that is allowed to be converted to FP16.
  PrecisionConfiguration precConfig;
  precConfig.precisionModeKindSet.insert(Kinded::Kind::ReluNodeKind);
  precConfig.useSetAsWhitelist = true;
  TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
                                          ElemKind::Float16Ty, precConfig);
  converter.convert();

  // We should have 2 more nodes:
  // 1 conversion float to float16 for the input of Relu.
  // 1 conversion float16 to float for the result of Relu.
  EXPECT_EQ(F->getNodes().size(), origGraphSize + 2);
  // Make sure the save node is still in the function and is unchanged.
  EXPECT_TRUE(std::find(F->getNodes().begin(), F->getNodes().end(), *result) !=
              F->getNodes().end());
  EXPECT_EQ(result->getOutput(), output->getOutput());
  // Check that the save is fed from a conversion from float16 to float.
  auto *resultInput = llvm::dyn_cast<ConvertToNode>(result->getInput());
  ASSERT_NE(resultInput, nullptr);
  EXPECT_EQ(resultInput->getInput().getElementType(), ElemKind::Float16Ty);
  EXPECT_EQ(resultInput->getResult().getElementType(), ElemKind::FloatTy);

  // Check the Relu has FP16 inputs and outputs.
  auto *convertedRelu = llvm::dyn_cast<ReluNode>(resultInput->getInput());
  ASSERT_NE(convertedRelu, nullptr);
  EXPECT_EQ(convertedRelu->getInput().getElementType(), ElemKind::Float16Ty);
  EXPECT_EQ(convertedRelu->getResult().getElementType(), ElemKind::Float16Ty);

  // Check that the Relu is fed from a conversion from float to float16.
  auto *convertedToReluInput = llvm::dyn_cast<ConvertToNode>(RN->getInput());
  ASSERT_NE(convertedToReluInput, nullptr);
  EXPECT_EQ(convertedToReluInput->getInput().getElementType(),
            ElemKind::FloatTy);
  EXPECT_EQ(convertedToReluInput->getResult().getElementType(),
            ElemKind::Float16Ty);

  // Check that this conversion comes from the original float FC node.
  EXPECT_EQ(convertedToReluInput->getInput().getNode(), FC);
  EXPECT_EQ(FC->getResult().getElementType(), ElemKind::FloatTy);
  // Check that all the input of FC are float.
  for (unsigned idx = 0, end = FC->getNumInputs(); idx != end; ++idx) {
    EXPECT_EQ(FC->getNthInput(idx).getElementType(), ElemKind::FloatTy);
  }
  // Check that the original placeholder is still the input to the FC and float.
  EXPECT_EQ(FC->getInput().getNode(), input);
  EXPECT_EQ(input->getOutput().getElementType(), ElemKind::FloatTy);
}

/// Check that we don't convert types we didn't ask for.
/// Namely, check that:
/// \verbatim
@@ -422,8 +529,9 @@ TEST_P(AllBackends, int64IConversionFloatToFloat16) {

size_t origGraphSize = F->getNodes().size();

PrecisionConfiguration precConfig;
TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty);
ElemKind::Float16Ty, precConfig);
converter.convert();

// We should have 2 more nodes:
@@ -553,8 +661,9 @@ TEST_P(AllBackends, OptimizeMiddleConversionsFloatToFloat16) {

size_t origGraphSize = F->getNodes().size();

PrecisionConfiguration precConfig;
TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty);
ElemKind::Float16Ty, precConfig);
converter.convert();

optimize(F, CompilationMode::Infer);
@@ -747,8 +856,9 @@ TEST_P(AllBackends, convertPlaceholderFloatToFloat16) {
size_t f2OrigGraphSize = F2->getNodes().size();
size_t f3OrigGraphSize = F3->getNodes().size();

PrecisionConfiguration precConfig;
TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty);
ElemKind::Float16Ty, precConfig);
for (auto *placeholder : mod.getPlaceholders()) {
if (output2 == placeholder) {
continue;
@@ -883,8 +993,9 @@ TEST_P(AllBackends, convertExistingConversionToNoop) {

size_t origSize = F->getNodes().size();

PrecisionConfiguration precConfig;
TypeAToTypeBFunctionConverter converter(*F, ElemKind::FloatTy,
ElemKind::Float16Ty);
ElemKind::Float16Ty, precConfig);
converter.convert();

EXPECT_EQ(F->getNodes().size(), origSize + 1);
@@ -213,8 +213,10 @@ buildAndCompileAndGetInAndOutPair(Loader &loader, PlaceholderBindings &bindings,
// Convert the placeholders for now. The backing Tensor's data will be
// converted later.
if (convertInAndOutToFp16) {
TypeAToTypeBFunctionConverter converter(
*loader.getFunction(), ElemKind::FloatTy, ElemKind::Float16Ty);
PrecisionConfiguration precConfig;
TypeAToTypeBFunctionConverter converter(*loader.getFunction(),
ElemKind::FloatTy,
ElemKind::Float16Ty, precConfig);
for (auto *placeholder : loader.getModule()->getPlaceholders()) {
converter.convertPlaceholder(*placeholder, &bindings);
}

0 comments on commit ec83604

Please sign in to comment.
You can’t perform that action at this time.