[TF FE] Fix centernet and correct FloorDiv translator for signed integer type #22684

Merged: 31 commits merged on Feb 14, 2024
Changes from 3 commits
Commits (31)
7c7936b
fix centernet
pavel-esir Feb 6, 2024
ad11f4c
rewrite solution with pass
pavel-esir Feb 7, 2024
5bdff7f
add transformation tests
pavel-esir Feb 7, 2024
58d493d
generalize floordiv layer test
pavel-esir Feb 7, 2024
44adf98
Merge branch 'master' into fix_centernet
pavel-esir Feb 7, 2024
c12f0c8
Merge remote-tracking branch 'upstream/master' into fix_centernet
pavel-esir Feb 8, 2024
5b0d6fc
update layer test run through OVC; corrected pattern; remove rt_info
pavel-esir Feb 8, 2024
f6e7eb4
do not use ovc, leave only save_model in layer tests
pavel-esir Feb 9, 2024
539ab3d
Update comment in src/common/transformations/src/transformations/fp16…
pavel-esir Feb 9, 2024
45f6af3
fully revert common_utils.py
pavel-esir Feb 9, 2024
95ac470
Merge branch 'master' into fix_centernet
pavel-esir Feb 9, 2024
821b73f
fix tests
pavel-esir Feb 12, 2024
5b5e2cf
rewrite solution with integers
pavel-esir Feb 12, 2024
14c64f0
style_fix; add comment description
pavel-esir Feb 12, 2024
b5c036b
fix for ARM
pavel-esir Feb 12, 2024
510962f
xfail for arm
pavel-esir Feb 12, 2024
f6bab91
cosmetic corrections
pavel-esir Feb 12, 2024
379d297
fix for ARM: ensure Divide is truncative
pavel-esir Feb 13, 2024
5c1c17b
style_fix
pavel-esir Feb 13, 2024
a75f71c
revert FE changes: solution with a pass
pavel-esir Feb 14, 2024
931c3a4
namespace typo fix
pavel-esir Feb 14, 2024
c96333d
Revert "namespace typo fix"
pavel-esir Feb 14, 2024
2eb52c9
Revert "revert FE changes: solution with a pass"
pavel-esir Feb 14, 2024
75b9a98
Revert "style_fix"
pavel-esir Feb 14, 2024
02e8fdf
final solution
pavel-esir Feb 14, 2024
3f6cedd
xfail all int tests for ARM
pavel-esir Feb 14, 2024
5c36ae4
xfail arm 2nd attempt
pavel-esir Feb 14, 2024
75766fd
Apply suggestions from code review
pavel-esir Feb 14, 2024
362b4cf
add branch for unsigned, other corrections
pavel-esir Feb 14, 2024
edef731
xfail for all arm platforms
pavel-esir Feb 14, 2024
e114877
concise xfail for all ARM platforms
pavel-esir Feb 14, 2024
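
For context on the numerical issue behind the PR title (an illustrative note, not part of the diff below): TensorFlow's FloorDiv rounds toward negative infinity, while truncating integer division rounds toward zero, so the two disagree whenever the operands have opposite signs. A minimal NumPy sketch of the difference:

    import numpy as np

    # Floor division rounds toward negative infinity; truncating division
    # rounds toward zero, so they differ for mixed-sign integer operands.
    print(np.floor_divide(np.int32(-7), np.int32(2)))  # -4 (floor semantics)
    print(int(np.int32(-7) / np.int32(2)))             # -3 (truncation)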
@@ -39,7 +39,7 @@ class TRANSFORMATIONS_API DisableFP16Compression : public RuntimeAttribute {
}

bool is_copyable() const override {
-return false;
+return true;
}
};

@@ -13,6 +13,7 @@
#include "openvino/op/divide.hpp"
#include "openvino/op/exp.hpp"
#include "openvino/op/fake_quantize.hpp"
#include "openvino/op/floor.hpp"
#include "openvino/op/interpolate.hpp"
#include "openvino/op/max_pool.hpp"
#include "openvino/op/maximum.hpp"
@@ -423,6 +424,35 @@ class PropagateDownDisableSensitivityForQuantized : public pass::MatcherPass {
}
};

/* FloorDiv should be kept in fp32 precision for better accuracy
when it accepts large values (> fp16_max).
*/
class MarkFloorDiv : public pass::MatcherPass {
public:
OPENVINO_RTTI("MarkFloorDiv", "0");
MarkFloorDiv() {
MATCHER_SCOPE(MarkFloorDiv);
// FloorDiv is represented either as:
// Floor(Div(input_1, input_2))
// Floor(Mul(input_1, inversed_const_input_2)), if input_2 is constant
auto div_mul_pattern = pattern::wrap_type<ov::op::v1::Divide, ov::op::v1::Multiply>();
auto floor_pattern = pattern::wrap_type<ov::op::v0::Floor>({div_mul_pattern});

matcher_pass_callback callback = [=](pattern::Matcher& m) {
const auto& node = m.get_match_root();
if (!node)
return false;
disable_fp16_compression(node);
disable_fp16_compression(node->input_value(0).get_node_shared_ptr());

return true;
};

auto m = make_shared<pattern::Matcher>(floor_pattern, matcher_name);
register_matcher(m, callback);
}
};

bool MarkSugraphsToKeepInMixedPrecision::run_on_model(const shared_ptr<ov::Model>& m) {
RUN_ON_MODEL_SCOPE(MarkSugraphsToKeepInMixedPrecision);

@@ -432,6 +462,8 @@ bool MarkSugraphsToKeepInMixedPrecision::run_on_model(const shared_ptr<ov::Model
REGISTER_PASS(manager, MarkExpInReduceOpPath)
REGISTER_PASS(manager, PropagateDownDisableSensitivityForQuantized)

REGISTER_PASS(manager, MarkFloorDiv)

// both Up and Down propagations are needed.
// Why both of them are needed is explained in comments in passes declarations.
REGISTER_PASS(manager, PropagateDownMarkToKeepInMixedPrecision)
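
An illustrative sketch (not part of the PR) of why the pass above keeps FloorDiv subgraphs in fp32: fp16 saturates at 65504, so intermediate values beyond that limit overflow to infinity and the floored quotient is lost when the subgraph is computed in half precision.

    import numpy as np

    # Illustrative only: 100000 overflows fp16 (max ~65504), so the fp16
    # FloorDiv result saturates to inf while fp32 keeps the exact value.
    x, y = np.float32(100000.0), np.float32(7.0)
    print(np.floor(x / y))                          # 14285.0 in fp32
    print(np.floor(np.float16(x) / np.float16(y)))  # inf in fp16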
@@ -1269,3 +1269,82 @@ TEST(TransformationTests, MarkDivWithEpsToKeepInMixedPrecision_disable_for_quant
result = fc(model, model_ref);
ASSERT_TRUE(result.valid) << result.message;
}

TEST(TransformationTests, keep_floor_div_1) {
shared_ptr<Model> model, model_ref;
pass::Manager manager;
{
auto input_1 = make_shared<Parameter>(element::f32, Shape{1, 1000});
auto input_2 = make_shared<Parameter>(element::f32, Shape{1000});

auto div_1 = make_shared<Divide>(input_1, input_2);
auto floor_1 = make_shared<Floor>(div_1);

model = make_shared<Model>(NodeVector{floor_1}, ParameterVector{input_1, input_2});

manager.register_pass<pass::MarkSugraphsToKeepInMixedPrecision>();
manager.run_passes(model);
}

{
auto input_1 = make_shared<Parameter>(element::f32, Shape{1, 1000});
auto input_2 = make_shared<Parameter>(element::f32, Shape{1000});

auto div_1 = make_shared<Divide>(input_1, input_2);
auto floor_1 = make_shared<Floor>(div_1);

// marking nodes to be kept in fp32 for mixed precision
disable_fp16_compression(div_1);
disable_fp16_compression(floor_1);

model_ref = make_shared<Model>(NodeVector{floor_1}, ParameterVector{input_1, input_2});
}

const FunctionsComparator func_comparator =
FunctionsComparator::with_default().enable(FunctionsComparator::RUNTIME_KEYS);
// need to compare twice to ensure that no extra nodes are marked
FunctionsComparator::Result result = func_comparator(model_ref, model);
ASSERT_TRUE(result.valid) << result.message;
result = func_comparator(model, model_ref);
ASSERT_TRUE(result.valid) << result.message;
}

TEST(TransformationTests, keep_floor_div_2) {
shared_ptr<Model> model, model_ref;
pass::Manager manager;
{
auto input_1 = make_shared<Parameter>(element::f32, Shape{1, 100});
auto const_input_2 = Constant::create(element::f32, Shape{100}, {100000});

auto mul_1 = make_shared<Multiply>(input_1, const_input_2);
auto floor_1 = make_shared<Floor>(mul_1);

model = make_shared<Model>(NodeVector{floor_1}, ParameterVector{input_1});

manager.register_pass<pass::MarkSugraphsToKeepInMixedPrecision>();
manager.run_passes(model);
}

{
auto input_1 = make_shared<Parameter>(element::f32, Shape{1, 100});
auto const_input_2 = Constant::create(element::f32, Shape{100}, {100000});

auto mul_1 = make_shared<Multiply>(input_1, const_input_2);
auto floor_1 = make_shared<Floor>(mul_1);

// marking nodes to be kept in fp32 for mixed precision
disable_fp16_compression(mul_1);
disable_fp16_compression(floor_1);
disable_fp16_compression(const_input_2);

model_ref = make_shared<Model>(NodeVector{floor_1}, ParameterVector{input_1});
}

const FunctionsComparator func_comparator =
FunctionsComparator::with_default().enable(FunctionsComparator::RUNTIME_KEYS);
// need to compare twice to ensure that no extra nodes are marked
FunctionsComparator::Result result = func_comparator(model_ref, model);
ASSERT_TRUE(result.valid) << result.message;
result = func_comparator(model, model_ref);
ASSERT_TRUE(result.valid) << result.message;
}
2 changes: 2 additions & 0 deletions src/core/src/graph_util.cpp
@@ -20,6 +20,7 @@
#include "transformations/common_optimizations/compress_float_constants.hpp"
#include "transformations/common_optimizations/fused_names_cleanup.hpp"
#include "transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp"
#include "transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp"

namespace {

@@ -331,6 +332,7 @@ void serialize(const std::shared_ptr<const ov::Model>& m,
void save_model(const std::shared_ptr<const ov::Model>& m, const std::string& output_model, bool compress_to_fp16) {
ov::pass::Manager manager;
if (compress_to_fp16) {
manager.register_pass<ov::pass::MarkSugraphsToKeepInMixedPrecision>();
manager.register_pass<ov::pass::MarkPrecisionSensitiveConstants>();
manager.register_pass<ov::pass::CompressFloatConstants>(/*postponed=*/true);
}
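
With this change, save_model registers the mixed-precision marking pass before fp16 constant compression. A minimal usage sketch of the Python API under the same flag (the model paths are hypothetical):

    import openvino as ov

    # Hypothetical paths; with compress_to_fp16=True, save_model now runs
    # MarkSugraphsToKeepInMixedPrecision before compressing constants, so
    # subgraphs such as Floor(Divide(...)) are kept in fp32.
    model = ov.convert_model("centernet_saved_model")
    ov.save_model(model, "centernet.xml", compress_to_fp16=True)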
12 changes: 8 additions & 4 deletions tests/layer_tests/tensorflow_tests/test_tf_FloorDiv.py
@@ -5,7 +5,6 @@
import pytest

from common.tf_layer_test_class import CommonTFLayerTest
-from common.utils.tf_utils import permute_nchw_to_nhwc


class TestFloorDiv(CommonTFLayerTest):
@@ -17,9 +16,8 @@ def create_add_placeholder_const_net(self, x_shape, dtype, ir_version, use_new_f
# Create the graph and model
with tf.compat.v1.Session() as sess:
x = tf.compat.v1.placeholder(dtype, x_shape, 'Input')
-constant_value = np.array(-10).astype(dtype)
+constant_value = np.array(-256).astype(dtype)
y = tf.constant(constant_value)
-x = tf.raw_ops.Abs(x=x)
res = tf.raw_ops.FloorDiv(x=x, y=y)

tf.compat.v1.global_variables_initializer()
@@ -28,13 +26,19 @@ def create_add_placeholder_const_net(self, x_shape, dtype, ir_version, use_new_f
ref_net = None

return tf_net, ref_net


def _prepare_input(self, inputs_dict):
for input in inputs_dict.keys():
inputs_dict[input] = np.random.randint(-100000, 100000, inputs_dict[input]).astype(np.float32)
return inputs_dict

# TODO: implement tests for 2 Consts + Add

test_data_1D = [
dict(x_shape=[], dtype=np.int32),
dict(x_shape=[2], dtype=np.int64),
dict(x_shape=[2, 4, 5], dtype=np.int32),
dict(x_shape=[2, 1000], dtype=np.int32),
dict(x_shape=[], dtype=np.float32),
dict(x_shape=[2], dtype=np.float64),
dict(x_shape=[2, 4, 5], dtype=np.float32),