From 9b2f85a8fba504453f98638e24696702cd1823fb Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Thu, 8 Nov 2018 03:10:44 -0800 Subject: [PATCH] Replace std::vector<std::initializer_list> with std::vector<std::vector>. In std::vector<std::initializer_list> the initializer_list's underlying arrays will be out of scope after the full-expression containing the initialization of the std::vector (c++11 [class.temporary] p5). PiperOrigin-RevId: 220609867 --- tensorflow/compiler/xla/index_util_test.cc | 5 ++- .../xla/service/layout_assignment_test.cc | 4 +-- tensorflow/lite/kernels/comparisons_test.cc | 12 +++---- .../kernels/detection_postprocess_test.cc | 30 ++++++++-------- tensorflow/lite/kernels/sub_test.cc | 35 +++++++++---------- 5 files changed, 42 insertions(+), 44 deletions(-) diff --git a/tensorflow/compiler/xla/index_util_test.cc b/tensorflow/compiler/xla/index_util_test.cc index 93522d2ca87a7e..fa94d0afb4c928 100644 --- a/tensorflow/compiler/xla/index_util_test.cc +++ b/tensorflow/compiler/xla/index_util_test.cc @@ -24,8 +24,7 @@ limitations under the License. 
namespace xla { namespace { -void SetMinorToMajorLayout(Shape* shape, - std::initializer_list<int64> dimensions) { +void SetMinorToMajorLayout(Shape* shape, std::vector<int64> dimensions) { shape->mutable_layout()->clear_minor_to_major(); for (auto dimension : dimensions) { shape->mutable_layout()->add_minor_to_major(dimension); @@ -122,7 +121,7 @@ TEST(IndexUtilTest, LinearToMultiToLinear) { std::vector<int64> linear_indexes = {0, 1439999999, 1145567336, 43883404, 617295214, 1117613654}; - std::vector<std::initializer_list<int64>> minor_to_major_orders; + std::vector<std::vector<int64>> minor_to_major_orders; minor_to_major_orders.push_back({6, 5, 4, 3, 2, 1, 0}); minor_to_major_orders.push_back({0, 1, 2, 3, 4, 5, 6}); minor_to_major_orders.push_back({4, 5, 1, 2, 6, 0, 3}); diff --git a/tensorflow/compiler/xla/service/layout_assignment_test.cc b/tensorflow/compiler/xla/service/layout_assignment_test.cc index 11c57682c11577..12c2a045d5cb87 100644 --- a/tensorflow/compiler/xla/service/layout_assignment_test.cc +++ b/tensorflow/compiler/xla/service/layout_assignment_test.cc @@ -91,7 +91,7 @@ class LayoutAssignmentTest : public HloVerifiedTestBase { TEST_F(LayoutAssignmentTest, ComputationLayout) { // Verify the layouts of the root and parameter instructions of a computation // match the ComputationLayout for two different layouts. - std::vector<std::initializer_list<int64>> minor_to_majors = {{0, 1}, {1, 0}}; + std::vector<std::vector<int64>> minor_to_majors = {{0, 1}, {1, 0}}; for (auto& minor_to_major : minor_to_majors) { auto builder = HloComputation::Builder(TestName()); Shape ashape = ShapeUtil::MakeShape(F32, {42, 12}); @@ -160,7 +160,7 @@ TEST_F(LayoutAssignmentTest, FusionInstruction) { // Verify that the layout of the fused parameters in a fusion instruction // match that of the fusion operands. Other fused instructions should have no // layout. 
- std::vector<std::initializer_list<int64>> minor_to_majors = {{0, 1}, {1, 0}}; + std::vector<std::vector<int64>> minor_to_majors = {{0, 1}, {1, 0}}; for (auto& minor_to_major : minor_to_majors) { auto builder = HloComputation::Builder(TestName()); auto constant_literal1 = LiteralUtil::CreateR2WithLayout( diff --git a/tensorflow/lite/kernels/comparisons_test.cc b/tensorflow/lite/kernels/comparisons_test.cc index 3c278c1f9e1097..ab10c959a4d6b2 100644 --- a/tensorflow/lite/kernels/comparisons_test.cc +++ b/tensorflow/lite/kernels/comparisons_test.cc @@ -455,7 +455,7 @@ TEST(ComparisonsTest, LessEqualQuantized) { TEST(ComparisonsTest, QuantizedEqualWithBroadcast) { const float kMin = -1.f; const float kMax = 128.f; - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax}, @@ -473,7 +473,7 @@ TEST(ComparisonsTest, QuantizedEqualWithBroadcast) { TEST(ComparisonsTest, QuantizedNotEqualWithBroadcast) { const float kMin = -1.f; const float kMax = 128.f; - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax}, @@ -491,7 +491,7 @@ TEST(ComparisonsTest, QuantizedNotEqualWithBroadcast) { TEST(ComparisonsTest, QuantizedGreaterWithBroadcast) { const float kMin = -1.f; const float kMax = 128.f; - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax}, @@ -509,7 +509,7 @@ TEST(ComparisonsTest, QuantizedGreaterWithBroadcast) { TEST(ComparisonsTest, QuantizedGreaterEqualWithBroadcast) { const float kMin = -1.f; const float kMax = 128.f; - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; 
i < test_shapes.size(); ++i) { ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax}, @@ -527,7 +527,7 @@ TEST(ComparisonsTest, QuantizedGreaterEqualWithBroadcast) { TEST(ComparisonsTest, QuantizedLessWithBroadcast) { const float kMin = -1.f; const float kMax = 128.f; - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax}, @@ -545,7 +545,7 @@ TEST(ComparisonsTest, QuantizedLessWithBroadcast) { TEST(ComparisonsTest, QuantizedLessEqualWithBroadcast) { const float kMin = -1.f; const float kMax = 128.f; - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax}, diff --git a/tensorflow/lite/kernels/detection_postprocess_test.cc b/tensorflow/lite/kernels/detection_postprocess_test.cc index d7ffaf1d82b542..a1c061a3cad440 100644 --- a/tensorflow/lite/kernels/detection_postprocess_test.cc +++ b/tensorflow/lite/kernels/detection_postprocess_test.cc @@ -194,7 +194,7 @@ TEST(DetectionPostprocessOpTest, QuantizedTest) { {TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}); // six boxes in center-size encoding - std::vector<std::initializer_list<float>> inputs1 = {{ + std::vector<std::vector<float>> inputs1 = {{ 0.0, 0.0, 0.0, 0.0, // box #1 0.0, 1.0, 0.0, 0.0, // box #2 0.0, -1.0, 0.0, 0.0, // box #3 @@ -204,12 +204,12 @@ TEST(DetectionPostprocessOpTest, QuantizedTest) { }}; m.QuantizeAndPopulate(m.input1(), inputs1[0]); // class scores - two classes with background - std::vector<std::initializer_list<float>> inputs2 = { - {0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0., .5, .4, 0., .3, - .2}}; + std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6, + .5, 0., .93, .95, 0., .5, .4, 0., + .3, .2}}; m.QuantizeAndPopulate(m.input2(), inputs2[0]); // six anchors in center-size encoding 
- std::vector<std::initializer_list<float>> inputs3 = {{ + std::vector<std::vector<float>> inputs3 = {{ 0.5, 0.5, 1.0, 1.0, // anchor #1 0.5, 0.5, 1.0, 1.0, // anchor #2 0.5, 0.5, 1.0, 1.0, // anchor #3 @@ -405,7 +405,7 @@ TEST(DetectionPostprocessOpTest, QuantizedTestFastNMS) { {TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}, false); // six boxes in center-size encoding - std::vector<std::initializer_list<float>> inputs1 = {{ + std::vector<std::vector<float>> inputs1 = {{ 0.0, 0.0, 0.0, 0.0, // box #1 0.0, 1.0, 0.0, 0.0, // box #2 0.0, -1.0, 0.0, 0.0, // box #3 @@ -415,12 +415,12 @@ TEST(DetectionPostprocessOpTest, QuantizedTestFastNMS) { }}; m.QuantizeAndPopulate(m.input1(), inputs1[0]); // class scores - two classes with background - std::vector<std::initializer_list<float>> inputs2 = { - {0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0., .5, .4, 0., .3, - .2}}; + std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6, + .5, 0., .93, .95, 0., .5, .4, 0., + .3, .2}}; m.QuantizeAndPopulate(m.input2(), inputs2[0]); // six anchors in center-size encoding - std::vector<std::initializer_list<float>> inputs3 = {{ + std::vector<std::vector<float>> inputs3 = {{ 0.5, 0.5, 1.0, 1.0, // anchor #1 0.5, 0.5, 1.0, 1.0, // anchor #2 0.5, 0.5, 1.0, 1.0, // anchor #3 @@ -517,7 +517,7 @@ TEST(DetectionPostprocessOpTest, QuantizedTestRegularNMS) { {TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}, true); // six boxes in center-size encoding - std::vector<std::initializer_list<float>> inputs1 = {{ + std::vector<std::vector<float>> inputs1 = {{ 0.0, 0.0, 0.0, 0.0, // box #1 0.0, 1.0, 0.0, 0.0, // box #2 0.0, -1.0, 0.0, 0.0, // box #3 @@ -527,12 +527,12 @@ TEST(DetectionPostprocessOpTest, QuantizedTestRegularNMS) { }}; m.QuantizeAndPopulate(m.input1(), inputs1[0]); // class scores - two classes with background - std::vector<std::initializer_list<float>> inputs2 = { - {0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0., .5, .4, 0., .3, - .2}}; + std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6, + .5, 0., .93, .95, 0., .5, .4, 0., + .3, .2}}; m.QuantizeAndPopulate(m.input2(), inputs2[0]); // six anchors in center-size encoding - std::vector<std::initializer_list<float>> inputs3 = {{ 
+ std::vector<std::vector<float>> inputs3 = {{ 0.5, 0.5, 1.0, 1.0, // anchor #1 0.5, 0.5, 1.0, 1.0, // anchor #2 0.5, 0.5, 1.0, 1.0, // anchor #3 diff --git a/tensorflow/lite/kernels/sub_test.cc b/tensorflow/lite/kernels/sub_test.cc index f0b9447ff61ced..41503300ab599f 100644 --- a/tensorflow/lite/kernels/sub_test.cc +++ b/tensorflow/lite/kernels/sub_test.cc @@ -99,7 +99,7 @@ TEST(FloatSubOpModel, ActivationRELU_N1_TO_1) { } TEST(FloatSubOpModel, VariousInputShapes) { - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { FloatSubOpModel m({TensorType_FLOAT32, test_shapes[i]}, @@ -116,7 +116,7 @@ TEST(FloatSubOpModel, VariousInputShapes) { } TEST(FloatSubOpModel, WithBroadcast) { - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { FloatSubOpModel m({TensorType_FLOAT32, test_shapes[i]}, @@ -153,7 +153,7 @@ TEST(IntegerSubOpModel, ActivationRELU_N1_TO_1) { } TEST(IntegerSubOpModel, VariousInputShapes) { - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { IntegerSubOpModel m({TensorType_INT32, test_shapes[i]}, @@ -168,7 +168,7 @@ TEST(IntegerSubOpModel, VariousInputShapes) { } TEST(IntegerSubOpModel, WithBroadcast) { - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { IntegerSubOpModel m({TensorType_INT32, test_shapes[i]}, @@ -185,14 +185,13 @@ TEST(IntegerSubOpModel, WithBroadcast) { TEST(QuantizedSubOpModel, QuantizedTestsNoActivation) { float kQuantizedTolerance = GetTolerance(-1.0, 1.0); - std::vector<std::initializer_list<float>> inputs1 = { + std::vector<std::vector<float>> inputs1 = { {0.1, 0.2, 0.3, 0.4}, {-0.2, 0.2, 0.4, 0.7}, {-0.01, 0.2, 0.7, 0.3}}; - std::vector<std::initializer_list<float>> inputs2 = { + std::vector<std::vector<float>> inputs2 = { {0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.2}, 
{0.6, 0.4, -0.18, 0.5}}; - std::vector<std::initializer_list<float>> results = { - {-0.5, -0.2, 0.0, 0.3}, - {-0.8, -0.2, -0.1, 0.9}, - {-0.61, -0.2, 0.88, -0.2}}; + std::vector<std::vector<float>> results = {{-0.5, -0.2, 0.0, 0.3}, + {-0.8, -0.2, -0.1, 0.9}, + {-0.61, -0.2, 0.88, -0.2}}; for (int i = 0; i < inputs1.size(); ++i) { QuantizedSubOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, {TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, @@ -209,12 +208,12 @@ TEST(QuantizedSubOpModel, QuantizedTestsNoActivation) { TEST(QuantizedSubOpModel, QuantizedTestsActivationRELU_N1_TO_1) { float kQuantizedTolerance = GetTolerance(-1.0, 1.0); - std::vector<std::initializer_list<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7}, - {-0.8, 0.2, 0.7, 0.5}}; - std::vector<std::initializer_list<float>> inputs2 = {{0.6, 0.4, 0.9, -0.8}, - {0.6, 0.4, -0.8, 0.3}}; - std::vector<std::initializer_list<float>> results = {{-1.0, -0.2, 0.0, 1.0}, - {-1.0, -0.2, 1.0, 0.2}}; + std::vector<std::vector<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7}, + {-0.8, 0.2, 0.7, 0.5}}; + std::vector<std::vector<float>> inputs2 = {{0.6, 0.4, 0.9, -0.8}, + {0.6, 0.4, -0.8, 0.3}}; + std::vector<std::vector<float>> results = {{-1.0, -0.2, 0.0, 1.0}, + {-1.0, -0.2, 1.0, 0.2}}; for (int i = 0; i < inputs1.size(); ++i) { QuantizedSubOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, {TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, @@ -231,7 +230,7 @@ TEST(QuantizedSubOpModel, QuantizedTestsActivationRELU_N1_TO_1) { TEST(QuantizedSubOpModel, QuantizedVariousInputShapes) { float kQuantizedTolerance = GetTolerance(-3.0, 3.0); - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { QuantizedSubOpModel m({TensorType_UINT8, test_shapes[i], -3.0, 3.0}, @@ -250,7 +249,7 @@ TEST(QuantizedSubOpModel, QuantizedVariousInputShapes) { TEST(QuantizedSubOpModel, QuantizedWithBroadcast) { float kQuantizedTolerance = GetTolerance(-3.0, 3.0); - std::vector<std::initializer_list<int>> test_shapes = { + std::vector<std::vector<int>> test_shapes = { {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}}; for (int i = 0; i < test_shapes.size(); ++i) { QuantizedSubOpModel m({TensorType_UINT8, 
test_shapes[i], -3.0, 3.0},