Skip to content

Commit

Permalink
Replace std::vector<std::initializer_list> with std::vector<std::vect…
Browse files Browse the repository at this point in the history
…or>.

In std::vector<std::initializer_list> the initializer_list's underlying arrays
will be out of scope after the full-expression containing the initialization of
the std::vector (C++11 [class.temporary] p5), leaving the stored lists dangling.

PiperOrigin-RevId: 220609867
  • Loading branch information
tensorflower-gardener committed Nov 8, 2018
1 parent aeb04dd commit 9b2f85a
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 44 deletions.
5 changes: 2 additions & 3 deletions tensorflow/compiler/xla/index_util_test.cc
Expand Up @@ -24,8 +24,7 @@ limitations under the License.
namespace xla {
namespace {

void SetMinorToMajorLayout(Shape* shape,
std::initializer_list<int64> dimensions) {
void SetMinorToMajorLayout(Shape* shape, std::vector<int64> dimensions) {
shape->mutable_layout()->clear_minor_to_major();
for (auto dimension : dimensions) {
shape->mutable_layout()->add_minor_to_major(dimension);
Expand Down Expand Up @@ -122,7 +121,7 @@ TEST(IndexUtilTest, LinearToMultiToLinear) {
std::vector<int64> linear_indexes = {0, 1439999999, 1145567336,
43883404, 617295214, 1117613654};

std::vector<std::initializer_list<int64>> minor_to_major_orders;
std::vector<std::vector<int64>> minor_to_major_orders;
minor_to_major_orders.push_back({6, 5, 4, 3, 2, 1, 0});
minor_to_major_orders.push_back({0, 1, 2, 3, 4, 5, 6});
minor_to_major_orders.push_back({4, 5, 1, 2, 6, 0, 3});
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/compiler/xla/service/layout_assignment_test.cc
Expand Up @@ -91,7 +91,7 @@ class LayoutAssignmentTest : public HloVerifiedTestBase {
TEST_F(LayoutAssignmentTest, ComputationLayout) {
// Verify the layouts of the root and parameter instructions of a computation
// match the ComputationLayout for two different layouts.
std::vector<std::initializer_list<int64>> minor_to_majors = {{0, 1}, {1, 0}};
std::vector<std::vector<int64>> minor_to_majors = {{0, 1}, {1, 0}};
for (auto& minor_to_major : minor_to_majors) {
auto builder = HloComputation::Builder(TestName());
Shape ashape = ShapeUtil::MakeShape(F32, {42, 12});
Expand Down Expand Up @@ -160,7 +160,7 @@ TEST_F(LayoutAssignmentTest, FusionInstruction) {
// Verify that the layout of the fused parameters in a fusion instruction
// match that of the fusion operands. Other fused instructions should have no
// layout.
std::vector<std::initializer_list<int64>> minor_to_majors = {{0, 1}, {1, 0}};
std::vector<std::vector<int64>> minor_to_majors = {{0, 1}, {1, 0}};
for (auto& minor_to_major : minor_to_majors) {
auto builder = HloComputation::Builder(TestName());
auto constant_literal1 = LiteralUtil::CreateR2WithLayout<float>(
Expand Down
12 changes: 6 additions & 6 deletions tensorflow/lite/kernels/comparisons_test.cc
Expand Up @@ -455,7 +455,7 @@ TEST(ComparisonsTest, LessEqualQuantized) {
TEST(ComparisonsTest, QuantizedEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
Expand All @@ -473,7 +473,7 @@ TEST(ComparisonsTest, QuantizedEqualWithBroadcast) {
TEST(ComparisonsTest, QuantizedNotEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
Expand All @@ -491,7 +491,7 @@ TEST(ComparisonsTest, QuantizedNotEqualWithBroadcast) {
TEST(ComparisonsTest, QuantizedGreaterWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
Expand All @@ -509,7 +509,7 @@ TEST(ComparisonsTest, QuantizedGreaterWithBroadcast) {
TEST(ComparisonsTest, QuantizedGreaterEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
Expand All @@ -527,7 +527,7 @@ TEST(ComparisonsTest, QuantizedGreaterEqualWithBroadcast) {
TEST(ComparisonsTest, QuantizedLessWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
Expand All @@ -545,7 +545,7 @@ TEST(ComparisonsTest, QuantizedLessWithBroadcast) {
TEST(ComparisonsTest, QuantizedLessEqualWithBroadcast) {
const float kMin = -1.f;
const float kMax = 128.f;
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
ComparisonOpModel model({TensorType_UINT8, test_shapes[i], kMin, kMax},
Expand Down
30 changes: 15 additions & 15 deletions tensorflow/lite/kernels/detection_postprocess_test.cc
Expand Up @@ -194,7 +194,7 @@ TEST(DetectionPostprocessOpTest, QuantizedTest) {
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}});
// six boxes in center-size encoding
std::vector<std::initializer_list<float>> inputs1 = {{
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0, // box #1
0.0, 1.0, 0.0, 0.0, // box #2
0.0, -1.0, 0.0, 0.0, // box #3
Expand All @@ -204,12 +204,12 @@ TEST(DetectionPostprocessOpTest, QuantizedTest) {
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
// class scores - two classes with background
std::vector<std::initializer_list<float>> inputs2 = {
{0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0., .5, .4, 0., .3,
.2}};
std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6,
.5, 0., .93, .95, 0., .5, .4, 0.,
.3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
// six anchors in center-size encoding
std::vector<std::initializer_list<float>> inputs3 = {{
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0, // anchor #1
0.5, 0.5, 1.0, 1.0, // anchor #2
0.5, 0.5, 1.0, 1.0, // anchor #3
Expand Down Expand Up @@ -405,7 +405,7 @@ TEST(DetectionPostprocessOpTest, QuantizedTestFastNMS) {
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, false);
// six boxes in center-size encoding
std::vector<std::initializer_list<float>> inputs1 = {{
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0, // box #1
0.0, 1.0, 0.0, 0.0, // box #2
0.0, -1.0, 0.0, 0.0, // box #3
Expand All @@ -415,12 +415,12 @@ TEST(DetectionPostprocessOpTest, QuantizedTestFastNMS) {
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
// class scores - two classes with background
std::vector<std::initializer_list<float>> inputs2 = {
{0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0., .5, .4, 0., .3,
.2}};
std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6,
.5, 0., .93, .95, 0., .5, .4, 0.,
.3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
// six anchors in center-size encoding
std::vector<std::initializer_list<float>> inputs3 = {{
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0, // anchor #1
0.5, 0.5, 1.0, 1.0, // anchor #2
0.5, 0.5, 1.0, 1.0, // anchor #3
Expand Down Expand Up @@ -517,7 +517,7 @@ TEST(DetectionPostprocessOpTest, QuantizedTestRegularNMS) {
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
{TensorType_FLOAT32, {}}, true);
// six boxes in center-size encoding
std::vector<std::initializer_list<float>> inputs1 = {{
std::vector<std::vector<float>> inputs1 = {{
0.0, 0.0, 0.0, 0.0, // box #1
0.0, 1.0, 0.0, 0.0, // box #2
0.0, -1.0, 0.0, 0.0, // box #3
Expand All @@ -527,12 +527,12 @@ TEST(DetectionPostprocessOpTest, QuantizedTestRegularNMS) {
}};
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[0]);
// class scores - two classes with background
std::vector<std::initializer_list<float>> inputs2 = {
{0., .9, .8, 0., .75, .72, 0., .6, .5, 0., .93, .95, 0., .5, .4, 0., .3,
.2}};
std::vector<std::vector<float>> inputs2 = {{0., .9, .8, 0., .75, .72, 0., .6,
.5, 0., .93, .95, 0., .5, .4, 0.,
.3, .2}};
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[0]);
// six anchors in center-size encoding
std::vector<std::initializer_list<float>> inputs3 = {{
std::vector<std::vector<float>> inputs3 = {{
0.5, 0.5, 1.0, 1.0, // anchor #1
0.5, 0.5, 1.0, 1.0, // anchor #2
0.5, 0.5, 1.0, 1.0, // anchor #3
Expand Down
35 changes: 17 additions & 18 deletions tensorflow/lite/kernels/sub_test.cc
Expand Up @@ -99,7 +99,7 @@ TEST(FloatSubOpModel, ActivationRELU_N1_TO_1) {
}

TEST(FloatSubOpModel, VariousInputShapes) {
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
FloatSubOpModel m({TensorType_FLOAT32, test_shapes[i]},
Expand All @@ -116,7 +116,7 @@ TEST(FloatSubOpModel, VariousInputShapes) {
}

TEST(FloatSubOpModel, WithBroadcast) {
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
FloatSubOpModel m({TensorType_FLOAT32, test_shapes[i]},
Expand Down Expand Up @@ -153,7 +153,7 @@ TEST(IntegerSubOpModel, ActivationRELU_N1_TO_1) {
}

TEST(IntegerSubOpModel, VariousInputShapes) {
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
Expand All @@ -168,7 +168,7 @@ TEST(IntegerSubOpModel, VariousInputShapes) {
}

TEST(IntegerSubOpModel, WithBroadcast) {
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
Expand All @@ -185,14 +185,13 @@ TEST(IntegerSubOpModel, WithBroadcast) {

TEST(QuantizedSubOpModel, QuantizedTestsNoActivation) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<std::initializer_list<float>> inputs1 = {
std::vector<std::vector<float>> inputs1 = {
{0.1, 0.2, 0.3, 0.4}, {-0.2, 0.2, 0.4, 0.7}, {-0.01, 0.2, 0.7, 0.3}};
std::vector<std::initializer_list<float>> inputs2 = {
std::vector<std::vector<float>> inputs2 = {
{0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.2}, {0.6, 0.4, -0.18, 0.5}};
std::vector<std::initializer_list<float>> results = {
{-0.5, -0.2, 0.0, 0.3},
{-0.8, -0.2, -0.1, 0.9},
{-0.61, -0.2, 0.88, -0.2}};
std::vector<std::vector<float>> results = {{-0.5, -0.2, 0.0, 0.3},
{-0.8, -0.2, -0.1, 0.9},
{-0.61, -0.2, 0.88, -0.2}};
for (int i = 0; i < inputs1.size(); ++i) {
QuantizedSubOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
Expand All @@ -209,12 +208,12 @@ TEST(QuantizedSubOpModel, QuantizedTestsNoActivation) {

TEST(QuantizedSubOpModel, QuantizedTestsActivationRELU_N1_TO_1) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<std::initializer_list<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7},
{-0.8, 0.2, 0.7, 0.5}};
std::vector<std::initializer_list<float>> inputs2 = {{0.6, 0.4, 0.9, -0.8},
{0.6, 0.4, -0.8, 0.3}};
std::vector<std::initializer_list<float>> results = {{-1.0, -0.2, 0.0, 1.0},
{-1.0, -0.2, 1.0, 0.2}};
std::vector<std::vector<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7},
{-0.8, 0.2, 0.7, 0.5}};
std::vector<std::vector<float>> inputs2 = {{0.6, 0.4, 0.9, -0.8},
{0.6, 0.4, -0.8, 0.3}};
std::vector<std::vector<float>> results = {{-1.0, -0.2, 0.0, 1.0},
{-1.0, -0.2, 1.0, 0.2}};
for (int i = 0; i < inputs1.size(); ++i) {
QuantizedSubOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
Expand All @@ -231,7 +230,7 @@ TEST(QuantizedSubOpModel, QuantizedTestsActivationRELU_N1_TO_1) {

TEST(QuantizedSubOpModel, QuantizedVariousInputShapes) {
float kQuantizedTolerance = GetTolerance(-3.0, 3.0);
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
QuantizedSubOpModel m({TensorType_UINT8, test_shapes[i], -3.0, 3.0},
Expand All @@ -250,7 +249,7 @@ TEST(QuantizedSubOpModel, QuantizedVariousInputShapes) {

TEST(QuantizedSubOpModel, QuantizedWithBroadcast) {
float kQuantizedTolerance = GetTolerance(-3.0, 3.0);
std::vector<std::initializer_list<int>> test_shapes = {
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
QuantizedSubOpModel m({TensorType_UINT8, test_shapes[i], -3.0, 3.0},
Expand Down

0 comments on commit 9b2f85a

Please sign in to comment.