Skip to content

Commit

Permalink
[WIP] More libnd4j exec tests (#7009)
Browse files Browse the repository at this point in the history
* new op: evaluate_reduction_shape

* couple of tests for new op

* a bunch of commented-out tests re-enabled

* - axis handling for legacy reduce ops wrappers
- one more graph + test

* unsorted_segment_* ops tweaked so numOfClasses can come from an optional third input array instead of the integer argument

* histogram tweaks

* more skips removed
  • Loading branch information
raver119 committed Jan 15, 2019
1 parent a44a637 commit d0e7378
Show file tree
Hide file tree
Showing 20 changed files with 214 additions and 67 deletions.
Expand Up @@ -23,11 +23,11 @@

namespace nd4j {
namespace ops {
CUSTOM_OP_IMPL(unsorted_segment_max, 2, 1, false, 0, 1) {
CUSTOM_OP_IMPL(unsorted_segment_max, 2, 1, false, 0, 0) {
auto input = INPUT_VARIABLE(0);
auto idxSegments = INPUT_VARIABLE(1);
auto segmentedOutput = OUTPUT_VARIABLE(0);
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_max: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_max: segment indexes array length should be equal to the input first dimension, but %i != %i.", idxSegments->lengthOf(), input->sizeAt(0));

Expand All @@ -50,7 +50,7 @@ namespace nd4j {

auto in = inputShape->at(0);
int outRank = shape::rank(in);
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
Nd4jLong* outputShape;

ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
Expand Down
Expand Up @@ -23,11 +23,11 @@

namespace nd4j {
namespace ops {
CUSTOM_OP_IMPL(unsorted_segment_mean, 2, 1, false, 0, 1) {
CUSTOM_OP_IMPL(unsorted_segment_mean, 2, 1, false, 0, 0) {
auto input = INPUT_VARIABLE(0);
auto idxSegments = INPUT_VARIABLE(1);
auto segmentedOutput = OUTPUT_VARIABLE(0);
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_mean: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_mean: segment indexes array length should be equal to the input first dimension, but %i != %i.", idxSegments->lengthOf(), input->sizeAt(0));
Expand All @@ -53,7 +53,7 @@ namespace nd4j {
auto in = inputShape->at(0);
int outRank = shape::rank(in);
Nd4jLong* outputShape = nullptr;
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);

Expand Down
Expand Up @@ -23,11 +23,11 @@

namespace nd4j {
namespace ops {
CUSTOM_OP_IMPL(unsorted_segment_min, 2, 1, false, 0, 1) {
CUSTOM_OP_IMPL(unsorted_segment_min, 2, 1, false, 0, 0) {
auto input = INPUT_VARIABLE(0);
auto idxSegments = INPUT_VARIABLE(1);
auto segmentedOutput = OUTPUT_VARIABLE(0);
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_min: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_min: segment indexes array length should be equal to the input first dimension, but %i != %i.", idxSegments->lengthOf(), input->sizeAt(0));

Expand All @@ -46,7 +46,7 @@ namespace nd4j {
Nd4jLong* in = inputShape->at(0);
int outRank = shape::rank(in);
Nd4jLong* outputShape = nullptr;
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);

Expand Down
Expand Up @@ -23,11 +23,11 @@

namespace nd4j {
namespace ops {
CUSTOM_OP_IMPL(unsorted_segment_prod, 2, 1, false, 0, 1) {
CUSTOM_OP_IMPL(unsorted_segment_prod, 2, 1, false, 0, 0) {
auto input = INPUT_VARIABLE(0);
auto idxSegments = INPUT_VARIABLE(1);
auto segmentedOutput = OUTPUT_VARIABLE(0);
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_prod: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_prod: segment indexes array length should be equal to the input first dimension, but %i != %i.", idxSegments->lengthOf(), input->sizeAt(0));

Expand All @@ -46,7 +46,7 @@ namespace nd4j {
auto in = inputShape->at(0);
int outRank = shape::rank(in);
Nd4jLong* outputShape = nullptr;
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);

Expand Down
Expand Up @@ -23,11 +23,11 @@

namespace nd4j {
namespace ops {
CUSTOM_OP_IMPL(unsorted_segment_sqrt_n, 2, 1, false, 0, 1) {
CUSTOM_OP_IMPL(unsorted_segment_sqrt_n, 2, 1, false, 0, 0) {
auto input = INPUT_VARIABLE(0);
auto idxSegments = INPUT_VARIABLE(1);
auto segmentedOutput = OUTPUT_VARIABLE(0);
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_sqrt_n: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_sqrt_n: segment indexes array length should be equal to the input first dimension, but %i != %i.", idxSegments->lengthOf(), input->sizeAt(0));

Expand All @@ -46,7 +46,7 @@ namespace nd4j {
auto in = inputShape->at(0);
int outRank = shape::rank(in);
Nd4jLong* outputShape = nullptr;
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);

Expand Down
Expand Up @@ -23,11 +23,11 @@

namespace nd4j {
namespace ops {
CUSTOM_OP_IMPL(unsorted_segment_sum, 2, 1, false, 0, 1) {
CUSTOM_OP_IMPL(unsorted_segment_sum, 2, 1, false, 0, 0) {
auto input = INPUT_VARIABLE(0);
auto idxSegments = INPUT_VARIABLE(1);
auto segmentedOutput = OUTPUT_VARIABLE(0);
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_sum: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_sum: segment indexes array length should be equal to the input first dimension, but %i != %i.", idxSegments->lengthOf(), input->sizeAt(0));

Expand All @@ -52,7 +52,7 @@ namespace nd4j {
auto in = inputShape->at(0);
int outRank = shape::rank(in);
Nd4jLong* outputShape = nullptr;
Nd4jLong numOfClasses = INT_ARG(0);
Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);

Expand Down
@@ -0,0 +1,85 @@
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/

//
// @author raver119@gmail.com
//

#include <op_boilerplate.h>
#if NOT_EXCLUDED(OP_evaluate_reduction_shape)

#include <ops/declarable/CustomOperations.h>

namespace nd4j {
namespace ops {
    //////////////////////////////////////////////////////////////////////////
    // evaluate_reduction_shape: computes the shape an array would have after
    // a reduction over the given axes, without performing the reduction.
    //
    // Inputs:
    //   0 - integer vector holding the input array's shape
    //   1 - integer vector holding the axes to reduce over
    // Boolean args (both optional, default false):
    //   0 - keepDims:  reduced dimensions are kept (with size 1)
    //   1 - oldFormat: legacy mode, resulting rank never drops below 2
    // Output:
    //   0 - INT64 vector with the post-reduction shape
    CUSTOM_OP_IMPL(evaluate_reduction_shape, 2, 1, false, 0, 0) {
        auto inputShape = INPUT_VARIABLE(0);
        auto axis = INPUT_VARIABLE(1)->asVectorT<int>();
        auto keepDims = block.numB() > 0 ? B_ARG(0) : false;
        auto oldFormat = block.numB() > 1 ? B_ARG(1) : false;
        auto output = OUTPUT_VARIABLE(0);

        auto shape = inputShape->asVectorT<Nd4jLong>();

        // build a temporary shapeInfo for the requested shape, then let
        // ShapeUtils evaluate the reduced shapeInfo from it
        auto tempShapeInfo = ShapeBuilders::createShapeInfo(nd4j::DataType::INT64, 'c', shape, block.workspace());
        auto tempReductionShapeInfo = ShapeUtils::evalReduceShapeInfo('c', axis, tempShapeInfo, keepDims, oldFormat, block.workspace());

        const int reducedRank = shape::rank(tempReductionShapeInfo);

        // validate before writing; release the temporaries on the failure
        // path too — REQUIRE_TRUE returns early and would otherwise leak them
        if (output->lengthOf() != reducedRank) {
            RELEASE(tempReductionShapeInfo, block.workspace());
            RELEASE(tempShapeInfo, block.workspace());
            REQUIRE_TRUE(false, 0, "evaluate_reduction_shape: output length should be %i, but got %i instead", reducedRank, output->lengthOf());
        }

        // shapeInfo layout starts with [rank, dim_0, ..., dim_{rank-1}, ...],
        // so the dimensions live at offsets 1..rank
        for (int e = 0; e < reducedRank; e++)
            output->p(e, tempReductionShapeInfo[e + 1]);

        // we must release temporary shapeInfo
        RELEASE(tempReductionShapeInfo, block.workspace());
        RELEASE(tempShapeInfo, block.workspace());

        return Status::OK();
    }

    DECLARE_TYPES(evaluate_reduction_shape) {
        getOpDescriptor()
                ->setAllowedInputTypes(0, {ALL_INTS})
                ->setAllowedInputTypes(1, {ALL_INTS})
                ->setAllowedOutputTypes(0, nd4j::DataType::INT64);
    }

    DECLARE_SHAPE_FN(evaluate_reduction_shape) {
        auto input = INPUT_VARIABLE(0);
        auto axis = INPUT_VARIABLE(1)->asVectorT<int>();

        auto keepDims = block.numB() > 0 ? B_ARG(0) : false;
        auto oldFormat = block.numB() > 1 ? B_ARG(1) : false;

        // the input is a shape vector, so its length equals the array's rank
        Nd4jLong length = input->lengthOf();

        // without keepDims every reduced axis disappears from the shape
        if (!keepDims)
            length -= axis.size();

        // legacy format never reports a rank below 2
        if (oldFormat)
            length = nd4j::math::nd4j_max<int>(2, length);

        return SHAPELIST(ShapeBuilders::createVectorShapeInfo(nd4j::DataType::INT64, length, block.workspace()));
    }
}
}
#endif
Expand Up @@ -33,7 +33,7 @@ CUSTOM_OP_IMPL(histogram_fixed_width, 2, 1, false, 0, 0) {
auto range = INPUT_VARIABLE(1);
auto output = OUTPUT_VARIABLE(0);

const int nbins = block.getIArguments()->empty() ? 100 : INT_ARG(0);
const int nbins = block.width() == 3 ? INPUT_VARIABLE(2)->e<int>(0) : block.getIArguments()->empty() ? 100 : INT_ARG(0);

const double leftEdge = range->e<double>(0);
const double rightEdge = range->e<double>(1);
Expand All @@ -56,7 +56,7 @@ DECLARE_TYPES(histogram_fixed_width) {
//////////////////////////////////////////////////////////////////////////
DECLARE_SHAPE_FN(histogram_fixed_width) {

const int nbins = block.getIArguments()->empty() ? 100 : INT_ARG(0);
const int nbins = block.width() == 3 ? INPUT_VARIABLE(2)->e<int>(0) : block.getIArguments()->empty() ? 100 : INT_ARG(0);
auto outShapeInfo = ShapeBuilders::createVectorShapeInfo(DataType::INT64, nbins, block.workspace());

return SHAPELIST(outShapeInfo);
Expand Down
12 changes: 6 additions & 6 deletions libnd4j/include/ops/declarable/headers/parity_ops.h
Expand Up @@ -1011,7 +1011,7 @@ namespace nd4j {
* tensor with max values according to indices sets.
*/
#if NOT_EXCLUDED(OP_unsorted_segment_max)
DECLARE_CUSTOM_OP(unsorted_segment_max, 2, 1, false, 0, 1);
DECLARE_CUSTOM_OP(unsorted_segment_max, 2, 1, false, 0, 0);
#endif
#if NOT_EXCLUDED(OP_unsorted_segment_max_bp)
DECLARE_CUSTOM_OP(unsorted_segment_max_bp, 3, 2, false, 0, 1);
Expand All @@ -1031,7 +1031,7 @@ namespace nd4j {
* tensor with min values according to indices sets.
*/
#if NOT_EXCLUDED(OP_unsorted_segment_min_bp)
DECLARE_CUSTOM_OP(unsorted_segment_min, 2, 1, false, 0, 1);
DECLARE_CUSTOM_OP(unsorted_segment_min, 2, 1, false, 0, 0);
#endif
#if NOT_EXCLUDED(OP_unsorted_segment_min_bp)
DECLARE_CUSTOM_OP(unsorted_segment_min_bp, 3, 2, false, 0, 1);
Expand All @@ -1051,7 +1051,7 @@ namespace nd4j {
* tensor with sum of values according to indices sets.
*/
#if NOT_EXCLUDED(OP_unsorted_segment_sum)
DECLARE_CUSTOM_OP(unsorted_segment_sum, 2, 1, false, 0, 1);
DECLARE_CUSTOM_OP(unsorted_segment_sum, 2, 1, false, 0, 0);
#endif
#if NOT_EXCLUDED(OP_unsorted_segment_sum_bp)
DECLARE_CUSTOM_OP(unsorted_segment_sum_bp, 3, 2, false, 0, 1);
Expand All @@ -1071,7 +1071,7 @@ namespace nd4j {
* tensor with product of values according to indices sets.
*/
#if NOT_EXCLUDED(OP_unsorted_segment_prod)
DECLARE_CUSTOM_OP(unsorted_segment_prod, 2, 1, false, 0, 1);
DECLARE_CUSTOM_OP(unsorted_segment_prod, 2, 1, false, 0, 0);
#endif
#if NOT_EXCLUDED(OP_unsorted_segment_prod_bp)
DECLARE_CUSTOM_OP(unsorted_segment_prod_bp, 3, 2, false, 0, 1);
Expand All @@ -1091,7 +1091,7 @@ namespace nd4j {
* tensor with average of values according to indices sets.
*/
#if NOT_EXCLUDED(OP_unsorted_segment_mean)
DECLARE_CUSTOM_OP(unsorted_segment_mean, 2, 1, false, 0, 1);
DECLARE_CUSTOM_OP(unsorted_segment_mean, 2, 1, false, 0, 0);
#endif
#if NOT_EXCLUDED(OP_unsorted_segment_mean_bp)
DECLARE_CUSTOM_OP(unsorted_segment_mean_bp, 3, 2, false, 0, 1);
Expand All @@ -1111,7 +1111,7 @@ namespace nd4j {
* tensor with average of values according to indices sets.
*/
#if NOT_EXCLUDED(OP_unsorted_segment_sqrt)
DECLARE_CUSTOM_OP(unsorted_segment_sqrt_n, 2, 1, false, 0, 1);
DECLARE_CUSTOM_OP(unsorted_segment_sqrt_n, 2, 1, false, 0, 0);
#endif
#if NOT_EXCLUDED(OP_unsorted_segment_sqrt_n_bp)
DECLARE_CUSTOM_OP(unsorted_segment_sqrt_n_bp, 3, 2, false, 0, 1);
Expand Down
5 changes: 5 additions & 0 deletions libnd4j/include/ops/declarable/headers/shape.h
Expand Up @@ -94,6 +94,11 @@ namespace nd4j {
#if NOT_EXCLUDED(OP_broadcast_to)
DECLARE_CUSTOM_OP(broadcast_to, 2, 1, false, 0, 0);
#endif


#if NOT_EXCLUDED(OP_evaluate_reduction_shape)
DECLARE_CUSTOM_OP(evaluate_reduction_shape, 2, 1, false, 0, 0);
#endif
}
}

Expand Down
10 changes: 6 additions & 4 deletions libnd4j/include/ops/declarable/impl/LegacyReduceBoolOp.cpp
Expand Up @@ -43,21 +43,23 @@ namespace nd4j {
int opNum = block.opNum() < 0 ? this->_opNum : block.opNum();
nd4j_debug("Executing LegacyReduceFloatOp: [%i]\n", opNum);

auto axis = *block.getAxis();

bool allAxes = false;

if (block.width() == 1) {
auto z = OUTPUT_VARIABLE(0);

if (block.getIArguments()->size() == x->rankOf())
if (axis.size() == x->rankOf())
allAxes = true;

if ((block.getIArguments()->size() == 0) ||
(block.getIArguments()->size() == 1 && INT_ARG(0) == MAX_INT) || allAxes) {
if ((axis.empty()) ||
(axis.size() == 1 && axis[0] == MAX_INT) || allAxes) {
// scalar
NativeOpExcutioner::execReduceBoolScalar(opNum, x->getBuffer(), x->getShapeInfo(), block.getTArguments()->data(), z->buffer(), z->shapeInfo());
} else {
// TAD
std::vector<int> dims(*block.getIArguments());
std::vector<int> dims(axis);

for (int e = 0; e < dims.size(); e++)
if (dims[e] < 0)
Expand Down
3 changes: 2 additions & 1 deletion libnd4j/include/ops/declarable/impl/LegacyReduceFloatOp.cpp
Expand Up @@ -44,11 +44,12 @@ namespace nd4j {
nd4j_debug("Executing LegacyReduceFloatOp: [%i]\n", opNum);

bool allAxes = false;
auto axis = *block.getAxis();

if (block.width() == 1) {
auto z = OUTPUT_VARIABLE(0);

if (block.getIArguments()->size() == x->rankOf())
if (axis.size() == x->rankOf())
allAxes = true;

// _axis.(block.getIArguments()->size() == 0) ||
Expand Down
9 changes: 5 additions & 4 deletions libnd4j/include/ops/declarable/impl/LegacyReduceLongOp.cpp
Expand Up @@ -43,21 +43,22 @@ namespace nd4j {
int opNum = block.opNum() < 0 ? this->_opNum : block.opNum();
nd4j_debug("Executing LegacyReduceFloatOp: [%i]\n", opNum);

auto axis = *block.getAxis();
bool allAxes = false;

if (block.width() == 1) {
auto z = OUTPUT_VARIABLE(0);

if (block.getIArguments()->size() == x->rankOf())
if (axis.size() == x->rankOf())
allAxes = true;

if ((block.getIArguments()->size() == 0) ||
(block.getIArguments()->size() == 1 && INT_ARG(0) == MAX_INT) || allAxes) {
if ((axis.empty()) ||
(axis.size() == 1 && axis[0] == MAX_INT) || allAxes) {
// scalar
NativeOpExcutioner::execReduceLongScalar(opNum, x->getBuffer(), x->getShapeInfo(), block.getTArguments()->data(), z->buffer(), z->shapeInfo());
} else {
// TAD
std::vector<int> dims(*block.getIArguments());
std::vector<int> dims(axis);

for (int e = 0; e < dims.size(); e++)
if (dims[e] < 0)
Expand Down

0 comments on commit d0e7378

Please sign in to comment.