Merge remote-tracking branch 'upstream/3.4' into merge-3.4

alalek committed Mar 5, 2022
2 parents 44c2c77 + a082375 commit 901e0dd
Showing 9 changed files with 74 additions and 42 deletions.
2 changes: 1 addition & 1 deletion modules/dnn/src/darknet/darknet_io.cpp
@@ -376,7 +376,7 @@ namespace cv {
int begin[] = {0, split_size * group_id, 0, 0};
cv::dnn::DictValue paramBegin = cv::dnn::DictValue::arrayInt(begin, 4);

-                int end[] = {-1, begin[1] + split_size, -1, -1};
+                int end[] = {INT_MAX, begin[1] + split_size, INT_MAX, INT_MAX};
cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end, 4);

darknet::LayerParameter lp;
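Note: INT_MAX rather than -1 now marks "slice to the end of this axis" in the generated Slice layer. The normalizeRange() helper added in slice_layer.cpp below clamps it to the real axis size once the input shape is known.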
52 changes: 37 additions & 15 deletions modules/dnn/src/layers/slice_layer.cpp
@@ -64,29 +64,53 @@ namespace cv
namespace dnn
{

-void sliceRangesFromShape(const MatShape& inpShape, int& axis, std::vector<std::vector<cv::Range> >& sliceRanges)
+Range normalizeRange(const Range& input_range, int n)
+{
+    Range range = input_range;
+
+    range.start = std::min(std::max(range.start, -n), n - 1);
+    if (range.start < 0)
+    {
+        range.start += n;
+    }
+
+    range.end = std::min(std::max(range.end, -n), n);
+    if (range.end < 0)
+    {
+        range.end += n;
+    }
+
+    return range;
+}
+
+std::vector<std::vector<cv::Range> > finalizeSliceRange(const MatShape& inpShape, int& axis,
+                                                        const std::vector<std::vector<cv::Range> >& inputSliceRanges)
{
+    std::vector<std::vector<cv::Range> > sliceRanges = inputSliceRanges;
    CV_Assert(inpShape.size() > 0);
    bool axisNeg = (axis < 0);
    axis = (axis + static_cast<int>(inpShape.size())) % inpShape.size();
-    int n = inpShape[axis];

    for (size_t i = 0; i < sliceRanges.size(); ++i){
        std::vector<Range>& ranges = sliceRanges[i];
        if (axisNeg)
        {
            ranges.insert(ranges.begin(), axis, Range::all());
        }
-        Range& range = ranges.back();

-        if (range.start >= 0)
+        for (size_t j = 0; j < ranges.size(); ++j)
        {
-            continue;
-        }
+            int n = inpShape[j];
+            if (n <= 0)
+            {
+                continue;
+            }

-        CV_Assert(n != 0);
-        range.start = (n + range.start) % n;
+            ranges[j] = normalizeRange(ranges[j], n);
+        }
    }
+
+    return sliceRanges;
}

class SliceLayerImpl : public SliceLayer
@@ -136,7 +160,7 @@ class SliceLayerImpl : public SliceLayer
{
int size = sizeOrEnd;
CV_Assert(size == -1 || size > 0); // -1 value means range [start, axis_size).
-            sliceRanges[0][i].end = size > 0 ? (start + size) : -1; // We'll finalize a negative value later.
+            sliceRanges[0][i].end = size > 0 ? (start + size) : INT_MAX; // We'll finalize a negative value later.
}
else
{
@@ -186,8 +210,7 @@ class SliceLayerImpl : public SliceLayer
MatShape inpShape = inputs[0];

int axis_rw = axis;
-        std::vector<std::vector<cv::Range> > sliceRanges_rw = sliceRanges;
-        sliceRangesFromShape(inpShape, axis_rw, sliceRanges_rw);
+        std::vector<std::vector<cv::Range> > sliceRanges_rw = finalizeSliceRange(inpShape, axis_rw, sliceRanges);

if (!sliceRanges_rw.empty())
{
@@ -198,7 +221,7 @@ class SliceLayerImpl : public SliceLayer
for (int j = 0; j < sliceRanges_rw[i].size(); ++j)
{
if (shapesInitialized || inpShape[j] > 0)
-                    outputs[i][j] = normalize_axis_range(sliceRanges_rw[i][j], inpShape[j]).size();
+                    outputs[i][j] = normalizeRange(sliceRanges_rw[i][j], inpShape[j]).size();

if (!sliceSteps.empty() && (i < sliceSteps.size()) && (j < sliceSteps[i].size()) && (sliceSteps[i][j] > 1))
outputs[i][j] = (outputs[i][j] + sliceSteps[i][j] - 1) / sliceSteps[i][j];
@@ -235,8 +258,7 @@ class SliceLayerImpl : public SliceLayer
CV_Assert(inputs.size() == 1);
const MatSize& inpShape = inputs[0].size;

-        sliceRangesFromShape(shape(inputs[0]), axis, sliceRanges);
-        finalSliceRanges = sliceRanges;
+        finalSliceRanges = finalizeSliceRange(shape(inputs[0]), axis, sliceRanges);

if (sliceRanges.empty())
{
@@ -266,7 +288,7 @@ class SliceLayerImpl : public SliceLayer
// Clamp.
for (int j = 0; j < finalSliceRanges[i].size(); ++j)
{
-                finalSliceRanges[i][j] = normalize_axis_range(finalSliceRanges[i][j], inpShape[j]);
+                finalSliceRanges[i][j] = normalizeRange(finalSliceRanges[i][j], inpShape[j]);
}
}

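The pair of helpers above is the crux of the commit. Here is a minimal standalone sketch (illustration only: a stand-in Range struct replaces cv::Range, and nothing below is library code) of how the new convention separates "to the end of the axis" (INT_MAX) from a numpy-style negative end, two cases the old -1 sentinel conflated:

    #include <algorithm>
    #include <climits>
    #include <cstdio>

    struct Range { int start, end; };  // stand-in for cv::Range

    // Mirrors normalizeRange() above: clamp into [-n, n], then wrap negatives once.
    Range normalizeRange(Range r, int n)
    {
        r.start = std::min(std::max(r.start, -n), n - 1);
        if (r.start < 0) r.start += n;
        r.end = std::min(std::max(r.end, -n), n);
        if (r.end < 0) r.end += n;
        return r;
    }

    int main()
    {
        Range full = normalizeRange({0, INT_MAX}, 5);   // -> [0, 5): the whole axis
        Range tail = normalizeRange({-2, INT_MAX}, 5);  // -> [3, 5): last two elements
        Range crop = normalizeRange({1, -1}, 5);        // -> [1, 4): numpy-style x[1:-1]
        std::printf("[%d,%d) [%d,%d) [%d,%d)\n",
                    full.start, full.end, tail.start, tail.end, crop.start, crop.end);
        return 0;
    }

Under the old convention the first and third cases collided: end == -1 had to mean "whole axis", so importers encoded numpy's -1 as -2 (see the removed --finish lines in onnx_importer.cpp below).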
23 changes: 12 additions & 11 deletions modules/dnn/src/onnx/onnx_importer.cpp
@@ -1275,13 +1275,12 @@ void ONNXImporter::parseSlice(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
if (axis > 0) {
CV_CheckLE(axis, 1024, "Slice layer can't have more than 1024 axes"); // arbitrary limit
begin.resize(axis, 0);
-            end.resize(axis, -1);
+            end.resize(axis, INT_MAX);
}
for (int i = 0; i < starts.size(); ++i)
{
begin.push_back(starts.get<int>(i));
-            int finish = ends.get<int>(i);
-            end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
+            end.push_back(ends.get<int>(i));
}
} else { // inp_size > 1
CV_Assert(inp_size >= 3);
@@ -1305,14 +1304,10 @@ void ONNXImporter::parseSlice(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
const int* ends = end_blob.ptr<int>();
if (axis > 0) {
begin.resize(axis, 0);
-            end.resize(axis, -1);
+            end.resize(axis, INT_MAX);
}
std::copy(starts, starts + start_blob.total(), std::back_inserter(begin));
-        for (int i = 0; i < end_blob.total(); ++i)
-        {
-            int finish = ends[i];
-            end.push_back((finish < 0) ? --finish : finish); // numpy doesn't include last dim
-        }
+        std::copy(ends, ends + end_blob.total(), std::back_inserter(end));

if (inp_size == 5) {
CV_Assert(constBlobs.find(node_proto.input(4)) != constBlobs.end());
@@ -2485,9 +2480,15 @@ void ONNXImporter::parseExpand(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)

if (!haveVariables)
{
-        if (broadcast_axes.size() != 1)
+        if (broadcast_axes.size() > 1)
CV_Error(Error::StsNotImplemented, "Expand op doesn't support multiple axes for constant input");

+        if (broadcast_axes.empty())
+        {
+            addConstant(output_name, getBlob(node_proto, 0));
+            return;
+        }

Mat input = getBlob(node_proto, 0);
input = input.reshape(0, total(inpShape, 0, broadcast_axes[0]));
Mat output = cv::repeat(input, 1, targetShape[broadcast_axes[0]]);
@@ -2708,7 +2709,7 @@ void ONNXImporter::parseGather(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
sliceLp.type = "Slice";
sliceLp.name = inpShape.size() > 1 ? layerParams.name + "/slice" : layerParams.name;
std::vector<int> begin(inpShape.size(), 0);
-    std::vector<int> end(inpShape.size(), -1);
+    std::vector<int> end(inpShape.size(), INT_MAX);
begin[axis] = index;
end[axis] = index + 1;

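With ends now copied through verbatim, parseSlice no longer pre-compensates for numpy semantics. A hand-worked example (old behaviour inferred from the removed --finish lines, reusing the sketch above): an ONNX Slice with starts=[1], ends=[-1] on a 5-element axis means x[1:-1], i.e. the range [1, 4). The old importer stored end = -2 so that the layer's inclusive handling of negative ends landed on 4; the new importer stores -1 unchanged, and

    Range r = normalizeRange({1, -1}, 5);  // r.start == 1, r.end == 4

yields [1, 4) directly.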
13 changes: 6 additions & 7 deletions modules/dnn/src/tensorflow/tf_importer.cpp
@@ -1681,10 +1681,8 @@ void TFImporter::parseStridedSlice(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
int end_mask = getLayerAttr(layer, "end_mask").i();
for (int i = 0; i < num; ++i)
{
-        if (ends.at<int>(i) < 0)
-            ends.at<int>(i) -= 1;
        if (end_mask & (1 << i))
-            ends.at<int>(i) = -1;
+            ends.at<int>(i) = INT_MAX;
if (strides.at<int>(i) != 1)
CV_Error(Error::StsNotImplemented,
format("StridedSlice with stride %d", strides.at<int>(i)));
@@ -1982,15 +1980,16 @@ void TFImporter::parseConv2DBackpropInput(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
int64_t pads[8];
bool explicit_pads = getExplicitPadding(layerParams, layer, pads);
int64_t begs[4] = {};
-    int64_t ends[4] = {-1, -1, -1, -1};
+    int64_t ends[4] = {};
if (explicit_pads)
{
name += "/deconv";
layerParams.set("pad_mode", "VALID");
+        ends[0] = ends[1] = INT_MAX;
for (int i = 2; i < 4; ++i) // begins=[0, 0, a, b], ends=[-1, -1, c, d]
{
begs[i] = pads[2*i];
-            ends[i] = -1 - pads[2*i + 1];
+            ends[i] = -pads[2*i + 1];
}
}

@@ -2010,8 +2009,8 @@ void TFImporter::parseConv2DBackpropInput(tensorflow::GraphDef& net, const tensorflow::NodeDef& layer, LayerParams& layerParams)
const int strideX = layerParams.get<int>("stride_w");
Mat outShape = getTensorContent(getConstBlob(layer, value_id, 0));
int shift = (getDataLayout(layer) == DATA_LAYOUT_NCHW);
-    const int outH = outShape.at<int>(1 + shift) + begs[2] - 1 - ends[2];
-    const int outW = outShape.at<int>(2 + shift) + begs[3] - 1 - ends[3];
+    const int outH = outShape.at<int>(1 + shift) + begs[2] - ends[2];
+    const int outW = outShape.at<int>(2 + shift) + begs[3] - ends[3];
if (layerParams.get<String>("pad_mode") == "SAME")
{
layerParams.set("adj_w", (outW - 1) % strideX);
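A quick sanity check that the two parseConv2DBackpropInput changes cancel out (hand-worked from the lines above): with explicit pads t (top) and b (bottom) on the height axis, the old code stored ends[2] = -1 - b and computed outH = H + begs[2] - 1 - ends[2] = H + t - 1 - (-1 - b) = H + t + b; the new code stores ends[2] = -b and computes outH = H + begs[2] - ends[2] = H + t + b. Computed deconvolution output sizes are therefore unchanged.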
2 changes: 1 addition & 1 deletion modules/dnn/src/torch/torch_importer.cpp
@@ -954,7 +954,7 @@ struct TorchImporter
int size = scalarParams.get<int>("size");

int begins[] = {0, 0, size, size};
-        int ends[] = {-1, -1, -size - 1, -size - 1};
+        int ends[] = {INT_MAX, INT_MAX, -size, -size};

newModule->apiType = "Slice";
layerParams.set("begin", DictValue::arrayInt<int*>(&begins[0], 4));
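The same equivalence holds here (hand-worked, assuming the old importer relied on the layer's inclusive treatment of negative ends): SpatialZeroPadding with a negative size crops size pixels per border, and under the new convention end = -size on an axis of length n normalizes to n - size, e.g. normalizeRange({size, -size}, n) in the sketch above gives [size, n - size), which is exactly the crop the old -size - 1 encoding produced.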
2 changes: 1 addition & 1 deletion modules/dnn/test/test_layers.cpp
@@ -2082,7 +2082,7 @@ TEST_P(Layer_Test_Slice, variable_input_shape)
int targetId = get<1>(GetParam());

int begin[] = {0, 0, 0, 0};
-    int end[] = {-1, -1, -1, -1};
+    int end[] = {INT_MAX, INT_MAX, INT_MAX, INT_MAX};

Net net;
LayerParams lp;
6 changes: 3 additions & 3 deletions modules/photo/src/seamless_cloning.hpp
@@ -53,18 +53,18 @@ namespace cv
class Cloning
{
public:
-        void normalClone(const cv::Mat& destination, const cv::Mat &mask, const cv::Mat &wmask, cv::Mat &cloned, int flag);
+        void normalClone(const cv::Mat& destination, const cv::Mat &mask, cv::Mat &wmask, cv::Mat &cloned, int flag);
void illuminationChange(cv::Mat &I, cv::Mat &mask, cv::Mat &wmask, cv::Mat &cloned, float alpha, float beta);
void localColorChange(cv::Mat &I, cv::Mat &mask, cv::Mat &wmask, cv::Mat &cloned, float red_mul, float green_mul, float blue_mul);
void textureFlatten(cv::Mat &I, cv::Mat &mask, cv::Mat &wmask, float low_threshold, float high_threhold, int kernel_size, cv::Mat &cloned);

protected:

void initVariables(const cv::Mat &destination, const cv::Mat &binaryMask);
-        void computeDerivatives(const cv::Mat &destination, const cv::Mat &patch, const cv::Mat &binaryMask);
+        void computeDerivatives(const cv::Mat &destination, const cv::Mat &patch, cv::Mat &binaryMask);
void scalarProduct(cv::Mat mat, float r, float g, float b);
void poisson(const cv::Mat &destination);
-        void evaluate(const cv::Mat &I, const cv::Mat &wmask, const cv::Mat &cloned);
+        void evaluate(const cv::Mat &I, cv::Mat &wmask, const cv::Mat &cloned);
void dst(const Mat& src, Mat& dest, bool invert = false);
void solve(const Mat &img, Mat& mod_diff, Mat &result);

6 changes: 3 additions & 3 deletions modules/photo/src/seamless_cloning_impl.cpp
@@ -246,7 +246,7 @@ void Cloning::initVariables(const Mat &destination, const Mat &binaryMask)
filter_Y[j] = 2.0f * (float)std::cos(scale * (j + 1));
}

-void Cloning::computeDerivatives(const Mat& destination, const Mat &patch, const Mat &binaryMask)
+void Cloning::computeDerivatives(const Mat& destination, const Mat &patch, Mat &binaryMask)
{
initVariables(destination, binaryMask);

@@ -306,7 +306,7 @@ void Cloning::poisson(const Mat &destination)
}
}

-void Cloning::evaluate(const Mat &I, const Mat &wmask, const Mat &cloned)
+void Cloning::evaluate(const Mat &I, Mat &wmask, const Mat &cloned)
{
bitwise_not(wmask,wmask);

@@ -320,7 +320,7 @@ void Cloning::evaluate(const Mat &I, const Mat &wmask, const Mat &cloned)
merge(output,cloned);
}

-void Cloning::normalClone(const Mat &destination, const Mat &patch, const Mat &binaryMask, Mat &cloned, int flag)
+void Cloning::normalClone(const Mat &destination, const Mat &patch, Mat &binaryMask, Mat &cloned, int flag)
{
const int w = destination.cols;
const int h = destination.rows;
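The dropped const qualifiers are not cosmetic: evaluate() flips the mask in place with bitwise_not(wmask, wmask) (visible above), and the matching const removals on normalClone() and computeDerivatives() indicate the same mask object flows down that path, so the old signatures promised a read-only mask that the code never honoured.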
10 changes: 10 additions & 0 deletions modules/python/test/test_misc.py
@@ -643,6 +643,16 @@ def test_class_from_submodule_has_global_alias(self):
msg="Classes from submodules and global module don't refer "
"to the same type")

+    def test_class_from_submodule_has_global_alias(self):
+        self.assertTrue(hasattr(cv.ml, "Boost"),
+                        msg="Class is not registered in the submodule")
+        self.assertTrue(hasattr(cv, "ml_Boost"),
+                        msg="Class from submodule doesn't have alias in the "
+                        "global module")
+        self.assertEqual(cv.ml.Boost, cv.ml_Boost,
+                         msg="Classes from submodules and global module don't refer "
+                         "to the same type")

def test_inner_class_has_global_alias(self):
self.assertTrue(hasattr(cv.SimpleBlobDetector, "Params"),
msg="Class is not registered as inner class")
Expand Down
