Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

0/1D Einsum Layer Test #25567

Merged
merged 4 commits into from
May 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 15 additions & 3 deletions modules/dnn/src/layers/einsum_layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,12 +65,12 @@ static Mat Transpose(


bool IsTransposeRequired(size_t input_rank, const std::vector<size_t>& permutation) {
CV_Assert(input_rank == permutation.size());

// No transpose required for scalars
if (input_rank == 0){
if (input_rank == 0 || permutation.size() == 0){
return false;
}
CV_Assert(input_rank == permutation.size());

// Weeds out cases where permutation is something like [0, 1, 2] for a 3D input and so on
bool transpose_required = false;
Expand Down Expand Up @@ -616,6 +616,10 @@ void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr)
// variable to hold processed version of the original input
MatShape input_dims = shape(input);

if (inputSubscriptIndices.empty()){
homogenizedInputDims.emplace_back(MatShape(numLetterIndices, 1));
continue;
}
const auto& currSubscriptIndices = inputSubscriptIndices[inputIter];

// There should be subscript index (subscript label) for each dim of the input
Expand Down Expand Up @@ -870,6 +874,9 @@ void LayerEinsumImpl::processEquation(const std::vector<MatShape>& inputs)
// Check if number of tokens is equal to number of inputs.
// For instance "ij, jk -> ik" needs to have 2 input tensors
int num_input_tensors = inputs.size();
if (lhs_eq_tokens.empty() || (lhs_eq_tokens.size() == 1 && lhs_eq_tokens[0].empty() && lhs_eq == ",") ) {
return;
}
CV_CheckEQ(static_cast<int>(lhs_eq_tokens.size()), num_input_tensors,
"Number of input tensors does not match the number of subscripts in the input equation");

Expand Down Expand Up @@ -1363,7 +1370,12 @@ Mat LayerEinsumImpl::batchwiseMatMul(
}

output = Mat(M, N, reshapedInput1.type());
fastGemm(false, false, 1.0, reshapedInput1, reshapedInput2, 0.0, output, opt);
if (shape(reshapedInput1).empty() && shape(reshapedInput2).empty())
{
output = reshapedInput1.mul(reshapedInput2); // fastGemm does not support 0D * 0D multiplication
} else {
fastGemm(false, false, 1.0, reshapedInput1, reshapedInput2, 0.0, output, opt);
}

output = output.reshape(1, {1, M, N});
}
Expand Down
75 changes: 75 additions & 0 deletions modules/dnn/test/test_layers_1d.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -682,4 +682,79 @@ INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Const_Test, testing::Values(
std::vector<int>({4, 1})
));

// Parameterized test fixture: each parameter is (input shape, einsum equation).
// Both inputs to the layer share the same shape; an empty shape vector denotes a 0-D (scalar) Mat.
typedef testing::TestWithParam<tuple<std::vector<int>, std::string>> Layer_Einsum_Test;
// Verifies the Einsum layer on 0-D and 1-D inputs by comparing the layer output
// against a reference result computed directly with cv::Mat operations.
TEST_P(Layer_Einsum_Test, Accuracy_01D)
{
auto tup = GetParam();
std::vector<int> input_shape = std::get<0>(tup);
std::string equation = std::get<1>(tup);

// Build the Einsum layer: two inputs, one output, both input shapes identical.
LayerParams lp;
lp.type = "Einsum";
lp.name = "EinsumLayer";
lp.set("equation", equation);
lp.set("inputSize", 2);
lp.set("outputSize", 1);
// NOTE(review): for the scalar case input_shape is empty, so &input_shape[0]
// dereferences an empty vector — presumably size()==0 keeps it unread; confirm.
lp.set("inputShapes0", DictValue::arrayInt(&input_shape[0], input_shape.size()));
lp.set("inputShapes1", DictValue::arrayInt(&input_shape[0], input_shape.size()));

Ptr<Layer> layer = EinsumLayer::create(lp);

// dims == 0 yields a 0-D Mat for the ",->"/scalar case.
cv::Mat input1(input_shape.size(), input_shape.data(), CV_32F);
cv::Mat input2(input_shape.size(), input_shape.data(), CV_32F);
// Random normal data; exact values don't matter, only agreement with the reference.
cv::randn(input1, 0.0, 1.0); cv::randn(input2, 0.0, 1.0);

std::vector<Mat> inputs = {input1, input2};
std::vector<Mat> outputs;
runLayer(layer, inputs, outputs);
ASSERT_EQ(1, outputs.size());

// create output_ref to compare with outputs
cv::Mat output_ref;
int size[] = {1};
if (equation == ",->"){
// scalar * scalar -> scalar
output_ref = input1.mul(input2);
}else if (equation == "i, i->i"){
// elementwise product, shape preserved
output_ref = input1.mul(input2);
} else if (equation == "i, i->"){
// dot product: elementwise product reduced to a 0-D scalar Mat
output_ref = input1.mul(input2);
cv::Scalar sum = cv::sum(output_ref);
output_ref = cv::Mat(0, nullptr, CV_32F, sum[0]);
} else if (equation == "ij, ij->ij"){
// elementwise product, shape preserved
output_ref = input1.mul(input2);
} else if (equation == "ij, ij->i"){
// elementwise product followed by reduction over the second axis
output_ref = input1.mul(input2);
if (input_shape[0] == 1){
// single row: reduce everything to one value, shape {1}
cv::Scalar sum = cv::sum(output_ref);
output_ref = cv::Mat(1, size, CV_32F, sum[0]);
} else if (input_shape[1] == 1){
// single column: reduction is a no-op, just drop the trailing axis
size[0] = input_shape[0];
output_ref = output_ref.reshape(1, 1, size);
} else {
// general case: row-wise sum, then flatten to 1-D of length input_shape[0]
cv::reduce(output_ref, output_ref, 1, cv::REDUCE_SUM, CV_32F);
size[0] = input_shape[0];
output_ref = output_ref.reshape(1, 1, size);
}
} else {
// unrecognized equation: empty reference, so the shape assert below fails loudly
output_ref = cv::Mat();
}

// Shapes must match exactly (including 0-D vs 1-D), then values within tolerance.
ASSERT_EQ(shape(output_ref), shape(outputs[0]));
normAssert(output_ref, outputs[0]);
}

// Instantiate the parameterized test over 0-D, 1-D and degenerate 2-D shapes
// ({} = scalar, {1}, {4}, {1,4}, {4,1}, {4,4}) combined with the einsum
// equations the Accuracy_01D reference computation supports.
INSTANTIATE_TEST_CASE_P(/*nothing*/, Layer_Einsum_Test, testing::Values(
std::make_tuple(std::vector<int>({}), std::string(",->")),
std::make_tuple(std::vector<int>({1}), std::string("i, i->i")),
std::make_tuple(std::vector<int>({1}), std::string("i, i->")),
std::make_tuple(std::vector<int>({4}), std::string("i, i->i")),
std::make_tuple(std::vector<int>({4}), std::string("i, i->")),
std::make_tuple(std::vector<int>({1, 4}), std::string("ij, ij->ij")),
std::make_tuple(std::vector<int>({4, 1}), std::string("ij, ij->ij")),
std::make_tuple(std::vector<int>({1, 4}), std::string("ij, ij->i")),
std::make_tuple(std::vector<int>({4, 1}), std::string("ij, ij->i")),
std::make_tuple(std::vector<int>({4, 4}), std::string("ij, ij->i"))
));


}}
Loading