Commit d263093

Fix the convolution bug; add checks in flatten

zjhellofss committed Mar 12, 2024
1 parent f21e20c
Showing 3 changed files with 23 additions and 17 deletions.
4 changes: 2 additions & 2 deletions source/layer/details/base_convolution.cpp
@@ -150,7 +150,7 @@ StatusCode BaseConvolutionLayer::Forward(const std::vector<std::shared_ptr<Tenso

   uint32_t kernel_h = this->weights_.at(0)->rows();
   uint32_t kernel_w = this->weights_.at(0)->cols();
-  CHECK(kernel_h > 0 && kernel_w > 0 && kernel_channel > 0)
+  CHECK(kernel_count > 0 && kernel_h > 0 && kernel_w > 0 && kernel_channel > 0)
       << "The size of kernel matrix in the convolution layer should be greater "
          "than zero";

@@ -161,7 +161,7 @@ StatusCode BaseConvolutionLayer::Forward(const std::vector<std::shared_ptr<Tenso
     CHECK(kernel->channels() == kernel_channel);
   }
 
-  if (this->kernel_matrix_arr_.empty()) {
+  if (kernel_matrix_arr_.size() != kernel_count) {
     InitIm2ColWeight();
   }
   const uint32_t batch_size = inputs.size();
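Two things change in base_convolution.cpp: the CHECK now also rejects a kernel_count of zero, and the im2col weight cache is rebuilt whenever the number of cached matrices no longer matches kernel_count (for instance after the weights are replaced), not only when the cache is empty. A minimal sketch of that guard, assuming a simplified class shape with hypothetical names (KernelCache, Ensure):

#include <cstddef>
#include <vector>

// Stand-in for BaseConvolutionLayer's lazy im2col cache; the real
// InitIm2ColWeight packs arma::fmat kernels instead of raw floats.
class KernelCache {
 public:
  explicit KernelCache(std::size_t kernel_count) : kernel_count_(kernel_count) {}

  void Ensure() {
    // cache_.empty() alone misses a stale cache whose size no longer
    // matches kernel_count; comparing sizes triggers a rebuild then too.
    if (cache_.size() != kernel_count_) {
      cache_.assign(kernel_count_, 0.0f);  // stand-in for the real packing
    }
  }

 private:
  std::size_t kernel_count_;
  std::vector<float> cache_;
};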
24 changes: 12 additions & 12 deletions source/layer/details/convolution.cpp
@@ -29,12 +29,13 @@
 namespace kuiper_infer {
 
 bool ConvolutionLayer::Is1x1KernelNoPadding(uint32_t kernel_h, uint32_t kernel_w) const {
-  if ((stride_h_ == 1 && stride_w_ == 1) && (dilation_h_ == 1 && dilation_w_ == 1) &&
-      (kernel_w == 1 && kernel_h == 1) && (padding_w_ == 0 && padding_h_ == 0)) {
-    return true;
-  } else {
-    return false;
+  if (stride_h_ == 1 && stride_w_ == 1 && dilation_h_ == 1 && dilation_w_ == 1 && kernel_w == 1 &&
+      kernel_h == 1) {
+    if (padding_w_ == 0 && padding_h_ == 0) {
+      return true;
+    }
   }
+  return false;
 }
 
 void ConvolutionLayer::InitIm2ColWeight() {
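The rewrite of Is1x1KernelNoPadding is behavior-preserving: the layer may skip im2col only when stride and dilation are 1, the kernel is 1x1, and there is no padding, because each output pixel is then a plain dot product over the input channels. Both the old and the new form reduce to one conjunction, as in this standalone sketch (hypothetical free function, not repo code):

#include <cstdint>

bool Is1x1NoPadding(uint32_t stride_h, uint32_t stride_w, uint32_t dilation_h,
                    uint32_t dilation_w, uint32_t kernel_h, uint32_t kernel_w,
                    uint32_t padding_h, uint32_t padding_w) {
  // True only for a 1x1, stride-1, dilation-1, padding-free convolution.
  return stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1 &&
         kernel_h == 1 && kernel_w == 1 && padding_h == 0 && padding_w == 0;
}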
@@ -61,13 +62,12 @@ void ConvolutionLayer::InitIm2ColWeight() {
       kernel_matrix_c = arma::fmat(row_len * kernel_c, 1);
     } else {
       kernel_matrix_c = arma::fmat(1, row_len * kernel_c);
-      const std::shared_ptr<Tensor<float>>& kernel = this->weights_.at(k);
-      for (uint32_t ic = 0; ic < kernel->channels(); ++ic) {
-        memcpy(kernel_matrix_c.memptr() + row_len * ic, kernel->matrix_raw_ptr(ic),
-               row_len * sizeof(float));
-      }
     }
 
+    const std::shared_ptr<Tensor<float>>& kernel = this->weights_.at(k);
+    for (uint32_t ic = 0; ic < kernel->channels(); ++ic) {
+      memcpy(kernel_matrix_c.memptr() + row_len * ic, kernel->matrix_raw_ptr(ic),
+             row_len * sizeof(float));
+    }
     kernel_matrix_arr_.at(k) = kernel_matrix_c;
   }
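This hunk moves the per-channel copy out of the else branch, so the packed kernel matrix also gets filled in the 1x1 case (a row_len * kernel_c column vector), not just in the general case (a row vector); the same memcpy serves both shapes because Armadillo stores an n x 1 and a 1 x n fmat as the same contiguous n-float buffer. A standalone illustration of that layout property (assumes Armadillo; not repo code):

#include <armadillo>
#include <cstring>

int main() {
  const int n = 8;
  const float src[n] = {0, 1, 2, 3, 4, 5, 6, 7};
  arma::fmat col(n, 1);  // shape used for the 1x1 kernel path
  arma::fmat row(1, n);  // shape used for the general path
  std::memcpy(col.memptr(), src, n * sizeof(float));
  std::memcpy(row.memptr(), src, n * sizeof(float));
  // Same bytes land in the same positions regardless of orientation.
  return col.at(3, 0) == row.at(0, 3) ? 0 : 1;
}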

@@ -147,7 +147,7 @@ void ConvolutionLayer::ConvGEMMBias(const arma::fmat& input_matrix, sftensor out
<< "The output tensor of the gemm function cannot be empty.";

kernel_index = kernel_index + group * kernel_count_group;
const arma::fmat& kernel = kernel_matrix_arr_.at(kernel_index);
const arma::fmat& kernel = this->kernel_matrix_arr_.at(kernel_index);

arma::fmat output(output_tensor->matrix_raw_ptr(kernel_index), output_h, output_w, false, true);
if (is_1x1conv_nopadding) {
12 changes: 9 additions & 3 deletions source/layer/details/flatten.cpp
@@ -58,9 +58,15 @@ StatusCode FlattenLayer::Forward(const std::vector<std::shared_ptr<Tensor<float>
     end_dim = total_dims + end_dim;
   }
 
-  CHECK(end_dim > start_dim) << "The end dim must greater than start dim";
-  CHECK(end_dim <= 3 && start_dim >= 1)
-      << "The end dim must less than two and start dim must greater than zero";
+  if (end_dim <= start_dim) {
+    LOG(ERROR) << "The end dim must greater than start dim";
+    return StatusCode::kInferParameterError;
+  }
+
+  if (end_dim > 3 || start_dim < 1) {
+    LOG(ERROR) << "The end dim must less than two and start dim must greater than zero";
+    return StatusCode::kInferParameterError;
+  }
 
   const uint32_t batch_size = inputs.size();
   for (uint32_t i = 0; i < batch_size; ++i) {
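In flatten.cpp the hard CHECKs become recoverable errors: glog's CHECK aborts the process when the condition is false, while LOG(ERROR) plus a StatusCode return leaves the decision to the caller. A minimal sketch of the pattern, assuming glog and a StatusCode enum shaped like the repo's (ValidateFlattenDims is hypothetical):

#include <cstdint>
#include <glog/logging.h>

enum class StatusCode { kSuccess, kInferParameterError };

StatusCode ValidateFlattenDims(int32_t start_dim, int32_t end_dim) {
  if (end_dim <= start_dim) {
    // CHECK(end_dim > start_dim) would have terminated the process here;
    // logging and returning a status keeps the failure recoverable.
    LOG(ERROR) << "The end dim must be greater than the start dim";
    return StatusCode::kInferParameterError;
  }
  return StatusCode::kSuccess;
}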
