【Hackathon 6th Fundable Projects 2 No.2】Fix bugprone-branch-clone-final #65589

Open · wants to merge 8 commits into base: develop
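This PR addresses clang-tidy's bugprone-branch-clone check, which flags if / else if chains and switch statements whose branches contain identical code. The diffs below apply one of two fixes: where the duplicated branches are intentional (kept for readability or for symmetry between stages), the warning is suppressed with a // NOLINT comment; where the duplication is real, the cloned branches are merged by combining their conditions or restructuring the nesting (as in operator.cc, prelu_op.cc, kernel_dispatch.cc, and the cross-entropy SPMD rule). A minimal, hypothetical sketch of both patterns follows; the function names are illustrative and not taken from the PR.

// Hypothetical example: code that triggers bugprone-branch-clone,
// followed by the two remediation patterns used in this PR.
#include <iostream>

// Both branches return the same value, so clang-tidy reports a branch clone.
bool create_value_clone(int stage) {
  if (stage == 0) {
    return true;
  } else if (stage == 1) {
    return true;
  }
  return false;
}

// Fix 1: keep the branches (they document distinct stages) and
// silence the check on the first branch.
bool create_value_nolint(int stage) {
  if (stage == 0) {  // NOLINT(bugprone-branch-clone)
    return true;
  } else if (stage == 1) {
    return true;
  }
  return false;
}

// Fix 2: merge the cloned branches by OR-ing their conditions.
bool create_value_merged(int stage) {
  if (stage == 0 || stage == 1) {
    return true;
  }
  return false;
}

int main() {
  std::cout << create_value_clone(0) << create_value_nolint(1)
            << create_value_merged(2) << std::endl;
  return 0;
}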
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/table/ctr_accessor.cc
@@ -281,7 +281,7 @@ int32_t CtrCommonAccessor::Update(float** update_values,
bool CtrCommonAccessor::CreateValue(int stage, const float* value) {
// stage == 0, pull
// stage == 1, push
if (stage == 0) {
if (stage == 0) { // NOLINT
return true;
} else if (stage == 1) {
// operation
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/table/ctr_double_accessor.cc
@@ -289,7 +289,7 @@ int32_t CtrDoubleAccessor::Update(float** update_values,
bool CtrDoubleAccessor::CreateValue(int stage, const float* value) {
// stage == 0, pull
// stage == 1, push
if (stage == 0) {
if (stage == 0) { // NOLINT
return true;
} else if (stage == 1) {
auto show = CtrDoublePushValue::Show(const_cast<float*>(value));
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/table/ctr_dymf_accessor.cc
@@ -310,7 +310,7 @@ int32_t CtrDymfAccessor::Update(float** update_values,
bool CtrDymfAccessor::CreateValue(int stage, const float* value) {
// stage == 0, pull
// stage == 1, push
if (stage == 0) {
if (stage == 0) { // NOLINT
return true;
} else if (stage == 1) {
// operation
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/table/memory_dense_table.cc
@@ -103,7 +103,7 @@ int32_t MemoryDenseTable::InitializeOptimizer() {
auto name = common.name();
auto attrs = common.attributes();

if (name == "sgd") {
if (name == "sgd") { // NOLINT
optimizer_ = std::make_shared<DSGD>(common, &values_);
optimizer_->SetGlobalLR(_global_lr);
} else if (name == "adam") {
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/table/sparse_accessor.cc
@@ -244,7 +244,7 @@ int32_t SparseAccessor::Update(float** update_values,
bool SparseAccessor::CreateValue(int stage, const float* value) {
// stage == 0, pull
// stage == 1, push
if (stage == 0) {
if (stage == 0) { // NOLINT
return true;
} else if (stage == 1) {
// operation
8 changes: 4 additions & 4 deletions paddle/fluid/distributed/ps/table/ssd_sparse_table.cc
@@ -882,7 +882,7 @@ int32_t SSDSparseTable::SaveWithStringMultiOutput(const std::string& path,
int shard_num,
int part_num,
int split_num) {
if (compress && (save_param == 0 || save_param == 3)) {
if (compress && (save_param == 0 || save_param == 3)) { // NOLINT
// return
// ::paddle::string::format_string("%s/part-%03d-%05d-%03d-%03d.gz",
// table_path, node_num, shard_num, part_num, split_num);
@@ -1191,7 +1191,7 @@ int32_t SSDSparseTable::SaveWithStringMultiOutput_v2(const std::string& path,
int part_num,
int split_num,
const char* prefix = "") {
if (compress && (save_param == 0 || save_param == 3)) {
if (compress && (save_param == 0 || save_param == 3)) { // NOLINT
return ::paddle::string::format_string("%s/%s/part-%05d-%03d.gz",
table_path,
prefix,
@@ -1624,7 +1624,7 @@ int32_t SSDSparseTable::SaveWithBinary(const std::string& path,
int shard_num,
int part_num,
int split_num) {
if (compress && (save_param == 0 || save_param == 3)) {
if (compress && (save_param == 0 || save_param == 3)) { // NOLINT
return paddle::string::format_string("%s/part-%03d-%05d-%03d-%03d.gz",
table_path,
node_num,
@@ -1945,7 +1945,7 @@ int32_t SSDSparseTable::SaveWithBinary_v2(const std::string& path,
int part_num,
int split_num,
const char* prefix = "") {
if (compress && (save_param == 0 || save_param == 3)) {
if (compress && (save_param == 0 || save_param == 3)) { // NOLINT
return paddle::string::format_string(
"%s/%s/part-%03d-%05d-%03d-%03d.gz",
table_path,
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_feed.cc
@@ -2132,7 +2132,7 @@ void SlotRecordInMemoryDataFeed::Init(const DataFeedDesc& data_feed_desc) {
size_t pos = pipe_command_.find(".so");
if (pos != std::string::npos) { // NOLINT
pos = pipe_command_.rfind('|');
if (pos == std::string::npos) {
if (pos == std::string::npos) { // NOLINT
so_parser_name_ = pipe_command_;
pipe_command_.clear();
} else {
4 changes: 2 additions & 2 deletions paddle/fluid/framework/hogwild_worker.cc
@@ -1400,7 +1400,7 @@ void HogwildWorker::TrainFilesWithProfiler() {
timeline.Pause();
read_time += timeline.ElapsedSec();
total_time += timeline.ElapsedSec();
if (infer_out_of_ins) {
if (infer_out_of_ins) { // NOLINT
for (size_t i = 0; i < ops_.size(); ++i) {
timeline.Start();
auto &op = ops_[i];
@@ -1620,7 +1620,7 @@ void HogwildWorker::TrainFiles() {
if (cur_batch <= 0 && !infer_out_of_ins) {
break;
}
if (infer_out_of_ins) {
if (infer_out_of_ins) { // NOLINT
for (auto &op : ops_) {
if (op->Type() == "c_broadcast") {
op->Run(*thread_scope_, place_);
4 changes: 2 additions & 2 deletions paddle/fluid/framework/new_executor/pir_interpreter.cc
@@ -779,7 +779,7 @@ void PirInterpreter::BuildInstruction() {
continue;
}
} else if (op.dialect()->name() == "cf") {
if (op.isa<pir::TuplePushOp>()) {
if (op.isa<pir::TuplePushOp>()) { // NOLINT
CREATE_INSTR(TuplePushInstruction);
} else if (op.isa<pir::TuplePopOp>()) {
CREATE_INSTR(TuplePopInstruction);
@@ -892,7 +892,7 @@ void PirInterpreter::BuildInstruction() {
.AsString();
VLOG(6) << "process " << op_name;

if (op.isa<paddle::dialect::OneDNNPhiKernelOp>()) {
if (op.isa<paddle::dialect::OneDNNPhiKernelOp>()) { // NOLINT
CREATE_INSTR(OneDNNPhiKernelInstruction);
} else if (op.isa<paddle::dialect::OneDNNMixedPhiKernelOp>()) {
CREATE_INSTR(OneDNNMixedPhiKernelInstruction);
34 changes: 15 additions & 19 deletions paddle/fluid/framework/operator.cc
@@ -1857,16 +1857,14 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
// 2. Whether this op has specific implementation;
// 3. Whether onednn kernel can be used.
#ifdef PADDLE_WITH_DNNL
if (!this->DnnFallback() &&
!paddle::platform::in_mkldnn_white_list(type_) &&
this->CanMKLDNNBeUsed(exe_ctx, kernel_type_->data_type_)) {
kernel_type_->library_type_ = framework::LibraryType::kMKLDNN;
kernel_type_->data_layout_ = framework::DataLayout::ONEDNN;
} else if (platform::is_cpu_place(kernel_type_->place_) &&
kernel_type_->data_type_ ==
proto::VarType::Type::VarType_Type_BF16 &&
!this->SupportsCPUBF16() &&
this->SupportsMKLDNN(phi::DataType::BFLOAT16)) {
if ((!this->DnnFallback() &&
!paddle::platform::in_mkldnn_white_list(type_) &&
this->CanMKLDNNBeUsed(exe_ctx, kernel_type_->data_type_)) ||
(platform::is_cpu_place(kernel_type_->place_) &&
kernel_type_->data_type_ ==
proto::VarType::Type::VarType_Type_BF16 &&
!this->SupportsCPUBF16() &&
this->SupportsMKLDNN(phi::DataType::BFLOAT16))) {
kernel_type_->library_type_ = framework::LibraryType::kMKLDNN;
kernel_type_->data_layout_ = framework::DataLayout::ONEDNN;
}
@@ -2191,15 +2189,13 @@ OpKernelType OperatorWithKernel::InnerGetExpectedKernelType(
// 2. Whether this op has specific implementation;
// 3. Whether onednn kernel can be used.
#ifdef PADDLE_WITH_DNNL
if (!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) &&
this->CanMKLDNNBeUsed(ctx, expected_kernel_key.data_type_)) {
expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
expected_kernel_key.data_layout_ = framework::DataLayout::ONEDNN;
} else if (platform::is_cpu_place(expected_kernel_key.place_) &&
expected_kernel_key.data_type_ ==
proto::VarType::Type::VarType_Type_BF16 &&
!this->SupportsCPUBF16() &&
this->SupportsMKLDNN(phi::DataType::BFLOAT16)) {
if ((!this->DnnFallback() && !paddle::platform::in_mkldnn_white_list(type_) &&
this->CanMKLDNNBeUsed(ctx, expected_kernel_key.data_type_)) ||
(platform::is_cpu_place(expected_kernel_key.place_) &&
expected_kernel_key.data_type_ ==
proto::VarType::Type::VarType_Type_BF16 &&
!this->SupportsCPUBF16() &&
this->SupportsMKLDNN(phi::DataType::BFLOAT16))) {
expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
expected_kernel_key.data_layout_ = framework::DataLayout::ONEDNN;
}
2 changes: 0 additions & 2 deletions paddle/fluid/framework/program_converter.cc
@@ -140,8 +140,6 @@ void ConvertAssignValueOp(OpDesc* op) {
op->SetAttr("bool_values", ExtractPlainVector<int>(values));
break;
case phi::DataType::FLOAT32:
op->SetAttr("fp32_values", ExtractPlainVector<float>(values));
break;
case phi::DataType::FLOAT64:
op->SetAttr("fp32_values", ExtractPlainVector<float>(values));
break;
15 changes: 5 additions & 10 deletions paddle/fluid/inference/tensorrt/convert/prelu_op.cc
@@ -80,22 +80,17 @@ class PReluOpConverter : public OpConverter {
hw_tensor = Add1DConstantLayer(
std::vector<int32_t>(input_dims.nbDims - 2, 1));
}
if (data_format == "NCHW") {
if (hw_tensor != nullptr) {
if (hw_tensor != nullptr) {
if (data_format == "NCHW") {
shape_tensor = Concat(
std::vector<nvinfer1::ITensor*>{n_tensor, c_tensor, hw_tensor});
} else {
shape_tensor =
Concat(std::vector<nvinfer1::ITensor*>{n_tensor, c_tensor});
}
} else {
if (hw_tensor != nullptr) {
shape_tensor = Concat(
std::vector<nvinfer1::ITensor*>{n_tensor, hw_tensor, c_tensor});
} else {
shape_tensor =
Concat(std::vector<nvinfer1::ITensor*>{n_tensor, c_tensor});
}
} else {
shape_tensor =
Concat(std::vector<nvinfer1::ITensor*>{n_tensor, c_tensor});
}
reshape_layer->setInput(1, *shape_tensor);
} else {
5 changes: 3 additions & 2 deletions paddle/fluid/memory/allocation/allocator_facade.cc
@@ -436,10 +436,11 @@ class AllocatorFacadePrivate {

const std::shared_ptr<Allocator> GetDefaultStreamSafeCUDAAllocator(
const platform::CUDAPlace& place) const {
if (auto iter = default_stream_safe_cuda_allocators_.find(place);
if (auto iter = default_stream_safe_cuda_allocators_.find(place); // NOLINT
iter != default_stream_safe_cuda_allocators_.end())
return iter->second;
if (auto iter = default_cuda_malloc_async_allocators_.find(place);
if (auto iter =
default_cuda_malloc_async_allocators_.find(place); // NOLINT
iter != default_cuda_malloc_async_allocators_.end())
return iter->second;
PADDLE_THROW(platform::errors::NotFound(
2 changes: 1 addition & 1 deletion paddle/fluid/pir/drr/src/rewrite_pattern.cc
@@ -507,7 +507,7 @@ MatchContextImpl DrrRewritePattern::CreateOperations(
auto ir_val = res_match_ctx.GetIrValue(input->name());
if (ir_val) {
pir::Operation* ir_input_op = ir_val.defining_op();
if (op_2_temp_program_index.count(ir_input_op) == 0) {
if (op_2_temp_program_index.count(ir_input_op) == 0) { // NOLINT
// do nothing
} else if (max_res_idx < op_2_temp_program_index.at(ir_input_op)) {
max_res_idx = op_2_temp_program_index.at(ir_input_op);
@@ -112,7 +112,7 @@ class NTransposeFlattenConcatFusePattern : public paddle::drr::DrrPatternBase {
res.ComputeAttr([](const paddle::drr::MatchContext &match_ctx) -> int {
int start_axis = match_ctx.Attr<int>("start_axis_0");
int stop_axis = match_ctx.Attr<int>("stop_axis_0");
if (start_axis == stop_axis) {
if (start_axis == stop_axis) { // NOLINT
return start_axis;
} else if (start_axis == 0) {
return stop_axis + 1;
@@ -220,7 +220,7 @@ class ReshapeTransposeFusedMatmulFusePattern
});

pat.AddConstraint([&](const paddle::drr::MatchContext &match_ctx) {
if (as_x_) {
if (as_x_) { // NOLINT
if (!(match_ctx.Attr<std::vector<int>>("fused_reshape_x").empty()))
return false;
} else {
20 changes: 12 additions & 8 deletions paddle/fluid/platform/profiler.cc
@@ -283,10 +283,12 @@ RecordMemEvent::RecordMemEvent(const void *ptr,
RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3];
RecordMemEvent::has_initialized["gpu"][place.GetDeviceId()] = true;
} else {
current_reserved = DEVICE_MEMORY_STAT_CURRENT_VALUE(
Reserved, place.GetDeviceId()); // NOLINT
peak_reserved = DEVICE_MEMORY_STAT_PEAK_VALUE(
Reserved, place.GetDeviceId()); // NOLINT
current_reserved = DEVICE_MEMORY_STAT_CURRENT_VALUE( // NOLINT
Reserved,
place.GetDeviceId());
peak_reserved = DEVICE_MEMORY_STAT_PEAK_VALUE( // NOLINT
Reserved,
place.GetDeviceId());
RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][1] =
current_reserved;
RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3] =
@@ -449,10 +451,12 @@ RecordMemEvent::RecordMemEvent(const void *ptr,
RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3];
RecordMemEvent::has_initialized["gpu"][place.GetDeviceId()] = true;
} else {
current_reserved = DEVICE_MEMORY_STAT_CURRENT_VALUE(
Reserved, place.GetDeviceId()); // NOLINT
peak_reserved = DEVICE_MEMORY_STAT_PEAK_VALUE(
Reserved, place.GetDeviceId()); // NOLINT
current_reserved = DEVICE_MEMORY_STAT_CURRENT_VALUE( // NOLINT
Reserved,
place.GetDeviceId());
peak_reserved = DEVICE_MEMORY_STAT_PEAK_VALUE( // NOLINT
Reserved,
place.GetDeviceId());
RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][1] =
current_reserved;
RecordMemEvent::size_cache["gpu"][place.GetDeviceId()][3] =
21 changes: 11 additions & 10 deletions paddle/phi/api/lib/kernel_dispatch.cc
@@ -66,16 +66,17 @@ BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
phi::Backend backend_key = phi::TransToPhiBackend(t.place());
BackendSet backend_set(backend_key);
VLOG(10) << "update BackendSet by tensor: add [" << backend_key << "]";
if (backend_key == Backend::GPU && phi::DenseTensor::classof(&t) &&
static_cast<const phi::DenseTensor&>(t).meta().use_gpudnn) {
backend_set = backend_set | BackendSet(Backend::GPUDNN);
} else if (backend_key == Backend::GPU &&
phi::distributed::DistTensor::classof(&t) &&
static_cast<const phi::distributed::DistTensor&>(t)
.value()
.meta()
.use_gpudnn) {
backend_set = backend_set | BackendSet(Backend::GPUDNN);
if (backend_key == Backend::GPU) {
if (phi::DenseTensor::classof(&t) &&
static_cast<const phi::DenseTensor&>(t).meta().use_gpudnn) {
backend_set = backend_set | BackendSet(Backend::GPUDNN);
} else if (phi::distributed::DistTensor::classof(&t) &&
static_cast<const phi::distributed::DistTensor&>(t)
.value()
.meta()
.use_gpudnn) {
backend_set = backend_set | BackendSet(Backend::GPUDNN);
}
}
return backend_set;
}
16 changes: 8 additions & 8 deletions paddle/phi/backends/dynload/dynamic_loader.cc
@@ -350,7 +350,7 @@ void* GetCublasDsoHandle() {
#if defined(__APPLE__) || defined(__OSX__)
return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.dylib");
#elif defined(_WIN32) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "cublas64_11.dll");
#else
return nullptr;
}
#elif defined(__linux__) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(FLAGS_cublas_dir, "libcublas.so.11");
#else
@@ -399,7 +399,7 @@
void* GetCublasLtDsoHandle() {
// APIs available after CUDA 10.1
#if defined(__linux__) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(FLAGS_cublas_dir, "libcublasLt.so.11");
#else
@@ -487,7 +487,7 @@ void* GetCUPTIDsoHandle() {
return GetDsoHandleFromSearchPath(
FLAGS_cupti_dir, "libcupti.dylib", false, {cupti_lib_path});
#elif defined(__linux__) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(
FLAGS_cupti_dir, "libcupti.so.11.8", false, {cupti_lib_path});
@@ -584,7 +584,7 @@ void* GetCusparseDsoHandle() {
#if defined(__APPLE__) || defined(__OSX__)
return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcusparse.dylib");
#elif defined(_WIN32) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "cusparse64_11.dll");
#else
return nullptr;
}
#elif defined(__linux__) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(FLAGS_cusparse_dir, "libcusparse.so.11");
#else
@@ -781,7 +781,7 @@ void* GetCUFFTDsoHandle() {
#if defined(__APPLE__) || defined(__OSX__)
return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcufft.dylib");
#elif defined(__linux__) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcufft.so.10");
#else
return nullptr;
}
#elif defined(_WIN32) && defined(PADDLE_WITH_CUDA)
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) {
if (CUDA_VERSION >= 11000 && CUDA_VERSION < 12000) { // NOLINT
#ifdef PADDLE_WITH_PIP_CUDA_LIBRARIES
return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "cufft64_10.dll");
#else
@@ -320,11 +320,7 @@ SpmdInfo CrossEntropyWithSoftmaxInferSpmdReverse(
if (!use_softmax) {
x_dims_mapping[axis] = -1;
} else {
if (axis != x_ndim - 1) {
x_dims_mapping[axis] = -1;
s_out_dims_mapping_dst[axis] = -1;
label_dims_mapping[axis] = -1;
} else if (soft_label) {
if (axis != x_ndim - 1 || soft_label) {
x_dims_mapping[axis] = -1;
s_out_dims_mapping_dst[axis] = -1;
label_dims_mapping[axis] = -1;