Skip to content

Commit

Permalink
updates codes, test=develop
Browse files Browse the repository at this point in the history
  • Loading branch information
Shixiaowei02 committed Oct 25, 2021
1 parent 2b3ef04 commit 93422e5
Show file tree
Hide file tree
Showing 16 changed files with 57 additions and 97 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/framework/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -458,4 +458,4 @@ if(WITH_TESTING AND TEST selected_rows_test)
endif()

cc_test(scope_guard_test SRCS scope_guard_test.cc)
#cc_test(pten_utils_test SRCS pten_utils_test.cc DEPS pten_utils)
cc_test(pten_utils_test SRCS pten_utils_test.cc DEPS pten_utils)
4 changes: 2 additions & 2 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1820,7 +1820,7 @@ pten::KernelContext OperatorWithKernel::BuildPtenKernelContext(
paddle::SmallVector<std::shared_ptr<pten::TensorBase>> tmp_inputs;
for (auto var : ins_vector) {
tmp_inputs.emplace_back(
experimental::MakePtenTensorFromVar(*var, in_def));
experimental::MakePtenTensorBaseFromVar(*var, in_def));
}
op_kernel_ctx.EmplaceBackInputs(std::move(tmp_inputs));
}
Expand All @@ -1832,7 +1832,7 @@ pten::KernelContext OperatorWithKernel::BuildPtenKernelContext(
paddle::SmallVector<std::shared_ptr<pten::TensorBase>> tmp_outputs;
for (auto var : outs_vector) {
tmp_outputs.emplace_back(
experimental::MakePtenTensorFromVar(var, out_def));
experimental::MakePtenTensorBaseFromVar(var, out_def));
}
op_kernel_ctx.EmplaceBackOutputs(std::move(tmp_outputs));
}
Expand Down
18 changes: 18 additions & 0 deletions paddle/fluid/framework/pten_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,24 @@ limitations under the License. */
namespace paddle {
namespace framework {

// Converts a pten::KernelKey into the equivalent fluid OpKernelType.
// dtype/backend/layout are mapped through the pten::TransTo* helpers; the
// library type is derived from the backend (MKLDNN/CUDNN get their dedicated
// library, every other backend falls back to the plain library type).
// NOTE: the customized_type_value of the original op kernel is not
// recoverable from a KernelKey — see the TODO below.
OpKernelType TransPtenKernelKeyToOpKernelType(
    const pten::KernelKey& kernel_key) {
  const proto::VarType::Type data_type =
      pten::TransToProtoVarType(kernel_key.dtype());
  const platform::Place place = pten::TransToFluidPlace(kernel_key.backend());
  const DataLayout data_layout =
      pten::TransToFluidDataLayout(kernel_key.layout());
  LibraryType library_type = LibraryType::kPlain;
  switch (kernel_key.backend()) {
    case pten::Backend::MKLDNN:
      library_type = LibraryType::kMKLDNN;
      break;
    case pten::Backend::CUDNN:
      library_type = LibraryType::kCUDNN;
      break;
    default:
      // all remaining backends map to the plain library type
      break;
  }
  // TODO(chenweihang): the customized_type_value is lost
  return OpKernelType(data_type, place, data_layout, library_type);
}

pten::KernelKey TransOpKernelTypeToPtenKernelKey(
const OpKernelType& kernel_type) {
pten::Backend backend = pten::TransToPtenBackend(kernel_type.place_);
Expand Down
58 changes: 0 additions & 58 deletions paddle/fluid/framework/pten_utils_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,64 +18,6 @@ limitations under the License. */
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/variable.h"

// Verifies that a fluid LoDTensor can be wrapped into a pten::DenseTensor
// via MakeTensorImpl, preserving both its element values and its data type.
// Fix: removed the unused local `paddle::framework::Tensor x2;` that was
// declared but never referenced.
TEST(PtenUtils, FluidTensorToPtenTensor) {
  // 1. create a 2-element float tensor on CPU and fill it with known values
  paddle::framework::LoDTensor x;
  x.Resize({2});
  x.mutable_data<float>(paddle::platform::CPUPlace());
  x.data<float>()[0] = 0.2;
  x.data<float>()[1] = 0.5;

  // 2. test API
  auto dense_x = paddle::framework::MakeTensorImpl<pten::DenseTensor>(
      x, x.place(), x.type());

  // 3. check result: values and dtype must survive the conversion
  std::vector<float> expect_value = {0.2, 0.5};
  ASSERT_EQ(dense_x->data<float>()[0], expect_value[0]);
  ASSERT_EQ(dense_x->data<float>()[1], expect_value[1]);
  ASSERT_EQ(dense_x->data_type(), pten::DataType::FLOAT32);
}

// Builds a Variable holding a SelectedRows and checks that
// InputVariableToPtenTensor yields a pten tensor with the expected
// INT32 data type.
TEST(PtenUtils, VarToPtenTensor) {
  // 1. create a Variable wrapping a 1x1 int SelectedRows on CPU
  paddle::framework::Variable var;
  auto* rows = var.GetMutable<paddle::framework::SelectedRows>();
  auto* buf = rows->mutable_value()->mutable_data<int>(
      paddle::framework::make_ddim({1, 1}), paddle::platform::CPUPlace());
  buf[0] = 123;

  // the expected backend follows the build configuration
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  pten::Backend expect_backend = pten::Backend::CUDA;
#else
  pten::Backend expect_backend = pten::Backend::CPU;
#endif
  auto tensor_def = pten::TensorArgDef(expect_backend, pten::DataLayout::NCHW,
                                       pten::DataType::INT32);

  // 2. test API
  auto tensor_x = paddle::framework::InputVariableToPtenTensor(var, tensor_def);

  // 3. check result
  ASSERT_EQ(tensor_x->data_type(), pten::DataType::INT32);
}

// Verifies that ShareTensorImpl exposes a pten::DenseTensor's storage
// through fluid Tensor / LoDTensor views: a value written via the pten
// tensor must be visible through both fluid tensors afterwards.
TEST(PtenUtils, PtenTensorToFluidTensor) {
// build a 1x1 float CPU DenseTensor and write a known value into it
pten::DenseTensor dense_tensor(
pten::TensorMeta(paddle::framework::make_ddim({1, 1}), pten::Backend::CPU,
pten::DataType::FLOAT32, pten::DataLayout::ANY),
pten::TensorStatus());
auto* data_ptr = dense_tensor.mutable_data<float>();
data_ptr[0] = 0.5;
// share allocation into fluid Tensor
paddle::framework::Tensor tensor;
paddle::framework::LoDTensor lod_tensor;
paddle::framework::ShareTensorImpl(&dense_tensor, &tensor);
paddle::framework::ShareTensorImpl(&dense_tensor, &lod_tensor);
// compare: both fluid views must observe the value written via pten
ASSERT_EQ(tensor.data<float>()[0], 0.5);
ASSERT_EQ(lod_tensor.data<float>()[0], 0.5);
}

TEST(PtenUtils, TransPtenKernelKeyToOpKernelType) {
pten::KernelKey kernel_key(pten::Backend::CPU, pten::DataLayout::NCHW,
pten::DataType::FLOAT32);
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/imperative/prepared_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,7 @@ static pten::KernelContext BuildDygraphPtenKernelContext(
for (auto var : ins_vector) {
const auto& variable = var->Var();
tmp_inputs.emplace_back(
experimental::MakePtenTensorFromVar(variable, in_def));
experimental::MakePtenTensorBaseFromVar(variable, in_def));
}
op_kernel_ctx.EmplaceBackInputs(std::move(tmp_inputs));
}
Expand All @@ -307,7 +307,7 @@ static pten::KernelContext BuildDygraphPtenKernelContext(
for (auto var : outs_vector) {
auto* variable = var->MutableVar();
tmp_outputs.emplace_back(
experimental::MakePtenTensorFromVar(variable, out_def));
experimental::MakePtenTensorBaseFromVar(variable, out_def));
}
op_kernel_ctx.EmplaceBackOutputs(std::move(tmp_outputs));
}
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/operators/dot_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -244,9 +244,9 @@ class DotKernel : public framework::OpKernel<T> {
auto& dev_ctx = ctx.device_context<DeviceContext>();
out->mutable_data<T>(x->place());

auto pt_x = paddle::experimental::MakeSharedDenseTensor(*x);
auto pt_y = paddle::experimental::MakeSharedDenseTensor(*y);
auto pt_out = paddle::experimental::MakeSharedDenseTensor(*out);
auto pt_x = paddle::experimental::MakePtenDenseTensor(*x);
auto pt_y = paddle::experimental::MakePtenDenseTensor(*y);
auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);

// call new kernel
pten::Dot<T>(dev_ctx, *pt_x.get(), *pt_y.get(), pt_out.get());
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fill_any_like_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,8 @@ class FillAnyLikeKernel : public framework::OpKernel<T> {
std::isnan(value), false,
platform::errors::InvalidArgument("The filled value is NaN."));

auto pt_x = paddle::experimental::MakeSharedDenseTensor(*in);
auto pt_out = paddle::experimental::MakeSharedDenseTensor(*out);
auto pt_x = paddle::experimental::MakePtenDenseTensor(*in);
auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);

const auto& dev_ctx = context.template device_context<DeviceContext>();
// call new kernel
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/mean_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,8 @@ class MeanKernel : public framework::OpKernel<T> {
auto& dev_ctx = context.device_context<DeviceContext>();
out->mutable_data<T>(x->place());

auto pt_x = paddle::experimental::MakeSharedDenseTensor(*x);
auto pt_out = paddle::experimental::MakeSharedDenseTensor(*out);
auto pt_x = paddle::experimental::MakePtenDenseTensor(*x);
auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);

// call new kernel
VLOG(1) << "chenweihang: call original mean kernel compute.";
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/scale_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,8 +66,8 @@ class ScaleKernel : public framework::OpKernel<T> {
out->mutable_data<T>(in->place());
auto& dev_ctx = ctx.device_context<DeviceContext>();

auto pt_x = paddle::experimental::MakeSharedDenseTensor(*in);
auto pt_out = paddle::experimental::MakeSharedDenseTensor(*out);
auto pt_x = paddle::experimental::MakePtenDenseTensor(*in);
auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);

// call new kernel
pten::Scale<T>(dev_ctx, *pt_x.get(), scale, bias, bias_after_scale,
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/sign_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,8 @@ class SignKernel : public framework::OpKernel<T> {
auto& dev_ctx = context.device_context<DeviceContext>();
out->mutable_data<T>(x->place());

auto pt_x = paddle::experimental::MakeSharedDenseTensor(*x);
auto pt_out = paddle::experimental::MakeSharedDenseTensor(*out);
auto pt_x = paddle::experimental::MakePtenDenseTensor(*x);
auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);

// call new kernel
pten::Sign<T>(dev_ctx, *pt_x.get(), pt_out.get());
Expand Down
2 changes: 1 addition & 1 deletion paddle/pten/core/dense_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ class DenseTensor : public TensorBase,
}

/// \brief Set the lod of the tensor.
void SetLod(const std::vector<std::vector<size_t>>& lod) { meta_.lod = lod; }
void set_lod(const std::vector<std::vector<size_t>>& lod) { meta_.lod = lod; }

/// \brief Returns the data type of the tensor.
/// \return The data type of the tensor.
Expand Down
20 changes: 10 additions & 10 deletions paddle/pten/hapi/lib/utils/tensor_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ void SetLoD(DstLoD* dst, const SrcLoD& src) {
}
}

std::unique_ptr<pten::DenseTensor> MakeSharedDenseTensor(
std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
const paddle::framework::Tensor& src) {
pten::DenseTensorMeta meta{pten::TransToPtenDataType(src.type()),
src.dims(),
Expand All @@ -36,7 +36,7 @@ std::unique_ptr<pten::DenseTensor> MakeSharedDenseTensor(
std::move(meta));
}

std::unique_ptr<pten::DenseTensor> MakeSharedDenseTensor(
std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
const paddle::framework::LoDTensor& src) {
pten::DenseTensorMeta meta{pten::TransToPtenDataType(src.type()),
src.dims(),
Expand All @@ -47,7 +47,7 @@ std::unique_ptr<pten::DenseTensor> MakeSharedDenseTensor(
std::move(meta));
}

std::unique_ptr<pten::TensorBase> MakePtenTensorFromVar(
std::unique_ptr<pten::TensorBase> MakePtenTensorBaseFromVar(
const framework::Variable& variable, const pten::TensorArgDef& arg_def) {
auto expected_place = pten::TransToFluidPlace(arg_def.backend);

Expand All @@ -56,9 +56,9 @@ std::unique_ptr<pten::TensorBase> MakePtenTensorFromVar(
if (!platform::is_same_place(tensor.place(), expected_place)) {
framework::LoDTensor tmp_tensor;
framework::TensorCopySync(tensor, expected_place, &tmp_tensor);
return MakeSharedDenseTensor(tmp_tensor);
return MakePtenDenseTensor(tmp_tensor);
} else {
return MakeSharedDenseTensor(tensor);
return MakePtenDenseTensor(tensor);
}
} else if (variable.IsType<framework::SelectedRows>()) {
// TODO(chenweihang): now we don't deal with row and height
Expand All @@ -68,9 +68,9 @@ std::unique_ptr<pten::TensorBase> MakePtenTensorFromVar(
framework::Tensor tmp_tensor;
TensorCopySync(tensor.value(), expected_place, &tmp_tensor);
// TODO(chenweihang): adapt SelectedRows by xiaowei's design
return MakeSharedDenseTensor(tmp_tensor);
return MakePtenDenseTensor(tmp_tensor);
} else {
return MakeSharedDenseTensor(tensor.value());
return MakePtenDenseTensor(tensor.value());
}
} else {
PADDLE_THROW(platform::errors::Unimplemented(
Expand All @@ -80,23 +80,23 @@ std::unique_ptr<pten::TensorBase> MakePtenTensorFromVar(
return {};
}

std::unique_ptr<pten::DenseTensor> MakePtenTensorFromVar(
std::unique_ptr<pten::TensorBase> MakePtenTensorBaseFromVar(
framework::Variable* variable, const pten::TensorArgDef& arg_def) {
// mutable_data before run kernel, to avoid share output form
// KernelContext to original tensor
if (variable->template IsType<framework::LoDTensor>()) {
auto* tensor = variable->template GetMutable<framework::LoDTensor>();
tensor->mutable_data(pten::TransToFluidPlace(arg_def.backend),
pten::TransToProtoVarType(arg_def.dtype));
return MakeSharedDenseTensor(*tensor);
return MakePtenDenseTensor(*tensor);
} else if (variable->template IsType<framework::SelectedRows>()) {
auto* tensor = variable->template GetMutable<framework::SelectedRows>();
tensor->mutable_value()->mutable_data(
pten::TransToFluidPlace(arg_def.backend),
pten::TransToProtoVarType(arg_def.dtype));
// TODO(chenweihang): adapt SelectedRows by xiaowei's design,
// here the row and height will lost in output!
MakeSharedDenseTensor(tensor->value());
MakePtenDenseTensor(tensor->value());
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported shared output `%s` type now when call pt kernel.",
Expand Down
8 changes: 4 additions & 4 deletions paddle/pten/hapi/lib/utils/tensor_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,16 +28,16 @@ limitations under the License. */
namespace paddle {
namespace experimental {

std::unique_ptr<pten::DenseTensor> MakeSharedDenseTensor(
std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
const paddle::framework::Tensor& src);

std::unique_ptr<pten::DenseTensor> MakeSharedDenseTensor(
std::unique_ptr<pten::DenseTensor> MakePtenDenseTensor(
const paddle::framework::LoDTensor& src);

std::unique_ptr<pten::TensorBase> MakePtenTensorFromVar(
std::unique_ptr<pten::TensorBase> MakePtenTensorBaseFromVar(
const framework::Variable& variable, const pten::TensorArgDef& arg_def);

std::unique_ptr<pten::DenseTensor> MakePtenTensorFromVar(
std::unique_ptr<pten::TensorBase> MakePtenTensorBaseFromVar(
framework::Variable* variable, const pten::TensorArgDef& arg_def);

void MovesStorage(pten::DenseTensor* src, paddle::framework::Tensor* dst);
Expand Down
8 changes: 4 additions & 4 deletions paddle/pten/hapi/lib/utils/tests/test_tensor_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ TEST(tensor_utils, dense_tensor_to_lod_tensor) {
CHECK(lod_tensor.data<float>()[0] == 1.0f);
CHECK(lod_tensor.data<float>()[1] == 2.1f);

auto dense_tensor_1 = MakeSharedDenseTensor(lod_tensor);
auto dense_tensor_1 = MakePtenDenseTensor(lod_tensor);
CHECK(dense_tensor_1->dims() == dims);
CHECK(dense_tensor_1->data_type() == dtype);
CHECK(dense_tensor_1->layout() == layout);
Expand Down Expand Up @@ -90,7 +90,7 @@ TEST(tensor_utils, dense_tensor_to_tensor) {
CHECK(tensor.data<float>()[0] == 1.0f);
CHECK(tensor.data<float>()[1] == 2.1f);

auto dense_tensor_1 = MakeSharedDenseTensor(tensor);
auto dense_tensor_1 = MakePtenDenseTensor(tensor);
CHECK(dense_tensor_1->dims() == dims);
CHECK(dense_tensor_1->data_type() == dtype);
CHECK(dense_tensor_1->layout() == layout);
Expand All @@ -99,7 +99,7 @@ TEST(tensor_utils, dense_tensor_to_tensor) {
CHECK(data_1[1] == 2.1f);
}

TEST(TcmptUtils, VarToPtTensor) {
TEST(PtenUtils, VarToPtTensor) {
// 1. create Variable
paddle::framework::Variable v;
auto selected_rows = v.GetMutable<paddle::framework::SelectedRows>();
Expand All @@ -115,7 +115,7 @@ TEST(TcmptUtils, VarToPtTensor) {
auto tensor_def = pten::TensorArgDef(
expect_backend, pten::DataLayout::NCHW, pten::DataType::INT32);
// 2. test API
auto tensor_x = MakePtenTensorFromVar(v, tensor_def);
auto tensor_x = MakePtenTensorBaseFromVar(v, tensor_def);
// 3. check result
ASSERT_EQ(tensor_x->data_type(), pten::DataType::INT32);
}
Expand Down
4 changes: 2 additions & 2 deletions paddle/pten/kernels/cpu/manipulation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ void Flatten(const CPUContext& dev_ctx,
DenseTensor* out) {
auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
pten::Copy(dev_ctx, x, out);
out->SetLod(out_meta.lod);
out->set_lod(out_meta.lod);
out->Resize(out_meta.dims);
}

Expand All @@ -48,7 +48,7 @@ void FlattenWithXShape(const CPUContext& dev_ctx,
xshape_dims[i + 1] = in_dims[i];
}
xshape->Resize(paddle::framework::make_ddim(xshape_dims));
xshape->SetLod(x.lod());
xshape->set_lod(x.lod());
}

} // namespace pten
Expand Down
4 changes: 2 additions & 2 deletions paddle/pten/kernels/cuda/manipulation.cu
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ void Flatten(const CUDAContext& dev_ctx,
DenseTensor* out) {
auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
pten::Copy(dev_ctx, x, out);
out->SetLod(out_meta.lod);
out->set_lod(out_meta.lod);
out->Resize(out_meta.dims);
}

Expand All @@ -48,7 +48,7 @@ void FlattenWithXShape(const CUDAContext& dev_ctx,
xshape_dims[i + 1] = in_dims[i];
}
xshape->Resize(paddle::framework::make_ddim(xshape_dims));
xshape->SetLod(x.lod());
xshape->set_lod(x.lod());
}

} // namespace pten
Expand Down

1 comment on commit 93422e5

@paddle-bot-old
Copy link

@paddle-bot-old paddle-bot-old bot commented on 93422e5 Oct 25, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🕵️ CI failures summary

🔍 PR: #30 Commit ID: 93422e5 contains failed CI.

🔹 Failed: PR-CI-Build

Unknown Failed
2021-10-25 16:54:53 exe task(buildId=731eb9e5544241ae875a36c9dce6df56, actionName=BUILD_START) confirm status is true
2021-10-25 16:54:53 insert listener now cache size is 1
2021-10-25 16:54:53 log path: /data/docker/containers/fa8f31daf3e9f713625dfb780195ffaeb12832866baedcabdda015989a494d4f
2021-10-25 16:54:53 SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
2021-10-25 16:54:53 SLF4J: Defaulting to no-operation (NOP) logger implementation
2021-10-25 16:54:53 SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
2021-10-25 16:54:53 the build(731eb9e5544241ae875a36c9dce6df56) state is START
2021-10-25 16:54:54 mkdir file PaddlePaddle/Paddle start!
2021-10-25 16:54:54 mkdir -p /workspace/Paddle
2021-10-25 16:54:54 the build(731eb9e5544241ae875a36c9dce6df56) state is BUILD_CODE_START
2021-10-25 16:54:54 exe task(buildId=731eb9e5544241ae875a36c9dce6df56, actionName=BUILD_CODE) confirm status is true
2021-10-25 16:54:54 build Paddle start!
2021-10-25 16:54:54 + set -e
2021-10-25 16:54:54 + set -x
2021-10-25 16:54:54 + cd /workspace
2021-10-25 16:54:54 + set +x
2021-10-25 16:54:54 PADDLE DOCKER BUILD md5=
2021-10-25 16:54:54 check docker md5 fail !
2021-10-25 16:54:55 the build(731eb9e5544241ae875a36c9dce6df56) state is BUILD_CODE_FAIL

🔹 Failed: PR-CI-APPROVAL

approve_failed
2021-10-25 16:58:53 正在保存至: “bk.txt”
2021-10-25 16:58:53 0K 100% 3.16M=0s
2021-10-25 16:58:53 2021-10-25 16:58:53 (3.16 MB/s) - 已保存 “bk.txt” [5/5])
2021-10-25 16:59:01 ****************
2021-10-25 16:59:01 0. You must have one RD (lanxianghit (Recommend), phlrain or luotao1) approval for changing the FLAGS, which manages the environment variables.
2021-10-25 16:59:01 1. You must have Dianhai approval for change 20+ files or add than 1000+ lines of content.
2021-10-25 16:59:01 2. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for paddle/fluid/framework/operator.h, which manages the underlying code for fluid.
2021-10-25 16:59:01 3. You must have one RD (zhiqiu (Recommend) , phlrain) approval for the changes of paddle/fluid/pybind/op_function_generator.cc, which manages the logic of automatic generating op functions for dygraph.
2021-10-25 16:59:01 4. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for the usage of const_cast.
2021-10-25 16:59:01 5. You must have one RD (Avin0323(Recommend) or zhouwei25 or wanghuancoder or luotao1) approval for modifying unity_build_rule.cmake which the rules of Unity Build.
2021-10-25 16:59:01 There are 6 approved errors.
2021-10-25 16:59:01 ****************
2021-10-25 16:59:01 + EXCODE=6
2021-10-25 16:59:01 + echo 'EXCODE: 6'
2021-10-25 16:59:01 EXCODE: 6
2021-10-25 16:59:01 + echo 'ipipe_log_param_EXCODE: 6'
2021-10-25 16:59:01 ipipe_log_param_EXCODE: 6
2021-10-25 16:59:01 + exit 6

🔹 Failed: PR-CI-Mac-Python3

test_failed
2021-10-25 17:40:20 The following tests FAILED:
2021-10-25 17:40:20 864 - test_scale_op (Failed)
2021-10-25 17:40:20 867 - test_scale_op (Failed)
2021-10-25 17:40:20 + EXCODE=8
2021-10-25 17:40:20 + echo 'EXCODE: 8'
2021-10-25 17:40:20 EXCODE: 8
2021-10-25 17:40:20 + echo 'ipipe_log_param_EXCODE: 8'
2021-10-25 17:40:20 ipipe_log_param_EXCODE: 8
2021-10-25 17:40:20 + '[' 8 -eq 0 ']'
2021-10-25 17:40:20 + set +x
2021-10-25 17:40:20 Sorry, some tests failed.
2021-10-25 17:40:20 + exit 8

🔹 Failed: PR-CI-Windows

test_failed
2021-10-25 18:31:00 The following tests FAILED:
2021-10-25 18:31:00 923 - test_scale_op (Failed)
2021-10-25 18:31:00 1067 - test_fuse_all_reduce_pass (Failed)
2021-10-25 18:31:00 923 - test_scale_op (Failed)
2021-10-25 18:31:00 1067 - test_fuse_all_reduce_pass (Failed)
2021-10-25 18:31:00 923 - test_scale_op (Failed)
2021-10-25 18:31:00 1067 - test_fuse_all_reduce_pass (Failed)
2021-10-25 18:31:00 C:\Users\Administrator\Downloads\workspace\817bd579-7e7a-4aa0-a1a8-79100850b119\Paddle\build>goto:eof
2021-10-25 18:31:00 C:\Users\Administrator\Downloads\workspace\817bd579-7e7a-4aa0-a1a8-79100850b119\Paddle\build>for /F %# in ('wmic os get localdatetime|findstr 20') do set end=%#
2021-10-25 18:31:00 C:\Users\Administrator\Downloads\workspace\817bd579-7e7a-4aa0-a1a8-79100850b119\Paddle\build>set end=20211025183100.502000+480
2021-10-25 18:31:00 C:\Users\Administrator\Downloads\workspace\817bd579-7e7a-4aa0-a1a8-79100850b119\Paddle\build>set end=1025183100
2021-10-25 18:31:00 C:\Users\Administrator\Downloads\workspace\817bd579-7e7a-4aa0-a1a8-79100850b119\Paddle\build>call :timestamp "1025173044" "1025183100" "1 card TestCases Total"
2021-10-25 18:31:00 C:\Users\Administrator\Downloads\workspace\817bd579-7e7a-4aa0-a1a8-79100850b119\Paddle\build>setlocal enabledelayedexpansion
2021-10-25 18:31:00 2223044
2021-10-25 18:31:00 "Windows 1 card TestCases Total Time: 3616s"
2021-10-25 18:31:00 ipipe_log_param_Windows_1_card_TestCases_Total_Time: 3616s
2021-10-25 18:31:00 2223044
2021-10-25 18:31:00 "Windows TestCases Total Time: 3616s"
2021-10-25 18:31:00 ipipe_log_param_Windows_TestCases_Total_Time: 3616s
2021-10-25 18:31:00 Running unit tests failed, will exit
2021-10-25 18:31:03 EXCODE: 8

Please sign in to comment.