[Refactoring Tensor PR #5] replace storage with pten allocation (PaddlePaddle#39085)

* updates callers, test=develop

* updates tensor, test=develop

* fixes errors, test=develop

* remove some dtypes, test=develop

* fix errors in the base storage modification, test=develop

* fixes a bug, test=develop

* fixes the bugs in push the whole, test=develop

* updates, test=develop

* update

* update, test=develop

* fixes the mac-py3 CI, test=develop

* remove the storage impl, test=develop

* updates some codes, test=develop

* update, test=develop

* updates pten allocation, test=develop
Shixiaowei02 committed Jan 24, 2022
1 parent 95b081e commit a56e16a
Showing 80 changed files with 490 additions and 684 deletions.
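
Most of the churn in the test files below is one mechanical change: pten::DenseTensor::mutable_data<T>() loses its zero-argument form, because the tensor no longer owns a pre-sized storage object. Allocation now goes through the pten allocator, and the caller must name the target place. A minimal before/after sketch assembled from the test code in this commit (header paths and the DefaultAllocator setup are assumptions matched to the repository at this point in history):

    // Assumed headers: paddle/pten/core/dense_tensor.h and the allocator
    // utility providing paddle::experimental::DefaultAllocator.
    void MutableDataSketch() {
      pten::DenseTensorMeta meta(pten::DataType::FLOAT32,
                                 paddle::framework::make_ddim({1, 2}));
      auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace());
      auto dt = std::make_shared<pten::DenseTensor>(alloc.get(), meta);

      // Before this PR: auto* p = dt->mutable_data<float>();  // place implicit
      // After this PR: the place is explicit; allocation happens lazily here.
      auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
      dt_ptr[0] = 5.0f;
      dt_ptr[1] = 10.0f;
    }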
5 changes: 3 additions & 2 deletions paddle/fluid/eager/accumulation/gradient_accumulation.cc
@@ -216,8 +216,9 @@ void TensorAdd(const egr::EagerTensor& src, egr::EagerTensor* dst) {
 
 #define PADDLE_TENSOR_ADD(cpp_type)                                          \
   if (data_type == paddle::framework::DataTypeTrait<cpp_type>::DataType()) { \
-    TensorAddFunctor<cpp_type> func(numel, src_tensor->data<cpp_type>(),     \
-                                    dst_tensor->mutable_data<cpp_type>());   \
+    TensorAddFunctor<cpp_type> func(                                         \
+        numel, src_tensor->data<cpp_type>(),                                 \
+        dst_tensor->mutable_data<cpp_type>(place));                          \
     paddle::platform::VisitPlace(place, func);                               \
     return;                                                                  \
   }
@@ -36,7 +36,8 @@ TEST(AccumulationNode, EagerTensor) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  dt0->mutable_data<paddle::platform::float16>()[0] = 10.0;
+  dt0->mutable_data<paddle::platform::float16>(
+      paddle::platform::CPUPlace())[0] = 10.0;
   EagerTensor et0 = EagerTensor(dt0);
 
   std::shared_ptr<pten::DenseTensor> dt1 = std::make_shared<pten::DenseTensor>(
@@ -45,7 +46,8 @@
           .get(),
       meta);
 
-  dt1->mutable_data<paddle::platform::float16>()[0] = 20.0;
+  dt1->mutable_data<paddle::platform::float16>(
+      paddle::platform::CPUPlace())[0] = 20.0;
   EagerTensor et1 = EagerTensor(dt1);
 
   std::shared_ptr<pten::DenseTensor> grad_dt =
@@ -46,7 +46,7 @@ TEST(AutogradMeta, MemberFunction) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  auto* dt_ptr = dt->mutable_data<float>();
+  auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
   dt_ptr[0] = 5.0f;
   dt_ptr[1] = 10.0f;
   grad_t->set_impl(dt);
@@ -40,7 +40,7 @@ TEST(EagerTensor, Constructor) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  auto* dt_ptr = dt->mutable_data<float>();
+  auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
   dt_ptr[0] = 5.0f;
   dt_ptr[1] = 10.0f;
   egr::EagerTensor et3 = egr::EagerTensor(dt);
@@ -70,7 +70,7 @@ TEST(EagerTensor, MemberFunction) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  auto* dt_ptr = dt->mutable_data<float>();
+  auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
   dt_ptr[0] = 5.0f;
   dt_ptr[1] = 10.0f;
   VLOG(6) << "Make Dense Tensor";
@@ -45,7 +45,7 @@ TEST(GradNodeInfo, GradNodeBase) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  auto* dt_ptr = dt->mutable_data<float>();
+  auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
   dt_ptr[0] = 5.0f;
   egr::EagerTensor et1(dt);
   grads = {{et1}};
@@ -102,7 +102,7 @@ TEST(GradNodeInfo, GradNodeBase) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  auto* dt_ptr = dt->mutable_data<float>();
+  auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
   dt_ptr[0] = 6.0f;
   auto* et_ptr =
       std::dynamic_pointer_cast<pten::DenseTensor>(et.impl())->data<float>();
@@ -121,8 +121,8 @@
 
   VLOG(6) << "Test Reduce Hook";
   auto reduce_hook = [&](void) -> void {
-    auto* et_ptr = std::dynamic_pointer_cast<pten::DenseTensor>(et1.impl())
-                       ->mutable_data<float>();
+    auto* et_ptr =
+        std::dynamic_pointer_cast<pten::DenseTensor>(et1.impl())->data<float>();
     et_ptr[0] = 100.0;
     VLOG(6) << "Running Reduce Hook";
   };
@@ -41,7 +41,7 @@ class GradTestNode : public egr::GradNodeBase {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-    auto* dt_ptr = dt->mutable_data<float>();
+    auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
     dt_ptr[0] = 6.0f;
     egr::EagerTensor et1(dt);
     std::vector<std::vector<egr::EagerTensor>> res = {{et1}};
@@ -57,15 +57,15 @@ TEST(GradTensorHolder, Interfaces) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  dt0->mutable_data<float>()[0] = 10.0;
+  dt0->mutable_data<float>(paddle::platform::CPUPlace())[0] = 10.0;
   EagerTensor et0 = EagerTensor(dt0);
 
   std::shared_ptr<pten::DenseTensor> dt1 = std::make_shared<pten::DenseTensor>(
       std::make_unique<paddle::experimental::DefaultAllocator>(
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  dt1->mutable_data<float>()[0] = 20.0;
+  dt1->mutable_data<float>(paddle::platform::CPUPlace())[0] = 20.0;
   EagerTensor et1 = EagerTensor(dt1);
 
   // Constructor empty GradTensorHolder
@@ -29,7 +29,7 @@ TEST(TensorWrapper, Basic) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  auto* dt_ptr = dt->mutable_data<float>();
+  auto* dt_ptr = dt->mutable_data<float>(paddle::platform::CPUPlace());
   dt_ptr[0] = 5.0f;
   dt_ptr[1] = 10.0f;
   et1.set_impl(dt);
@@ -56,7 +56,7 @@ TEST(TensorWrapper, Basic) {
           paddle::platform::CPUPlace())
           .get(),
       meta2);
-  auto* dt_ptr2 = dt->mutable_data<float>();
+  auto* dt_ptr2 = dt->mutable_data<float>(paddle::platform::CPUPlace());
   dt_ptr2[0] = 6.0f;
   dt_ptr2[1] = 11.0f;
   et2.set_impl(dt2);
6 changes: 3 additions & 3 deletions paddle/fluid/eager/tests/task_tests/eager_utils_test.cc
@@ -35,15 +35,15 @@ TEST(EagerUtils, AutoGradMeta) {
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  dt0->mutable_data<float>()[0] = 10.0;
+  dt0->mutable_data<float>(paddle::platform::CPUPlace())[0] = 10.0;
   EagerTensor et0 = EagerTensor(dt0);
 
   std::shared_ptr<pten::DenseTensor> dt1 = std::make_shared<pten::DenseTensor>(
       std::make_unique<paddle::experimental::DefaultAllocator>(
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  dt1->mutable_data<float>()[0] = 20.0;
+  dt1->mutable_data<float>(paddle::platform::CPUPlace())[0] = 20.0;
   EagerTensor et1 = EagerTensor(dt1);
 
   std::vector<EagerTensor> ets = {et0, et1};
@@ -112,7 +112,7 @@ egr::EagerTensor CreateTestCPUTensor(T val,
           paddle::platform::CPUPlace())
           .get(),
       meta);
-  auto* dt_ptr = dt->mutable_data<T>();
+  auto* dt_ptr = dt->mutable_data<T>(paddle::platform::CPUPlace());
   for (int64_t i = 0; i < dt->numel(); i++) {
     dt_ptr[i] = val;
   }
6 changes: 3 additions & 3 deletions paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc
@@ -44,8 +44,8 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
           paddle::memory::Alloc(place, bytes_size)),
       std::move(ret_meta));
 
-  float* t_ptr = t_dense->mutable_data<float>();
-  float* ret_ptr = ret_dense->mutable_data<float>();
+  float* t_ptr = t_dense->mutable_data<float>(place);
+  float* ret_ptr = ret_dense->mutable_data<float>(place);
   for (int i = 0; i < ret_dense->numel(); i++) {
     ret_ptr[i] = t_ptr[i] + 5.0;
   }
@@ -184,7 +184,7 @@ TEST(FwdBwdJoint, BranchedNodes) {
   // Examine Forward Output 2
   {
     auto dense_out = std::dynamic_pointer_cast<pten::DenseTensor>(out2.impl());
-    float* ptr = dense_out->mutable_data<float>();
+    float* ptr = dense_out->mutable_data<float>(paddle::platform::CPUPlace());
     for (int i = 0; i < 20; i++) {
       PADDLE_ENFORCE(ptr[i] == 150.0,
                      paddle::platform::errors::Fatal(
4 changes: 2 additions & 2 deletions paddle/fluid/eager/tests/task_tests/hook_test.cc
@@ -45,8 +45,8 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
           paddle::memory::Alloc(place, bytes_size)),
       std::move(ret_meta));
 
-  float* t_ptr = t_dense->mutable_data<float>();
-  float* ret_ptr = ret_dense->mutable_data<float>();
+  float* t_ptr = t_dense->mutable_data<float>(place);
+  float* ret_ptr = ret_dense->mutable_data<float>(place);
   for (int i = 0; i < ret_dense->numel(); i++) {
     ret_ptr[i] = t_ptr[i] + 3.0;
   }
4 changes: 2 additions & 2 deletions paddle/fluid/eager/tests/test_utils.h
@@ -34,7 +34,7 @@ bool CompareGradTensorWithValue(const egr::EagerTensor& target, T value) {
   egr::AutogradMeta* meta = egr::EagerUtils::unsafe_autograd_meta(target);
   auto grad_dense =
       std::dynamic_pointer_cast<pten::DenseTensor>(meta->Grad().impl());
-  T* ptr = grad_dense->mutable_data<T>();
+  T* ptr = grad_dense->data<T>();
 
   std::vector<T> host_data(grad_dense->numel());
   if (paddle::platform::is_gpu_place(grad_dense->place())) {
@@ -67,7 +67,7 @@ template <typename T>
 bool CompareTensorWithValue(const egr::EagerTensor& target, T value) {
   // TODO(jiabin): Support Selected Rows later
   auto dense_t = std::dynamic_pointer_cast<pten::DenseTensor>(target.impl());
-  T* ptr = dense_t->mutable_data<T>();
+  T* ptr = dense_t->data<T>();
 
   std::vector<T> host_data(dense_t->numel());
   if (paddle::platform::is_gpu_place(dense_t->place())) {
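
The two comparison helpers above only read the tensor, so they drop mutable_data<T>() in favor of data<T>(): with allocation now tied to an explicit place, a pure read should not be able to trigger one. A two-line sketch of the resulting convention (a fragment reusing this helper's dense_t; it assumes the buffer was already materialized by an earlier mutable_data(place) or kernel call):

    // Write path: may allocate, so the place must be explicit.
    T* rw = dense_t->mutable_data<T>(paddle::platform::CPUPlace());
    // Read path: returns the existing buffer and never allocates.
    T* ro = dense_t->data<T>();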
1 change: 1 addition & 0 deletions paddle/fluid/pybind/eager.cc
@@ -80,6 +80,7 @@ void EmptyEagerTensorInitializer(
         std::make_shared<pten::DenseTensor>(
             pten::make_intrusive<paddle::experimental::SharedStorage>(place),
             pten::DenseTensorMeta(pten::TransToPtenDataType(dtype), ddims));
+    dense_tensor->mutable_data(place);
     self->eager_tensor.set_impl(dense_tensor);
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
6 changes: 0 additions & 6 deletions paddle/pten/api/ext/dispatch.h
@@ -272,16 +272,10 @@ namespace paddle {
       NAME, ::pten::DataType::UINT8, uint8_t, __VA_ARGS__)              \
   PD_PRIVATE_CASE_TYPE(                                                 \
       NAME, ::pten::DataType::INT16, int16_t, __VA_ARGS__)              \
-  PD_PRIVATE_CASE_TYPE(                                                 \
-      NAME, ::pten::DataType::UINT16, uint16_t, __VA_ARGS__)            \
   PD_PRIVATE_CASE_TYPE(                                                 \
       NAME, ::pten::DataType::INT32, int32_t, __VA_ARGS__)              \
-  PD_PRIVATE_CASE_TYPE(                                                 \
-      NAME, ::pten::DataType::UINT32, uint32_t, __VA_ARGS__)            \
   PD_PRIVATE_CASE_TYPE(                                                 \
       NAME, ::pten::DataType::INT64, int64_t, __VA_ARGS__)              \
-  PD_PRIVATE_CASE_TYPE(                                                 \
-      NAME, ::pten::DataType::UINT64, uint64_t, __VA_ARGS__)            \
   PD_PRIVATE_CASE_TYPE(NAME,                                            \
                        ::pten::DataType::BFLOAT16,                      \
                        paddle::experimental::bfloat16,                  \
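
Dropping the UINT16/UINT32/UINT64 cases means kernels reached through these dispatch macros can no longer be instantiated for wide unsigned dtypes, which lines up with the uint16_t removals in tensor.cc and scalar.h further down. Roughly, each PD_PRIVATE_CASE_TYPE(NAME, ENUM, cpp_t, ...) contributes one case to a switch over the runtime dtype. A simplified illustration of the shape of that dispatch after this change (a hypothetical helper mirroring the macro's structure, not the verbatim expansion):

    // The functor is invoked with a value whose static type matches the
    // runtime dtype, standing in for the macro's "using data_t = ..." body.
    template <typename Functor>
    void DispatchSignedIntegral(pten::DataType data_type, Functor f) {
      switch (data_type) {
        case pten::DataType::INT16: { f(int16_t{0}); break; }
        case pten::DataType::INT32: { f(int32_t{0}); break; }
        case pten::DataType::INT64: { f(int64_t{0}); break; }
        // The UINT16 / UINT32 / UINT64 cases removed by this PR would have
        // appeared here; those dtypes now reach the error branch instead.
        default: break;  // the real macros raise a Paddle error here
      }
    }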
26 changes: 16 additions & 10 deletions paddle/pten/api/lib/tensor.cc
@@ -149,8 +149,8 @@ bool Tensor::is_cuda() const {
 template <typename T>
 T *Tensor::mutable_data() {
   if (is_dense_tensor()) {
-    return std::dynamic_pointer_cast<pten::DenseTensor>(impl_)
-        ->mutable_data<T>();
+    return std::dynamic_pointer_cast<pten::DenseTensor>(impl_)->mutable_data<T>(
+        ConvertExtPlaceToInnerPlace(place()));
   }
   return nullptr;
 }
@@ -173,12 +173,18 @@ Tensor::mutable_data<paddle::platform::float16>();
 template <typename T>
 T *Tensor::mutable_data(const PlaceType &place) {
   auto inner_place = ConvertExtPlaceToInnerPlace(place);
-  PADDLE_ENFORCE_EQ(
-      platform::is_same_place(inner_place, impl_->place()),
-      true,
-      platform::errors::Unimplemented("Modification of tensor place through "
-                                      "mutable_data is not supported now"));
-  return mutable_data<T>();
+  if (impl_->initialized()) {
+    PADDLE_ENFORCE_EQ(
+        platform::is_same_place(inner_place, impl_->place()),
+        true,
+        platform::errors::Unimplemented("Modification of tensor place through "
+                                        "mutable_data is not supported now"));
+  }
+  if (is_dense_tensor()) {
+    return std::dynamic_pointer_cast<pten::DenseTensor>(impl_)->mutable_data<T>(
+        inner_place);
+  }
+  return nullptr;
 }
 
 template PADDLE_API float *Tensor::mutable_data<float>(const PlaceType &place);
@@ -205,7 +211,8 @@ Tensor::mutable_data<paddle::platform::float16>(const PlaceType &place);
 template <typename T>
 const T *Tensor::data() const {
   if (is_dense_tensor()) {
-    return std::dynamic_pointer_cast<pten::DenseTensor>(impl_)->data<T>();
+    return std::dynamic_pointer_cast<pten::DenseTensor>(impl_)->mutable_data<T>(
+        ConvertExtPlaceToInnerPlace(place()));
   }
   return nullptr;
 }
@@ -217,7 +224,6 @@ template PADDLE_API const int32_t *Tensor::data<int32_t>() const;
 template PADDLE_API const uint8_t *Tensor::data<uint8_t>() const;
 template PADDLE_API const int8_t *Tensor::data<int8_t>() const;
 template PADDLE_API const int16_t *Tensor::data<int16_t>() const;
-template PADDLE_API const uint16_t *Tensor::data<uint16_t>() const;
 template PADDLE_API const bool *Tensor::data<bool>() const;
 template PADDLE_API const paddle::platform::complex<float>
     *Tensor::data<paddle::platform::complex<float>>() const;
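
The three tensor.cc hunks above are best read together. Tensor::mutable_data(const PlaceType&) now checks place consistency only when the tensor is already initialized, so it can serve as the very first allocation; and the const Tensor::data<T>() is rerouted through mutable_data<T>(...), so a read on a not-yet-allocated tensor materializes the buffer instead of handing back an unusable pointer. A usage sketch (the Tensor constructor taking a place and shape is an assumption based on this file's API of the time):

    paddle::experimental::Tensor t(paddle::PlaceType::kCPU, {2, 3});
    // First call allocates on CPU; later calls must keep the same place.
    float* w = t.mutable_data<float>(paddle::PlaceType::kCPU);
    w[0] = 1.0f;
    // After this PR, data<float>() also allocates if needed before returning.
    const float* r = t.data<float>();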
1 change: 1 addition & 0 deletions paddle/pten/api/lib/utils.cc
@@ -65,6 +65,7 @@ PADDLE_API Tensor copy_to(const Tensor& x, Backend backend, bool blocking) {
       pten::make_intrusive<paddle::experimental::SharedStorage>(
           pten::TransToFluidPlace(backend)),
       std::move(out_meta));
+  dense_out->mutable_data(pten::TransToFluidPlace(backend));
   kernel_context.EmplaceBackOutput(dense_out.get());
   Tensor out;
   out.set_impl(dense_out);
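
This is the same materialization step added to EmptyEagerTensorInitializer in eager.cc above: once a DenseTensor is built over a SharedStorage that no longer eagerly allocates, the untyped mutable_data(place) overload has to run before the tensor reaches a kernel or Python. The shared pattern at both call sites (the overload's dtype appears to come from the tensor's meta, as inferred from these diffs):

    auto dense = std::make_shared<pten::DenseTensor>(
        pten::make_intrusive<paddle::experimental::SharedStorage>(place),
        std::move(meta));
    dense->mutable_data(place);  // materialize the pten allocation up front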
18 changes: 18 additions & 0 deletions paddle/pten/api/lib/utils/storage.h
@@ -39,6 +39,18 @@ class ExternalStorage : public pten::Storage {
     size_ = 0;
   }
 
+  void set_data_shared(
+      const std::shared_ptr<paddle::memory::Allocation>& holder) override {
+    CHECK(holder);
+    data_ = holder;
+    size_ = holder->size();
+  }
+
+  std::shared_ptr<paddle::memory::Allocation>&& move_data_shared() override {
+    size_ = 0;
+    return std::move(data_);
+  }
+
   size_t size() const noexcept override { return size_; }
   const paddle::platform::Place& place() const override {
     PADDLE_ENFORCE_NOT_NULL(
@@ -92,6 +104,12 @@ class SharedStorage : public pten::Storage {
     }
   }
 
+  std::shared_ptr<paddle::memory::Allocation>&& move_data_shared() override {
+    size_ = 0;
+    place_ = Place();
+    return std::move(data_);
+  }
+
   size_t size() const noexcept override {
     return data_ ? data_->size() : size_;
   }
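
set_data_shared and move_data_shared are the bridge that lets these compatibility Storage classes pass a paddle::memory::Allocation back and forth with the tensor during the storage-to-allocation migration: adopt a shared buffer, or surrender ownership and clear the bookkeeping. A self-contained toy model of that contract (names mirror the diff; Buffer is a stand-in for paddle::memory::Allocation, so this compiles without Paddle):

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Buffer {  // stand-in for paddle::memory::Allocation
      explicit Buffer(size_t n) : bytes(n) {}
      size_t size() const { return bytes.size(); }
      std::vector<char> bytes;
    };

    class ToyStorage {
     public:
      // Adopt an existing allocation, sharing ownership with the caller.
      void set_data_shared(const std::shared_ptr<Buffer>& holder) {
        assert(holder != nullptr);
        data_ = holder;
        size_ = holder->size();
      }
      // Surrender ownership; as in the diff, bookkeeping is cleared first.
      std::shared_ptr<Buffer>&& move_data_shared() {
        size_ = 0;
        return std::move(data_);
      }
      size_t size() const { return size_; }

     private:
      std::shared_ptr<Buffer> data_;
      size_t size_{0};
    };

    int main() {
      ToyStorage s;
      s.set_data_shared(std::make_shared<Buffer>(64));
      auto taken = s.move_data_shared();  // s no longer owns the buffer
      assert(s.size() == 0 && taken->size() == 64);
      return 0;
    }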
3 changes: 0 additions & 3 deletions paddle/pten/common/scalar.h
@@ -133,9 +133,6 @@ class ScalarBase {
     case DataType::INT8:
       data_.i8 = tensor.template data<int8_t>()[0];
       break;
-    case DataType::UINT16:
-      data_.ui16 = tensor.template data<uint16_t>()[0];
-      break;
     case DataType::UINT8:
       data_.ui8 = tensor.template data<uint8_t>()[0];
       break;