diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc
index 316a19728c19b..42d8911bdafa0 100644
--- a/paddle/fluid/pybind/eager.cc
+++ b/paddle/fluid/pybind/eager.cc
@@ -45,7 +45,7 @@ limitations under the License. */
 #ifdef PADDLE_WITH_DISTRIBUTE
 #include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
 #include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
-using phi::distributed::auto_parallel::DistTensor;
+using phi::distributed::DistTensor;
 using phi::distributed::auto_parallel::TensorDistAttr;
 #endif
 
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 6a3f7e09c202a..6d07363805f8f 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -801,8 +801,8 @@ static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
     return ToPyObject(tensor);
   } else if (self->tensor.is_dist_tensor()) {
 #ifdef PADDLE_WITH_DISTRIBUTE
-    auto* tensor = static_cast<phi::distributed::auto_parallel::DistTensor*>(
-        self->tensor.impl().get());
+    auto* tensor =
+        static_cast<phi::distributed::DistTensor*>(self->tensor.impl().get());
     VLOG(6) << "dist tensor: " << tensor->defined();
     return ToPyObject(tensor);
 #else
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index 42c5b97067b0e..42d53ad7bee01 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -164,9 +164,8 @@ PyObject* tensor_properties_get_dist_attr(TensorObject* self, void* closure) {
   EAGER_TRY
   if (self->tensor.is_dist_tensor()) {
 #ifdef PADDLE_WITH_DISTRIBUTE
-    phi::distributed::auto_parallel::DistTensor* dist_tensor =
-        static_cast<phi::distributed::auto_parallel::DistTensor*>(
-            self->tensor.impl().get());
+    phi::distributed::DistTensor* dist_tensor =
+        static_cast<phi::distributed::DistTensor*>(self->tensor.impl().get());
     return ToPyObject(dist_tensor->dist_attr().get());
 #else
     RETURN_PY_NONE
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index e365819928e66..8dfc7cfc8e426 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -859,7 +859,7 @@ PyObject* ToPyObject(const phi::DenseTensor* value) {
 }
 
 #ifdef PADDLE_WITH_DISTRIBUTE
-PyObject* ToPyObject(const phi::distributed::auto_parallel::DistTensor* value) {
+PyObject* ToPyObject(const phi::distributed::DistTensor* value) {
   auto obj = ::pybind11::cast(value, py::return_value_policy::reference);
   obj.inc_ref();
   return obj.ptr();
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 208d2f25e7d21..1fb53a3b9f7a6 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -113,7 +113,7 @@ PyObject* ToPyObject(const std::vector<std::vector<paddle::Tensor>>& value,
 PyObject* ToPyObject(const platform::Place& value);
 PyObject* ToPyObject(const phi::DenseTensor* value);
 #ifdef PADDLE_WITH_DISTRIBUTE
-PyObject* ToPyObject(const phi::distributed::auto_parallel::DistTensor* value);
+PyObject* ToPyObject(const phi::distributed::DistTensor* value);
 PyObject* ToPyObject(
     const phi::distributed::auto_parallel::TensorDistAttr* value);
 #endif
diff --git a/paddle/fluid/pybind/tensor.cc b/paddle/fluid/pybind/tensor.cc
index 98ae45dd0134b..e9ad190ea3f3c 100644
--- a/paddle/fluid/pybind/tensor.cc
+++ b/paddle/fluid/pybind/tensor.cc
@@ -1025,7 +1025,7 @@ void BindTensor(pybind11::module &m) {  // NOLINT
 #endif
 
 #ifdef PADDLE_WITH_DISTRIBUTE
-  using phi::distributed::auto_parallel::DistTensor;
+  using phi::distributed::DistTensor;
   py::class_<DistTensor>(m, "DistTensor")
       .def(
           "get_tensor",
diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc
index 40319fa9ba660..b835230e22978 100644
--- a/paddle/phi/api/lib/tensor.cc
+++ b/paddle/phi/api/lib/tensor.cc
@@ -133,7 +133,7 @@ bool Tensor::is_dense_tensor() const {
 }
 bool Tensor::is_dist_tensor() const {
 #ifdef PADDLE_WITH_DISTRIBUTE
-  return phi::distributed::auto_parallel::DistTensor::classof(impl_.get());
+  return phi::distributed::DistTensor::classof(impl_.get());
 #else
   return false;
 #endif
diff --git a/paddle/phi/core/dense_tensor.h b/paddle/phi/core/dense_tensor.h
index 2cfdd7493c438..8af8f745baff7 100644
--- a/paddle/phi/core/dense_tensor.h
+++ b/paddle/phi/core/dense_tensor.h
@@ -30,9 +30,7 @@ namespace phi {
 
 class DenseTensorUtils;
 namespace distributed {
-namespace auto_parallel {
 class DistTensor;
-}  // namespace auto_parallel
 }  // namespace distributed
 
 /// \brief The Dense tensor stores values in a contiguous sequential block
@@ -186,7 +184,7 @@ class DenseTensor : public TensorBase,
 
  private:
   friend class DenseTensorUtils;
-  friend class phi::distributed::auto_parallel::DistTensor;
+  friend class phi::distributed::DistTensor;
 
  protected:
   DenseTensorMeta meta_;
diff --git a/paddle/phi/core/distributed/auto_parallel/dist_tensor.cc b/paddle/phi/core/distributed/auto_parallel/dist_tensor.cc
index 6f60773132656..b234fc3c17485 100644
--- a/paddle/phi/core/distributed/auto_parallel/dist_tensor.cc
+++ b/paddle/phi/core/distributed/auto_parallel/dist_tensor.cc
@@ -16,7 +16,6 @@
 
 namespace phi {
 namespace distributed {
-namespace auto_parallel {
 
 void* DistTensor::AllocateFrom(Allocator* allocator,
                                DataType dtype,
@@ -59,6 +58,5 @@ void DistTensor::set_meta(const DenseTensorMeta& meta) {
   meta_ = meta;
 }
 
-}  // namespace auto_parallel
 }  // namespace distributed
 }  // namespace phi
diff --git a/paddle/phi/core/distributed/auto_parallel/dist_tensor.h b/paddle/phi/core/distributed/auto_parallel/dist_tensor.h
index ed47727fe9a3a..eb3a6dbbe3e66 100644
--- a/paddle/phi/core/distributed/auto_parallel/dist_tensor.h
+++ b/paddle/phi/core/distributed/auto_parallel/dist_tensor.h
@@ -18,11 +18,12 @@
 #include "paddle/phi/core/dense_tensor.h"
 
 namespace phi {
-
 namespace distributed {
-namespace auto_parallel {
 
+namespace auto_parallel {
 class TensorDistAttr;
+}
+using auto_parallel::TensorDistAttr;
 
 class DistTensor final
     : public phi::TensorBase,
@@ -125,6 +126,5 @@ class DistTensor final
   std::unique_ptr<DenseTensor> value_{nullptr};
 };
 
-}  // namespace auto_parallel
 }  // namespace distributed
 }  // namespace phi
diff --git a/paddle/phi/core/utils/type_info.cc b/paddle/phi/core/utils/type_info.cc
index 2a554525024c8..0de0289887507 100644
--- a/paddle/phi/core/utils/type_info.cc
+++ b/paddle/phi/core/utils/type_info.cc
@@ -56,8 +56,7 @@
 template class TypeInfoTraits;
 template class TypeInfoTraits;
 #ifdef PADDLE_WITH_DISTRIBUTE
-template class TypeInfoTraits<phi::TensorBase,
-                              phi::distributed::auto_parallel::DistTensor>;
+template class TypeInfoTraits<phi::TensorBase, phi::distributed::DistTensor>;
 #endif
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || \
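
Note (illustrative, not part of the patch): the dist_tensor.h hunk keeps TensorDistAttr declared inside auto_parallel but re-exports it into phi::distributed with a using-declaration, so both the old and the new qualified names keep compiling while DistTensor itself moves up one namespace. A minimal standalone sketch of that re-export pattern, with a stand-in class body instead of the real one from dist_attr.h:

#include <iostream>
#include <type_traits>

namespace phi {
namespace distributed {

namespace auto_parallel {
class TensorDistAttr {};  // stand-in body; the real class lives in dist_attr.h
}  // namespace auto_parallel

// Re-export: phi::distributed::TensorDistAttr now names the same type as
// phi::distributed::auto_parallel::TensorDistAttr.
using auto_parallel::TensorDistAttr;

}  // namespace distributed
}  // namespace phi

int main() {
  // Both spellings refer to one type, so code migrated to the shorter
  // namespace and code still using the old one stay source-compatible.
  static_assert(
      std::is_same<phi::distributed::TensorDistAttr,
                   phi::distributed::auto_parallel::TensorDistAttr>::value,
      "same type");
  std::cout << "using-declaration re-export works\n";
  return 0;
}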