[DoubleGrad PR PaddlePaddle#6] Fixed issues with TensorWrapper::recover() interface
jim19930609 committed Apr 1, 2022
1 parent 7e6b1e2 commit 022df81
Showing 3 changed files with 10 additions and 10 deletions.
4 changes: 2 additions & 2 deletions

@@ -1249,9 +1249,9 @@ def GenerateNodeDefinition(self, grad_node_creation_str):
 
         is_optional = (name in self.optional_inputs)
         if is_optional:
-            tensor_wrapper_recover_str = f"{indent}auto {transformed_tensor_name} = egr::EagerUtils::RecoverOptionalTensorWrapper(&this->{tensor_wrapper_name}, nullptr);"
+            tensor_wrapper_recover_str = f"{indent}auto {transformed_tensor_name} = egr::EagerUtils::RecoverOptionalTensorWrapper(&this->{tensor_wrapper_name}, this->shared_from_this());"
         else:
-            tensor_wrapper_recover_str = f"{indent}auto {transformed_tensor_name} = egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, nullptr);"
+            tensor_wrapper_recover_str = f"{indent}auto {transformed_tensor_name} = egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name}, this->shared_from_this());"
         grad_api_args[grad_api_position] = transformed_tensor_name
         get_grad_in_args_list.append(tensor_wrapper_recover_str)
 
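For context, each f-string above is emitted into a generated GradNode definition; the change threads the node itself (via `shared_from_this()`) into the recovery call instead of `nullptr`. A minimal compilable sketch of the generated shape, using stub types in place of Paddle's real `egr::EagerUtils` API and a placeholder input name `X` (these identifiers are illustrative, not from the commit):

```cpp
#include <memory>

// Stubs standing in for Paddle types; only the call shape matters here.
struct Tensor {};
struct TensorWrapper {};
struct GradNodeBase;

namespace egr {
struct EagerUtils {
  static Tensor RecoverTensorWrapper(TensorWrapper* /*wrapper*/,
                                     std::shared_ptr<GradNodeBase> /*node*/) {
    return Tensor{};
  }
};
}  // namespace egr

struct GradNodeBase : std::enable_shared_from_this<GradNodeBase> {
  TensorWrapper X_;  // saved input; name comes from tensor_wrapper_name
  void Apply() {
    // The line the generator now emits: the node passes itself, not nullptr.
    auto X = egr::EagerUtils::RecoverTensorWrapper(&this->X_,
                                                   this->shared_from_this());
    (void)X;
  }
};

int main() {
  auto node = std::make_shared<GradNodeBase>();
  node->Apply();  // valid only because *node is owned by a shared_ptr
}
```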
2 changes: 1 addition & 1 deletion paddle/fluid/eager/grad_node_info.h

@@ -87,7 +87,7 @@ class GradSlotMeta {
   std::shared_ptr<phi::DenseTensorMeta> meta_ = nullptr;
 };
 
-class GradNodeBase {
+class GradNodeBase : public std::enable_shared_from_this<GradNodeBase> {
  public:
  GradNodeBase() { VLOG(6) << "Construct GradNodeBase"; }
  GradNodeBase(size_t bwd_in_slot_num, size_t bwd_out_slot_num);
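Making GradNodeBase inherit `std::enable_shared_from_this` is what legitimizes the `this->shared_from_this()` calls in the generated code above: `shared_from_this()` is only valid on an object that is already owned by a `std::shared_ptr`, which is how the eager runtime holds grad nodes. A standalone sketch of that contract (toy class, not Paddle code):

```cpp
#include <iostream>
#include <memory>

struct Node : std::enable_shared_from_this<Node> {
  std::shared_ptr<Node> Self() { return shared_from_this(); }
};

int main() {
  auto owned = std::make_shared<Node>();
  auto alias = owned->Self();  // OK: shares ownership with `owned`
  std::cout << "use_count=" << alias.use_count() << "\n";  // prints use_count=2

  Node on_stack;  // not managed by any shared_ptr
  try {
    on_stack.Self();  // throws std::bad_weak_ptr (since C++17; UB before)
  } catch (const std::bad_weak_ptr&) {
    std::cout << "shared_from_this() without shared ownership\n";
  }
}
```

One related caveat: `shared_from_this()` must not be called from a constructor, since shared ownership is established only after construction completes.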
14 changes: 7 additions & 7 deletions paddle/fluid/eager/tensor_wrapper.h

@@ -95,19 +95,19 @@ class TensorWrapper {
     }
 
     // if it's full_reserved just return the full copy of tensor
-    if (full_reserved_) {
-      check_inplace_version();
-      return intermidiate_tensor_;
-    } else {
+    check_inplace_version();
+
+    paddle::experimental::Tensor recovered_tensor = intermidiate_tensor_;
+    if (!full_reserved_) {
       std::shared_ptr<GradNodeBase> new_grad_node = grad_node;
       auto p_ab_autograd_meta =
           std::make_shared<AutogradMeta>(Edge(new_grad_node, out_rank_info_));
-      intermidiate_tensor_.set_autograd_meta(
+      recovered_tensor.set_autograd_meta(
           std::static_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
               p_ab_autograd_meta));
-      check_inplace_version();
-      return intermidiate_tensor_;
     }
+
+    return recovered_tensor;
   }
 
   void check_inplace_version() {
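The restructured recover() above runs check_inplace_version() exactly once, copies the saved tensor, and rebinds autograd metadata on the copy only when the wrapper is not full_reserved, so intermidiate_tensor_ itself is no longer mutated. A simplified standalone sketch of that control flow (stub types; member names mirror the diff, including the original intermidiate_tensor_ spelling):

```cpp
#include <memory>
#include <utility>

// Stubs standing in for Paddle's tensor and autograd-meta types.
struct AutogradMeta {};
struct Tensor {
  std::shared_ptr<AutogradMeta> autograd_meta;
  void set_autograd_meta(std::shared_ptr<AutogradMeta> m) {
    autograd_meta = std::move(m);
  }
};
struct GradNodeBase {};

struct TensorWrapper {
  bool full_reserved_ = false;
  Tensor intermidiate_tensor_;

  void check_inplace_version() { /* elided: version-counter comparison */ }

  Tensor recover(const std::shared_ptr<GradNodeBase>& grad_node) {
    check_inplace_version();  // hoisted: runs once on both paths

    Tensor recovered_tensor = intermidiate_tensor_;  // copy; original untouched
    if (!full_reserved_) {
      // In the real code this wraps Edge(grad_node, out_rank_info_); the stub
      // meta only marks that the copy's autograd state was rebound.
      (void)grad_node;
      recovered_tensor.set_autograd_meta(std::make_shared<AutogradMeta>());
    }
    return recovered_tensor;
  }
};

int main() {
  TensorWrapper w;
  auto node = std::make_shared<GradNodeBase>();
  Tensor t = w.recover(node);  // w.intermidiate_tensor_ keeps its original meta
  (void)t;
}
```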
