Commit: fix wrong names (#5951)
* fix wrong names

* auto format by CI

* refine

* auto format by CI

Co-authored-by: oneflow-ci-bot <ci-bot@oneflow.org>
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
3 people committed Aug 19, 2021
1 parent a99929d commit 790ddf8
Showing 3 changed files with 51 additions and 52 deletions.
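
What the rename accomplishes: after this commit, the public non-virtual entry points on AutogradEngine carry the `If` suffix (RunBackwardAndSaveGrads4LeafTensorIf, RunBackwardAndReturnInputsTensorGradIf) and run the shared consistency checks, while the private virtual implementations in StackAutogradEngine and GraphAutogradEngine drop the suffix; the call sites in the Python bindings are updated to match. Before the commit, the suffixes were the other way around. The snippet below is a minimal, self-contained sketch of the resulting non-virtual-interface layout; Engine, StackEngine, and the method names are simplified stand-ins for illustration, not OneFlow's real signatures.

// Minimal sketch of the post-commit layout (simplified stand-ins, not
// OneFlow's real signatures): the public, non-virtual *If method runs the
// shared checks, then dispatches to the private virtual implementation.
#include <iostream>

class Engine {
 public:
  virtual ~Engine() = default;

  // Public checked entry point: the name now carries the `If` suffix.
  void RunBackwardIf() {
    CheckMeta();    // shared validation, done once in the base class
    RunBackward();  // engine-specific work via the private virtual
  }

 private:
  void CheckMeta() { std::cout << "checking tensor meta\n"; }
  // Private virtual: the name no longer carries the `If` suffix.
  virtual void RunBackward() = 0;
};

class StackEngine final : public Engine {
 private:
  void RunBackward() override { std::cout << "stack backward\n"; }
};

int main() {
  StackEngine engine;
  engine.RunBackwardIf();  // prints the check line, then "stack backward"
  return 0;
}

Calling through the public RunBackwardIf means a derived engine cannot skip the validation step, which appears to be the point of keeping the virtuals private.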
oneflow/api/python/autograd/autograd.cpp (4 changes: 2 additions & 2 deletions)
@@ -71,7 +71,7 @@ Maybe<one::TensorTuple> Backward(const one::TensorTuple& outputs, const one::Ten
                                  bool retain_graph, bool create_graph) {
   if (create_graph) { retain_graph = true; }
   std::shared_ptr<one::TensorTuple> gradients = JUST(CheckAndInitOutGrads(outputs, out_grads));
-  JUST(one::GetThreadLocalAutogradEngine()->RunBackwardAndSaveGrads4LeafTensor(
+  JUST(one::GetThreadLocalAutogradEngine()->RunBackwardAndSaveGrads4LeafTensorIf(
       outputs, *gradients, retain_graph, create_graph));
   return std::make_shared<one::TensorTuple>(0);
 }
@@ -86,7 +86,7 @@ Maybe<one::TensorTuple> Grad(const one::TensorTuple& outputs, const one::TensorT
       [](const std::shared_ptr<one::Tensor>& tensor) { return tensor->requires_grad(); }))
       << "All input tensors `.requires_grad` should be true";
   std::shared_ptr<one::TensorTuple> gradients = JUST(CheckAndInitOutGrads(outputs, out_grads));
-  return one::GetThreadLocalAutogradEngine()->RunBackwardAndReturnInputsTensorGrad(
+  return one::GetThreadLocalAutogradEngine()->RunBackwardAndReturnInputsTensorGradIf(
       outputs, inputs, *gradients, retain_graph, create_graph);
 }

oneflow/core/autograd/autograd_engine.cpp (36 changes: 18 additions & 18 deletions)
@@ -74,25 +74,25 @@ Maybe<void> CheckConsistentTensorsMeta(const TensorTuple& tensor_tuple) {

 } // namespace

-Maybe<void> AutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const TensorTuple& outputs,
-                                                               const TensorTuple& out_grads,
-                                                               bool retain_graph,
-                                                               bool create_graph) {
+Maybe<void> AutogradEngine::RunBackwardAndSaveGrads4LeafTensorIf(const TensorTuple& outputs,
+                                                                 const TensorTuple& out_grads,
+                                                                 bool retain_graph,
+                                                                 bool create_graph) {
   JUST(CheckConsistentTensorsMeta(outputs));
   JUST(CheckConsistentTensorsMeta(out_grads));
   DisableCheckConsistentTensorMetaScope disable_meta_check;
-  return RunBackwardAndSaveGrads4LeafTensorIf(outputs, out_grads, retain_graph, create_graph);
+  return RunBackwardAndSaveGrads4LeafTensor(outputs, out_grads, retain_graph, create_graph);
 }

-Maybe<TensorTuple> AutogradEngine::RunBackwardAndReturnInputsTensorGrad(
+Maybe<TensorTuple> AutogradEngine::RunBackwardAndReturnInputsTensorGradIf(
     const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
     bool retain_graph, bool create_graph) {
   JUST(CheckConsistentTensorsMeta(outputs));
   JUST(CheckConsistentTensorsMeta(inputs));
   JUST(CheckConsistentTensorsMeta(out_grads));
   DisableCheckConsistentTensorMetaScope disable_meta_check;
-  return RunBackwardAndReturnInputsTensorGradIf(outputs, inputs, out_grads, retain_graph,
-                                                create_graph);
+  return RunBackwardAndReturnInputsTensorGrad(outputs, inputs, out_grads, retain_graph,
+                                              create_graph);
 }

 StackFunctionNode::StackFunctionNode(
@@ -190,10 +190,10 @@ void StackAutogradEngine::ClearReleasedFunctionNodes() {
                    node_list_.end());
 }

-Maybe<void> StackAutogradEngine::RunBackwardAndSaveGrads4LeafTensorIf(const TensorTuple& outputs,
-                                                                      const TensorTuple& out_grads,
-                                                                      bool retain_graph,
-                                                                      bool create_graph) {
+Maybe<void> StackAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const TensorTuple& outputs,
+                                                                    const TensorTuple& out_grads,
+                                                                    bool retain_graph,
+                                                                    bool create_graph) {
   ClearReleasedFunctionNodes();
   for (int i = 0; i < outputs.size(); ++i) {
     JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
@@ -213,7 +213,7 @@ Maybe<void> StackAutogradEngine::RunBackwardAndSaveGrads4LeafTensorIf(const Tens
   return Maybe<void>::Ok();
 }

-Maybe<TensorTuple> StackAutogradEngine::RunBackwardAndReturnInputsTensorGradIf(
+Maybe<TensorTuple> StackAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
     const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
     bool retain_graph, bool create_graph) {
   ClearReleasedFunctionNodes();
@@ -419,10 +419,10 @@ Maybe<void> GraphTask::Apply(bool save_grad_for_leaf) {
   return Maybe<void>::Ok();
 }

-Maybe<void> GraphAutogradEngine::RunBackwardAndSaveGrads4LeafTensorIf(const TensorTuple& outputs,
-                                                                      const TensorTuple& out_grads,
-                                                                      bool retain_graph,
-                                                                      bool create_graph) {
+Maybe<void> GraphAutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const TensorTuple& outputs,
+                                                                    const TensorTuple& out_grads,
+                                                                    bool retain_graph,
+                                                                    bool create_graph) {
   for (int i = 0; i < outputs.size(); ++i) {
     JUST(JUST(outputs.at(i)->current_grad())->PushPartialTensor(out_grads.at(i)));
   }
@@ -432,7 +432,7 @@ Maybe<void> GraphAutogradEngine::RunBackwardAndSaveGrads4LeafTensorIf(const Tens
   return Maybe<void>::Ok();
 }

-Maybe<TensorTuple> GraphAutogradEngine::RunBackwardAndReturnInputsTensorGradIf(
+Maybe<TensorTuple> GraphAutogradEngine::RunBackwardAndReturnInputsTensorGrad(
     const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
     bool retain_graph, bool create_graph) {
   std::shared_ptr<TensorTuple> input_current_grad = std::make_shared<TensorTuple>(inputs.size());
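
Both `If` wrappers in this file follow the same shape: validate the tensor tuples with CheckConsistentTensorsMeta, construct a DisableCheckConsistentTensorMetaScope, then delegate, so the nested backward work runs with the meta check suppressed. Below is a hedged sketch of that RAII flow; DisableMetaCheckScope and the thread-local flag are assumed stand-ins for the real scope type, whose definition is not shown in this diff.

// Hedged sketch of the check-then-delegate flow in the *If wrappers.
// DisableMetaCheckScope is an assumed stand-in for OneFlow's
// DisableCheckConsistentTensorMetaScope, modeled here as an RAII guard
// over a thread-local flag; the real type may behave differently.
#include <iostream>

thread_local bool g_check_meta_enabled = true;

struct DisableMetaCheckScope {
  DisableMetaCheckScope() : saved_(g_check_meta_enabled) { g_check_meta_enabled = false; }
  ~DisableMetaCheckScope() { g_check_meta_enabled = saved_; }  // restore on scope exit
  bool saved_;
};

void RunBackwardAndSaveGradsIf() {
  // 1. Validate inputs while checking is still enabled.
  std::cout << "meta check enabled: " << g_check_meta_enabled << "\n";
  // 2. Suppress the check for the duration of the backward pass.
  DisableMetaCheckScope scope;
  std::cout << "running backward, meta check enabled: " << g_check_meta_enabled << "\n";
}  // guard destroyed here; the previous flag value is restored

int main() {
  RunBackwardAndSaveGradsIf();
  std::cout << "after backward, meta check enabled: " << g_check_meta_enabled << "\n";
  return 0;
}

Because the guard restores the saved value rather than unconditionally re-enabling, nesting such scopes stays well behaved if backward is ever re-entered.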
oneflow/core/autograd/autograd_engine.h (63 changes: 31 additions & 32 deletions)
@@ -69,13 +69,13 @@ class AutogradEngine {
  public:
   virtual ~AutogradEngine() = default;

-  Maybe<void> RunBackwardAndSaveGrads4LeafTensor(const TensorTuple& outputs,
-                                                 const TensorTuple& out_grads, bool retain_graph,
-                                                 bool create_graph);
-  Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGrad(const TensorTuple& outputs,
-                                                          const TensorTuple& inputs,
-                                                          const TensorTuple& out_grads,
-                                                          bool retain_graph, bool create_graph);
+  Maybe<void> RunBackwardAndSaveGrads4LeafTensorIf(const TensorTuple& outputs,
+                                                   const TensorTuple& out_grads, bool retain_graph,
+                                                   bool create_graph);
+  Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGradIf(const TensorTuple& outputs,
+                                                            const TensorTuple& inputs,
+                                                            const TensorTuple& out_grads,
+                                                            bool retain_graph, bool create_graph);
   virtual void ClearEngine() = 0;
   // Builds FunctionNode, binding to all `outputs_` tensors and saving in AutogradEngine
   virtual Maybe<FunctionNode> AddBackwardFuncPtr(
@@ -88,15 +88,14 @@ class AutogradEngine {
   AutogradEngine() = default;

  private:
-  virtual Maybe<void> RunBackwardAndSaveGrads4LeafTensorIf(const TensorTuple& outputs,
-                                                           const TensorTuple& out_grads,
-                                                           bool retain_graph,
-                                                           bool create_graph) = 0;
-  virtual Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGradIf(const TensorTuple& outputs,
-                                                                    const TensorTuple& inputs,
-                                                                    const TensorTuple& out_grads,
-                                                                    bool retain_graph,
-                                                                    bool create_graph) = 0;
+  virtual Maybe<void> RunBackwardAndSaveGrads4LeafTensor(const TensorTuple& outputs,
+                                                         const TensorTuple& out_grads,
+                                                         bool retain_graph, bool create_graph) = 0;
+  virtual Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGrad(const TensorTuple& outputs,
+                                                                  const TensorTuple& inputs,
+                                                                  const TensorTuple& out_grads,
+                                                                  bool retain_graph,
+                                                                  bool create_graph) = 0;
 };

// Stack Autograd Node and Engine
@@ -137,14 +136,14 @@ class StackAutogradEngine final : public AutogradEngine {
   // moment.
   std::list<std::weak_ptr<FunctionNode>> node_list_;
   void ClearReleasedFunctionNodes();
-  Maybe<void> RunBackwardAndSaveGrads4LeafTensorIf(const TensorTuple& outputs,
-                                                   const TensorTuple& out_grads, bool retain_graph,
-                                                   bool create_graph) override;
-  Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGradIf(const TensorTuple& outputs,
-                                                            const TensorTuple& inputs,
-                                                            const TensorTuple& out_grads,
-                                                            bool retain_graph,
-                                                            bool create_graph) override;
+  Maybe<void> RunBackwardAndSaveGrads4LeafTensor(const TensorTuple& outputs,
+                                                 const TensorTuple& out_grads, bool retain_graph,
+                                                 bool create_graph) override;
+  Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGrad(const TensorTuple& outputs,
+                                                          const TensorTuple& inputs,
+                                                          const TensorTuple& out_grads,
+                                                          bool retain_graph,
+                                                          bool create_graph) override;
 };

// Graph Autograd Node and Engine
@@ -194,14 +193,14 @@ class GraphAutogradEngine final : public AutogradEngine {
       const TensorTuple& inputs, TensorTuple* outputs) override;

  private:
-  Maybe<void> RunBackwardAndSaveGrads4LeafTensorIf(const TensorTuple& outputs,
-                                                   const TensorTuple& out_grads, bool retain_graph,
-                                                   bool create_graph) override;
-  Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGradIf(const TensorTuple& outputs,
-                                                            const TensorTuple& inputs,
-                                                            const TensorTuple& out_grads,
-                                                            bool retain_graph,
-                                                            bool create_graph) override;
+  Maybe<void> RunBackwardAndSaveGrads4LeafTensor(const TensorTuple& outputs,
+                                                 const TensorTuple& out_grads, bool retain_graph,
+                                                 bool create_graph) override;
+  Maybe<TensorTuple> RunBackwardAndReturnInputsTensorGrad(const TensorTuple& outputs,
+                                                          const TensorTuple& inputs,
+                                                          const TensorTuple& out_grads,
+                                                          bool retain_graph,
+                                                          bool create_graph) override;
 };

AutogradEngine* GetThreadLocalAutogradEngine();
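
GetThreadLocalAutogradEngine() is the seam the Python bindings in autograd.cpp go through; its definition is not part of this diff. The sketch below shows one conventional way such an accessor can be built with a function-local thread_local. It is offered only as an assumed illustration of the idea, not OneFlow's actual implementation.

// Hedged sketch of a thread-local engine accessor in the spirit of
// GetThreadLocalAutogradEngine; OneFlow's real implementation may differ.
// Each thread lazily constructs its own engine instance, so backward runs
// on different threads never share mutable engine state.
#include <iostream>

class Engine {
 public:
  void Run() { std::cout << "engine @" << this << " running\n"; }
};

Engine* GetThreadLocalEngine() {
  // Function-local thread_local: constructed on first use, once per thread.
  thread_local Engine engine;
  return &engine;
}

int main() {
  // Repeated calls on the same thread return the same instance.
  std::cout << (GetThreadLocalEngine() == GetThreadLocalEngine()) << "\n";  // prints 1
  GetThreadLocalEngine()->Run();
  return 0;
}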
