Skip to content

Commit

Permalink
clean code (PaddlePaddle#696)
Browse files Browse the repository at this point in the history
  • Loading branch information
gglin001 committed May 5, 2022
1 parent 09354b4 commit 4ff8897
Show file tree
Hide file tree
Showing 19 changed files with 466 additions and 514 deletions.
16 changes: 7 additions & 9 deletions paddle/fluid/platform/device/ipu/ipu_backend.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/device/ipu/ipu_backend.h"
#include "paddle/fluid/platform/device/ipu/ipu_utils.h"

#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/device/ipu/ipu_compiler.h"
#include "paddle/fluid/platform/device/ipu/ipu_executor.h"

namespace paddle {
namespace platform {
Expand All @@ -40,7 +38,7 @@ IpuBackend::~IpuBackend() {
executor_.reset();
}

void IpuBackend::Compile(Graph* graph,
void IpuBackend::Compile(framework::ir::Graph* graph,
const std::vector<std::string>& feed_list,
const std::vector<std::string>& fetch_list) {
VLOG(10) << "enter IpuBackend::Compile";
Expand All @@ -63,8 +61,8 @@ void IpuBackend::Compile(Graph* graph,
VLOG(10) << "leave IpuBackend::Compile";
}

void IpuBackend::Run(const std::vector<const Tensor*>& inputs,
const std::vector<Tensor*>& outputs,
void IpuBackend::Run(const std::vector<const framework::Tensor*>& inputs,
const std::vector<framework::Tensor*>& outputs,
const framework::ExecutionContext& ctx) {
timer_->Start();
executor_->Run(inputs, outputs, ctx);
Expand All @@ -82,7 +80,7 @@ void IpuBackend::Reset() {
executor_.reset();
}

void IpuBackend::SetScope(const Scope& scope) {
void IpuBackend::SetScope(const framework::Scope& scope) {
scope_ = &scope;
executor_->SetScope(&scope);
}
Expand Down
62 changes: 30 additions & 32 deletions paddle/fluid/platform/device/ipu/ipu_backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,26 +18,25 @@ limitations under the License. */
#include <popart/names.hpp>
#include <popart/tensorinfo.hpp>

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device/ipu/ipu_compiler.h"
#include "paddle/fluid/platform/device/ipu/ipu_device.h"
#include "paddle/fluid/platform/device/ipu/ipu_executor.h"
#include "paddle/fluid/platform/device/ipu/ipu_strategy.h"
#include "paddle/fluid/platform/device/ipu/ipu_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/timer.h"

namespace paddle {
namespace framework {
class ExecutionContext;
} // namespace framework
} // namespace paddle

namespace paddle {
namespace platform {
namespace ipu {

// IpuBackend is the center of paddle-ipu; its functions include:
// 1. Compile a paddle model to a popart model
// 2. Run the popart model, for inference or training
// 3. Request and release devices
// 4. Other helper functions
class IpuStrategy;
class Compiler;
class Executor;

class IpuBackend {
public:
static IpuBackend *GetInstance();
Expand All @@ -46,47 +45,46 @@ class IpuBackend {
IpuBackend();
~IpuBackend();

// what compile does include (calls compiler_):
// 1. map each paddle-op -> popart op
// 2. construct the popart onnx compute graph
void Compile(Graph *graph, const std::vector<std::string> &feed_list,
// What the Compile method does:
// Convert paddle ops to popart ops;
// Construct a popart graph, which is an onnx compute graph;
// Load the graph and weights onto the ipu.
void Compile(framework::ir::Graph *graph,
const std::vector<std::string> &feed_list,
const std::vector<std::string> &fetch_list);

// what run does include:
// 1. construct the forward onnx graph
// 2. graph-level optimization
// 3. autodiff
void Run(const std::vector<const Tensor *> &inputs,
const std::vector<Tensor *> &outputs,
// Run the compiled graph on ipu
void Run(const std::vector<const framework::Tensor *> &inputs,
const std::vector<framework::Tensor *> &outputs,
const framework::ExecutionContext &ctx);

// Sync weights from IPU while training
void WeightsToHost();

// detach IPU manually
// Detach IPU manually
void Detach();

// reset manually
// call it before destruct works
// Reset manually
// Call it before destruct works
void Reset();

void SetScope(const Scope &scope);
const Scope *GetScope() { return scope_; }
void SetScope(const framework::Scope &scope);
const framework::Scope *GetScope() { return scope_; }
void SetIpuStrategy(const IpuStrategy &strategy);
const IpuStrategy *GetIpuStrategy() { return ipu_strategy_; }

// save compiled model to onnx
// Save compiled model to onnx
void SaveModelProto(const std::string &path);

private:
// not own
const Scope *scope_ = nullptr;
// Not own
const framework::Scope *scope_ = nullptr;
const IpuStrategy *ipu_strategy_ = nullptr;

// own
// Own
std::unique_ptr<Compiler> compiler_;
std::unique_ptr<Executor> executor_;
std::unique_ptr<platform::Timer> timer_;
std::unique_ptr<Timer> timer_;

bool is_compiled_ = false;

Expand Down
Loading

0 comments on commit 4ff8897

Please sign in to comment.