Commit c5183ca

rename
tensor-tang committed Aug 30, 2017
1 parent bfbd066 commit c5183ca
Showing 2 changed files with 20 additions and 21 deletions.
29 changes: 13 additions & 16 deletions paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -134,7 +134,7 @@ void MKLDNNFcLayer::resetFwd() {
   const MatrixPtr& bias = hasBias ? biases_->getW() : nullptr;
   const MatrixPtr& out = output_.value;

-  if (prevIsOnlyMKLDNN()) {
+  if (inputIsOnlyMKLDNN()) {
     const MatrixPtr& in = getInputValue(0);
     inVal_ = std::dynamic_pointer_cast<MKLDNNMatrix>(in);
     CHECK(inVal_) << "Input should be MKLDNNMatrix";
@@ -154,7 +154,7 @@ void MKLDNNFcLayer::resetFwd() {

   // change original output value to mkldnn output value
   output_.value = std::dynamic_pointer_cast<Matrix>(outVal_);
-  if (!nextIsOnlyMKLDNN()) {
+  if (!outputIsOnlyMKLDNN()) {
     convertOutputToOtherDevice();
   }

@@ -194,19 +194,16 @@ void MKLDNNFcLayer::resetBwd() {
   const MatrixPtr& bias = hasBias ? biases_->getWGrad() : nullptr;

   // TODO(TJ): merge outgrad
-  if (nextIsOnlyMKLDNN()) {
-    // can not directly cast outputgrad to mkldnnmatrix,
-    // since each layer can not write the inputgrad to mkldnn inputgrad.
-    // So just create from matrix with outputvalue format.
-    const MatrixPtr& out = getOutput(MKLDNN_DEVICE).grad;
-    outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc());
-  } else {
-    const MatrixPtr& out = getOutput(CPU_DEVICE).grad;
-    // fc do not need to convert from cpu device since output always nc
-    // only need create from cpu device
-    outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc());
-  }
-
+  int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
+  // for MKLDNN device:
+  // can not directly cast outputgrad to mkldnnmatrix,
+  // since each layer can not write the inputgrad to mkldnn inputgrad.
+  // So just create from matrix with outputvalue format.
+  // for CPU device:
+  // fc do not need to convert from cpu device since output is always nc format
+  // only need create from cpu device
+  const MatrixPtr& out = getOutput(device).grad;
+  outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc());
   wgtGrad_ = MKLDNNMatrix::create(wgt, wgtVal_->getPrimitiveDesc());
   biasGrad_ = hasBias ? MKLDNNMatrix::create(bias, biasVal_->getPrimitiveDesc())
                       : nullptr;
@@ -238,7 +235,7 @@ void MKLDNNFcLayer::resetBwd() {
   pipelineBwd_.push_back(*bwdWgt_);

   /// backward data
-  int device = prevIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
+  device = inputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
   const MatrixPtr& in = getInputGrad(0, device);
   if (in == nullptr) {
     return;
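
To make the resetBwd() change easier to follow, here is a minimal standalone sketch of the pattern the hunk above introduces: pick the device once via the renamed outputIsOnlyMKLDNN() predicate, then create the output-gradient matrix a single time, instead of repeating the create call in both branches of an if/else. Everything in the sketch (DeviceId, GradMatrix, createGrad, and the bool parameter standing in for outputIsOnlyMKLDNN) is hypothetical scaffolding, not Paddle's actual API.

// Standalone sketch (not Paddle code): shows the shape of the resetBwd() change.
#include <iostream>
#include <memory>
#include <string>

enum DeviceId { CPU_DEVICE = 0, MKLDNN_DEVICE = 1 };  // placeholder ids

struct GradMatrix {
  DeviceId device;
  std::string format;
};

// stand-in for MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc())
std::shared_ptr<GradMatrix> createGrad(DeviceId device, const std::string& fmt) {
  return std::make_shared<GradMatrix>(GradMatrix{device, fmt});
}

// stand-in for outputIsOnlyMKLDNN(): true when no CPU layer consumes the output
bool outputIsOnlyMKLDNN(bool hasCpuConsumer) { return !hasCpuConsumer; }

int main() {
  bool hasCpuConsumer = true;

  // After the commit: choose the device once, then create the gradient matrix
  // once, instead of duplicating the create call in two branches.
  DeviceId device = outputIsOnlyMKLDNN(hasCpuConsumer) ? MKLDNN_DEVICE : CPU_DEVICE;
  auto outGrad = createGrad(device, "nc");  // fc output is always in nc format

  std::cout << "output grad created on device " << outGrad->device << "\n";
  return 0;
}
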
12 changes: 7 additions & 5 deletions paddle/gserver/layers/MKLDNNLayer.h
@@ -151,6 +151,8 @@ class MKLDNNLayer : public Layer {
 protected:
   /**
    * copy image size and sequence info to other device
+   * @note: can not directly use Layer::copyOutputToOtherDevice since here only
+   * copy base info and do not copy data value
    */
   void copyOutputInfoToOtherDevice() {
     for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
@@ -165,10 +167,10 @@
   }

   /**
-   * Is previous layer only has MKLDNN type.
+   * If input only has MKLDNN device.
    * Otherwise, only support the previous layer using CPU device.
    */
-  bool prevIsOnlyMKLDNN(int index = 0) {
+  bool inputIsOnlyMKLDNN(int index = 0) {
     int prevDevice = getPrev(index)->getDeviceId();
     if (prevDevice == MKLDNN_DEVICE) {
       return true;
@@ -183,7 +185,7 @@
    * If output only has MKLDNN device.
    * Otherwise, other devices should only using CPU device.
    */
-  bool nextIsOnlyMKLDNN() {
+  bool outputIsOnlyMKLDNN() {
     for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
       CHECK_EQ(outputOtherDevice_[i].deviceId, CPU_DEVICE)
           << "Only support other device is CPU yet";
@@ -195,7 +197,7 @@
    * Sync input value data
    */
   void syncInputValue() {
-    if (prevIsOnlyMKLDNN()) {
+    if (inputIsOnlyMKLDNN()) {
       return;
     }
     real* iData = getInputValue(0, CPU_DEVICE)->getData();
@@ -208,7 +210,7 @@
    * Sync output grad data
    */
   void syncOutputGrad() {
-    if (nextIsOnlyMKLDNN()) {
+    if (outputIsOnlyMKLDNN()) {
       return;
     }

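
The hunks above cut off the bodies of the two renamed helpers, so the following is a rough, standalone sketch of their apparent semantics. PrevLayer and OtherOutput are hypothetical stand-ins for the real Layer/Argument types, the device id values are placeholders, and the return behavior of the truncated portions is inferred from the visible checks and doc comments rather than taken from the source.

// Standalone sketch of the renamed predicates (assumption-heavy; see note above).
#include <cassert>
#include <vector>

constexpr int CPU_DEVICE = 0;     // placeholder id, not Paddle's actual value
constexpr int MKLDNN_DEVICE = 1;  // placeholder id, not Paddle's actual value

struct PrevLayer { int deviceId; };    // stand-in for the previous Layer
struct OtherOutput { int deviceId; };  // stand-in for an outputOtherDevice_ entry

// Input side: true when the previous layer already lives on the MKLDNN device;
// otherwise the previous layer is expected to be a CPU layer.
bool inputIsOnlyMKLDNN(const PrevLayer& prev) {
  if (prev.deviceId == MKLDNN_DEVICE) {
    return true;
  }
  assert(prev.deviceId == CPU_DEVICE && "only CPU is supported as the other device");
  return false;
}

// Output side: every extra per-device output must be a CPU copy, and (assumed)
// the output counts as "MKLDNN only" when there are no such extra outputs.
bool outputIsOnlyMKLDNN(const std::vector<OtherOutput>& outputOtherDevice) {
  for (const auto& out : outputOtherDevice) {
    assert(out.deviceId == CPU_DEVICE && "only CPU is supported as the other device");
  }
  return outputOtherDevice.empty();
}

int main() {
  PrevLayer prev{MKLDNN_DEVICE};
  std::vector<OtherOutput> others;  // no CPU-side consumers of this output
  return (inputIsOnlyMKLDNN(prev) && outputIsOnlyMKLDNN(others)) ? 0 : 1;
}
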
