Withdraw mkldnn mul
mozga-intel committed May 30, 2018
1 parent 654f5d3 commit 30d3203
Showing 6 changed files with 8 additions and 308 deletions.
197 changes: 0 additions & 197 deletions paddle/fluid/operators/mul_mkldnn_op.cc

This file was deleted.

39 changes: 0 additions & 39 deletions paddle/fluid/operators/mul_op.cc
@@ -16,10 +16,6 @@ limitations under the License. */
#include <string>
#include <vector>

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

@@ -76,22 +72,6 @@ class MulOp : public framework::OperatorWithKernel {
ctx->SetOutputDim("Out", framework::make_ddim(output_dims));
ctx->ShareLoD("X", /*->*/ "Out");
}

private:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
framework::LibraryType library{framework::LibraryType::kPlain};
#ifdef PADDLE_WITH_MKLDNN
if (library == framework::LibraryType::kPlain &&
platform::CanMKLDNNBeUsed(ctx)) {
library = framework::LibraryType::kMKLDNN;
}
#endif
framework::DataLayout layout{framework::DataLayout::kAnyLayout};
return framework::OpKernelType(
framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace(),
layout, library);
}
};

class MulOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -120,9 +100,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
)DOC")
.SetDefault(1)
.EqualGreaterThan(1);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
AddAttr<int>(
"y_num_col_dims",
R"DOC((int, default 1), The mul_op can take tensors with more than two,
@@ -177,22 +154,6 @@ class MulGradOp : public framework::OperatorWithKernel {
ctx->SetOutputDim(y_grad_name, y_dims);
}
}

private:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
framework::LibraryType library{framework::LibraryType::kPlain};
#ifdef PADDLE_WITH_MKLDNN
if (library == framework::LibraryType::kPlain &&
platform::CanMKLDNNBeUsed(ctx)) {
library = framework::LibraryType::kMKLDNN;
}
#endif
framework::DataLayout layout{framework::DataLayout::kAnyLayout};
return framework::OpKernelType(
framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace(),
layout, library);
}
};

} // namespace operators
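For reference, the x_num_col_dims / y_num_col_dims attributes documented in the MulOpMaker hunk above control how higher-rank inputs are flattened to 2-D before the matrix multiply. A minimal NumPy sketch of that flattening (illustrative only; the shapes and variable names are hypothetical and this is not the operator's code):

import numpy as np

# Hypothetical inputs: X has rank 4, Y has rank 3.
x = np.random.rand(2, 3, 4, 5)
y = np.random.rand(4, 5, 7)

# With x_num_col_dims = 2, the first two dims of X form the matrix height and
# the remaining dims form the width; likewise for Y with y_num_col_dims = 2.
x2d = x.reshape(2 * 3, 4 * 5)   # (6, 20)
y2d = y.reshape(4 * 5, 7)       # (20, 7)
out = x2d.dot(y2d)              # (6, 7); the op reshapes this back to (2, 3, 7)
print(out.shape)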
13 changes: 5 additions & 8 deletions python/paddle/fluid/layers/nn.py
@@ -177,11 +177,8 @@ def fc(input,
inputs={"X": input_var,
"Y": w},
outputs={"Out": tmp},
attrs={
"x_num_col_dims": num_flatten_dims,
"y_num_col_dims": 1,
"use_mkldnn": use_mkldnn
})
attrs={"x_num_col_dims": num_flatten_dims,
"y_num_col_dims": 1})
mul_results.append(tmp)

if len(mul_results) == 1:
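For context, a minimal usage sketch of the fc layer touched above, assuming a 2018-era fluid build (the layer size and variable names are illustrative); after this change, the mul op that fc appends carries only the x_num_col_dims and y_num_col_dims attributes, and MKLDNN is no longer requested through a use_mkldnn op attribute:

import paddle.fluid as fluid

# fc internally appends one mul op per input variable.
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
hidden = fluid.layers.fc(input=x, size=64, act='relu')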
@@ -3929,10 +3926,10 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None):
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this layer) on a rectilinear 2D grid.
For details, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation
Args:
input (Variable): The input tensor of bilinear interpolation,
This is a 4-D tensor of the shape
@@ -3950,7 +3947,7 @@ def upsampling_bilinear2d(input, out_shape=None, scale=None, name=None):
Returns:
out (Variable): The output is a 4-D tensor of the shape
(num_batches, channels, out_h, out_w).
Examples:
.. code-block:: python
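The upsampling_bilinear2d docstring above refers to bilinear interpolation; below is a small, self-contained NumPy sketch of the 2x2-neighborhood weighting it describes (align-corners style; illustrative only, not the operator's implementation):

import numpy as np

def bilinear_resize(img, out_h, out_w):
    """Resize a (H, W) array with bilinear interpolation (align-corners style)."""
    h, w = img.shape
    out = np.empty((out_h, out_w), dtype=img.dtype)
    scale_h = (h - 1) / float(out_h - 1) if out_h > 1 else 0.0
    scale_w = (w - 1) / float(out_w - 1) if out_w > 1 else 0.0
    for i in range(out_h):
        for j in range(out_w):
            y, x = i * scale_h, j * scale_w
            y0, x0 = int(y), int(x)
            y1, x1 = min(y0 + 1, h - 1), min(x0 + 1, w - 1)
            dy, dx = y - y0, x - x0
            # Weighted average of the four surrounding pixels.
            out[i, j] = (img[y0, x0] * (1 - dy) * (1 - dx) +
                         img[y0, x1] * (1 - dy) * dx +
                         img[y1, x0] * dy * (1 - dx) +
                         img[y1, x1] * dy * dx)
    return out

print(bilinear_resize(np.arange(16, dtype='float32').reshape(4, 4), 8, 8).shape)  # (8, 8)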
44 changes: 0 additions & 44 deletions python/paddle/fluid/tests/unittests/test_mul_mkldnn_op.py

This file was deleted.

