This repository has been archived by the owner on Nov 17, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 6.8k
/
convolution.cc
82 lines (76 loc) · 2.56 KB
/
convolution.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
/*!
* Copyright (c) 2015 by Contributors
* \file convolution.cc
* \brief
* \author Bing Xu
*/
#include "./convolution-inl.h"
#if MXNET_USE_MKL2017 == 1
#include <mkl_memory.h>
#include "./mkl/mkl_memory-inl.h"
#include "./mkl/mkl_convolution-inl.h"
#endif // MXNET_USE_MKL2017
#if MXNET_USE_NNPACK == 1
#include "./nnpack/nnpack_convolution-inl.h"
#endif // MXNET_USE_NNPACK
namespace mxnet {
namespace op {
// Register ConvolutionParam so its fields (kernel, stride, dilate, num_filter,
// no_bias, num_group, ...) are parseable/printable via the dmlc parameter system.
DMLC_REGISTER_PARAMETER(ConvolutionParam);
// CPU operator factory for Convolution.
//
// Selection order (each guarded by a compile-time backend flag):
//   1. MKL2017 kernel  — only for 2-D, non-dilated convolutions, float32/float64.
//   2. NNPACK kernel   — only for 2-D, non-dilated, with-bias, single-group
//                        convolutions, float32; batched input additionally
//                        requires stride 1x1 (presumably an NNPACK inference
//                        limitation — confirm against nnpack_convolution-inl.h).
//   3. Generic ConvolutionOp — fallback for any real dtype.
//
// \param param     convolution hyper-parameters (taken by value; copied into the op)
// \param dtype     mshadow type flag of the input data
// \param in_shape  inferred input shapes; [0] is the data shape (batch first)
// \param out_shape inferred output shapes (unused by the CPU paths below)
// \param ctx       device context (unused here; dispatch already chose cpu)
// \return a newly allocated Operator; ownership passes to the caller.
template<>
Operator* CreateOp<cpu>(ConvolutionParam param, int dtype,
                        std::vector<TShape> *in_shape,
                        std::vector<TShape> *out_shape,
                        Context ctx) {
  Operator *op = NULL;
#if MXNET_USE_MKL2017 == 1
  // MKL path: restricted to 2-D kernels without dilation.
  if ((param.dilate[0] == 1 && param.dilate[1] == 1)
      && param.kernel.ndim() == 2) {
    switch (dtype) {
    case mshadow::kFloat32:
      return new MKLConvolutionOp<cpu, float>(param);
    case mshadow::kFloat64:
      return new MKLConvolutionOp<cpu, double>(param);
    default:
      // Unsupported dtype for MKL; fall through to the next backend.
      break;
    }
  }
  // Reached only when the MKL path was not taken.
  LOG(INFO) << MKLConvolutionOp<cpu, float>::getName() << " Skip MKL optimization";
#endif
#if MXNET_USE_NNPACK == 1
  // in_shape[0] is the data shape; its leading dimension is the batch size.
  const size_t batch_size = (*in_shape)[0][0];
  // NNPACK path: 2-D, non-dilated, bias present, single group; for batched
  // input (batch_size > 1) the stride must also be 1x1.
  if ((param.dilate[0] == 1 && param.dilate[1] == 1)
      && param.kernel.ndim() == 2 && (!param.no_bias)
      && param.num_group == 1 && (batch_size == 1 ||
      ((batch_size > 1) && (param.stride[0] == 1) &&
      (param.stride[1] == 1)))) {
    switch (dtype) {
    case mshadow::kFloat32:
      return new NNPACKConvolutionOp<cpu, float>(param);
    default:
      // NNPACK here only handles float32; fall through to the generic op.
      break;
    }
  }
#endif
  // Generic fallback: instantiate ConvolutionOp for the requested real dtype.
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new ConvolutionOp<cpu, DType>(param);
  })
  return op;
}
// DO_BIND_DISPATCH comes from operator_common.h
// Resolves output/aux shapes and types via inference, then dispatches to the
// device-specific CreateOp factory (which returns the new Operator).
Operator *ConvolutionProp::CreateOperatorEx(Context ctx,
                                            std::vector<TShape> *in_shape,
                                            std::vector<int> *in_type) const {
  // Run type inference first so the dtype handed to CreateOp is resolved.
  std::vector<int> inferred_type;
  std::vector<int> aux_type;
  CHECK(InferType(in_type, &inferred_type, &aux_type));
  // Shape inference fills the output shapes CreateOp may consult.
  std::vector<TShape> inferred_shape;
  std::vector<TShape> aux_shape;
  CHECK(InferShape(in_shape, &inferred_shape, &aux_shape));
  DO_BIND_DISPATCH(CreateOp, param_, (*in_type)[0], in_shape, &inferred_shape, ctx);
}
// Register the Convolution operator with the symbolic frontend: declares its
// named inputs (data, weight, bias), exposes every ConvolutionParam field as
// a keyword argument, and attaches the user-facing description.
MXNET_REGISTER_OP_PROPERTY(Convolution, ConvolutionProp)
.add_argument("data", "Symbol", "Input data to the ConvolutionOp.")
.add_argument("weight", "Symbol", "Weight matrix.")
.add_argument("bias", "Symbol", "Bias parameter.")
.add_arguments(ConvolutionParam::__FIELDS__())
.describe("Apply convolution to input then add a bias.");
} // namespace op
} // namespace mxnet