conv_transpose_op_mobile.h
#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_
#define CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_

#include "caffe2/core/common.h"

#ifdef C10_MOBILE

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_transpose_unpool_op_base.h"

namespace caffe2 {

template <typename T, class Context>
class ConvTransposeMobileOp final : public ConvTransposeUnpoolBase<Context> {
 public:
  USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);

  ConvTransposeMobileOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvTransposeUnpoolBase<Context>(operator_def, ws) {
    OPERATOR_NEEDS_FEATURE(
        order_ == StorageOrder::NCHW,
        "Only NCHW order is supported right now.");
    OPERATOR_NEEDS_FEATURE(
        this->pad_l() == 0, "operator does not handle row width padding");
    OPERATOR_NEEDS_FEATURE(
        this->pad_r() == 0, "operator does not handle row width padding");
    OPERATOR_NEEDS_FEATURE(this->stride_w() <= 4, "stride width must be <= 4");
  }
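
  // RunOnDevice() in ConvTransposeUnpoolBase is expected to dispatch to one of
  // these based on order_; given the NCHW-only feature check in the
  // constructor, only the NCHW path should be reached in practice.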
  bool RunOnDeviceWithOrderNCHW() override;
  bool RunOnDeviceWithOrderNHWC() override;

 private:
  // We store numThreads per-worker tiles of Y, and numThreads per-worker
  // threadBuffers for the GEMM output, laid out in that order.
  Tensor threadBuffer_{CPU};

  // Input: X, W, b
  // Output: Y
  INPUT_TAGS(INPUT, FILTER, BIAS);
};

} // namespace caffe2

#endif // C10_MOBILE

#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_
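
// Example usage (a minimal sketch, not part of this header). It assumes the
// operator is registered for CPU in the corresponding .cc file; the type
// string "ConvTranspose", the argument names, and the blob names below are
// assumptions -- check the registration and ConvTransposeUnpoolBase for the
// exact spelling. It also assumes the "X" (NCHW input), "W" (filter), and
// "b" (bias) blobs have already been fed into the workspace:
//
//   caffe2::Workspace ws;
//   // ... feed "X", "W", and "b" into ws here ...
//   caffe2::OperatorDef def = caffe2::CreateOperatorDef(
//       "ConvTranspose",
//       "",
//       std::vector<std::string>{"X", "W", "b"},
//       std::vector<std::string>{"Y"},
//       std::vector<caffe2::Argument>{
//           caffe2::MakeArgument<int>("kernel", 4),
//           caffe2::MakeArgument<int>("stride_w", 2),  // must be <= 4
//           caffe2::MakeArgument<int>("stride_h", 2)});
//   std::unique_ptr<caffe2::OperatorBase> op = caffe2::CreateOperator(def, &ws);
//   bool ok = op->Run();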