New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Add Layout field to Conv and Pool nodes and remove OCL specific versions #3367
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -203,6 +203,9 @@ inline ShapeHWD calculate3DConvPoolOutputDims( | |
/// Modes of the padding operation. | ||
enum PaddingMode { CONSTANT = 0, REFLECT, EDGE }; | ||
|
||
/// Convolution Layouts. | ||
enum ConvolutionLayout { NHWC = 0, NCHW }; | ||
Review comment: "Since this is applicable to pools as well, I could see this as a TensorLayout." Reply: "I think TensorLayout is too general, but can't think of a better name :/" |
||
|
||
/// Support for hashing the Nodes. This is required for using | ||
/// llvm::hash_combine. | ||
class Node; | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -282,6 +282,8 @@ void BoundInterpreterFunction::fwdConvolutionInstQuantizedImpl( | |
} | ||
|
||
void BoundInterpreterFunction::fwdConvolutionInst(const ConvolutionInst *I) { | ||
assert(I->getLayout() == NHWC && | ||
"Glow Interpreter supports only NHWC Convolutions"); | ||
auto kernelSizes = I->getKernels(); | ||
auto pads = I->getPads(); | ||
auto strides = I->getStrides(); | ||
|
@@ -303,6 +305,8 @@ void BoundInterpreterFunction::fwdConvolutionInst(const ConvolutionInst *I) { | |
|
||
void BoundInterpreterFunction::fwdConvolutionGradInst( | ||
const ConvolutionGradInst *I) { | ||
assert(I->getLayout() == NHWC && | ||
"Glow Interpreter supports only NHWC Convolutions"); | ||
auto inW = getWeightHandle(I->getSrc()); | ||
auto inG = getWeightHandle(I->getSrcGrad()); | ||
auto outG = getWeightHandle(I->getDestGrad()); | ||
|
@@ -753,6 +757,7 @@ static void fwdMaxPool(Tensor *inW, Tensor *outW, Tensor *argmaxW, | |
} | ||
|
||
void BoundInterpreterFunction::fwdMaxPoolInst(const MaxPoolInst *I) { | ||
assert(I->getLayout() == NHWC && "Glow Interpreter supports only NHWC Pools"); | ||
Review comment: "Same checks for the gradient version?" |
||
auto inW = getTensor(I->getSrc()); | ||
auto outW = getTensor(I->getDest()); | ||
|
||
|
@@ -770,6 +775,7 @@ void BoundInterpreterFunction::fwdMaxPoolInst(const MaxPoolInst *I) { | |
|
||
void BoundInterpreterFunction::fwdMaxPoolWithArgmaxInst( | ||
const MaxPoolWithArgmaxInst *I) { | ||
assert(I->getLayout() == NHWC && "Glow Interpreter supports only NHWC Pools"); | ||
auto inW = getTensor(I->getSrc()); | ||
auto outW = getTensor(I->getDest()); | ||
auto argmaxW = getTensor(I->getArgmax()); | ||
|
@@ -888,6 +894,7 @@ void BoundInterpreterFunction::fwdAvgPoolInstI8Impl(const AvgPoolInst *I) { | |
} | ||
|
||
void BoundInterpreterFunction::fwdAvgPoolInst(const AvgPoolInst *I) { | ||
assert(I->getLayout() == NHWC && "Glow Interpreter supports only NHWC Pools"); | ||
if (I->getSrc()->getType()->isQuantizedType()) { | ||
fwdAvgPoolInstI8Impl(I); | ||
return; | ||
|
Review comment: "Update the doxygen with the new param everywhere."