Inductor cpp wrapper: fix QMaxPool (#112379)
Per the `Argument types` section of [this README](https://github.com/pytorch/pytorch/tree/cb942ef2b12134bfaa1727295380fe00ebb537c0/aten/src/ATen/native#func), a non-inplace `Tensor` argument in a schema should map to a C++ argument of type `const Tensor&`.
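
As a minimal sketch of that rule (illustrative only, not the actual kernel; `run_sketch` is a made-up name):

```cpp
#include <ATen/ATen.h>
#include <vector>

// Schema: quantized::max_pool2d(Tensor qx, int[] kernel_size, ...) -> Tensor
// `qx` is a non-inplace `Tensor`, so the C++ side takes `const Tensor&`;
// an inplace argument written as `Tensor(a!)` would map to `Tensor&` instead.
at::Tensor run_sketch(
    const at::Tensor& qx,               // was `Tensor qx` (by value) before this PR
    std::vector<int64_t> kernel_size) { // kept as in the existing op signature
  (void)kernel_size; // unused in this sketch
  return qx;         // placeholder; the real op returns the pooled tensor
}
```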

For `quantized_max_pool1d` and `quantized_max_pool2d`, the `qx` input has type `Tensor` in the schema, so this PR changes the corresponding C++ argument type to `const Tensor&`:
https://github.com/pytorch/pytorch/blob/cb942ef2b12134bfaa1727295380fe00ebb537c0/aten/src/ATen/native/quantized/library.cpp#L222-L223
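
For reference, the registrations at those two lines read approximately as follows (paraphrased from the linked file; default values may differ):

```cpp
// Inside the TORCH_LIBRARY(quantized, m) block in
// aten/src/ATen/native/quantized/library.cpp (approximate quote):
m.def(TORCH_SELECTIVE_SCHEMA(
    "quantized::max_pool1d(Tensor qx, int[] kernel_size, int[] stride, "
    "int[] padding, int[] dilation, bool ceil_mode) -> Tensor"));
m.def(TORCH_SELECTIVE_SCHEMA(
    "quantized::max_pool2d(Tensor qx, int[] kernel_size, int[] stride, "
    "int[] padding, int[] dilation, bool ceil_mode) -> Tensor"));
```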

Pull Request resolved: #112379
Approved by: https://github.com/jgong5, https://github.com/jansel
ghstack dependencies: #112373, #112378
chunyuan-w authored and pytorchmergebot committed Nov 6, 2023
1 parent 3be0e1c commit 46a34e8
Showing 4 changed files with 22 additions and 8 deletions.
2 changes: 1 addition & 1 deletion aten/src/ATen/native/quantized/cpu/Pooling.cpp
@@ -726,7 +726,7 @@ template <uint32_t kSpatialDim>
 class QMaxPool_arr_args final {
  public:
   static Tensor run(
-      Tensor qx,
+      const Tensor& qx,
       std::vector<int64_t> kernel_size,
       std::vector<int64_t> stride,
       std::vector<int64_t> padding,
2 changes: 1 addition & 1 deletion aten/src/ATen/native/quantized/cudnn/Pooling.cpp
@@ -227,7 +227,7 @@ template <uint32_t kSpatialDim>
 class QMaxPool_arr_args final {
  public:
   static Tensor run(
-      Tensor qx,
+      const Tensor& qx,
       std::vector<int64_t> kernel_size,
       std::vector<int64_t> stride,
       std::vector<int64_t> padding,
13 changes: 13 additions & 0 deletions test/inductor/test_cpp_wrapper.py
@@ -224,6 +224,19 @@ class BaseTest(NamedTuple):
             test_mkldnn_pattern_matcher.TestPatternMatcher(),
             condition=torch.backends.mkldnn.is_available(),
         ),
+        BaseTest(
+            "test_qconv2d_maxpool2d_linear_dynamic",
+            "cpu",
+            test_mkldnn_pattern_matcher.TestDynamicPatternMatcher(),
+            condition=torch.backends.mkldnn.is_available(),
+            func_inputs=[
+                [
+                    "op_qconv2d_pointwise.call",
+                    "op_quantized_max_pool2d_.call",
+                    "op_qlinear_pointwise.call",
+                ]
+            ],
+        ),
         BaseTest(
             "test_qlinear",
             "cpu",
13 changes: 7 additions & 6 deletions test/inductor/test_mkldnn_pattern_matcher.py
@@ -1415,7 +1415,7 @@ def forward(self, x):
         match_nodes = 12
         self._test_common(mod, (v,), match_count, match_nodes, rtol=1e-2, atol=1e-2)
 
-    def test_qconv2d_maxpool2d_linear_dynamic(self):
+    def test_qconv2d_maxpool2d_linear_dynamic_cpu(self, include_ops=None):
         r"""
         This testcase will quantize a single Conv2d->Maxpool2d->Linear module
         with dynamic batch size input.
@@ -1444,11 +1444,12 @@ def forward(self, x):
 
         mod = M().eval()
         v = torch.randn((2, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(1)
-        include_ops = [
-            "torch.ops.onednn.qconv2d_pointwise",
-            "torch.ops.quantized.max_pool2d",
-            "torch.ops.onednn.qlinear_pointwise",
-        ]
+        if include_ops is None:
+            include_ops = [
+                "torch.ops.onednn.qconv2d_pointwise",
+                "torch.ops.quantized.max_pool2d",
+                "torch.ops.onednn.qlinear_pointwise",
+            ]
         exclude_ops = []
         self._test_code_common(
             mod,
