Skip to content
15 changes: 6 additions & 9 deletions backends/xnnpack/operators/op_conv2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,9 @@ def define_node(
) # NHWC input
kwargs["input1_id"] = vals_to_ids[get_input_node(node, 0)]

# filter shape for pytorch convolution is (oc, inc/groups, height, width)
# shape for xnnpack convolution is (oc, height, width, inc/groups), to convert
# to the proper shape, this is essentially a NCHW to NHWC conversion
kernel_node = get_input_node(node, 1)
kernel_shape = get_shape(kernel_node)
groups = cast(int, node.args[8])
Expand All @@ -65,19 +68,13 @@ def define_node(
is_depthwise_conv = (group_input_channels == 1) and (
group_output_channels % group_input_channels == 0
)
# filter
# filter shape for pytorch convolution is (oc, inc/groups, height, width)
# shape for xnnpack convolution is (oc, height, width, inc/groups), to convert
# to the proper shape, this is essentially a NCHW to NHWC conversion
weight_node = get_input_node(node, 1)
weight_quant_params = QuantParams.from_weights(
weight_node, self._exported_program
kernel_node, self._exported_program
)

fp32_static_weights = weight_node.meta["val"].dtype == torch.float16
fp32_static_weights = kernel_node.meta["val"].dtype == torch.float16

self.define_tensor(
weight_node,
kernel_node,
xnn_graph,
vals_to_ids,
convert_to_nhwc=True,
Expand Down
2 changes: 2 additions & 0 deletions backends/xnnpack/partition/TARGETS
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ runtime.python_library(
name = "xnnpack_partitioner",
srcs = [
"xnnpack_partitioner.py",
"xnnpack_partitioner2.py",
],
visibility = [
"//executorch/...",
Expand All @@ -15,6 +16,7 @@ runtime.python_library(
":configs",
":partitioner_graphs",
"//executorch/backends/xnnpack:xnnpack_preprocess",
"//executorch/backends/xnnpack/partition/config:xnnpack_partitioner_configs",
"//executorch/exir:delegate",
"//executorch/exir:lib",
"//executorch/exir/backend:partitioner",
Expand Down
20 changes: 20 additions & 0 deletions backends/xnnpack/partition/config/TARGETS
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Buck target for the XNNPACK partitioner configuration library.
# Bundles every *.py under this directory (see the sibling __init__.py, which
# aggregates the individual config classes) so that consumers can depend on a
# single library target.
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

oncall("executorch")

runtime.python_library(
    name = "xnnpack_partitioner_configs",
    # glob() keeps the target in sync automatically as new config modules
    # are added to this directory.
    srcs = glob([
        "*.py",
    ]),
    visibility = [
        "//executorch/...",
        "@EXECUTORCH_CLIENTS",
    ],
    deps = [
        "//executorch/exir:lib",
        "//executorch/exir/backend:partitioner",
        "//executorch/exir/backend:utils",
        # Provides the config-based partitioner base classes these configs extend.
        "//executorch/exir/backend/canonical_partitioners:config_partitioner_lib",
    ],
)
58 changes: 58 additions & 0 deletions backends/xnnpack/partition/config/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


from typing import List, Type

from executorch.backends.xnnpack.partition.config.gemm_configs import (
AddmmConfig,
ConvolutionConfig,
LinearConfig,
)

from executorch.backends.xnnpack.partition.config.generic_node_configs import (
AbsConfig,
AddConfig,
AvgPoolingConfig,
CatConfig,
CeilConfig,
ClampConfig,
DeQuantizedPerTensorConfig,
DivConfig,
# EluConfig,
HardtanhConfig,
MulConfig,
QuantizedPerTensorConfig,
ReLUConfig,
)
from executorch.backends.xnnpack.partition.config.node_configs import BatchNormConfig
from executorch.backends.xnnpack.partition.config.xnnpack_config import (
XNNPartitionerConfig,
)

# Registry of every XNNPartitionerConfig subclass exported by this package.
# Presumably iterated by the XNNPACK partitioner to decide which graph
# patterns it can delegate — TODO confirm against xnnpack_partitioner2.py.
# Entries are the classes themselves (Type[...]), not instances.
ALL_PARTITIONER_CONFIGS: List[Type[XNNPartitionerConfig]] = [
    # GEMM-like Configs
    AddmmConfig,
    LinearConfig,
    ConvolutionConfig,
    # BatchNorm Config
    BatchNormConfig,
    # Single Node Configs
    HardtanhConfig,
    AbsConfig,
    AvgPoolingConfig,
    AddConfig,
    CatConfig,
    CeilConfig,
    ClampConfig,
    DivConfig,
    MulConfig,
    # EluConfig, # Waiting for PyTorch Pin Update
    ReLUConfig,
    # Quantization Op Configs
    QuantizedPerTensorConfig,
    DeQuantizedPerTensorConfig,
]
Loading
Loading