4 changes: 4 additions & 0 deletions backends/qualcomm/_passes/layout_transform.py
@@ -64,8 +64,10 @@ class LayoutTransform(ExportPass):
exir_ops.edge.aten.add.Tensor,
exir_ops.edge.aten.amax.default,
exir_ops.edge.aten.amin.default,
exir_ops.edge.aten.asin.default,
exir_ops.edge.aten.atan.default,
exir_ops.edge.aten.bitwise_or.Tensor,
exir_ops.edge.aten.bitwise_xor.Tensor,
exir_ops.edge.aten.bmm.default,
exir_ops.edge.aten.bitwise_and.Tensor,
exir_ops.edge.aten.cat.default,
@@ -78,6 +80,7 @@ class LayoutTransform(ExportPass):
exir_ops.edge.aten.eq.Tensor,
exir_ops.edge.aten.exp.default,
exir_ops.edge.aten.floor.default,
exir_ops.edge.aten.floor_divide.default,
exir_ops.edge.aten.full.default,
exir_ops.edge.aten.full_like.default,
exir_ops.edge.aten.ge.Tensor,
@@ -107,6 +110,7 @@ class LayoutTransform(ExportPass):
exir_ops.edge.aten.relu.default,
exir_ops.edge.aten.round.default,
exir_ops.edge.aten.sigmoid.default,
exir_ops.edge.aten.sign.default,
exir_ops.edge.aten.split_with_sizes.default,
exir_ops.edge.aten.split_with_sizes_copy.default,
exir_ops.edge.aten.sqrt.default,
2 changes: 2 additions & 0 deletions backends/qualcomm/_passes/lift_constant_scalar_operands.py
@@ -54,6 +54,7 @@ class TensorOpInfo:
aten.where.ScalarOther: TensorOpInfo(aten.where.self, False, True),
aten.where.Scalar: TensorOpInfo(aten.where.self, False, True),
aten.masked_fill.Scalar: TensorOpInfo(aten.masked_fill.Tensor, False, False),
aten.bitwise_xor.Scalar: TensorOpInfo(aten.bitwise_xor.Tensor, False, False),
}


@@ -64,6 +65,7 @@ class TensorOpInfo:
aten.arange.default,
aten.scalar_tensor.default,
aten.elu.default,
aten.hardtanh.default,
}
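
For context, the new `aten.bitwise_xor.Scalar` entry means the scalar overload is rewritten to its Tensor counterpart before lowering, so the QNN backend only ever sees tensor inputs; `aten.hardtanh.default` joins the skip list presumably because its min/max scalars map to operator parameters rather than tensor inputs. A minimal sketch of the effect of the lift (a hypothetical helper, not the pass itself, which rewrites FX nodes in place):

```python
import torch

def lifted_bitwise_xor(x: torch.Tensor, scalar: int) -> torch.Tensor:
    # aten.bitwise_xor.Scalar(x, scalar) becomes the Tensor overload,
    # with the scalar materialized as a broadcastable constant tensor.
    scalar_tensor = torch.tensor(scalar, dtype=x.dtype)
    return torch.bitwise_xor(x, scalar_tensor)
```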


23 changes: 14 additions & 9 deletions backends/qualcomm/builders/README.md
@@ -360,7 +360,12 @@ The operator should now be functional for Qualcomm backends. For operator to wor
## Operator Support Status
Please help update the following table if you are contributing new operators:

- | Operators | HTP - 82/116 Enabled |
+ ✓ = Supported
+ ✗ = Not Supported
+ 🚫 = Deprecated, supported with other QNN Ops
+
+ | Operators | HTP - 90/116 Enabled |
|-----------|---------|
| Argmax | ✓ |
| Argmin | ✓ |
@@ -381,16 +386,16 @@ Please help update the following table if you are contributing new operators:
| ElementWiseAbs | ✓ |
| ElementWiseAdd | ✓ |
| ElementWiseAnd | ✓ |
- | ElementWiseAsin | ✗ |
+ | ElementWiseAsin | ✓ |
| ElementWiseAtan | ✓ |
- | ElementWiseBinary | ✗ |
+ | ElementWiseBinary | ✓ |
| ElementWiseCeil | ✓ |
| ElementWiseCos | ✓ |
| ElementWiseDivide | ✓ |
| ElementWiseEqual | ✓ |
| ElementWiseExp | ✓ |
| ElementWiseFloor | ✓ |
- | ElementWiseFloorDiv | ✗ |
+ | ElementWiseFloorDiv | ✓ |
| ElementWiseGreater | ✓ |
| ElementWiseGreaterEqual | ✓ |
| ElementWiseLess | ✓ |
@@ -408,13 +413,13 @@ Please help update the following table if you are contributing new operators:
| ElementWiseRound | ✓ |
| ElementWiseRsqrt | ✓ |
| ElementWiseSelect | ✓ |
- | ElementWiseSign | ✗ |
+ | ElementWiseSign | ✓ |
| ElementWiseSin | ✓ |
| ElementWiseSquaredDifference | ✗ |
| ElementWiseSquareRoot | ✓ |
| ElementWiseSubtract | ✓ |
| ElementWiseUnary | ✗ |
- | ElementWiseXor | ✗ |
+ | ElementWiseXor | ✓ |
| Elu | ✓ |
| ExpandDims | ✓ |
| ExtractGlimpse | ✗ |
@@ -452,11 +457,11 @@ Please help update the following table if you are contributing new operators:
| ReduceMin | ✓ |
| ReduceSum | ✓ |
| Relu | ✓ |
- | Relu1 | ✗ |
- | Relu6 | ✗ |
+ | Relu1 | 🚫 |
+ | Relu6 | 🚫 |
| ReluMinMax | ✓ |
| Reshape | ✓ |
- | Resize | ✗ |
+ | Resize | ✓ |
| ResizeBilinear | ✓ |
| ResizeNearestNeighbor | ✓ |
| RoiAlign | ✗ |
8 changes: 8 additions & 0 deletions backends/qualcomm/builders/__init__.py
@@ -15,9 +15,11 @@
op_arange,
op_argmax,
op_argmin,
op_asin,
op_atan,
op_avg_pool2d,
op_batch_norm,
op_binary,
op_bmm,
op_cat,
op_ceil,
@@ -79,6 +81,7 @@
op_scalar_tensor,
op_select_copy,
op_sigmoid,
op_sign,
op_sin,
op_skip_ops,
op_slice_copy,
@@ -99,6 +102,7 @@
op_upsample_bilinear2d,
op_upsample_nearest2d,
op_where,
op_xor,
)

__all__ = [
@@ -112,9 +116,11 @@
op_arange,
op_argmax,
op_argmin,
op_asin,
op_atan,
op_avg_pool2d,
op_batch_norm,
op_binary,
op_bmm,
op_cat,
op_ceil,
@@ -176,6 +182,7 @@
op_scalar_tensor,
op_select_copy,
op_sigmoid,
op_sign,
op_sin,
op_skip_ops,
op_slice_copy,
@@ -196,4 +203,5 @@
op_upsample_bilinear2d,
op_upsample_nearest2d,
op_where,
op_xor,
]
56 changes: 56 additions & 0 deletions backends/qualcomm/builders/op_asin.py
@@ -0,0 +1,56 @@
# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
import torch

from .node_visitor import NodeVisitor
from .node_visitor_manager import register_node_visitor

from .qnn_constants import OpElementWiseAsin, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class asin(NodeVisitor):
    target = ["aten.asin.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        input_node = self.get_node(node.args[0])
        input_tensor = self.get_tensor(input_node, node)
        input_tensor_wrapper = self.define_tensor(
            input_node,
            node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        asin_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseAsin.op_name,
        )
        asin_op.AddInputTensors([input_tensor_wrapper])
        asin_op.AddOutputTensors([output_tensor_wrapper])

        return asin_op
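
As a quick sanity check (a sketch, not part of this PR), exporting a module that calls `torch.asin` yields the `aten.asin.default` node this visitor targets:

```python
import torch

class AsinModule(torch.nn.Module):
    def forward(self, x):
        return torch.asin(x)

# The exported graph contains aten.asin.default, which the visitor above
# lowers to a QNN ElementWiseAsin op.
ep = torch.export.export(AsinModule(), (torch.rand(1, 3, 8, 8),))
print(ep.graph_module.code)
```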
84 changes: 84 additions & 0 deletions backends/qualcomm/builders/op_binary.py
@@ -0,0 +1,84 @@
# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper
import numpy as np
import torch
from executorch.backends.qualcomm.utils.constants import QCOM_DATA
from executorch.exir.dialects._ops import ops as exir_ops

from .node_visitor import NodeVisitor
from .node_visitor_manager import register_node_visitor
from .qnn_constants import OpElementWiseBinary, QNN_OP_PACKAGE_NAME_QTI_AISW


# Refer to QnnOpDef.h for the value.
QNN_BINARY_OPERATOR = {
    exir_ops.edge.aten.floor_divide.default: 4,
}


@register_node_visitor
class Binary(NodeVisitor):
    target = ["aten.floor_divide.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        out_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            node,
            out_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )
        binary_output_tensors = [output_tensor_wrapper]

        binary_input_tensors = []
        for index in range(2):
            input_node = self.get_node(node.args[index])
            input_tensor = self.get_tensor(input_node, node)
            tensor_type = PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE

            input_tensor_wrapper = self.define_tensor(
                input_node,
                node,
                input_tensor,
                tensor_type,
                nodes_to_wrappers,
            )
            binary_input_tensors.append(input_tensor_wrapper)

        binary_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseBinary.op_name,
        )
        binary_op.AddInputTensors(binary_input_tensors)
        binary_op.AddOutputTensors(binary_output_tensors)

        if node.target not in QNN_BINARY_OPERATOR:
            warnings.warn(
                "[QNN Delegate Op Builder]: This binary operator is not yet supported.",
                stacklevel=1,
            )
            return None

        binary_op.AddScalarParam(
            OpElementWiseBinary.param_operation,
            PyQnnWrapper.Qnn_DataType_t.QNN_DATATYPE_UINT_32,
            {QCOM_DATA: np.uint32(QNN_BINARY_OPERATOR[node.target])},
        )

        return binary_op
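
`OpElementWiseBinary` is QNN's generic binary op, dispatching on the `operation` scalar parameter; per the comment above, the value 4 is taken from QnnOpDef.h and selects floor division. For reference, `aten.floor_divide` rounds toward negative infinity, which the delegated op is expected to reproduce (a quick eager-mode sketch):

```python
import torch

x = torch.tensor([7.0, -7.0])
y = torch.tensor([2.0, 2.0])
# Floor division rounds toward negative infinity: tensor([ 3., -4.])
print(torch.floor_divide(x, y))
```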
2 changes: 1 addition & 1 deletion backends/qualcomm/builders/op_ne.py
@@ -16,7 +16,7 @@

@register_node_visitor
class NotEqual(NodeVisitor):
-    target = ["aten.ne.Tensor", "aten.ne.Scalar"]
+    target = ["aten.ne.Tensor"]

    def __init__(self, *args) -> None:
        super().__init__(*args)
56 changes: 56 additions & 0 deletions backends/qualcomm/builders/op_sign.py
@@ -0,0 +1,56 @@
# Copyright (c) Qualcomm Innovation Center, Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict

import executorch.backends.qualcomm.python.PyQnnWrapperAdaptor as PyQnnWrapper

import torch

from .node_visitor import NodeVisitor
from .node_visitor_manager import register_node_visitor
from .qnn_constants import OpElementWiseSign, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class Sign(NodeVisitor):
    target = ["aten.sign.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnWrapper.TensorWrapper],
    ) -> PyQnnWrapper.PyQnnOpWrapper:
        input_node = self.get_node(node.args[0])
        input_tensor = self.get_tensor(input_node, node)
        input_tensor_wrapper = self.define_tensor(
            input_node,
            node,
            input_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        output_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            node,
            output_tensor,
            PyQnnWrapper.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )

        sign_op = PyQnnWrapper.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpElementWiseSign.op_name,
        )
        sign_op.AddInputTensors([input_tensor_wrapper])
        sign_op.AddOutputTensors([output_tensor_wrapper])

        return sign_op
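
Likewise for the new sign visitor, a quick eager-mode reference (a sketch, not part of this PR): `torch.sign` maps each element to -1, 0, or 1 by sign, which the delegated ElementWiseSign op should match:

```python
import torch

x = torch.tensor([-2.5, 0.0, 3.0])
print(torch.sign(x))  # tensor([-1., 0., 1.])
```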