From 916f383e1e894b0aed52954911b7363f395e339f Mon Sep 17 00:00:00 2001 From: Arik Horodniceanu Date: Wed, 18 Feb 2026 16:16:42 -0800 Subject: [PATCH] Adding QNN backend support for isNan core ATen op --- backends/qualcomm/builders/README.md | 1 + backends/qualcomm/builders/__init__.py | 2 + backends/qualcomm/builders/op_is_nan.py | 66 ++++++++++++++++++++ backends/qualcomm/builders/qnn_constants.py | 5 ++ backends/qualcomm/tests/models.py | 8 +++ backends/qualcomm/tests/test_qnn_delegate.py | 38 +++++++++++ 6 files changed, 120 insertions(+) create mode 100644 backends/qualcomm/builders/op_is_nan.py diff --git a/backends/qualcomm/builders/README.md b/backends/qualcomm/builders/README.md index 4ea5b1d5c40..382fe50c525 100644 --- a/backends/qualcomm/builders/README.md +++ b/backends/qualcomm/builders/README.md @@ -440,6 +440,7 @@ Please help update following table if you are contributing new operators: | HardSwish | ✓ | | InstanceNorm | ✓ | | IsInf | ✓ | +| IsNan | ✓ | | L2Norm | ✗ | | LayerNorm | ✓ | | LogSoftmax | ✓ | diff --git a/backends/qualcomm/builders/__init__.py b/backends/qualcomm/builders/__init__.py index 809c5b14cc9..518a7b7fa8c 100644 --- a/backends/qualcomm/builders/__init__.py +++ b/backends/qualcomm/builders/__init__.py @@ -56,6 +56,7 @@ op_index_select, op_instance_norm, op_is_inf, + op_is_nan, op_layer_norm, op_le, op_linear, @@ -166,6 +167,7 @@ op_index_select, op_instance_norm, op_is_inf, + op_is_nan, op_layer_norm, op_le, op_linear, diff --git a/backends/qualcomm/builders/op_is_nan.py b/backends/qualcomm/builders/op_is_nan.py new file mode 100644 index 00000000000..db711e33c99 --- /dev/null +++ b/backends/qualcomm/builders/op_is_nan.py @@ -0,0 +1,66 @@ +# Copyright (c) Qualcomm Innovation Center, Inc. +# All rights reserved +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
import warnings
from typing import Dict

import executorch.backends.qualcomm.python.PyQnnManagerAdaptor as PyQnnManager

import torch

from .node_visitor import NodeVisitor
from .node_visitor_manager import register_node_visitor
from .qnn_constants import OpIsNan, QNN_OP_PACKAGE_NAME_QTI_AISW


@register_node_visitor
class IsNan(NodeVisitor):
    """Node visitor lowering ``aten.isnan.default`` to the QNN ``IsNan`` op."""

    target = ["aten.isnan.default"]

    def __init__(self, *args) -> None:
        super().__init__(*args)

    def define_node(
        self,
        node: torch.fx.Node,
        nodes_to_wrappers: Dict[torch.fx.Node, PyQnnManager.TensorWrapper],
    ) -> PyQnnManager.PyQnnOpWrapper:
        """Build the QNN op wrapper for one ``isnan`` FX node.

        Args:
            node: the ``aten.isnan.default`` FX node being lowered.
            nodes_to_wrappers: cache of already-defined tensor wrappers,
                shared across visitors so tensors are defined only once.

        Returns:
            A ``PyQnnOpWrapper`` for the IsNan op, or ``None`` when the
            input dtype is unsupported (the op then stays off the delegate).
        """
        input_node = self.get_node(node.args[0])
        input_tensor = self.get_tensor(input_node, node)

        # QNN IsNan only accepts floating-point inputs; reject anything else
        # up front so partitioning falls back gracefully.
        if input_tensor.dtype not in [torch.float32, torch.float16]:
            warnings.warn(
                "[QNN Delegate Op Builder]: QNN IsNan only supports FP32 or FP16 inputs.",
                stacklevel=1,
            )
            return None

        input_tensor_wrapper = self.define_tensor(
            input_node,
            node,
            # Reuse the tensor fetched above instead of re-deriving it with a
            # second get_tensor() call (matches the other unary-op builders).
            input_tensor,
            PyQnnManager.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )
        input_tensors = [input_tensor_wrapper]

        out_tensor = self.get_tensor(node, node)
        output_tensor_wrapper = self.define_tensor(
            node,
            node,
            out_tensor,
            PyQnnManager.Qnn_TensorType_t.QNN_TENSOR_TYPE_NATIVE,
            nodes_to_wrappers,
        )
        output_tensors = [output_tensor_wrapper]

        isnan_op = PyQnnManager.PyQnnOpWrapper(
            node.name,
            QNN_OP_PACKAGE_NAME_QTI_AISW,
            OpIsNan.op_name,
        )
        isnan_op.AddInputTensors(input_tensors)
        isnan_op.AddOutputTensors(output_tensors)

        return isnan_op


# --- qnn_constants.py addition (same patch; collapsed onto this source line).
# NOTE(review): the OpIsInf class shown as hunk context here is incomplete in
# this chunk and is not reproduced.

@dataclass(init=False, frozen=True)
class OpIsNan:
    # QNN op-package name for the element-wise IsNan operation.
    op_name: str = "IsNan"
@dataclass(init=False, frozen=True) class OpLayerNorm: op_name: str = "LayerNorm" diff --git a/backends/qualcomm/tests/models.py b/backends/qualcomm/tests/models.py index 5c3238d50a1..bd4060fa9f3 100644 --- a/backends/qualcomm/tests/models.py +++ b/backends/qualcomm/tests/models.py @@ -1318,6 +1318,14 @@ def forward(self, x): return torch.isinf(x) +class IsNan(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.isnan(x) + + class LargeTensorLinear(torch.nn.Module): def __init__(self): super().__init__() diff --git a/backends/qualcomm/tests/test_qnn_delegate.py b/backends/qualcomm/tests/test_qnn_delegate.py index 10c2a717e20..bdea2161d2d 100644 --- a/backends/qualcomm/tests/test_qnn_delegate.py +++ b/backends/qualcomm/tests/test_qnn_delegate.py @@ -1270,6 +1270,44 @@ def test_qnn_backend_is_inf(self): ) self.lower_module_and_test_output(module, sample_input) + def test_qnn_backend_is_nan(self): + module = IsNan() # noqa: F405 + sample_inputs = [ + ( + torch.tensor( + [ + -2.0, + float("nan"), + -float("nan"), + 0.2, + float("inf"), + 3.2, + float("nan"), + -float("inf"), + ], + dtype=torch.float32, + ), + ), + ( + torch.tensor( + [ + -0.234, + -float("nan"), + float("nan"), + -float("inf"), + 3.2, + float("nan"), + 1.26, + float("inf"), + ], + dtype=torch.float16, + ), + ), + ] + + for sample_input in sample_inputs: + self.lower_module_and_test_output(module, sample_input) + def test_qnn_backend_interpolate_bicubic(self): modules = [ ResizeBicubic([2, 2], None, False), # noqa: F405