From 564ac93bbd6fb6f6bb9ed4ae60e8a5e2712738b8 Mon Sep 17 00:00:00 2001
From: Zonglin Peng
Date: Sat, 1 Nov 2025 07:44:52 -0700
Subject: [PATCH 1/2] jarvis-nightly-operators-test-aten-flip-out

Pull Request resolved: https://github.com/pytorch/executorch/pull/15498

ghstack-source-id: 320280755
@exported-using-ghexport

Differential Revision: [D85364548](https://our.internmc.facebook.com/intern/diff/D85364548/)
---
 backends/cadence/utils/facto_util.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/backends/cadence/utils/facto_util.py b/backends/cadence/utils/facto_util.py
index fab7a28e760..2a6731de744 100644
--- a/backends/cadence/utils/facto_util.py
+++ b/backends/cadence/utils/facto_util.py
@@ -444,6 +444,12 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
                     cp.Size.Le(lambda deps, r, d: 2**2),
                 ]
             )
+        case "flip.default":
+            tensor_constraints.extend(
+                [
+                    cp.Dtype.In(lambda deps: [torch.float32]),
+                ]
+            )
         case _:
             pass
     return tensor_constraints

From 043d5c50801e7f987e1c72c927ab471a4782a441 Mon Sep 17 00:00:00 2001
From: pytorchbot
Date: Thu, 6 Nov 2025 15:59:29 -0800
Subject: [PATCH 2/2] jarvis-nightly-operators-test-aten-constant-pad-nd-out
 (#15573)

This PR was created by the merge bot to help merge the original PR into the main branch.
ghstack PR number: https://github.com/pytorch/executorch/pull/15499 by @zonglinpeng
^ Please use this as the source of truth for the PR details, comments, and reviews
ghstack PR base: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/11/base
ghstack PR head: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/11/head
Merge bot PR base: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/10/orig
Merge bot PR head: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/11/orig

Differential Revision: [D85364553](https://our.internmc.facebook.com/intern/diff/D85364553/)

@diff-train-skip-merge

---------

Co-authored-by: Zonglin Peng
---
 backends/cadence/utils/facto_util.py | 80 ++++++++++++++++++----------
 1 file changed, 51 insertions(+), 29 deletions(-)

diff --git a/backends/cadence/utils/facto_util.py b/backends/cadence/utils/facto_util.py
index 2a6731de744..b5c5683ab5d 100644
--- a/backends/cadence/utils/facto_util.py
+++ b/backends/cadence/utils/facto_util.py
@@ -189,47 +189,37 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
             if index == 0:  # condition
                 tensor_constraints = [
                     cp.Dtype.In(lambda deps: [torch.bool]),
-                    cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
-                    cp.Value.Le(lambda deps, dtype, struct: 2**4),
+                    cp.Value.Ge(lambda deps, dtype, struct: 0),
+                    cp.Value.Le(lambda deps, dtype, struct: 1),
                     cp.Rank.Ge(lambda deps: 1),
                     cp.Size.Ge(lambda deps, r, d: 1),
                     max_size_constraint,
                 ]
             elif index == 1:  # input tensor(a)
                 tensor_constraints = [
-                    cp.Dtype.In(
-                        lambda deps: [
-                            torch.int8,
-                            torch.int16,
-                            torch.uint8,
-                            torch.uint16,
-                            torch.int32,
-                            torch.float32,
-                        ]
-                    ),
+                    cp.Dtype.In(lambda deps: [torch.float32]),
                     cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
                     cp.Value.Le(lambda deps, dtype, struct: 2**4),
                     cp.Rank.Ge(lambda deps: 1),
                     cp.Size.Ge(lambda deps, r, d: 1),
+                    cp.Size.In(
+                        lambda deps, r, d: fn.broadcast_with(deps[0].shape, r, d)
+                    ),
                     max_size_constraint,
                 ]
             else:  # input tensor(b)
                 tensor_constraints = [
-                    cp.Dtype.In(
-                        lambda deps: [
-                            torch.int8,
-                            torch.int16,
-                            torch.uint8,
-                            torch.uint16,
-                            torch.int32,
-                            torch.float32,
-                        ]
-                    ),
+                    cp.Dtype.In(lambda deps: [torch.float32]),
                     cp.Dtype.Eq(lambda deps: deps[1].dtype),
                     cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
                     cp.Value.Le(lambda deps, dtype, struct: 2**4),
                     cp.Rank.Ge(lambda deps: 1),
                     cp.Size.Ge(lambda deps, r, d: 1),
+                    cp.Size.In(
+                        lambda deps, r, d: fn.broadcast_with(
+                            fn.broadcasted_shape(deps[0].shape, deps[1].shape), r, d
+                        )
+                    ),
                     max_size_constraint,
                 ]
         case "embedding.default":
@@ -276,6 +266,9 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
             tensor_constraints.extend(
                 [
                     cp.Dtype.In(lambda deps: [torch.float32, torch.int32]),
+                    # Avoid NaN/Inf values that expose clamp NaN handling bugs
+                    cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
+                    cp.Value.Le(lambda deps, dtype, struct: 2**4),
                 ]
             )
         case "rsqrt.default":
@@ -351,12 +344,15 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
                 ]
             )
         case "constant_pad_nd.default":
-            tensor_constraints.extend(
-                [
-                    cp.Dtype.In(lambda deps: [torch.float32]),
-                    cp.Size.Le(lambda deps, r, d: 2**2),
-                ]
-            )
+            tensor_constraints = [
+                cp.Dtype.In(lambda deps: [torch.float32]),
+                cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
+                cp.Value.Le(lambda deps, dtype, struct: 2**4),
+                cp.Rank.Ge(lambda deps: 1),
+                cp.Rank.Le(lambda deps: 2),  # Reduced from 3 to 2 (max 2D tensors)
+                cp.Size.Ge(lambda deps, r, d: 1),
+                cp.Size.Le(lambda deps, r, d: 3),  # Max dimension size of 3
+            ]
         case "avg_pool2d.default":
             tensor_constraints.extend(
                 [
@@ -463,6 +459,7 @@ def apply_scalar_contraints(op_name: str) -> list[ScalarDtype]:
             | "mul.Scalar"
            | "div.Scalar"
            | "constant_pad_nd.default"
+            | "clamp.default"
         ):
             return [ScalarDtype.int]
         case "full.default":
@@ -490,7 +487,32 @@ def facto_testcase_gen(  # noqa: C901
                     cp.Size.Le(lambda deps, r, d: 2**2),
                 ]
             )
-            if in_spec.name == "max_val":  # hardtanh
+            # Special handling for clamp.default to ensure min < max with sufficient gap (at least 2) and never None
+            if op_name == "clamp.default":
+                if in_spec.name == "min":
+                    # min must always be provided (not None) and bounded, leave room for max
+                    spec.inspec[index].constraints.extend(
+                        [
+                            cp.Optional.Eq(lambda deps: False),  # Never None
+                            cp.Value.Ge(lambda deps, dtype: -(2**4)),
+                            cp.Value.Le(
+                                lambda deps, dtype: 2**4 - 2
+                            ),  # Leave room for max (at least 2 units)
+                        ]
+                    )
+                elif in_spec.name == "max":
+                    # max must always be provided (not None), be >= min + 2 (sufficient gap), and bounded
+                    spec.inspec[index].deps = [0, 1]  # deps on input tensor and min
+                    spec.inspec[index].constraints.extend(
+                        [
+                            cp.Optional.Eq(lambda deps: False),  # Never None
+                            cp.Value.Ge(
+                                lambda deps, dtype: deps[1] + 2
+                            ),  # max >= min + 2 (sufficient gap)
+                            cp.Value.Le(lambda deps, dtype: 2**4),
+                        ]
+                    )
+            elif in_spec.name == "max_val":  # hardtanh
                 spec.inspec[index].deps = [0, 1]
                 spec.inspec[index].constraints.extend(
                     [cp.Value.Ge(lambda deps, _: deps[1])]
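
Note on the clamp.default handling above: the constraints encode an invariant rather than fixed values. Both scalar bounds are always generated (never None), both stay within [-(2**4), 2**4], and max is at least min + 2, so degenerate or inverted ranges never reach the kernel under test. Below is a minimal standalone sketch of that sampling invariant, using only torch and random; the helper names sample_clamp_bounds and check_clamp are hypothetical and are not part of the patch or of FACTO.

import random

import torch

# Bounds and minimum min/max separation, mirroring the patched constraints
LO, HI, GAP = -(2**4), 2**4, 2


def sample_clamp_bounds() -> tuple[int, int]:
    # min is capped at HI - GAP, so the dependent max constraint is always satisfiable
    min_val = random.randint(LO, HI - GAP)
    # max >= min + GAP, mirroring cp.Value.Ge(lambda deps, dtype: deps[1] + 2)
    max_val = random.randint(min_val + GAP, HI)
    return min_val, max_val


def check_clamp() -> None:
    x = torch.randn(4, 3) * HI  # float32 input on roughly the same value scale
    min_val, max_val = sample_clamp_bounds()
    out = torch.clamp(x, min=min_val, max=max_val)
    # Every output element must land inside the sampled [min, max] range
    assert bool(out.ge(min_val).all()) and bool(out.le(max_val).all())


if __name__ == "__main__":
    for _ in range(100):
        check_clamp()

Capping min at 2**4 - 2 is what makes the dependent constraint deps[1] + 2 on max solvable in every case; without it, a sampled min of 2**4 would leave no legal value for max.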