From 5e42dbb2cbd63ca4922e0bdaf3ab75c46b29e5ea Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 14:29:19 +0000
Subject: [PATCH 1/8] Precommit tests

---
 .../vscale-and-sve-cnt-demandedbits.ll        | 60 +++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll b/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll
index 9572778484f8d..62290c31f464d 100644
--- a/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll
+++ b/llvm/test/CodeGen/AArch64/vscale-and-sve-cnt-demandedbits.ll
@@ -80,6 +80,66 @@ define i64 @cntd_and_elimination() {
   ret i64 %result
 }
 
+define i64 @cntp_nxv16i1_and_elimination(<vscale x 16 x i1> %p) {
+; CHECK-LABEL: cntp_nxv16i1_and_elimination:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cntp x8, p0, p0.b
+; CHECK-NEXT:    and x9, x8, #0x1ff
+; CHECK-NEXT:    and x8, x8, #0x3fffffffc
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv16i1(<vscale x 16 x i1> %p, <vscale x 16 x i1> %p)
+  %and_redundant = and i64 %cntp, 511
+  %and_required = and i64 %cntp, 17179869180
+  %result = add i64 %and_redundant, %and_required
+  ret i64 %result
+}
+
+define i64 @cntp_nxv8i1_and_elimination(<vscale x 8 x i1> %p) {
+; CHECK-LABEL: cntp_nxv8i1_and_elimination:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cntp x8, p0, p0.h
+; CHECK-NEXT:    and x9, x8, #0x3ff
+; CHECK-NEXT:    and x8, x8, #0x3fffffffc
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv8i1(<vscale x 8 x i1> %p, <vscale x 8 x i1> %p)
+  %and_redundant = and i64 %cntp, 1023
+  %and_required = and i64 %cntp, 17179869180
+  %result = add i64 %and_redundant, %and_required
+  ret i64 %result
+}
+
+define i64 @cntp_nxv4i1_and_elimination(<vscale x 4 x i1> %p) {
+; CHECK-LABEL: cntp_nxv4i1_and_elimination:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cntp x8, p0, p0.s
+; CHECK-NEXT:    and x9, x8, #0x7f
+; CHECK-NEXT:    and x8, x8, #0x3fffffffc
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1> %p, <vscale x 4 x i1> %p)
+  %and_redundant = and i64 %cntp, 127
+  %and_required = and i64 %cntp, 17179869180
+  %result = add i64 %and_redundant, %and_required
+  ret i64 %result
+}
+
+define i64 @cntp_nxv2i1_and_elimination(<vscale x 2 x i1> %p) {
+; CHECK-LABEL: cntp_nxv2i1_and_elimination:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cntp x8, p0, p0.d
+; CHECK-NEXT:    and x9, x8, #0x3f
+; CHECK-NEXT:    and x8, x8, #0x3fffffffc
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+  %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv2i1(<vscale x 2 x i1> %p, <vscale x 2 x i1> %p)
+  %and_redundant = and i64 %cntp, 63
+  %and_required = and i64 %cntp, 17179869180
+  %result = add i64 %and_redundant, %and_required
+  ret i64 %result
+}
+
 define i64 @vscale_trunc_zext() vscale_range(1,16) {
 ; CHECK-LABEL: vscale_trunc_zext:
 ; CHECK:       // %bb.0:

From e3224e441f982878106cb07dab7a99353780576e Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 14:31:24 +0000
Subject: [PATCH 2/8] [AArch64][SVE] Implement demanded bits for
 @llvm.aarch64.sve.cntp

This allows DemandedBits to see that the SVE CNTP intrinsic will only
ever produce small positive integers. The maximum value you could get
here is 256, which is CNTP on an nxv16i1 on a machine with a 2048-bit
vector size (the maximum for SVE).

Using this, various redundant operations (zexts, sexts, ands, ors,
etc.) can be eliminated.
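As an illustration (a hand-written sketch, not one of the precommit
tests below), a mask like the following can now be folded away, since
the upper bits of the cntp result are known to be zero:

  %cntp = call i64 @llvm.aarch64.sve.cntp.nxv16i1(<vscale x 16 x i1> %p, <vscale x 16 x i1> %p)
  ; %cntp is at most 256, so this mask keeps every bit that can be set
  ; and the 'and' is redundant.
  %res = and i64 %cntp, 511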
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 28 ++++++++++++++++++-
 .../CodeGen/AArch64/sve-vector-compress.ll    | 11 ++++----
 .../vscale-and-sve-cnt-demandedbits.ll        | 20 ++++++-------
 3 files changed, 40 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8f41f230b5521..809c2af499958 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19459,6 +19459,32 @@ static std::optional<unsigned> IsSVECntIntrinsic(SDValue S) {
   return {};
 }
 
+// Returns the element size associated with an SVE cnt[bhwdp] intrinsic. For
+// cntp (predicate), the element size corresponds to the legal (packed) SVE
+// vector type associated with the predicate. E.g. nxv4i1 returns 32.
+static std::optional<unsigned> GetSVECntElementSize(SDValue Op) {
+  if (auto ElementSize = IsSVECntIntrinsic(Op))
+    return ElementSize;
+  Intrinsic::ID IID = getIntrinsicID(Op.getNode());
+  if (IID != Intrinsic::aarch64_sve_cntp)
+    return {};
+  EVT PredVT = Op.getOperand(Op.getNumOperands() - 1).getValueType();
+  switch (PredVT.getSimpleVT().SimpleTy) {
+  case MVT::nxv1i1:
+    return 128;
+  case MVT::nxv2i1:
+    return 64;
+  case MVT::nxv4i1:
+    return 32;
+  case MVT::nxv8i1:
+    return 16;
+  case MVT::nxv16i1:
+    return 8;
+  default:
+    llvm_unreachable("unexpected predicate type");
+  }
+}
+
 /// Calculates what the pre-extend type is, based on the extension
 /// operation node provided by \p Extend.
 ///
@@ -31666,7 +31692,7 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
     return false;
   }
   case ISD::INTRINSIC_WO_CHAIN: {
-    if (auto ElementSize = IsSVECntIntrinsic(Op)) {
+    if (auto ElementSize = GetSVECntElementSize(Op)) {
       unsigned MaxSVEVectorSizeInBits = Subtarget->getMaxSVEVectorSizeInBits();
       if (!MaxSVEVectorSizeInBits)
         MaxSVEVectorSizeInBits = AArch64::SVEMaxBitsPerVector;
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
index cc3a3734a9721..f700dee0fb2e4 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
@@ -143,20 +143,19 @@ define @test_compress_large( %vec, %p) {
 ; CHECK-LABEL: cntp_nxv16i1_and_elimination:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cntp x8, p0, p0.b
-; CHECK-NEXT:    and x9, x8, #0x1ff
-; CHECK-NEXT:    and x8, x8, #0x3fffffffc
-; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    and x9, x8, #0x1fc
+; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
   %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv16i1(<vscale x 16 x i1> %p, <vscale x 16 x i1> %p)
   %and_redundant = and i64 %cntp, 511
@@ -99,9 +98,8 @@ define i64 @cntp_nxv8i1_and_elimination(<vscale x 8 x i1> %p) {
 ; CHECK-LABEL: cntp_nxv8i1_and_elimination:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cntp x8, p0, p0.h
-; CHECK-NEXT:    and x9, x8, #0x3ff
-; CHECK-NEXT:    and x8, x8, #0x3fffffffc
-; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    and x9, x8, #0xfc
+; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
   %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv8i1(<vscale x 8 x i1> %p, <vscale x 8 x i1> %p)
   %and_redundant = and i64 %cntp, 1023
@@ -114,9 +112,8 @@ define i64 @cntp_nxv4i1_and_elimination(<vscale x 4 x i1> %p) {
 ; CHECK-LABEL: cntp_nxv4i1_and_elimination:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cntp x8, p0, p0.s
-; CHECK-NEXT:    and x9, x8, #0x7f
-; CHECK-NEXT:    and x8, x8, #0x3fffffffc
-; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    and x9, x8, #0x7c
+; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
   %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1> %p, <vscale x 4 x i1> %p)
   %and_redundant = and i64 %cntp, 127
@@ -129,9 +126,8 @@ define i64 @cntp_nxv2i1_and_elimination(<vscale x 2 x i1> %p) {
 ; CHECK-LABEL: cntp_nxv2i1_and_elimination:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cntp x8, p0, p0.d
-; CHECK-NEXT:    and x9, x8, #0x3f
-; CHECK-NEXT:    and x8, x8, #0x3fffffffc
-; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    and x9, x8, #0x3c
+; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
   %cntp = tail call i64 @llvm.aarch64.sve.cntp.nxv2i1(<vscale x 2 x i1> %p, <vscale x 2 x i1> %p)
   %and_redundant = and i64 %cntp, 63

From 62c06b30709c3b33ebddba8c5ace85ca91e0781d Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 16:21:39 +0000
Subject: [PATCH 3/8] Rework changes

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 89 ++++++++++---------
 1 file changed, 48 insertions(+), 41 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 809c2af499958..48f3bf77851ab 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19443,45 +19443,50 @@ AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
   return CSNeg;
 }
 
-static std::optional<unsigned> IsSVECntIntrinsic(SDValue S) {
+static bool IsSVECntIntrinsic(SDValue S) {
   switch(getIntrinsicID(S.getNode())) {
   default:
     break;
   case Intrinsic::aarch64_sve_cntb:
-    return 8;
   case Intrinsic::aarch64_sve_cnth:
-    return 16;
   case Intrinsic::aarch64_sve_cntw:
-    return 32;
   case Intrinsic::aarch64_sve_cntd:
-    return 64;
+  case Intrinsic::aarch64_sve_cntp:
+    return true;
   }
   return {};
 }
 
-// Returns the element size associated with an SVE cnt[bhwdp] intrinsic. For
-// cntp (predicate), the element size corresponds to the legal (packed) SVE
-// vector type associated with the predicate. E.g. nxv4i1 returns 32.
-static std::optional<unsigned> GetSVECntElementSize(SDValue Op) {
-  if (auto ElementSize = IsSVECntIntrinsic(Op))
-    return ElementSize;
+// Creates a constexpr (IID, VT) pair that can be used in switch cases.
+static constexpr uint64_t intrinsicWithType(Intrinsic::ID IID, MVT VT) {
+  static_assert(sizeof(VT.SimpleTy) <= sizeof(uint32_t) &&
+                    sizeof(IID) <= sizeof(uint32_t),
+                "IID and MVT should fit in 64 bits");
+  return (uint64_t(IID) << 32) | uint64_t(VT.SimpleTy);
+}
+
+// Returns the maximum (scalable) value that can be returned by an SVE count
+// intrinsic. The supported intrinsics are covered by IsSVECntIntrinsic.
+static ElementCount getMaxValueForSVECntIntrinsic(SDValue Op) {
   Intrinsic::ID IID = getIntrinsicID(Op.getNode());
-  if (IID != Intrinsic::aarch64_sve_cntp)
-    return {};
-  EVT PredVT = Op.getOperand(Op.getNumOperands() - 1).getValueType();
-  switch (PredVT.getSimpleVT().SimpleTy) {
-  case MVT::nxv1i1:
-    return 128;
-  case MVT::nxv2i1:
-    return 64;
-  case MVT::nxv4i1:
-    return 32;
-  case MVT::nxv8i1:
-    return 16;
-  case MVT::nxv16i1:
-    return 8;
+  MVT VT = IID == Intrinsic::aarch64_sve_cntp
+               ? Op.getOperand(1).getValueType().getSimpleVT()
+               : MVT::Untyped;
+  switch (intrinsicWithType(IID, VT)) {
+  case intrinsicWithType(Intrinsic::aarch64_sve_cntd, MVT::Untyped):
+  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv2i1):
+    return ElementCount::getScalable(2);
+  case intrinsicWithType(Intrinsic::aarch64_sve_cntw, MVT::Untyped):
+  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv4i1):
+    return ElementCount::getScalable(4);
+  case intrinsicWithType(Intrinsic::aarch64_sve_cnth, MVT::Untyped):
+  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv8i1):
+    return ElementCount::getScalable(8);
+  case intrinsicWithType(Intrinsic::aarch64_sve_cntb, MVT::Untyped):
+  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv16i1):
+    return ElementCount::getScalable(16);
   default:
-    llvm_unreachable("unexpected predicate type");
+    llvm_unreachable("unexpected intrininc type pair");
   }
 }
 
@@ -31692,22 +31697,24 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
     return false;
   }
   case ISD::INTRINSIC_WO_CHAIN: {
-    if (auto ElementSize = GetSVECntElementSize(Op)) {
-      unsigned MaxSVEVectorSizeInBits = Subtarget->getMaxSVEVectorSizeInBits();
-      if (!MaxSVEVectorSizeInBits)
-        MaxSVEVectorSizeInBits = AArch64::SVEMaxBitsPerVector;
-      unsigned MaxElements = MaxSVEVectorSizeInBits / *ElementSize;
-      // The SVE count intrinsics don't support the multiplier immediate so we
-      // don't have to account for that here. The value returned may be slightly
-      // over the true required bits, as this is based on the "ALL" pattern. The
-      // other patterns are also exposed by these intrinsics, but they all
-      // return a value that's strictly less than "ALL".
-      unsigned RequiredBits = llvm::bit_width(MaxElements);
-      unsigned BitWidth = Known.Zero.getBitWidth();
-      if (RequiredBits < BitWidth)
-        Known.Zero.setHighBits(BitWidth - RequiredBits);
+    if (!IsSVECntIntrinsic(Op))
       return false;
-    }
+    unsigned MaxSVEVectorSizeInBits = Subtarget->getMaxSVEVectorSizeInBits();
+    if (!MaxSVEVectorSizeInBits)
+      MaxSVEVectorSizeInBits = AArch64::SVEMaxBitsPerVector;
+    unsigned VscaleMax = MaxSVEVectorSizeInBits / 128;
+    unsigned MaxCount =
+        getMaxValueForSVECntIntrinsic(Op).getKnownMinValue() * VscaleMax;
+    // The SVE count intrinsics don't support the multiplier immediate so we
+    // don't have to account for that here. The value returned may be slightly
+    // over the true required bits, as this is based on the "ALL" pattern. The
+    // other patterns are also exposed by these intrinsics, but they all
+    // return a value that's strictly less than "ALL".
+    unsigned RequiredBits = llvm::bit_width(MaxCount);
+    unsigned BitWidth = Known.Zero.getBitWidth();
+    if (RequiredBits < BitWidth)
+      Known.Zero.setHighBits(BitWidth - RequiredBits);
+    return false;
   }
   }
 

From d95927cac98d5ca4ccb4cf46b63bea90fd67d194 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 16:58:47 +0000
Subject: [PATCH 4/8] Make less silly

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 31 ++++++-------------
 1 file changed, 9 insertions(+), 22 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 48f3bf77851ab..a938bf33505f2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19454,39 +19454,26 @@ static bool IsSVECntIntrinsic(SDValue S) {
   case Intrinsic::aarch64_sve_cntp:
     return true;
   }
-  return {};
-}
-
-// Creates a constexpr (IID, VT) pair that can be used in switch cases.
-static constexpr uint64_t intrinsicWithType(Intrinsic::ID IID, MVT VT) {
-  static_assert(sizeof(VT.SimpleTy) <= sizeof(uint32_t) &&
-                    sizeof(IID) <= sizeof(uint32_t),
-                "IID and MVT should fit in 64 bits");
-  return (uint64_t(IID) << 32) | uint64_t(VT.SimpleTy);
+  return false;
 }
 
 // Returns the maximum (scalable) value that can be returned by an SVE count
 // intrinsic. The supported intrinsics are covered by IsSVECntIntrinsic.
 static ElementCount getMaxValueForSVECntIntrinsic(SDValue Op) {
   Intrinsic::ID IID = getIntrinsicID(Op.getNode());
-  MVT VT = IID == Intrinsic::aarch64_sve_cntp
-               ? Op.getOperand(1).getValueType().getSimpleVT()
-               : MVT::Untyped;
-  switch (intrinsicWithType(IID, VT)) {
-  case intrinsicWithType(Intrinsic::aarch64_sve_cntd, MVT::Untyped):
-  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv2i1):
+  if (IID == Intrinsic::aarch64_sve_cntp)
+    return Op.getOperand(1).getValueType().getVectorElementCount();
+  switch (IID) {
+  case Intrinsic::aarch64_sve_cntd:
     return ElementCount::getScalable(2);
-  case intrinsicWithType(Intrinsic::aarch64_sve_cntw, MVT::Untyped):
-  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv4i1):
+  case Intrinsic::aarch64_sve_cntw:
     return ElementCount::getScalable(4);
-  case intrinsicWithType(Intrinsic::aarch64_sve_cnth, MVT::Untyped):
-  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv8i1):
+  case Intrinsic::aarch64_sve_cnth:
    return ElementCount::getScalable(8);
-  case intrinsicWithType(Intrinsic::aarch64_sve_cntb, MVT::Untyped):
-  case intrinsicWithType(Intrinsic::aarch64_sve_cntp, MVT::nxv16i1):
+  case Intrinsic::aarch64_sve_cntb:
     return ElementCount::getScalable(16);
   default:
-    llvm_unreachable("unexpected intrininc type pair");
+    llvm_unreachable("unexpected intrininc");
   }
 }
 

From db44ff8c0474381a86a2b755ee3eb90e67dc444f Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 19:56:54 +0000
Subject: [PATCH 5/8] Fix typo in llvm_unreachable message

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index a7a7622934920..0831fc9f64e75 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19473,7 +19473,7 @@ static ElementCount getMaxValueForSVECntIntrinsic(SDValue Op) {
   case Intrinsic::aarch64_sve_cntb:
     return ElementCount::getScalable(16);
   default:
-    llvm_unreachable("unexpected intrininc");
+    llvm_unreachable("Unexpected intrinsic");
   }
 }
 

From 245067305853a6cebb0d0c0b0255be6c8f12a230 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 22:34:27 +0000
Subject: [PATCH 6/8] Refactor SVE count intrinsic handling and return types

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0831fc9f64e75..3d43143dddaa8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19451,15 +19451,14 @@ static bool IsSVECntIntrinsic(SDValue S) {
   case Intrinsic::aarch64_sve_cnth:
   case Intrinsic::aarch64_sve_cntw:
   case Intrinsic::aarch64_sve_cntd:
-  case Intrinsic::aarch64_sve_cntp:
     return true;
   }
   return false;
 }
 
 // Returns the maximum (scalable) value that can be returned by an SVE count
-// intrinsic. The supported intrinsics are covered by IsSVECntIntrinsic.
-static ElementCount getMaxValueForSVECntIntrinsic(SDValue Op) {
+// intrinsic.  Returns std::nullopt if \p Op is not aarch64_sve_cnt*.
+static std::optional<ElementCount> getMaxValueForSVECntIntrinsic(SDValue Op) {
   Intrinsic::ID IID = getIntrinsicID(Op.getNode());
   if (IID == Intrinsic::aarch64_sve_cntp)
     return Op.getOperand(1).getValueType().getVectorElementCount();
@@ -19473,7 +19472,7 @@ static ElementCount getMaxValueForSVECntIntrinsic(SDValue Op) {
   case Intrinsic::aarch64_sve_cntb:
     return ElementCount::getScalable(16);
   default:
-    llvm_unreachable("Unexpected intrinsic");
+    return std::nullopt;
   }
 }
 
@@ -31684,20 +31683,20 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
     return false;
   }
   case ISD::INTRINSIC_WO_CHAIN: {
-    if (!IsSVECntIntrinsic(Op))
+    std::optional<ElementCount> MaxCount = getMaxValueForSVECntIntrinsic(Op);
+    if (!MaxCount)
      return false;
     unsigned MaxSVEVectorSizeInBits = Subtarget->getMaxSVEVectorSizeInBits();
     if (!MaxSVEVectorSizeInBits)
       MaxSVEVectorSizeInBits = AArch64::SVEMaxBitsPerVector;
     unsigned VscaleMax = MaxSVEVectorSizeInBits / 128;
-    unsigned MaxCount =
-        getMaxValueForSVECntIntrinsic(Op).getKnownMinValue() * VscaleMax;
+    unsigned MaxValue = MaxCount.getKnownMinValue() * VscaleMax;
     // The SVE count intrinsics don't support the multiplier immediate so we
     // don't have to account for that here. The value returned may be slightly
     // over the true required bits, as this is based on the "ALL" pattern. The
     // other patterns are also exposed by these intrinsics, but they all
     // return a value that's strictly less than "ALL".
-    unsigned RequiredBits = llvm::bit_width(MaxCount);
+    unsigned RequiredBits = llvm::bit_width(MaxValue);
     unsigned BitWidth = Known.Zero.getBitWidth();
     if (RequiredBits < BitWidth)
       Known.Zero.setHighBits(BitWidth - RequiredBits);

From 8fd21d2d08e8da54b97409e6eb7f23f52c32ba89 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 22:38:14 +0000
Subject: [PATCH 7/8] Fix comment formatting in AArch64ISelLowering.cpp

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3d43143dddaa8..4cf1b3e92b2cc 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19457,7 +19457,7 @@ static bool IsSVECntIntrinsic(SDValue S) {
 }
 
 // Returns the maximum (scalable) value that can be returned by an SVE count
-// intrinsic.  Returns std::nullopt if \p Op is not aarch64_sve_cnt*.
+// intrinsic. Returns std::nullopt if \p Op is not aarch64_sve_cnt*.
 static std::optional<ElementCount> getMaxValueForSVECntIntrinsic(SDValue Op) {
   Intrinsic::ID IID = getIntrinsicID(Op.getNode());
   if (IID == Intrinsic::aarch64_sve_cntp)

From 6397699eeceb660c45c494c544ea8037f3189eab Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell
Date: Wed, 19 Nov 2025 22:46:08 +0000
Subject: [PATCH 8/8] Fix typo

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4cf1b3e92b2cc..5c574b91e3ed0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -31690,7 +31690,7 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
     if (!MaxSVEVectorSizeInBits)
       MaxSVEVectorSizeInBits = AArch64::SVEMaxBitsPerVector;
     unsigned VscaleMax = MaxSVEVectorSizeInBits / 128;
-    unsigned MaxValue = MaxCount.getKnownMinValue() * VscaleMax;
+    unsigned MaxValue = MaxCount->getKnownMinValue() * VscaleMax;
     // The SVE count intrinsics don't support the multiplier immediate so we
     // don't have to account for that here. The value returned may be slightly
     // over the true required bits, as this is based on the "ALL" pattern. The