[DAG] visitTRUNCATE - more aggressively fold trunc(add(x,x)) -> add(trunc(x),trunc(x)) #164227
Conversation
@llvm/pr-subscribers-backend-aarch64 @llvm/pr-subscribers-llvm-selectiondag @llvm/pr-subscribers-backend-systemz

Author: Simon Pilgrim (RKSimon)

Changes

We're very careful not to truncate binary arithmetic ops if it will affect legality or cause additional truncation instructions, hence we limit this to cases where at least one operand is constant. But if both operands are the same (i.e. add/mul) then we wouldn't increase the number of truncations, so we can be slightly more aggressive at folding the truncation.

Full diff: https://github.com/llvm/llvm-project/pull/164227.diff

5 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6bf9008c3d677..310d35d9b1d1e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -16433,7 +16433,8 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
case ISD::OR:
case ISD::XOR:
if (!LegalOperations && N0.hasOneUse() &&
- (isConstantOrConstantVector(N0.getOperand(0), true) ||
+ (N0.getOperand(0) == N0.getOperand(1) ||
+ isConstantOrConstantVector(N0.getOperand(0), true) ||
isConstantOrConstantVector(N0.getOperand(1), true))) {
// TODO: We already restricted this to pre-legalization, but for vectors
// we are extra cautious to not create an unsupported operation.
diff --git a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
index c4de177176e33..d7a2a83cf3660 100644
--- a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
@@ -5,32 +5,30 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
; CHECK-LABEL: lower_trunc_16xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: ldr h1, [sp]
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: mov v0.b[1], w1
+; CHECK-NEXT: mov v0.b[2], w2
+; CHECK-NEXT: mov v0.b[3], w3
+; CHECK-NEXT: mov v0.b[4], w4
+; CHECK-NEXT: mov v0.b[5], w5
+; CHECK-NEXT: mov v0.b[6], w6
+; CHECK-NEXT: mov v0.b[7], w7
+; CHECK-NEXT: ld1 { v0.b }[8], [x8]
; CHECK-NEXT: add x8, sp, #8
-; CHECK-NEXT: ld1 { v1.h }[1], [x8]
+; CHECK-NEXT: ld1 { v0.b }[9], [x8]
; CHECK-NEXT: add x8, sp, #16
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: ld1 { v1.h }[2], [x8]
+; CHECK-NEXT: ld1 { v0.b }[10], [x8]
; CHECK-NEXT: add x8, sp, #24
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: ld1 { v1.h }[3], [x8]
+; CHECK-NEXT: ld1 { v0.b }[11], [x8]
; CHECK-NEXT: add x8, sp, #32
-; CHECK-NEXT: mov v0.h[3], w3
-; CHECK-NEXT: ld1 { v1.h }[4], [x8]
+; CHECK-NEXT: ld1 { v0.b }[12], [x8]
; CHECK-NEXT: add x8, sp, #40
-; CHECK-NEXT: ld1 { v1.h }[5], [x8]
+; CHECK-NEXT: ld1 { v0.b }[13], [x8]
; CHECK-NEXT: add x8, sp, #48
-; CHECK-NEXT: mov v0.h[4], w4
-; CHECK-NEXT: ld1 { v1.h }[6], [x8]
+; CHECK-NEXT: ld1 { v0.b }[14], [x8]
; CHECK-NEXT: add x8, sp, #56
-; CHECK-NEXT: mov v0.h[5], w5
-; CHECK-NEXT: ld1 { v1.h }[7], [x8]
-; CHECK-NEXT: mov v0.h[6], w6
-; CHECK-NEXT: add v2.8h, v1.8h, v1.8h
-; CHECK-NEXT: mov v0.h[7], w7
-; CHECK-NEXT: add v3.8h, v0.8h, v0.8h
-; CHECK-NEXT: uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: uzp1 v1.16b, v3.16b, v2.16b
+; CHECK-NEXT: ld1 { v0.b }[15], [x8]
+; CHECK-NEXT: add v1.16b, v0.16b, v0.16b
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%a1 = insertelement <16 x i16> poison, i16 %a, i16 0
@@ -59,18 +57,15 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
define <8 x i16> @lower_trunc_8xi16(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
; CHECK-LABEL: lower_trunc_8xi16:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmov s0, w4
-; CHECK-NEXT: fmov s1, w0
-; CHECK-NEXT: mov v0.s[1], w5
-; CHECK-NEXT: mov v1.s[1], w1
-; CHECK-NEXT: mov v0.s[2], w6
-; CHECK-NEXT: mov v1.s[2], w2
-; CHECK-NEXT: mov v0.s[3], w7
-; CHECK-NEXT: mov v1.s[3], w3
-; CHECK-NEXT: add v2.4s, v0.4s, v0.4s
-; CHECK-NEXT: add v3.4s, v1.4s, v1.4s
-; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
-; CHECK-NEXT: uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: mov v0.h[1], w1
+; CHECK-NEXT: mov v0.h[2], w2
+; CHECK-NEXT: mov v0.h[3], w3
+; CHECK-NEXT: mov v0.h[4], w4
+; CHECK-NEXT: mov v0.h[5], w5
+; CHECK-NEXT: mov v0.h[6], w6
+; CHECK-NEXT: mov v0.h[7], w7
+; CHECK-NEXT: add v1.8h, v0.8h, v0.8h
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%a1 = insertelement <8 x i32> poison, i32 %a, i32 0
@@ -91,14 +86,11 @@ define <8 x i16> @lower_trunc_8xi16(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32
define <4 x i32> @lower_trunc_4xi32(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: lower_trunc_4xi32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmov d0, x2
-; CHECK-NEXT: fmov d1, x0
-; CHECK-NEXT: mov v0.d[1], x3
-; CHECK-NEXT: mov v1.d[1], x1
-; CHECK-NEXT: add v2.2d, v0.2d, v0.2d
-; CHECK-NEXT: add v3.2d, v1.2d, v1.2d
-; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: uzp1 v1.4s, v3.4s, v2.4s
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: mov v0.s[1], w1
+; CHECK-NEXT: mov v0.s[2], w2
+; CHECK-NEXT: mov v0.s[3], w3
+; CHECK-NEXT: add v1.4s, v0.4s, v0.4s
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%a1 = insertelement <4 x i64> poison, i64 %a, i64 0
@@ -115,24 +107,20 @@ define <4 x i32> @lower_trunc_4xi32(i64 %a, i64 %b, i64 %c, i64 %d) {
define <8 x i32> @lower_trunc_8xi32(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h) {
; CHECK-LABEL: lower_trunc_8xi32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmov d0, x2
-; CHECK-NEXT: fmov d1, x0
-; CHECK-NEXT: fmov d2, x6
-; CHECK-NEXT: fmov d3, x4
-; CHECK-NEXT: mov v0.d[1], x3
-; CHECK-NEXT: mov v1.d[1], x1
-; CHECK-NEXT: mov v2.d[1], x7
-; CHECK-NEXT: mov v3.d[1], x5
-; CHECK-NEXT: add v4.2d, v0.2d, v0.2d
-; CHECK-NEXT: add v5.2d, v1.2d, v1.2d
-; CHECK-NEXT: add v6.2d, v2.2d, v2.2d
-; CHECK-NEXT: add v7.2d, v3.2d, v3.2d
+; CHECK-NEXT: fmov d0, x6
+; CHECK-NEXT: fmov d1, x4
+; CHECK-NEXT: fmov d2, x2
+; CHECK-NEXT: fmov d3, x0
+; CHECK-NEXT: mov v0.d[1], x7
+; CHECK-NEXT: mov v1.d[1], x5
+; CHECK-NEXT: mov v2.d[1], x3
+; CHECK-NEXT: mov v3.d[1], x1
+; CHECK-NEXT: uzp1 v1.4s, v1.4s, v0.4s
; CHECK-NEXT: uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: uzp1 v3.4s, v5.4s, v4.4s
-; CHECK-NEXT: uzp1 v1.4s, v7.4s, v6.4s
-; CHECK-NEXT: eor v0.16b, v0.16b, v3.16b
-; CHECK-NEXT: eor v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: add v3.4s, v1.4s, v1.4s
+; CHECK-NEXT: add v0.4s, v2.4s, v2.4s
+; CHECK-NEXT: eor v1.16b, v1.16b, v3.16b
+; CHECK-NEXT: eor v0.16b, v2.16b, v0.16b
; CHECK-NEXT: ret
%a1 = insertelement <8 x i64> poison, i64 %a, i64 0
%b1 = insertelement <8 x i64> %a1, i64 %b, i64 1
diff --git a/llvm/test/CodeGen/AArch64/zext-shuffle.ll b/llvm/test/CodeGen/AArch64/zext-shuffle.ll
index 20d2071d7fe54..a0d4e18acb6c8 100644
--- a/llvm/test/CodeGen/AArch64/zext-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/zext-shuffle.ll
@@ -674,10 +674,8 @@ define <4 x i32> @isUndefDeInterleave_t1_bad(<8 x i16> %a) {
define i16 @undeftop(<8 x i16> %0) {
; CHECK-LABEL: undeftop:
; CHECK: // %bb.0:
-; CHECK-NEXT: dup v0.8h, v0.h[4]
-; CHECK-NEXT: uaddl v0.4s, v0.4h, v0.4h
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: add v0.8h, v0.8h, v0.8h
+; CHECK-NEXT: umov w0, v0.h[4]
; CHECK-NEXT: ret
%2 = shufflevector <8 x i16> %0, <8 x i16> zeroinitializer, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 9, i32 7, i32 5, i32 3>
%3 = zext <8 x i16> %2 to <8 x i64>
diff --git a/llvm/test/CodeGen/SystemZ/int-conv-14.ll b/llvm/test/CodeGen/SystemZ/int-conv-14.ll
index 98dc88f289620..9ce75200f7910 100644
--- a/llvm/test/CodeGen/SystemZ/int-conv-14.ll
+++ b/llvm/test/CodeGen/SystemZ/int-conv-14.ll
@@ -58,9 +58,8 @@ define i128 @f4(ptr %ptr) {
define i64 @f5(i128 %a) {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvg %r2, %v0, 1
+; CHECK-NEXT: lg %r1, 8(%r2)
+; CHECK-NEXT: la %r2, 0(%r1,%r1)
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i64
@@ -137,10 +136,8 @@ define i128 @f10(ptr %ptr) {
define i32 @f11(i128 %a) {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i32
@@ -218,10 +215,8 @@ define i128 @f16(ptr %ptr) {
define i16 @f17(i128 %a) {
; CHECK-LABEL: f17:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i16
@@ -299,10 +294,8 @@ define i128 @f22(ptr %ptr) {
define i8 @f23(i128 %a) {
; CHECK-LABEL: f23:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i8
@@ -388,10 +381,8 @@ define i128 @f28(ptr %ptr) {
define i1 @f29(i128 %a) {
; CHECK-LABEL: f29:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i1
diff --git a/llvm/test/CodeGen/SystemZ/int-conv-15.ll b/llvm/test/CodeGen/SystemZ/int-conv-15.ll
index 0d8ee75b10b85..7d51635141bb1 100644
--- a/llvm/test/CodeGen/SystemZ/int-conv-15.ll
+++ b/llvm/test/CodeGen/SystemZ/int-conv-15.ll
@@ -58,9 +58,8 @@ define i128 @f4(ptr %ptr) {
define i64 @f5(i128 %a) {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvg %r2, %v0, 1
+; CHECK-NEXT: lg %r1, 8(%r2)
+; CHECK-NEXT: la %r2, 0(%r1,%r1)
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i64
@@ -137,10 +136,8 @@ define i128 @f10(ptr %ptr) {
define i32 @f11(i128 %a) {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i32
@@ -218,10 +215,8 @@ define i128 @f16(ptr %ptr) {
define i16 @f17(i128 %a) {
; CHECK-LABEL: f17:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i16
@@ -299,10 +294,8 @@ define i128 @f22(ptr %ptr) {
define i8 @f23(i128 %a) {
; CHECK-LABEL: f23:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i8
@@ -386,10 +379,8 @@ define i128 @f28(ptr %ptr) {
define i1 @f29(i128 %a) {
; CHECK-LABEL: f29:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
-; CHECK-NEXT: vlgvf %r2, %v0, 3
-; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
+; CHECK-NEXT: l %r2, 12(%r2)
+; CHECK-NEXT: ar %r2, %r2
; CHECK-NEXT: br %r14
%op = add i128 %a, %a
%res = trunc i128 %op to i1
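For illustration, here is a minimal standalone reduction of the scalar pattern exercised by the SystemZ tests above; the function name and exact shape are illustrative rather than part of the patch:

; trunc(add(x,x)) where both operands are the same value. With the relaxed
; check the combiner can rewrite this as add(trunc(x),trunc(x)), so the add
; happens at i64 width instead of on the full i128 value, which is what the
; updated SystemZ CHECK lines reflect.
define i64 @trunc_add_self(i128 %a) {
  %op = add i128 %a, %a
  %res = trunc i128 %op to i64
  ret i64 %res
}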
…runc(x),trunc(x))

We're very careful not to truncate binary arithmetic ops if it will affect legality or cause additional truncation instructions, hence we limit this to cases where at least one operand is constant. But if both operands are the same (i.e. add/mul) then we wouldn't increase the number of truncations, so we can be slightly more aggressive at folding the truncation.

Alter SystemZ tests to avoid add(x,x) pattern
force-pushed from 07a4e07 to ae37d53
LGTM for AArch64 too, thanks.
Replace with trunc(add(X,Y)) to avoid premature folding in upcoming patch llvm#164227
Replace with trunc(add(X,Y)) to avoid premature folding in upcoming patch #164227
We're very careful not to truncate binary arithmetic ops if it will affect legality or cause additional truncation instructions, hence we currently limit this to cases where one operand is constant.
But if both operands are the same (i.e. for some add/mul cases) then we wouldn't increase the number of truncations, so we can be slightly more aggressive at folding the truncation.
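A rough vector sketch of the same idea, simplified from the avoid-pre-trunc.ll tests (the function below is illustrative, not one of the tests in this PR): because both operands of the add are the same value, truncating first introduces no extra truncate, so the add can be performed at the narrower type.

; The combiner may now fold this to %t = trunc <8 x i32> %v to <8 x i16>
; followed by add <8 x i16> %t, %t, matching the narrower add v1.8h seen in
; the updated AArch64 output.
define <8 x i16> @trunc_add_self_v8i16(<8 x i32> %v) {
  %add = add <8 x i32> %v, %v
  %res = trunc <8 x i32> %add to <8 x i16>
  ret <8 x i16> %res
}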