288 changes: 240 additions & 48 deletions llvm/test/CodeGen/AArch64/shift-amount-mod.ll
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,24 @@ define void @modify32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
store i32 %shifted, i32* %valptr
ret void
}
; (32 - %shamt) has two uses: the shl amount and a value stored to %shamtptr.
; Expected codegen materializes a cheap `neg` to feed the shift and a separate
; `sub` only for the stored value, rather than sharing one `sub` for both.
define void @modify32_shl_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
; CHECK-LABEL: modify32_shl_by_negated_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w10, #32
; CHECK-NEXT: lsl w8, w9, w8
; CHECK-NEXT: sub w9, w10, w1
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: str w9, [x2]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 32, %shamt
%shifted = shl i32 %val, %negshamt
store i32 %shifted, i32* %valptr
store i32 %negshamt, i32* %shamtptr
ret void
}

define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_shl_by_negated:
Expand Down Expand Up @@ -105,6 +123,24 @@ define void @modify64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
store i64 %shifted, i64* %valptr
ret void
}
; 64-bit version: (64 - %shamt) feeds both the shl and a store. Checks expect
; `neg` for the shift operand plus a standalone `sub` for the stored value.
define void @modify64_shl_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
; CHECK-LABEL: modify64_shl_by_negated_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov w10, #64
; CHECK-NEXT: lsl x8, x9, x8
; CHECK-NEXT: sub x9, x10, x1
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: str x9, [x2]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 64, %shamt
%shifted = shl i64 %val, %negshamt
store i64 %shifted, i64* %valptr
store i64 %negshamt, i64* %shamtptr
ret void
}

; logical shift right
;------------------------------------------------------------------------------;
Expand Down Expand Up @@ -157,6 +193,24 @@ define void @modify32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
store i32 %shifted, i32* %valptr
ret void
}
; lshr variant: (32 - %shamt) used as both shift amount and stored value.
; Expects `neg` feeding the lsr and a separate `sub` for the stored copy.
define void @modify32_lshr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
; CHECK-LABEL: modify32_lshr_by_negated_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w10, #32
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: sub w9, w10, w1
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: str w9, [x2]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 32, %shamt
%shifted = lshr i32 %val, %negshamt
store i32 %shifted, i32* %valptr
store i32 %negshamt, i32* %shamtptr
ret void
}

define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_negated:
Expand Down Expand Up @@ -206,6 +260,24 @@ define void @modify64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
store i64 %shifted, i64* %valptr
ret void
}
; 64-bit lshr variant: (64 - %shamt) has two uses; checks expect `neg` for the
; lsr operand and a standalone `sub` for the value stored to %shamtptr.
define void @modify64_lshr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
; CHECK-LABEL: modify64_lshr_by_negated_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov w10, #64
; CHECK-NEXT: lsr x8, x9, x8
; CHECK-NEXT: sub x9, x10, x1
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: str x9, [x2]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 64, %shamt
%shifted = lshr i64 %val, %negshamt
store i64 %shifted, i64* %valptr
store i64 %negshamt, i64* %shamtptr
ret void
}

; arithmetic shift right
;------------------------------------------------------------------------------;
Expand Down Expand Up @@ -258,6 +330,24 @@ define void @modify32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
store i32 %shifted, i32* %valptr
ret void
}
; ashr variant: (32 - %shamt) used as both shift amount and stored value.
; Expects `neg` feeding the asr and a separate `sub` for the stored copy.
define void @modify32_ashr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
; CHECK-LABEL: modify32_ashr_by_negated_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w10, #32
; CHECK-NEXT: asr w8, w9, w8
; CHECK-NEXT: sub w9, w10, w1
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: str w9, [x2]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 32, %shamt
%shifted = ashr i32 %val, %negshamt
store i32 %shifted, i32* %valptr
store i32 %negshamt, i32* %shamtptr
ret void
}

define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_ashr_by_negated:
Expand Down Expand Up @@ -307,6 +397,24 @@ define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
store i64 %shifted, i64* %valptr
ret void
}
; 64-bit ashr variant: (64 - %shamt) has two uses; checks expect `neg` for the
; asr operand and a standalone `sub` for the value stored to %shamtptr.
define void @modify64_ashr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
; CHECK-LABEL: modify64_ashr_by_negated_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov w10, #64
; CHECK-NEXT: asr x8, x9, x8
; CHECK-NEXT: sub x9, x10, x1
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: str x9, [x2]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 64, %shamt
%shifted = ashr i64 %val, %negshamt
store i64 %shifted, i64* %valptr
store i64 %negshamt, i64* %shamtptr
ret void
}

;==============================================================================;
; the shift amount is complemented (shiftbitwidth - 1 - shiftamt)
Expand All @@ -318,8 +426,7 @@ define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
define i32 @reg32_shl_by_complemented(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: lsl w0, w0, w8
; CHECK-NEXT: ret
%negshamt = sub i32 31, %shamt
Expand All @@ -329,9 +436,8 @@ define i32 @reg32_shl_by_complemented(i32 %val, i32 %shamt) nounwind {
define i32 @load32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: lsl w0, w9, w8
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
Expand All @@ -342,8 +448,7 @@ define i32 @load32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
define void @store32_shl_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: mvn w8, w2
; CHECK-NEXT: lsl w8, w0, w8
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
Expand All @@ -355,24 +460,40 @@ define void @store32_shl_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nou
; Complemented amount (31 - %shamt) used only as a shift amount; expected to
; fold to a single `mvn` (bitwise-not) feeding the lsl.
; NOTE(review): both the old `mov w8, #31` and the new `mvn w8, w1` CHECK
; lines are present below — this looks like interleaved before/after lines
; from a diff paste; regenerate with update_llc_test_checks.py to confirm.
define void @modify32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: lsl w8, w9, w8
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 31, %shamt
%shifted = shl i32 %val, %negshamt
store i32 %shifted, i32* %valptr
ret void
}
; (31 - %shamt) has two uses: shl amount and a stored value.
; NOTE(review): the CHECK sequence contains both `mvn w8, w1` and a following
; `sub w8, w8, w1` redefinition of w8 — these cannot both be real output for
; one compiler version; likely interleaved old/new diff lines. Regenerate the
; checks (update_llc_test_checks.py) to confirm the expected sequence.
define void @modify32_shl_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
; CHECK-LABEL: modify32_shl_by_complemented_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: mov w10, #31
; CHECK-NEXT: lsl w8, w9, w8
; CHECK-NEXT: sub w9, w10, w1
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: str w9, [x2]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 31, %shamt
%shifted = shl i32 %val, %negshamt
store i32 %shifted, i32* %valptr
store i32 %negshamt, i32* %shamtptr
ret void
}

define i64 @reg64_shl_by_complemented(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: lsl x0, x0, x8
; CHECK-NEXT: ret
%negshamt = sub i64 63, %shamt
Expand All @@ -382,9 +503,8 @@ define i64 @reg64_shl_by_complemented(i64 %val, i64 %shamt) nounwind {
define i64 @load64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: lsl x0, x9, x8
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
Expand All @@ -395,8 +515,7 @@ define i64 @load64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
define void @store64_shl_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: sub x8, x8, x2
; CHECK-NEXT: mvn x8, x2
; CHECK-NEXT: lsl x8, x0, x8
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
Expand All @@ -408,16 +527,33 @@ define void @store64_shl_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nou
; 64-bit complemented amount (63 - %shamt) used only as a shift amount;
; expected to fold to a single `mvn` feeding the lsl.
; NOTE(review): both `mov w8, #63` and `mvn x8, x1` CHECK lines are present —
; apparent before/after diff interleaving; regenerate checks to confirm.
define void @modify64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_shl_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: lsl x8, x9, x8
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 63, %shamt
%shifted = shl i64 %val, %negshamt
store i64 %shifted, i64* %valptr
ret void
}
; (63 - %shamt) has two uses: shl amount and a stored value.
; NOTE(review): `mvn x8, x1` followed by `sub x8, x8, x1` (re-clobbering x8)
; cannot both be live output — apparent interleaved old/new diff lines;
; regenerate checks (update_llc_test_checks.py) to confirm.
define void @modify64_shl_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
; CHECK-LABEL: modify64_shl_by_complemented_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: mov w10, #63
; CHECK-NEXT: lsl x8, x9, x8
; CHECK-NEXT: sub x9, x10, x1
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: str x9, [x2]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 63, %shamt
%shifted = shl i64 %val, %negshamt
store i64 %shifted, i64* %valptr
store i64 %negshamt, i64* %shamtptr
ret void
}

Expand All @@ -427,8 +563,7 @@ define void @modify64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
define i32 @reg32_lshr_by_complemented(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: lsr w0, w0, w8
; CHECK-NEXT: ret
%negshamt = sub i32 31, %shamt
Expand All @@ -438,9 +573,8 @@ define i32 @reg32_lshr_by_complemented(i32 %val, i32 %shamt) nounwind {
define i32 @load32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: lsr w0, w9, w8
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
Expand All @@ -451,8 +585,7 @@ define i32 @load32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
define void @store32_lshr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: mvn w8, w2
; CHECK-NEXT: lsr w8, w0, w8
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
Expand All @@ -464,24 +597,40 @@ define void @store32_lshr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) no
; Complemented amount (31 - %shamt) used only as a shift amount; expected to
; fold to a single `mvn` feeding the lsr.
; NOTE(review): both `mov w8, #31` and `mvn w8, w1` CHECK lines are present —
; apparent before/after diff interleaving; regenerate checks to confirm.
define void @modify32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 31, %shamt
%shifted = lshr i32 %val, %negshamt
store i32 %shifted, i32* %valptr
ret void
}
; (31 - %shamt) has two uses: lshr amount and a stored value.
; NOTE(review): `mvn w8, w1` followed by `sub w8, w8, w1` (re-clobbering w8)
; cannot both be live output — apparent interleaved old/new diff lines;
; regenerate checks (update_llc_test_checks.py) to confirm.
define void @modify32_lshr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
; CHECK-LABEL: modify32_lshr_by_complemented_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: mov w10, #31
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: sub w9, w10, w1
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: str w9, [x2]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 31, %shamt
%shifted = lshr i32 %val, %negshamt
store i32 %shifted, i32* %valptr
store i32 %negshamt, i32* %shamtptr
ret void
}

define i64 @reg64_lshr_by_complemented(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: lsr x0, x0, x8
; CHECK-NEXT: ret
%negshamt = sub i64 63, %shamt
Expand All @@ -491,9 +640,8 @@ define i64 @reg64_lshr_by_complemented(i64 %val, i64 %shamt) nounwind {
define i64 @load64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: lsr x0, x9, x8
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
Expand All @@ -504,8 +652,7 @@ define i64 @load64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
define void @store64_lshr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: sub x8, x8, x2
; CHECK-NEXT: mvn x8, x2
; CHECK-NEXT: lsr x8, x0, x8
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
Expand All @@ -517,16 +664,33 @@ define void @store64_lshr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) no
; 64-bit complemented amount (63 - %shamt) used only as a shift amount;
; expected to fold to a single `mvn` feeding the lsr.
; NOTE(review): both `mov w8, #63` and `mvn x8, x1` CHECK lines are present —
; apparent before/after diff interleaving; regenerate checks to confirm.
define void @modify64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_lshr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: lsr x8, x9, x8
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 63, %shamt
%shifted = lshr i64 %val, %negshamt
store i64 %shifted, i64* %valptr
ret void
}
; (63 - %shamt) has two uses: lshr amount and a stored value.
; NOTE(review): `mvn x8, x1` followed by `sub x8, x8, x1` (re-clobbering x8)
; cannot both be live output — apparent interleaved old/new diff lines;
; regenerate checks (update_llc_test_checks.py) to confirm.
define void @modify64_lshr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
; CHECK-LABEL: modify64_lshr_by_complemented_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: mov w10, #63
; CHECK-NEXT: lsr x8, x9, x8
; CHECK-NEXT: sub x9, x10, x1
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: str x9, [x2]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 63, %shamt
%shifted = lshr i64 %val, %negshamt
store i64 %shifted, i64* %valptr
store i64 %negshamt, i64* %shamtptr
ret void
}

Expand All @@ -536,8 +700,7 @@ define void @modify64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
define i32 @reg32_ashr_by_complemented(i32 %val, i32 %shamt) nounwind {
; CHECK-LABEL: reg32_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: asr w0, w0, w8
; CHECK-NEXT: ret
%negshamt = sub i32 31, %shamt
Expand All @@ -547,9 +710,8 @@ define i32 @reg32_ashr_by_complemented(i32 %val, i32 %shamt) nounwind {
define i32 @load32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: load32_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: asr w0, w9, w8
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
Expand All @@ -560,8 +722,7 @@ define i32 @load32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
define void @store32_ashr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
; CHECK-LABEL: store32_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: mvn w8, w2
; CHECK-NEXT: asr w8, w0, w8
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
Expand All @@ -573,24 +734,40 @@ define void @store32_ashr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) no
; Complemented amount (31 - %shamt) used only as a shift amount; expected to
; fold to a single `mvn` feeding the asr.
; NOTE(review): both `mov w8, #31` and `mvn w8, w1` CHECK lines are present —
; apparent before/after diff interleaving; regenerate checks to confirm.
define void @modify32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
; CHECK-LABEL: modify32_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #31
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: asr w8, w9, w8
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 31, %shamt
%shifted = ashr i32 %val, %negshamt
store i32 %shifted, i32* %valptr
ret void
}
; (31 - %shamt) has two uses: ashr amount and a stored value.
; NOTE(review): `mvn w8, w1` followed by `sub w8, w8, w1` (re-clobbering w8)
; cannot both be live output — apparent interleaved old/new diff lines;
; regenerate checks (update_llc_test_checks.py) to confirm.
define void @modify32_ashr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
; CHECK-LABEL: modify32_ashr_by_complemented_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn w8, w1
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w1
; CHECK-NEXT: mov w10, #31
; CHECK-NEXT: asr w8, w9, w8
; CHECK-NEXT: sub w9, w10, w1
; CHECK-NEXT: str w8, [x0]
; CHECK-NEXT: str w9, [x2]
; CHECK-NEXT: ret
%val = load i32, i32* %valptr
%negshamt = sub i32 31, %shamt
%shifted = ashr i32 %val, %negshamt
store i32 %shifted, i32* %valptr
store i32 %negshamt, i32* %shamtptr
ret void
}

define i64 @reg64_ashr_by_complemented(i64 %val, i64 %shamt) nounwind {
; CHECK-LABEL: reg64_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: asr x0, x0, x8
; CHECK-NEXT: ret
%negshamt = sub i64 63, %shamt
Expand All @@ -600,9 +777,8 @@ define i64 @reg64_ashr_by_complemented(i64 %val, i64 %shamt) nounwind {
define i64 @load64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: load64_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: asr x0, x9, x8
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
Expand All @@ -613,8 +789,7 @@ define i64 @load64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
define void @store64_ashr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
; CHECK-LABEL: store64_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: sub x8, x8, x2
; CHECK-NEXT: mvn x8, x2
; CHECK-NEXT: asr x8, x0, x8
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
Expand All @@ -626,16 +801,33 @@ define void @store64_ashr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) no
; 64-bit complemented amount (63 - %shamt) used only as a shift amount;
; expected to fold to a single `mvn` feeding the asr.
; NOTE(review): both `mov w8, #63` and `mvn x8, x1` CHECK lines are present —
; apparent before/after diff interleaving; regenerate checks to confirm.
define void @modify64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
; CHECK-LABEL: modify64_ashr_by_complemented:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #63
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: asr x8, x9, x8
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 63, %shamt
%shifted = ashr i64 %val, %negshamt
store i64 %shifted, i64* %valptr
ret void
}
; (63 - %shamt) has two uses: ashr amount and a stored value.
; NOTE(review): `mvn x8, x1` followed by `sub x8, x8, x1` (re-clobbering x8)
; cannot both be live output — apparent interleaved old/new diff lines;
; regenerate checks (update_llc_test_checks.py) to confirm.
define void @modify64_ashr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
; CHECK-LABEL: modify64_ashr_by_complemented_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn x8, x1
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub x8, x8, x1
; CHECK-NEXT: mov w10, #63
; CHECK-NEXT: asr x8, x9, x8
; CHECK-NEXT: sub x9, x10, x1
; CHECK-NEXT: str x8, [x0]
; CHECK-NEXT: str x9, [x2]
; CHECK-NEXT: ret
%val = load i64, i64* %valptr
%negshamt = sub i64 63, %shamt
%shifted = ashr i64 %val, %negshamt
store i64 %shifted, i64* %valptr
store i64 %negshamt, i64* %shamtptr
ret void
}

Expand Down