@@ -543,10 +543,9 @@ define i64 @bs_and64i_multiuse(i64 %a, i64 %b) #0 {
 ; Fold: BSWAP( OP( BSWAP(x), y ) ) -> OP( x, BSWAP(y) )
 define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_and_lhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %a)
   %2 = and i16 %1, %b
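An editorial aside on the folds in this file (a sketch under my reading of the transform, not part of the patch): @llvm.bswap is an involution, bswap(bswap(x)) == x, and it commutes with bitwise logic, bswap(a & b) == bswap(a) & bswap(b). Substituting gives bswap(and(bswap(x), y)) == and(x, bswap(y)), which is why each rewritten CHECK block has one swap instead of two. A standalone IR illustration (the @fold_sketch_* names are hypothetical):

; Before the fold: two byte swaps.
define i16 @fold_sketch_before(i16 %x, i16 %y) {
  %bx = tail call i16 @llvm.bswap.i16(i16 %x)
  %t = and i16 %bx, %y
  %r = tail call i16 @llvm.bswap.i16(i16 %t)
  ret i16 %r
}

; After the fold: the remaining swap moves to the operand
; that was not swapped before.
define i16 @fold_sketch_after(i16 %x, i16 %y) {
  %by = call i16 @llvm.bswap.i16(i16 %y)
  %r = and i16 %x, %by
  ret i16 %r
}

declare i16 @llvm.bswap.i16(i16)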
@@ -556,10 +555,9 @@ define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_or_lhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %a)
   %2 = or i16 %1, %b
@@ -569,10 +567,9 @@ define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_xor_lhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_xor_lhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %a)
   %2 = xor i16 %1, %b
@@ -582,10 +579,9 @@ define i16 @bs_xor_lhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_and_rhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %b)
   %2 = and i16 %a, %1
@@ -595,10 +591,9 @@ define i16 @bs_and_rhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_or_rhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %b)
   %2 = or i16 %a, %1
@@ -608,10 +603,9 @@ define i16 @bs_or_rhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_xor_rhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %b)
   %2 = xor i16 %a, %1
@@ -621,10 +615,9 @@ define i16 @bs_xor_rhs_bs16(i16 %a, i16 %b) #0 {
 
 define i32 @bs_and_rhs_bs32(i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_bs32(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = tail call i32 @llvm.bswap.i32(i32 %b)
   %2 = and i32 %a, %1
@@ -634,10 +627,9 @@ define i32 @bs_and_rhs_bs32(i32 %a, i32 %b) #0 {
 
 define i32 @bs_or_rhs_bs32(i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_bs32(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = tail call i32 @llvm.bswap.i32(i32 %b)
   %2 = or i32 %a, %1
@@ -647,10 +639,9 @@ define i32 @bs_or_rhs_bs32(i32 %a, i32 %b) #0 {
 
 define i32 @bs_xor_rhs_bs32(i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_bs32(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = tail call i32 @llvm.bswap.i32(i32 %b)
   %2 = xor i32 %a, %1
@@ -660,10 +651,9 @@ define i32 @bs_xor_rhs_bs32(i32 %a, i32 %b) #0 {
 
 define i64 @bs_and_rhs_bs64(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_bs64(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %b)
   %2 = and i64 %a, %1
@@ -673,10 +663,9 @@ define i64 @bs_and_rhs_bs64(i64 %a, i64 %b) #0 {
 
 define i64 @bs_or_rhs_bs64(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_bs64(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %b)
   %2 = or i64 %a, %1
@@ -686,10 +675,9 @@ define i64 @bs_or_rhs_bs64(i64 %a, i64 %b) #0 {
 
 define i64 @bs_xor_rhs_bs64(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_bs64(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %b)
   %2 = xor i64 %a, %1
@@ -699,10 +687,9 @@ define i64 @bs_xor_rhs_bs64(i64 %a, i64 %b) #0 {
 
 define <2 x i32> @bs_and_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_i32vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP2]])
-; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
 ;
   %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
   %2 = and <2 x i32> %a, %1
@@ -712,10 +699,9 @@ define <2 x i32> @bs_and_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 
 define <2 x i32> @bs_or_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_i32vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i32> [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP2]])
-; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i32> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
 ;
   %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
   %2 = or <2 x i32> %a, %1
@@ -725,10 +711,9 @@ define <2 x i32> @bs_or_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 
 define <2 x i32> @bs_xor_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_i32vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP2]])
-; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
 ;
   %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
   %2 = xor <2 x i32> %a, %1
@@ -782,11 +767,10 @@ define i64 @bs_all_operand64_multiuse_both(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_all_operand64_multiuse_both(
 ; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP1]], [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[A]], [[B]]
 ; CHECK-NEXT:    call void @use.i64(i64 [[TMP1]])
 ; CHECK-NEXT:    call void @use.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP4]]
+; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %a)
   %2 = tail call i64 @llvm.bswap.i64(i64 %b)
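A note on the multiuse test above (editorial aside, not part of the patch): both inner byte swaps stay alive through the @use.i64 calls, yet the fold is still a strict win here, because no replacement swap is needed at all:

; bswap(bswap(a) & bswap(b))
;   == bswap(bswap(a & b))   ; bswap distributes over bitwise and
;   == a & b                 ; bswap is an involution

so the rewritten body computes and i64 %a, %b directly and keeps the two swaps only for their side uses.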
@@ -798,6 +782,36 @@ define i64 @bs_all_operand64_multiuse_both(i64 %a, i64 %b) #0 {
   ret i64 %4
 }
 
+@gp = external global [0 x i8]
+
+define void @bs_and_constexpr(ptr %out, i64 %a) {
+; CHECK-LABEL: @bs_and_constexpr(
+; CHECK-NEXT:    [[RES:%.*]] = call i64 @llvm.bswap.i64(i64 and (i64 ptrtoint (ptr @gp to i64), i64 4095))
+; CHECK-NEXT:    store i64 [[RES]], ptr [[OUT:%.*]], align 8
+; CHECK-NEXT:    ret void
+;
+  %gpi = ptrtoint ptr @gp to i64
+  %exp = and i64 %gpi, 4095
+  %res = call i64 @llvm.bswap.i64(i64 %exp)
+  store i64 %res, ptr %out, align 8
+  ret void
+}
+
+define void @bs_and_bs_constexpr(ptr %out, i64 %a) {
+; CHECK-LABEL: @bs_and_bs_constexpr(
+; CHECK-NEXT:    store i64 and (i64 ptrtoint (ptr @gp to i64), i64 -67835469387268096), ptr [[OUT:%.*]], align 8
+; CHECK-NEXT:    ret void
+;
+  %gpi = ptrtoint ptr @gp to i64
+  %bs_gpi = call i64 @llvm.bswap.i64(i64 %gpi)
+  %exp = and i64 %bs_gpi, 4095
+  %res = call i64 @llvm.bswap.i64(i64 %exp)
+  store i64 %res, ptr %out, align 8
+  ret void
+}
+
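A quick sanity check of the constant above (my arithmetic, not from the patch): in @bs_and_bs_constexpr the double swap folds as in the earlier tests, leaving and(%gpi, bswap(4095)); byte-swapping the mask gives the CHECK constant:

; 4095        = 0x0000000000000FFF
; bswap(4095) = 0xFF0F000000000000
;             = -67835469387268096 as a signed i64

In @bs_and_constexpr, by contrast, there is only a single bswap, so the fold does not fire and the masked constant expression simply ends up as the call operand.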
 define i64 @bs_active_high8(i64 %0) {
 ; CHECK-LABEL: @bs_active_high8(
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP0:%.*]], 255