68 changes: 34 additions & 34 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
@@ -15,12 +15,12 @@ body: |
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GCN: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: %4:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %5:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: %6:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
; GCN: FLAT_STORE_DWORD [[COPY3]], %4, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY3]], %5, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY3]], %6, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(s32) = COPY $vgpr1
@@ -53,10 +53,10 @@ body: |
; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; GCN: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F64_1:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F64_2:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
; GCN: S_ENDPGM 0, implicit [[V_MUL_F64_]], implicit [[V_MUL_F64_1]], implicit [[V_MUL_F64_2]]
; GCN: %4:vreg_64 = nofpexcept V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %5:vreg_64 = nofpexcept V_MUL_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: %6:vreg_64 = nofpexcept V_MUL_F64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
; GCN: S_ENDPGM 0, implicit %4, implicit %5, implicit %6
%0:sgpr(s64) = COPY $sgpr0_sgpr1
%1:vgpr(s64) = COPY $vgpr0_vgpr1
%2:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -86,10 +86,10 @@ body: |
; GCN-LABEL: name: fmul_f16
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F16_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: S_ENDPGM 0, implicit [[V_MUL_F16_e64_]], implicit [[V_MUL_F16_e64_1]], implicit [[V_MUL_F16_e64_2]]
; GCN: %7:vgpr_32 = nofpexcept V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %8:vgpr_32 = nofpexcept V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %9:vgpr_32 = nofpexcept V_MUL_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: S_ENDPGM 0, implicit %7, implicit %8, implicit %9
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(s32) = COPY $vgpr1
@@ -123,26 +123,26 @@ body: |
; GCN-LABEL: name: fmul_modifiers_f32
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_3:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_4:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_5:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_6:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_7:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_8:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_MUL_F32_e64_9:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_3]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_4]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_5]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_6]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_7]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_8]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_9]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: %6:vgpr_32 = nofpexcept V_MUL_F32_e64 2, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %7:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %8:vgpr_32 = nofpexcept V_MUL_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %9:vgpr_32 = nofpexcept V_MUL_F32_e64 1, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %10:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %11:vgpr_32 = nofpexcept V_MUL_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %12:vgpr_32 = nofpexcept V_MUL_F32_e64 3, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %13:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %14:vgpr_32 = nofpexcept V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %15:vgpr_32 = nofpexcept V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: FLAT_STORE_DWORD [[COPY1]], %6, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %10, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %11, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %12, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %13, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %14, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY1]], %15, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
%2:vgpr(p1) = COPY $vgpr2_vgpr3
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
@@ -13,8 +13,8 @@ body: |
; GFX9-LABEL: name: fmul_v2f16_vv
; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
; GFX9: %2:vgpr_32 = nofpexcept V_PK_MUL_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
; GFX9: S_ENDPGM 0, implicit %2
%0:vgpr(<2 x s16>) = COPY $vgpr0
%1:vgpr(<2 x s16>) = COPY $vgpr1
%2:vgpr(<2 x s16>) = G_FMUL %0, %1
@@ -33,8 +33,8 @@ body: |
; GFX9-LABEL: name: fmul_v2f16_fneg_v_fneg_v
; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 11, [[COPY]], 11, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
; GFX9: %4:vgpr_32 = nofpexcept V_PK_MUL_F16 11, [[COPY]], 11, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
; GFX9: S_ENDPGM 0, implicit %4
%0:vgpr(<2 x s16>) = COPY $vgpr0
%1:vgpr(<2 x s16>) = COPY $vgpr1
%2:vgpr(<2 x s16>) = G_FNEG %0
@@ -60,8 +60,8 @@ body: |
; GFX9: [[FNEG:%[0-9]+]]:vgpr(s16) = G_FNEG [[TRUNC]]
; GFX9: [[ANYEXT:%[0-9]+]]:vgpr(s32) = G_ANYEXT [[FNEG]](s16)
; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:vgpr_32(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[COPY2]](s32)
; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32(<2 x s16>) = V_PK_MUL_F16 8, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 8, [[COPY]](<2 x s16>), 0, 0, 0, 0, 0, implicit $mode, implicit $exec
; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]](<2 x s16>)
; GFX9: %7:vgpr_32(<2 x s16>) = nofpexcept V_PK_MUL_F16 8, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 8, [[COPY]](<2 x s16>), 0, 0, 0, 0, 0, implicit $mode, implicit $exec
; GFX9: S_ENDPGM 0, implicit %7(<2 x s16>)
%0:vgpr(<2 x s16>) = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr1
%2:vgpr(s32) = COPY $vgpr2
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
@@ -14,8 +14,8 @@ body: |
; GCN-LABEL: name: fptosi_s32_to_s32_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
; GCN: %1:vgpr_32 = nofpexcept V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %1
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = G_FPTOSI %0
$vgpr0 = COPY %1
@@ -34,8 +34,8 @@ body: |
; GCN-LABEL: name: fptosi_s32_to_s32_vs
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
; GCN: %1:vgpr_32 = nofpexcept V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %1
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = G_FPTOSI %0
$vgpr0 = COPY %1
@@ -54,8 +54,8 @@ body: |
; GCN-LABEL: name: fptosi_s32_to_s32_fneg_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
; GCN: %2:vgpr_32 = nofpexcept V_CVT_I32_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = G_FNEG %0
%2:vgpr(s32) = G_FPTOSI %1
@@ -75,9 +75,9 @@ body: |
; GCN-LABEL: name: fptosi_s16_to_s32_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: %2:vgpr_32 = nofpexcept V_CVT_I32_F32_e32 %3, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s32) = G_FPTOSI %1
@@ -97,9 +97,9 @@ body: |
; GCN-LABEL: name: fptosi_s16_to_s32_vs
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: %2:vgpr_32 = nofpexcept V_CVT_I32_F32_e32 %3, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
%2:vgpr(s32) = G_FPTOSI %1
@@ -121,9 +121,9 @@ body: |
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
; GCN: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
; GCN: %4:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
; GCN: %3:vgpr_32 = nofpexcept V_CVT_I32_F32_e32 %4, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %3
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_FNEG %1
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
@@ -15,10 +15,10 @@ body: |
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
; GCN: [[V_CVT_U32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: [[V_CVT_U32_F32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_U32_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_U32_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: %3:vgpr_32 = nofpexcept V_CVT_U32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: %4:vgpr_32 = nofpexcept V_CVT_U32_F32_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
; GCN: FLAT_STORE_DWORD [[COPY2]], %3, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
; GCN: FLAT_STORE_DWORD [[COPY2]], %4, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
@@ -48,9 +48,9 @@ body: |
; GCN-LABEL: name: fptoui_s16_to_s32_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: %2:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 %3, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s32) = G_FPTOUI %1
@@ -70,9 +70,9 @@ body: |
; GCN-LABEL: name: fptoui_s16_to_s32_vs
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
; GCN: %2:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 %3, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
%2:vgpr(s32) = G_FPTOUI %1
@@ -94,9 +94,9 @@ body: |
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
; GCN: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
; GCN: %4:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
; GCN: %3:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 %4, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %3
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_FNEG %1
20 changes: 10 additions & 10 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
@@ -15,8 +15,8 @@ body: |
; GCN-LABEL: name: frint_s32_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
; GCN: %1:vgpr_32 = nofpexcept V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %1
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = G_FRINT %0
$vgpr0 = COPY %1
@@ -35,8 +35,8 @@ body: |
; GCN-LABEL: name: frint_s32_vs
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
; GCN: %1:vgpr_32 = nofpexcept V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %1
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = G_FRINT %0
$vgpr0 = COPY %1
@@ -55,8 +55,8 @@ body: |
; GCN-LABEL: name: frint_fneg_s32_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
; GCN: %2:vgpr_32 = nofpexcept V_RNDNE_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = G_FNEG %0
%2:vgpr(s32) = G_FRINT %1
@@ -76,8 +76,8 @@ body: |
; GCN-LABEL: name: frint_s64_vv
; GCN: liveins: $vgpr0_vgpr1
; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0_vgpr1 = COPY [[V_RNDNE_F64_e64_]]
; GCN: %1:vreg_64 = nofpexcept V_RNDNE_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0_vgpr1 = COPY %1
%0:vgpr(s64) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_FRINT %0
$vgpr0_vgpr1 = COPY %1
@@ -96,8 +96,8 @@ body: |
; GCN-LABEL: name: frint_s64_fneg_vv
; GCN: liveins: $vgpr0_vgpr1
; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0_vgpr1 = COPY [[V_RNDNE_F64_e64_]]
; GCN: %2:vreg_64 = nofpexcept V_RNDNE_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0_vgpr1 = COPY %2
%0:vgpr(s64) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_FNEG %0
%2:vgpr(s64) = G_FRINT %1
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
@@ -38,8 +38,8 @@ body: |
; GCN-LABEL: name: frint_s16_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
; GCN: %2:vgpr_32 = nofpexcept V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_FRINT %1
@@ -60,8 +60,8 @@ body: |
; GCN-LABEL: name: frint_s16_vs
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
; GCN: %2:vgpr_32 = nofpexcept V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_FRINT %1
@@ -82,8 +82,8 @@ body: |
; GCN-LABEL: name: frint_fneg_s16_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
; GCN: %3:vgpr_32 = nofpexcept V_RNDNE_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %3
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_FNEG %1
@@ -14,8 +14,8 @@ body: |
; CHECK-LABEL: name: intrinsic_trunc_s32_vv
; CHECK: liveins: $vgpr0
; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0 = COPY [[V_TRUNC_F32_e64_]]
; CHECK: %1:vgpr_32 = nofpexcept V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0 = COPY %1
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s32) = G_INTRINSIC_TRUNC %0
$vgpr0 = COPY %1
@@ -34,8 +34,8 @@ body: |
; CHECK-LABEL: name: intrinsic_trunc_s32_vs
; CHECK: liveins: $sgpr0
; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0 = COPY [[V_TRUNC_F32_e64_]]
; CHECK: %1:vgpr_32 = nofpexcept V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0 = COPY %1
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = G_INTRINSIC_TRUNC %0
$vgpr0 = COPY %1
@@ -54,8 +54,8 @@ body: |
; CHECK-LABEL: name: intrinsic_trunc_s64_sv
; CHECK: liveins: $sgpr0_sgpr1
; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0_vgpr1 = COPY [[V_TRUNC_F64_e64_]]
; CHECK: %1:vreg_64 = nofpexcept V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0_vgpr1 = COPY %1
%0:sgpr(s64) = COPY $sgpr0_sgpr1
%1:vgpr(s64) = G_INTRINSIC_TRUNC %0
$vgpr0_vgpr1 = COPY %1
@@ -74,8 +74,8 @@ body: |
; CHECK-LABEL: name: intrinsic_trunc_s64_vv
; CHECK: liveins: $vgpr0_vgpr1
; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0_vgpr1 = COPY [[V_TRUNC_F64_e64_]]
; CHECK: %1:vreg_64 = nofpexcept V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; CHECK: $vgpr0_vgpr1 = COPY %1
%0:vgpr(s64) = COPY $vgpr0_vgpr1
%1:vgpr(s64) = G_INTRINSIC_TRUNC %0
$vgpr0_vgpr1 = COPY %1
@@ -14,8 +14,8 @@ body: |
; GCN-LABEL: name: intrinsic_trunc_s16_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
; GCN: %2:vgpr_32 = nofpexcept V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_INTRINSIC_TRUNC %1
@@ -36,8 +36,8 @@ body: |
; GCN-LABEL: name: intrinsic_trunc_s16_vs
; GCN: liveins: $sgpr0
; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
; GCN: %2:vgpr_32 = nofpexcept V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %2
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_INTRINSIC_TRUNC %1
@@ -58,8 +58,8 @@ body: |
; GCN-LABEL: name: intrinsic_trunc_fneg_s16_vv
; GCN: liveins: $vgpr0
; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
; GCN: %3:vgpr_32 = nofpexcept V_TRUNC_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
; GCN: $vgpr0 = COPY %3
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_TRUNC %0
%2:vgpr(s16) = G_FNEG %1
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
@@ -59,15 +59,15 @@ body: |
; WAVE64: liveins: $vgpr0
; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY %1
; WAVE32-LABEL: name: sitofp_s32_to_s16_vv
; WAVE32: liveins: $vgpr0
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY %1
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_SITOFP %0
%2:vgpr(s32) = G_ANYEXT %1
@@ -88,15 +88,15 @@ body: |
; WAVE64: liveins: $sgpr0
; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY %1
; WAVE32-LABEL: name: sitofp_s32_to_s16_vs
; WAVE32: liveins: $sgpr0
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY %1
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s16) = G_SITOFP %0
%2:vgpr(s32) = G_ANYEXT %1
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
@@ -68,15 +68,15 @@ body: |
; WAVE64: liveins: $vgpr0
; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY %1
; WAVE32-LABEL: name: uitofp_s32_to_s16_vv
; WAVE32: liveins: $vgpr0
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY %1
%0:vgpr(s32) = COPY $vgpr0
%1:vgpr(s16) = G_UITOFP %0
%2:vgpr(s32) = G_ANYEXT %1
@@ -97,15 +97,15 @@ body: |
; WAVE64: liveins: $sgpr0
; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE64: $vgpr0 = COPY %1
; WAVE32-LABEL: name: uitofp_s32_to_s16_vs
; WAVE32: liveins: $sgpr0
; WAVE32: $vcc_hi = IMPLICIT_DEF
; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
; WAVE32: $vgpr0 = COPY %1
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s16) = G_UITOFP %0
%2:vgpr(s32) = G_ANYEXT %1
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/X86/GlobalISel/select-add.mir
@@ -175,16 +175,16 @@ registers:
- { id: 2, class: vecr }
# SSE: %0:vr128 = COPY $xmm0
# SSE-NEXT: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = ADDPSrr %0, %1
# SSE-NEXT: %2:vr128 = nofpexcept ADDPSrr %0, %1
# AVX: %0:vr128 = COPY $xmm0
# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr128 = VADDPSrr %0, %1
# AVX-NEXT: %2:vr128 = nofpexcept VADDPSrr %0, %1
# AVX512F: %0:vr128 = COPY $xmm0
# AVX512F-NEXT: %1:vr128 = COPY $xmm1
# AVX512F-NEXT: %2:vr128 = VADDPSrr %0, %1
# AVX512F-NEXT: %2:vr128 = nofpexcept VADDPSrr %0, %1
# AVX512VL: %0:vr128x = COPY $xmm0
# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VADDPSZ128rr %0, %1
# AVX512VL-NEXT: %2:vr128x = nofpexcept VADDPSZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $xmm0, $xmm1
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
@@ -44,35 +44,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSSrr]]
; SSE: %4:fr32 = nofpexcept ADDSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_float
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSSrr]]
; AVX: %4:fr32 = nofpexcept VADDSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_float
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
; AVX512F: %4:fr32x = nofpexcept VADDSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_float
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
; AVX512VL: %4:fr32x = nofpexcept VADDSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
@@ -113,35 +113,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSDrr]]
; SSE: %4:fr64 = nofpexcept ADDSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_double
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSDrr]]
; AVX: %4:fr64 = nofpexcept VADDSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_double
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
; AVX512F: %4:fr64x = nofpexcept VADDSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_double
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
; AVX512VL: %4:fr64x = nofpexcept VADDSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
@@ -44,35 +44,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSSrr]]
; SSE: %4:fr32 = nofpexcept DIVSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_float
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSSrr]]
; AVX: %4:fr32 = nofpexcept VDIVSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_float
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
; AVX512F: %4:fr32x = nofpexcept VDIVSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_float
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
; AVX512VL: %4:fr32x = nofpexcept VDIVSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
@@ -113,35 +113,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSDrr]]
; SSE: %4:fr64 = nofpexcept DIVSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_double
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSDrr]]
; AVX: %4:fr64 = nofpexcept VDIVSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_double
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
; AVX512F: %4:fr64x = nofpexcept VDIVSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_double
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
; AVX512VL: %4:fr64x = nofpexcept VDIVSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
@@ -44,35 +44,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; SSE: [[MULSSrr:%[0-9]+]]:fr32 = MULSSrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[MULSSrr]]
; SSE: %4:fr32 = nofpexcept MULSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_float
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; AVX: [[VMULSSrr:%[0-9]+]]:fr32 = VMULSSrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VMULSSrr]]
; AVX: %4:fr32 = nofpexcept VMULSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_float
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512F: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSSZrr]]
; AVX512F: %4:fr32x = nofpexcept VMULSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_float
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512VL: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSSZrr]]
; AVX512VL: %4:fr32x = nofpexcept VMULSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
@@ -113,35 +113,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; SSE: [[MULSDrr:%[0-9]+]]:fr64 = MULSDrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[MULSDrr]]
; SSE: %4:fr64 = nofpexcept MULSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_double
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; AVX: [[VMULSDrr:%[0-9]+]]:fr64 = VMULSDrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VMULSDrr]]
; AVX: %4:fr64 = nofpexcept VMULSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_double
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512F: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSDZrr]]
; AVX512F: %4:fr64x = nofpexcept VMULSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_double
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512VL: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSDZrr]]
; AVX512VL: %4:fr64x = nofpexcept VMULSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
@@ -30,8 +30,8 @@ body: |
; ALL-LABEL: name: test
; ALL: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; ALL: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; ALL: [[CVTSS2SDrr:%[0-9]+]]:fr64 = CVTSS2SDrr [[COPY1]]
; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY [[CVTSS2SDrr]]
; ALL: %2:fr64 = nofpexcept CVTSS2SDrr [[COPY1]], implicit $mxcsr
; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY %2
; ALL: $xmm0 = COPY [[COPY2]]
; ALL: RET 0, implicit $xmm0
%1:vecr(s128) = COPY $xmm0
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/GlobalISel/select-fptrunc-scalar.mir
@@ -27,8 +27,8 @@ body: |
; ALL: liveins: $xmm0
; ALL: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; ALL: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; ALL: [[CVTSD2SSrr:%[0-9]+]]:fr32 = CVTSD2SSrr [[COPY1]]
; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY [[CVTSD2SSrr]]
; ALL: %2:fr32 = nofpexcept CVTSD2SSrr [[COPY1]], implicit $mxcsr
; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY %2
; ALL: $xmm0 = COPY [[COPY2]]
; ALL: RET 0, implicit $xmm0
%1:vecr(s128) = COPY $xmm0
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
@@ -44,35 +44,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSSrr]]
; SSE: %4:fr32 = nofpexcept SUBSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_float
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSSrr]]
; AVX: %4:fr32 = nofpexcept VSUBSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_float
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
; AVX512F: %4:fr32x = nofpexcept VSUBSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_float
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
; AVX512VL: %4:fr32x = nofpexcept VSUBSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
@@ -113,35 +113,35 @@ body: |
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY1]], [[COPY3]]
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSDrr]]
; SSE: %4:fr64 = nofpexcept SUBSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_double
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY1]], [[COPY3]]
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSDrr]]
; AVX: %4:fr64 = nofpexcept VSUBSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_double
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
; AVX512F: %4:fr64x = nofpexcept VSUBSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_double
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
; AVX512VL: %4:fr64x = nofpexcept VSUBSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
%2:vecr(s128) = COPY $xmm0
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
@@ -111,13 +111,13 @@ registers:
- { id: 2, class: vecr }
# NO_AVX512VL: %0:vr128 = COPY $xmm0
# NO_AVX512VL: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = SUBPSrr %0, %1
# AVX-NEXT: %2:vr128 = VSUBPSrr %0, %1
# AVX512F-NEXT: %2:vr128 = VSUBPSrr %0, %1
# SSE-NEXT: %2:vr128 = nofpexcept SUBPSrr %0, %1
# AVX-NEXT: %2:vr128 = nofpexcept VSUBPSrr %0, %1
# AVX512F-NEXT: %2:vr128 = nofpexcept VSUBPSrr %0, %1
#
# AVX512VL: %0:vr128x = COPY $xmm0
# AVX512VL: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VSUBPSZ128rr %0, %1
# AVX512VL-NEXT: %2:vr128x = nofpexcept VSUBPSZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
liveins: $xmm0, $xmm1
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/X86/GlobalISel/x86_64-select-fptosi.mir
@@ -71,8 +71,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; CHECK: [[CVTTSS2SIrr:%[0-9]+]]:gr32 = CVTTSS2SIrr [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[CVTTSS2SIrr]].sub_8bit
; CHECK: %3:gr32 = nofpexcept CVTTSS2SIrr [[COPY1]], implicit $mxcsr
; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY %3.sub_8bit
; CHECK: $al = COPY [[COPY2]]
; CHECK: RET 0, implicit $al
%1:vecr(s128) = COPY $xmm0
@@ -102,8 +102,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; CHECK: [[CVTTSS2SIrr:%[0-9]+]]:gr32 = CVTTSS2SIrr [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY [[CVTTSS2SIrr]].sub_16bit
; CHECK: %3:gr32 = nofpexcept CVTTSS2SIrr [[COPY1]], implicit $mxcsr
; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY %3.sub_16bit
; CHECK: $ax = COPY [[COPY2]]
; CHECK: RET 0, implicit $ax
%1:vecr(s128) = COPY $xmm0
@@ -132,8 +132,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; CHECK: [[CVTTSS2SIrr:%[0-9]+]]:gr32 = CVTTSS2SIrr [[COPY1]]
; CHECK: $eax = COPY [[CVTTSS2SIrr]]
; CHECK: %2:gr32 = nofpexcept CVTTSS2SIrr [[COPY1]], implicit $mxcsr
; CHECK: $eax = COPY %2
; CHECK: RET 0, implicit $eax
%1:vecr(s128) = COPY $xmm0
%0:vecr(s32) = G_TRUNC %1(s128)
@@ -160,8 +160,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; CHECK: [[CVTTSS2SI64rr:%[0-9]+]]:gr64 = CVTTSS2SI64rr [[COPY1]]
; CHECK: $rax = COPY [[CVTTSS2SI64rr]]
; CHECK: %2:gr64 = nofpexcept CVTTSS2SI64rr [[COPY1]], implicit $mxcsr
; CHECK: $rax = COPY %2
; CHECK: RET 0, implicit $rax
%1:vecr(s128) = COPY $xmm0
%0:vecr(s32) = G_TRUNC %1(s128)
@@ -189,8 +189,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; CHECK: [[CVTTSD2SIrr:%[0-9]+]]:gr32 = CVTTSD2SIrr [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[CVTTSD2SIrr]].sub_8bit
; CHECK: %3:gr32 = nofpexcept CVTTSD2SIrr [[COPY1]], implicit $mxcsr
; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY %3.sub_8bit
; CHECK: $al = COPY [[COPY2]]
; CHECK: RET 0, implicit $al
%1:vecr(s128) = COPY $xmm0
@@ -220,8 +220,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; CHECK: [[CVTTSD2SIrr:%[0-9]+]]:gr32 = CVTTSD2SIrr [[COPY1]]
; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY [[CVTTSD2SIrr]].sub_16bit
; CHECK: %3:gr32 = nofpexcept CVTTSD2SIrr [[COPY1]], implicit $mxcsr
; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY %3.sub_16bit
; CHECK: $ax = COPY [[COPY2]]
; CHECK: RET 0, implicit $ax
%1:vecr(s128) = COPY $xmm0
@@ -250,8 +250,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; CHECK: [[CVTTSD2SIrr:%[0-9]+]]:gr32 = CVTTSD2SIrr [[COPY1]]
; CHECK: $eax = COPY [[CVTTSD2SIrr]]
; CHECK: %2:gr32 = nofpexcept CVTTSD2SIrr [[COPY1]], implicit $mxcsr
; CHECK: $eax = COPY %2
; CHECK: RET 0, implicit $eax
%1:vecr(s128) = COPY $xmm0
%0:vecr(s64) = G_TRUNC %1(s128)
@@ -278,8 +278,8 @@ body: |
; CHECK: liveins: $xmm0
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; CHECK: [[CVTTSD2SI64rr:%[0-9]+]]:gr64 = CVTTSD2SI64rr [[COPY1]]
; CHECK: $rax = COPY [[CVTTSD2SI64rr]]
; CHECK: %2:gr64 = nofpexcept CVTTSD2SI64rr [[COPY1]], implicit $mxcsr
; CHECK: $rax = COPY %2
; CHECK: RET 0, implicit $rax
%1:vecr(s128) = COPY $xmm0
%0:vecr(s64) = G_TRUNC %1(s128)
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir
@@ -61,8 +61,8 @@ body: |
; CHECK-LABEL: name: int32_to_float
; CHECK: liveins: $edi
; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[CVTSI2SSrr:%[0-9]+]]:fr32 = CVTSI2SSrr [[COPY]]
; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY [[CVTSI2SSrr]]
; CHECK: %1:fr32 = nofpexcept CVTSI2SSrr [[COPY]], implicit $mxcsr
; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %1
; CHECK: $xmm0 = COPY [[COPY1]]
; CHECK: RET 0, implicit $xmm0
%0:gpr(s32) = COPY $edi
@@ -89,8 +89,8 @@ body: |
; CHECK-LABEL: name: int64_to_float
; CHECK: liveins: $rdi
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[CVTSI642SSrr:%[0-9]+]]:fr32 = CVTSI642SSrr [[COPY]]
; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY [[CVTSI642SSrr]]
; CHECK: %1:fr32 = nofpexcept CVTSI642SSrr [[COPY]], implicit $mxcsr
; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %1
; CHECK: $xmm0 = COPY [[COPY1]]
; CHECK: RET 0, implicit $xmm0
%0:gpr(s64) = COPY $rdi
@@ -145,8 +145,8 @@ body: |
; CHECK-LABEL: name: int64_to_double
; CHECK: liveins: $rdi
; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[CVTSI642SDrr:%[0-9]+]]:fr64 = CVTSI642SDrr [[COPY]]
; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY [[CVTSI642SDrr]]
; CHECK: %1:fr64 = nofpexcept CVTSI642SDrr [[COPY]], implicit $mxcsr
; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %1
; CHECK: $xmm0 = COPY [[COPY1]]
; CHECK: RET 0, implicit $xmm0
%0:gpr(s64) = COPY $rdi