2 changes: 1 addition & 1 deletion llvm/test/CodeGen/ARM/GlobalISel/arm-unsupported.ll
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ define i17 @test_funny_ints(i17 %a, i17 %b) {
}

define half @test_half(half %a, half %b) {
; CHECK: remark: {{.*}} unable to lower arguments: half (half, half)* (in function: test_half)
; CHECK: remark: {{.*}} unable to legalize instruction: %{{[0-9]+}}:_(s16) = G_FADD %{{[0-9]+}}:_, %{{[0-9]+}}:_ (in function: test_half)
; CHECK-LABEL: warning: Instruction selection used fallback path for test_half
%res = fadd half %a, %b
ret half %res
Expand Down
12 changes: 8 additions & 4 deletions llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,10 @@ define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
;
; X86-LABEL: test_add_i16:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addw {{[0-9]+}}(%esp), %ax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addw %cx, %ax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
%ret = add i16 %arg1, %arg2
ret i16 %ret
Expand All @@ -65,8 +67,10 @@ define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
;
; X86-LABEL: test_add_i8:
; X86: # %bb.0:
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: addb {{[0-9]+}}(%esp), %al
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addb %cl, %al
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
%ret = add i8 %arg1, %arg2
ret i8 %ret
Expand Down
6 changes: 4 additions & 2 deletions llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,8 @@ define i64 @test_ret_i64() {
define i8 @test_arg_i8(i8 %a) {
; X32-LABEL: test_arg_i8:
; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test_arg_i8:
Expand All @@ -46,7 +47,8 @@ define i8 @test_arg_i8(i8 %a) {
define i16 @test_arg_i16(i16 %a) {
; X32-LABEL: test_arg_i16:
; X32: # %bb.0:
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; X64-LABEL: test_arg_i16:
Expand Down
2 changes: 2 additions & 0 deletions llvm/test/CodeGen/X86/GlobalISel/ext.ll
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
; XFAIL: *
; FIXME: This test is broken due to https://bugs.llvm.org/show_bug.cgi?id=50035
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32
Expand Down
42 changes: 26 additions & 16 deletions llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
Original file line number Diff line number Diff line change
Expand Up @@ -10,28 +10,36 @@ define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4, i8 %arg5, i8 %
; X86-LABEL: name: test_i8_args_8
; X86: bb.1.entry:
; X86: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
; X86: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.7, align 16)
; X86: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.7, align 16)
; X86: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD]](s32)
; X86: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
; X86: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.6, align 4)
; X86: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.6, align 4)
; X86: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD1]](s32)
; X86: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
; X86: [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 1 from %fixed-stack.5, align 8)
; X86: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 1 from %fixed-stack.5, align 8)
; X86: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD2]](s32)
; X86: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
; X86: [[LOAD3:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 1 from %fixed-stack.4, align 4)
; X86: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 1 from %fixed-stack.4, align 4)
; X86: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD3]](s32)
; X86: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
; X86: [[LOAD4:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 1 from %fixed-stack.3, align 16)
; X86: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 1 from %fixed-stack.3, align 16)
; X86: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD4]](s32)
; X86: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; X86: [[LOAD5:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 1 from %fixed-stack.2, align 4)
; X86: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 1 from %fixed-stack.2, align 4)
; X86: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD5]](s32)
; X86: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X86: [[LOAD6:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 1 from %fixed-stack.1, align 8)
; X86: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 1 from %fixed-stack.1, align 8)
; X86: [[TRUNC6:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD6]](s32)
; X86: [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X86: [[LOAD7:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 1 from %fixed-stack.0, align 4)
; X86: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 1 from %fixed-stack.0, align 4)
; X86: [[TRUNC7:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD7]](s32)
; X86: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_8bit
; X86: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_8bit
; X86: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_8bit
; X86: G_STORE [[LOAD]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X86: G_STORE [[LOAD6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X86: G_STORE [[LOAD7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
; X86: $al = COPY [[LOAD]](s8)
; X86: G_STORE [[TRUNC]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X86: G_STORE [[TRUNC6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X86: G_STORE [[TRUNC7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
; X86: $al = COPY [[TRUNC]](s8)
; X86: RET 0, implicit $al
; X64-LABEL: name: test_i8_args_8
; X64: bb.1.entry:
Expand All @@ -49,15 +57,17 @@ define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4, i8 %arg5, i8 %
; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
; X64: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[COPY5]](s32)
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.1, align 16)
; X64: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.1, align 16)
; X64: [[TRUNC6:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD]](s32)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X64: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0, align 8)
; X64: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0, align 8)
; X64: [[TRUNC7:%[0-9]+]]:_(s8) = G_TRUNC [[LOAD1]](s32)
; X64: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_8bit
; X64: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_8bit
; X64: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_8bit
; X64: G_STORE [[TRUNC]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X64: G_STORE [[LOAD]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X64: G_STORE [[LOAD1]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
; X64: G_STORE [[TRUNC6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X64: G_STORE [[TRUNC7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
; X64: $al = COPY [[TRUNC]](s8)
; X64: RET 0, implicit $al
entry:
Expand Down
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_sp
; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s | FileCheck %s

Expand Down Expand Up @@ -47,7 +47,7 @@ define i32 @test_load_i32(i32 * %p1) {
define i1 * @test_store_i1(i1 %val, i1 * %p1) {
; CHECK-LABEL: test_store_i1:
; CHECK: # %bb.0:
; CHECK-NEXT: movb 4(%esp), %cl
; CHECK-NEXT: movl 4(%esp), %ecx
; CHECK-NEXT: movl 8(%esp), %eax
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: movb %cl, (%eax)
Expand All @@ -59,7 +59,7 @@ define i1 * @test_store_i1(i1 %val, i1 * %p1) {
define i8 * @test_store_i8(i8 %val, i8 * %p1) {
; CHECK-LABEL: test_store_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: movb 4(%esp), %cl
; CHECK-NEXT: movl 4(%esp), %ecx
; CHECK-NEXT: movl 8(%esp), %eax
; CHECK-NEXT: movb %cl, (%eax)
; CHECK-NEXT: retl
Expand All @@ -70,7 +70,7 @@ define i8 * @test_store_i8(i8 %val, i8 * %p1) {
define i16 * @test_store_i16(i16 %val, i16 * %p1) {
; CHECK-LABEL: test_store_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: movzwl 4(%esp), %ecx
; CHECK-NEXT: movl 4(%esp), %ecx
; CHECK-NEXT: movl 8(%esp), %eax
; CHECK-NEXT: movw %cx, (%eax)
; CHECK-NEXT: retl
Expand Down