; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X86
; Make sure that flags are properly preserved despite atomic optimizations.
define i32 @atomic_and_flags_1(i8* %p, i32 %a, i32 %b) {
; X64-LABEL: atomic_and_flags_1:
; X64: # %bb.0:
; X64-NEXT: cmpl %edx, %esi
; X64-NEXT: jne .LBB0_3
; X64-NEXT: # %bb.1: # %L1
; X64-NEXT: incb (%rdi)
; X64-NEXT: cmpl %edx, %esi
; X64-NEXT: jne .LBB0_2
; X64-NEXT: # %bb.4: # %L3
; X64-NEXT: movl $3, %eax
; X64-NEXT: retq
; X64-NEXT: .LBB0_3: # %L2
; X64-NEXT: movl $2, %eax
; X64-NEXT: retq
; X64-NEXT: .LBB0_2: # %L4
; X64-NEXT: movl $4, %eax
; X64-NEXT: retq
;
; X86-LABEL: atomic_and_flags_1:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: jne .LBB0_3
; X86-NEXT: # %bb.1: # %L1
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: incb (%edx)
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: jne .LBB0_2
; X86-NEXT: # %bb.4: # %L3
; X86-NEXT: movl $3, %eax
; X86-NEXT: retl
; X86-NEXT: .LBB0_3: # %L2
; X86-NEXT: movl $2, %eax
; X86-NEXT: retl
; X86-NEXT: .LBB0_2: # %L4
; X86-NEXT: movl $4, %eax
; X86-NEXT: retl
; Generate the flags value, and use it.
%cmp = icmp eq i32 %a, %b
br i1 %cmp, label %L1, label %L2
L1:
; The following load/add/store sequence will get folded into a single memory-operand instruction.
%1 = load atomic i8, i8* %p seq_cst, align 1
%2 = add i8 %1, 1 ; This forces the INC instruction to be generated.
store atomic i8 %2, i8* %p release, align 1
; Use the comparison result again. We need to rematerialize the comparison
; somehow. This test checks that cmpl gets emitted again, but any
; rematerialization would work (the optimizer used to clobber the flags with
; the add).
br i1 %cmp, label %L3, label %L4
L2:
ret i32 2
L3:
ret i32 3
L4:
ret i32 4
}
; Same as above, but using 2 as the immediate so an ADD is generated instead of INC.
define i32 @atomic_and_flags_2(i8* %p, i32 %a, i32 %b) {
; X64-LABEL: atomic_and_flags_2:
; X64: # %bb.0:
; X64-NEXT: cmpl %edx, %esi
; X64-NEXT: jne .LBB1_3
; X64-NEXT: # %bb.1: # %L1
; X64-NEXT: addb $2, (%rdi)
; X64-NEXT: cmpl %edx, %esi
; X64-NEXT: jne .LBB1_2
; X64-NEXT: # %bb.4: # %L3
; X64-NEXT: movl $3, %eax
; X64-NEXT: retq
; X64-NEXT: .LBB1_3: # %L2
; X64-NEXT: movl $2, %eax
; X64-NEXT: retq
; X64-NEXT: .LBB1_2: # %L4
; X64-NEXT: movl $4, %eax
; X64-NEXT: retq
;
; X86-LABEL: atomic_and_flags_2:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: jne .LBB1_3
; X86-NEXT: # %bb.1: # %L1
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: addb $2, (%edx)
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: jne .LBB1_2
; X86-NEXT: # %bb.4: # %L3
; X86-NEXT: movl $3, %eax
; X86-NEXT: retl
; X86-NEXT: .LBB1_3: # %L2
; X86-NEXT: movl $2, %eax
; X86-NEXT: retl
; X86-NEXT: .LBB1_2: # %L4
; X86-NEXT: movl $4, %eax
; X86-NEXT: retl
%cmp = icmp eq i32 %a, %b
br i1 %cmp, label %L1, label %L2
L1:
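; As in @atomic_and_flags_1, the load/add/store below gets folded into a single
; memory-operand add (addb $2), which clobbers EFLAGS.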
%1 = load atomic i8, i8* %p seq_cst, align 1
%2 = add i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
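; Reuse the comparison result; the cmpl must be rematerialized because the
; folded add clobbered the flags.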
br i1 %cmp, label %L3, label %L4
L2:
ret i32 2
L3:
ret i32 3
L4:
ret i32 4
}