; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
- ; RUN: opt -passes='loop-vectorize,loop-unroll' -force-vector-width=4 -S < %s | FileCheck %s
+ ; RUN: opt -passes='loop-vectorize' -force-vector-width=4 -S < %s | FileCheck %s

; Test if the follow-up metadata for loops works fine. The original code is
- ; something like below. In this case, unrolling should be applied after
- ; vectorization.
+ ; something like below. In this case, the unrolling metadata from the original
+ ; loop should be applied to the vector loop as well.
;
; void f(float *a, float x) {
; #pragma clang loop vectorize(enable) unroll_count(8)
;   for (int i = 0; i != 1024; i++) {
;     a[i] = x * a[i];
;   }
; }
;
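; The scalar loop below carries !llvm.loop !0 (defined at the bottom of this
; file): !1 enables vectorization, and the "llvm.loop.vectorize.followup_all"
; entry (!2) asks the vectorizer to attach !3 ("llvm.loop.isvectorized") and
; !4 ("llvm.loop.unroll.count", i32 8) to every loop it creates.
;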
- define void @f(ptr noundef captures(none) %a, float noundef %x) {
- ; CHECK-LABEL: define void @f(
+ define void @scalar_loop_dead(ptr noundef captures(none) %a, float noundef %x) {
+ ; CHECK-LABEL: define void @scalar_loop_dead(
; CHECK-SAME: ptr noundef captures(none) [[A:%.*]], float noundef [[X:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
@@ -22,126 +22,113 @@ define void @f(ptr noundef captures(none) %a, float noundef %x) {
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
- ; CHECK-NEXT: [[INDEX_NEXT_6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
- ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_6]]
- ; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x float>, ptr [[TMP14]], align 4
- ; CHECK-NEXT: [[TMP15:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_7]]
- ; CHECK-NEXT: store <4 x float> [[TMP15]], ptr [[TMP14]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT1:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 4
- ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT1]]
- ; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
- ; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_1]]
- ; CHECK-NEXT: store <4 x float> [[TMP3]], ptr [[TMP2]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_1:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 8
- ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_1]]
- ; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x float>, ptr [[TMP16]], align 4
- ; CHECK-NEXT: [[TMP5:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_2]]
- ; CHECK-NEXT: store <4 x float> [[TMP5]], ptr [[TMP16]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_2:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 12
- ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_2]]
- ; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
- ; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_3]]
- ; CHECK-NEXT: store <4 x float> [[TMP7]], ptr [[TMP6]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_3:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 16
- ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_3]]
- ; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x float>, ptr [[TMP8]], align 4
- ; CHECK-NEXT: [[TMP9:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_4]]
- ; CHECK-NEXT: store <4 x float> [[TMP9]], ptr [[TMP8]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_4:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 20
- ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_4]]
- ; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x float>, ptr [[TMP10]], align 4
- ; CHECK-NEXT: [[TMP11:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_5]]
- ; CHECK-NEXT: store <4 x float> [[TMP11]], ptr [[TMP10]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_5:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 24
- ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_5]]
- ; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x float>, ptr [[TMP12]], align 4
- ; CHECK-NEXT: [[TMP13:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_6]]
- ; CHECK-NEXT: store <4 x float> [[TMP13]], ptr [[TMP12]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT_7:%.*]] = add nuw nsw i64 [[INDEX_NEXT_6]], 28
- ; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX_NEXT_7]]
- ; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x float>, ptr [[TMP17]], align 4
- ; CHECK-NEXT: [[TMP18:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD_8]]
- ; CHECK-NEXT: store <4 x float> [[TMP18]], ptr [[TMP17]], align 4
- ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i64 [[INDEX_NEXT_6]], 32
- ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
- ; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+ ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+ ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+ ; CHECK-NEXT: [[TMP1:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
+ ; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[TMP0]], align 4
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+ ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+ ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
- ; CHECK-NEXT: br label %[[FOR_BODY:.*]]
- ; CHECK: [[FOR_BODY]]:
- ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
+ ; CHECK-NEXT: br label %[[LOOP:.*]]
+ ; CHECK: [[LOOP]]:
+ ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[LOAD:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = fmul float [[X]], [[LOAD]]
; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4
- ; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
- ; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT]]
- ; CHECK-NEXT: [[LOAD_1:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
- ; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[X]], [[LOAD_1]]
- ; CHECK-NEXT: store float [[MUL_1]], ptr [[ARRAYIDX_1]], align 4
- ; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2
- ; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_1]]
- ; CHECK-NEXT: [[LOAD_2:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
- ; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[X]], [[LOAD_2]]
- ; CHECK-NEXT: store float [[MUL_2]], ptr [[ARRAYIDX_2]], align 4
- ; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3
- ; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_2]]
- ; CHECK-NEXT: [[LOAD_3:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
- ; CHECK-NEXT: [[MUL_3:%.*]] = fmul float [[X]], [[LOAD_3]]
- ; CHECK-NEXT: store float [[MUL_3]], ptr [[ARRAYIDX_3]], align 4
- ; CHECK-NEXT: [[IV_NEXT_3:%.*]] = add nuw nsw i64 [[IV]], 4
- ; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_3]]
- ; CHECK-NEXT: [[LOAD_4:%.*]] = load float, ptr [[ARRAYIDX_4]], align 4
- ; CHECK-NEXT: [[MUL_4:%.*]] = fmul float [[X]], [[LOAD_4]]
- ; CHECK-NEXT: store float [[MUL_4]], ptr [[ARRAYIDX_4]], align 4
- ; CHECK-NEXT: [[IV_NEXT_4:%.*]] = add nuw nsw i64 [[IV]], 5
- ; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_4]]
- ; CHECK-NEXT: [[LOAD_5:%.*]] = load float, ptr [[ARRAYIDX_5]], align 4
- ; CHECK-NEXT: [[MUL_5:%.*]] = fmul float [[X]], [[LOAD_5]]
- ; CHECK-NEXT: store float [[MUL_5]], ptr [[ARRAYIDX_5]], align 4
- ; CHECK-NEXT: [[IV_NEXT_5:%.*]] = add nuw nsw i64 [[IV]], 6
- ; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_5]]
- ; CHECK-NEXT: [[LOAD_6:%.*]] = load float, ptr [[ARRAYIDX_6]], align 4
- ; CHECK-NEXT: [[MUL_6:%.*]] = fmul float [[X]], [[LOAD_6]]
- ; CHECK-NEXT: store float [[MUL_6]], ptr [[ARRAYIDX_6]], align 4
- ; CHECK-NEXT: [[IV_NEXT_6:%.*]] = add nuw nsw i64 [[IV]], 7
- ; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV_NEXT_6]]
- ; CHECK-NEXT: [[LOAD_7:%.*]] = load float, ptr [[ARRAYIDX_7]], align 4
- ; CHECK-NEXT: [[MUL_7:%.*]] = fmul float [[X]], [[LOAD_7]]
- ; CHECK-NEXT: store float [[MUL_7]], ptr [[ARRAYIDX_7]], align 4
- ; CHECK-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8
- ; CHECK-NEXT: br i1 true, label %[[EXIT_LOOPEXIT:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
- ; CHECK: [[EXIT_LOOPEXIT]]:
- ; CHECK-NEXT: br label %[[EXIT]]
+ ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+ ; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+ ; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
- br label %for.body
+ br label %loop

- for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%arrayidx = getelementptr inbounds nuw float, ptr %a, i64 %iv
%load = load float, ptr %arrayidx, align 4
%mul = fmul float %x, %load
store float %mul, ptr %arrayidx, align 4
%iv.next = add nuw nsw i64 %iv, 1
%comp = icmp eq i64 %iv.next, 1024
- br i1 %comp, label %exit, label %for.body, !llvm.loop !0
+ br i1 %comp, label %exit, label %loop, !llvm.loop !0

exit:
ret void
}
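+ ; Same pattern, but with a runtime trip count %n: here the scalar remainder
+ ; loop stays reachable, so the follow-up metadata should land on both the
+ ; vector loop and the remainder loop.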
+ define void @scalar_loop_live(ptr noundef captures(none) %a, float noundef %x, i64 %n) {
+ ; CHECK-LABEL: define void @scalar_loop_live(
+ ; CHECK-SAME: ptr noundef captures(none) [[A:%.*]], float noundef [[X:%.*]], i64 [[N:%.*]]) {
+ ; CHECK-NEXT: [[ENTRY:.*]]:
+ ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
+ ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+ ; CHECK: [[VECTOR_PH]]:
+ ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+ ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+ ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[X]], i64 0
+ ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+ ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+ ; CHECK: [[VECTOR_BODY]]:
+ ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+ ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+ ; CHECK-NEXT: [[TMP1:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
+ ; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[TMP0]], align 4
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+ ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+ ; CHECK-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+ ; CHECK: [[MIDDLE_BLOCK]]:
+ ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+ ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+ ; CHECK: [[SCALAR_PH]]:
+ ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+ ; CHECK-NEXT: br label %[[LOOP:.*]]
+ ; CHECK: [[LOOP]]:
+ ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+ ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[IV]]
+ ; CHECK-NEXT: [[LOAD:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+ ; CHECK-NEXT: [[MUL:%.*]] = fmul float [[X]], [[LOAD]]
+ ; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX]], align 4
+ ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+ ; CHECK-NEXT: [[COMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+ ; CHECK-NEXT: br i1 [[COMP]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
+ ; CHECK: [[EXIT]]:
+ ; CHECK-NEXT: ret void
+ ;
+ entry:
+ br label %loop
+ 
+ loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %arrayidx = getelementptr inbounds nuw float, ptr %a, i64 %iv
+ %load = load float, ptr %arrayidx, align 4
+ %mul = fmul float %x, %load
+ store float %mul, ptr %arrayidx, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %comp = icmp eq i64 %iv.next, %n
+ br i1 %comp, label %exit, label %loop, !llvm.loop !0
+ 
+ exit:
+ ret void
+ }
!0 = distinct !{!0, !1, !2}
!1 = !{!"llvm.loop.vectorize.enable", i1 true}
!2 = !{!"llvm.loop.vectorize.followup_all", !3, !4}
!3 = !{!"llvm.loop.isvectorized"}
!4 = !{!"llvm.loop.unroll.count", i32 8}
;.
- ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+ ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized"}
- ; CHECK: [[META2]] = !{!"llvm.loop.unroll.disable"}
- ; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+ ; CHECK: [[META2]] = !{!"llvm.loop.unroll.count", i32 8}
+ ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+ ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+ ; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+ ; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
;.
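; Note: per the new checks above, the vector loops ([[LOOP0]], [[LOOP5]]) are
; expected to carry the two follow-up entries plus
; "llvm.loop.unroll.runtime.disable", while the scalar loops ([[LOOP4]],
; [[LOOP6]]) carry only the follow-up entries.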