[X86][update_llc_test_checks] Use a less greedy regular expression for replacing constant pool labels in tests.

While working on D97208, I noticed that these greedy regular expressions
prevent tests from failing when (%rip) appears after a constant pool
label where it didn't before.

Reviewed By: RKSimon, pengfei

Differential Revision: https://reviews.llvm.org/D99460
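For context, here is a minimal, self-contained sketch of the failure mode the less greedy pattern fixes. The instruction, label, and register are representative examples, not lines taken from any particular test:

```python
import re

# Regenerated assembly after a codegen change: the constant pool
# reference has become RIP-relative.
line = "paddd .LCPI0_0(%rip), %xmm0"

# Equivalent of the old CHECK line "paddd {{\.LCPI.*}}, %xmm0":
# the greedy .* also swallows "(%rip)", so a stale test keeps passing.
old_check = re.compile(r"paddd \.LCPI.*, %xmm0")

# Equivalent of the new CHECK line "paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0":
# only the label itself is wild, so a trailing "(%rip)" breaks the match.
new_check = re.compile(r"paddd \.LCPI[0-9]+_[0-9]+, %xmm0")

print(bool(old_check.match(line)))  # True  -> the change goes unnoticed
print(bool(new_check.match(line)))  # False -> the test fails, as it should
```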
topperc committed Mar 28, 2021
1 parent 1e9746d commit 0248e24
Showing 144 changed files with 1,680 additions and 1,680 deletions.
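The 144 files below are just the regenerated tests; the script-side change itself is not shown on this page. As a rough sketch of the substitution involved (the regex constant and helper name here are assumptions for illustration, not code quoted from this commit), the scrubber replaces each concrete label with a FileCheck placeholder, and this commit tightens that placeholder:

```python
import re

# Concrete constant pool labels emitted by llc, e.g. ".LCPI0_0".
LCP_LABEL_RE = re.compile(r"\.LCPI[0-9]+_[0-9]+")

def scrub_constant_pool_labels(asm: str) -> str:
    # Before this commit the emitted placeholder was the greedy
    # r"{{\.LCPI.*}}"; now only the label itself is matched.
    return LCP_LABEL_RE.sub(r"{{\.LCPI[0-9]+_[0-9]+}}", asm)

# Example: everything after the label stays a literal in the check line.
print(scrub_constant_pool_labels("paddd .LCPI0_0, %xmm0"))
# -> paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
```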
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/WidenArith.ll
@@ -11,7 +11,7 @@ define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
; X86-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; X86-NEXT: vcmpltps %ymm3, %ymm2, %ymm1
; X86-NEXT: vandps %ymm1, %ymm0, %ymm0
-; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: test:
34 changes: 17 additions & 17 deletions llvm/test/CodeGen/X86/addsub-constant-folding.ll
@@ -64,7 +64,7 @@ define i32 @add_const_add_const_extrause(i32 %arg) {
define <4 x i32> @vec_add_const_add_const(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_add_const:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_add_const:
@@ -87,7 +87,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -115,7 +115,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_add_const_add_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_add_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_add_const_nonsplat:
@@ -186,7 +186,7 @@ define i32 @add_const_sub_const_extrause(i32 %arg) {
define <4 x i32> @vec_add_const_sub_const(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_sub_const:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_sub_const:
@@ -209,7 +209,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -237,7 +237,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_add_const_sub_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_sub_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_sub_const_nonsplat:
@@ -440,7 +440,7 @@ define i32 @sub_const_add_const_extrause(i32 %arg) {
define <4 x i32> @vec_sub_const_add_const(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_add_const:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_add_const:
@@ -458,10 +458,10 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -487,7 +487,7 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_sub_const_add_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_add_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_add_const_nonsplat:
@@ -558,7 +558,7 @@ define i32 @sub_const_sub_const_extrause(i32 %arg) {
define <4 x i32> @vec_sub_const_sub_const(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_sub_const:
; X86: # %bb.0:
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_sub_const:
@@ -576,10 +576,10 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -605,7 +605,7 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_sub_const_sub_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_sub_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_sub_const_nonsplat:
@@ -698,7 +698,7 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
; X86: # %bb.0:
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
@@ -1074,7 +1074,7 @@ define i32 @const_sub_const_sub_extrause(i32 %arg) {
define <4 x i32> @vec_const_sub_const_sub(<4 x i32> %arg) {
; X86-LABEL: vec_const_sub_const_sub:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_sub_const_sub:
@@ -1126,7 +1126,7 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_const_sub_const_sub_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_const_sub_const_sub_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_sub_const_sub_nonsplat:
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/X86/atomic-fp.ll
@@ -200,7 +200,7 @@ define dso_local void @fadd_32g() nounwind {
; X86-SSE1-NEXT: movl glob32, %eax
; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, glob32
@@ -296,7 +296,7 @@ define dso_local void @fadd_64g() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, glob64
@@ -311,7 +311,7 @@ define dso_local void @fadd_64g() nounwind {
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, glob64
@@ -361,7 +361,7 @@ define dso_local void @fadd_32imm() nounwind {
; X86-SSE1-NEXT: movl -559038737, %eax
; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, -559038737
@@ -459,7 +459,7 @@ define dso_local void @fadd_64imm() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, -559038737
@@ -474,7 +474,7 @@ define dso_local void @fadd_64imm() nounwind {
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, -559038737
@@ -526,7 +526,7 @@ define dso_local void @fadd_32stack() nounwind {
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -628,7 +628,7 @@ define dso_local void @fadd_64stack() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $16, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
@@ -643,7 +643,7 @@ define dso_local void @fadd_64stack() nounwind {
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $16, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/avx-cmp.ll
@@ -49,7 +49,7 @@ define void @render(double %a0) nounwind {
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
; CHECK-NEXT: vmovsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: vucomisd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: vucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: jne .LBB2_5
; CHECK-NEXT: jnp .LBB2_2
; CHECK-NEXT: .LBB2_5: # %if.then
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -645,8 +645,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
-; X86-AVX-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
; X86-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX-NEXT: vmovupd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x11,0x00]
@@ -656,8 +656,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
-; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
; X86-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX512VL-NEXT: vmovupd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00]
@@ -667,7 +667,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
; X64-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX-NEXT: vmovupd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x11,0x07]
@@ -677,7 +677,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX512VL-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
; X64-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX512VL-NEXT: vmovupd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/X86/avx2-arith.ll
@@ -148,7 +148,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
@@ -303,7 +303,7 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
define <8 x i32> @mul_const6(<8 x i32> %x) {
; X32-LABEL: mul_const6:
; X32: # %bb.0:
-; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const6:
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/X86/avx2-conversions.ll
@@ -159,7 +159,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
; X32: # %bb.0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
