111 changes: 40 additions & 71 deletions llvm/test/CodeGen/X86/gfni-rotates.ll
@@ -1,9 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,+gfni | FileCheck %s --check-prefixes=GFNISSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX512,GFNIAVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX512,GFNIAVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=GFNIAVX1OR2,GFNIAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=GFNIAVX1OR2,GFNIAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+gfni | FileCheck %s --check-prefixes=GFNIAVX512

;
; 128 Bit Vector Rotates
@@ -20,31 +19,20 @@ define <16 x i8> @splatconstant_rotl_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-NEXT: por %xmm1, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX1-LABEL: splatconstant_rotl_v16i8:
; GFNIAVX1: # %bb.0:
; GFNIAVX1-NEXT: vpsrlw $5, %xmm0, %xmm1
; GFNIAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; GFNIAVX1-NEXT: vpsllw $3, %xmm0, %xmm0
; GFNIAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; GFNIAVX1-NEXT: retq
;
; GFNIAVX2-LABEL: splatconstant_rotl_v16i8:
; GFNIAVX2: # %bb.0:
; GFNIAVX2-NEXT: vpsrlw $5, %xmm0, %xmm1
; GFNIAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; GFNIAVX2-NEXT: vpsllw $3, %xmm0, %xmm0
; GFNIAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; GFNIAVX2-NEXT: retq
; GFNIAVX1OR2-LABEL: splatconstant_rotl_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vpsrlw $5, %xmm0, %xmm1
; GFNIAVX1OR2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpsllw $3, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpor %xmm1, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_rotl_v16i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsllw $3, %xmm0, %xmm1
; GFNIAVX512-NEXT: vpsrlw $5, %xmm0, %xmm0
; GFNIAVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm0
; GFNIAVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; GFNIAVX512-NEXT: vzeroupper
; GFNIAVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
; GFNIAVX512-NEXT: retq
%res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
ret <16 x i8> %res
@@ -61,13 +49,20 @@ define <16 x i8> @splatconstant_rotr_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-NEXT: por %xmm1, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX-LABEL: splatconstant_rotr_v16i8:
; GFNIAVX: # %bb.0:
; GFNIAVX-NEXT: vpsrlw $7, %xmm0, %xmm1
; GFNIAVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; GFNIAVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; GFNIAVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; GFNIAVX-NEXT: retq
; GFNIAVX1OR2-LABEL: splatconstant_rotr_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vpsrlw $7, %xmm0, %xmm1
; GFNIAVX1OR2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; GFNIAVX1OR2-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpor %xmm1, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_rotr_v16i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsrlw $7, %xmm0, %xmm1
; GFNIAVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; GFNIAVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
; GFNIAVX512-NEXT: retq
%res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %a, <16 x i8> %a, <16 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>)
ret <16 x i8> %res
}
@@ -126,8 +121,7 @@ define <32 x i8> @splatconstant_rotl_v32i8(<32 x i8> %a) nounwind {
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsllw $4, %ymm0, %ymm1
; GFNIAVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
; GFNIAVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm0
; GFNIAVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; GFNIAVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
; GFNIAVX512-NEXT: retq
%res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %a, <32 x i8> %a, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
ret <32 x i8> %res
@@ -183,8 +177,7 @@ define <32 x i8> @splatconstant_rotr_v32i8(<32 x i8> %a) nounwind {
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsllw $2, %ymm0, %ymm1
; GFNIAVX512-NEXT: vpsrlw $6, %ymm0, %ymm0
; GFNIAVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm0
; GFNIAVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; GFNIAVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
; GFNIAVX512-NEXT: retq
%res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %a, <32 x i8> %a, <32 x i8> <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>)
ret <32 x i8> %res
@@ -259,24 +252,12 @@ define <64 x i8> @splatconstant_rotl_v64i8(<64 x i8> %a) nounwind {
; GFNIAVX2-NEXT: vpor %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512F-LABEL: splatconstant_rotl_v64i8:
; GFNIAVX512F: # %bb.0:
; GFNIAVX512F-NEXT: vpsrlw $7, %ymm0, %ymm1
; GFNIAVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; GFNIAVX512F-NEXT: vpsrlw $7, %ymm2, %ymm3
; GFNIAVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; GFNIAVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; GFNIAVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; GFNIAVX512F-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; GFNIAVX512F-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatconstant_rotl_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpsrlw $7, %zmm0, %zmm1
; GFNIAVX512BW-NEXT: vpaddb %zmm0, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; GFNIAVX512BW-NEXT: retq
; GFNIAVX512-LABEL: splatconstant_rotl_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsrlw $7, %zmm0, %zmm1
; GFNIAVX512-NEXT: vpaddb %zmm0, %zmm0, %zmm0
; GFNIAVX512-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; GFNIAVX512-NEXT: retq
%res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %a, <64 x i8> %a, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
ret <64 x i8> %res
}
@@ -359,24 +340,12 @@ define <64 x i8> @splatconstant_rotr_v64i8(<64 x i8> %a) nounwind {
; GFNIAVX2-NEXT: vpor %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512F-LABEL: splatconstant_rotr_v64i8:
; GFNIAVX512F: # %bb.0:
; GFNIAVX512F-NEXT: vpsllw $6, %ymm0, %ymm1
; GFNIAVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; GFNIAVX512F-NEXT: vpsllw $6, %ymm2, %ymm3
; GFNIAVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; GFNIAVX512F-NEXT: vpsrlw $2, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
; GFNIAVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; GFNIAVX512F-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm0
; GFNIAVX512F-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatconstant_rotr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpsllw $6, %zmm0, %zmm1
; GFNIAVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm0
; GFNIAVX512BW-NEXT: retq
; GFNIAVX512-LABEL: splatconstant_rotr_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsllw $6, %zmm0, %zmm1
; GFNIAVX512-NEXT: vpsrlw $2, %zmm0, %zmm0
; GFNIAVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm0
; GFNIAVX512-NEXT: retq
%res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %a, <64 x i8> %a, <64 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>)
ret <64 x i8> %res
}
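A minimal sketch of the shared-prefix mechanism the checks above depend on, using the RUN lines already present in gfni-rotates.ll; the check lines below are illustrative rather than copied from the generated output. When several RUN lines pass a common prefix to FileCheck and llc emits identical code for all of them, update_llc_test_checks.py writes one check block under that common prefix instead of one block per run.

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=GFNIAVX1OR2,GFNIAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=GFNIAVX1OR2,GFNIAVX2
;
; Identical AVX and AVX2 output is checked once under the common prefix:
; GFNIAVX1OR2-LABEL: splatconstant_rotl_v16i8:
; GFNIAVX1OR2:       # %bb.0:
;
; Per-target GFNIAVX1/GFNIAVX2 blocks remain only where the two outputs differ.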
102 changes: 37 additions & 65 deletions llvm/test/CodeGen/X86/gfni-shifts.ll
@@ -1,9 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2,+gfni | FileCheck %s --check-prefixes=GFNISSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX512,GFNIAVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX512,GFNIAVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX1OR2,GFNIAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX1OR2,GFNIAVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+gfni | FileCheck %s --check-prefixes=GFNIAVX,GFNIAVX512

;
; 128 Bit Vector Shifts
@@ -51,14 +50,22 @@ define <16 x i8> @splatconstant_ashr_v16i8(<16 x i8> %a) nounwind {
; GFNISSE-NEXT: psubb %xmm1, %xmm0
; GFNISSE-NEXT: retq
;
; GFNIAVX-LABEL: splatconstant_ashr_v16i8:
; GFNIAVX: # %bb.0:
; GFNIAVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; GFNIAVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; GFNIAVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; GFNIAVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; GFNIAVX-NEXT: retq
; GFNIAVX1OR2-LABEL: splatconstant_ashr_v16i8:
; GFNIAVX1OR2: # %bb.0:
; GFNIAVX1OR2-NEXT: vpsrlw $4, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; GFNIAVX1OR2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; GFNIAVX1OR2-NEXT: retq
;
; GFNIAVX512-LABEL: splatconstant_ashr_v16i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
; GFNIAVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; GFNIAVX512-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
; GFNIAVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; GFNIAVX512-NEXT: retq
%shift = ashr <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
ret <16 x i8> %shift
}
@@ -182,9 +189,8 @@ define <32 x i8> @splatconstant_ashr_v32i8(<32 x i8> %a) nounwind {
; GFNIAVX512-LABEL: splatconstant_ashr_v32i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsrlw $2, %ymm0, %ymm0
; GFNIAVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; GFNIAVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
; GFNIAVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; GFNIAVX512-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; GFNIAVX512-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; GFNIAVX512-NEXT: retq
%shift = ashr <32 x i8> %a, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
@@ -235,20 +241,11 @@ define <64 x i8> @splatconstant_shl_v64i8(<64 x i8> %a) nounwind {
; GFNIAVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512F-LABEL: splatconstant_shl_v64i8:
; GFNIAVX512F: # %bb.0:
; GFNIAVX512F-NEXT: vpsllw $5, %ymm0, %ymm1
; GFNIAVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; GFNIAVX512F-NEXT: vpsllw $5, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; GFNIAVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512F-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatconstant_shl_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpsllw $5, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512BW-NEXT: retq
; GFNIAVX512-LABEL: splatconstant_shl_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsllw $5, %zmm0, %zmm0
; GFNIAVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512-NEXT: retq
%shift = shl <64 x i8> %a, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
ret <64 x i8> %shift
}
@@ -293,20 +290,11 @@ define <64 x i8> @splatconstant_lshr_v64i8(<64 x i8> %a) nounwind {
; GFNIAVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512F-LABEL: splatconstant_lshr_v64i8:
; GFNIAVX512F: # %bb.0:
; GFNIAVX512F-NEXT: vpsrlw $7, %ymm0, %ymm1
; GFNIAVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; GFNIAVX512F-NEXT: vpsrlw $7, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; GFNIAVX512F-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512F-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatconstant_lshr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpsrlw $7, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512BW-NEXT: retq
; GFNIAVX512-LABEL: splatconstant_lshr_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsrlw $7, %zmm0, %zmm0
; GFNIAVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; GFNIAVX512-NEXT: retq
%shift = lshr <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
ret <64 x i8> %shift
}
@@ -374,29 +362,13 @@ define <64 x i8> @splatconstant_ashr_v64i8(<64 x i8> %a) nounwind {
; GFNIAVX2-NEXT: vpsubb %ymm3, %ymm1, %ymm1
; GFNIAVX2-NEXT: retq
;
; GFNIAVX512F-LABEL: splatconstant_ashr_v64i8:
; GFNIAVX512F: # %bb.0:
; GFNIAVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; GFNIAVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; GFNIAVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; GFNIAVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; GFNIAVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; GFNIAVX512F-NEXT: vpxor %ymm3, %ymm1, %ymm1
; GFNIAVX512F-NEXT: vpsubb %ymm3, %ymm1, %ymm1
; GFNIAVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vpxor %ymm3, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm0
; GFNIAVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; GFNIAVX512F-NEXT: retq
;
; GFNIAVX512BW-LABEL: splatconstant_ashr_v64i8:
; GFNIAVX512BW: # %bb.0:
; GFNIAVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; GFNIAVX512BW-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; GFNIAVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; GFNIAVX512BW-NEXT: retq
; GFNIAVX512-LABEL: splatconstant_ashr_v64i8:
; GFNIAVX512: # %bb.0:
; GFNIAVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
; GFNIAVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; GFNIAVX512-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; GFNIAVX512-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; GFNIAVX512-NEXT: retq
%shift = ashr <64 x i8> %a, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
ret <64 x i8> %shift
}
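The assertions in both files are autogenerated; a minimal sketch of regenerating them after a codegen change, assuming the script is run from an LLVM source tree with a built llc on PATH (or supplied via --llc-binary):

; llvm/utils/update_llc_test_checks.py \
;   llvm/test/CodeGen/X86/gfni-rotates.ll llvm/test/CodeGen/X86/gfni-shifts.ll
;
; The script reruns the llc invocation from each RUN line and rewrites the CHECK
; blocks, folding identical output into shared prefixes such as GFNIAVX1OR2 and
; GFNIAVX512.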