diff --git a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
index 10758eaf483ae4..7c3b523b16b4f8 100644
--- a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
+++ b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td
@@ -359,3 +359,39 @@ let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvl : GCCBuiltin<"__builtin_ve_
 let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
 let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vand_vvvl : GCCBuiltin<"__builtin_ve_vl_vand_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vand_vvvvl : GCCBuiltin<"__builtin_ve_vl_vand_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vand_vsvl : GCCBuiltin<"__builtin_ve_vl_vand_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vand_vsvvl : GCCBuiltin<"__builtin_ve_vl_vand_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vand_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vand_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vand_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vand_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvl : GCCBuiltin<"__builtin_ve_vl_pvand_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvand_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvl : GCCBuiltin<"__builtin_ve_vl_pvand_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvand_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvand_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvand_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vor_vvvl : GCCBuiltin<"__builtin_ve_vl_vor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vor_vvvvl : GCCBuiltin<"__builtin_ve_vl_vor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vor_vsvl : GCCBuiltin<"__builtin_ve_vl_vor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vor_vsvvl : GCCBuiltin<"__builtin_ve_vl_vor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vor_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vor_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vor_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vor_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvl : GCCBuiltin<"__builtin_ve_vl_pvor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvl : GCCBuiltin<"__builtin_ve_vl_pvor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvor_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvor_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvl : GCCBuiltin<"__builtin_ve_vl_vxor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvvl : GCCBuiltin<"__builtin_ve_vl_vxor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvl : GCCBuiltin<"__builtin_ve_vl_vxor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvvl : GCCBuiltin<"__builtin_ve_vl_vxor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vxor_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vxor_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
+let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
index d65e6ec12d11da..cfe2c31f5a6f5c 100644
--- a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
+++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td
@@ -553,3 +553,39 @@ def : Pat<(int_ve_vl_vminsl_vsvvl simm7:$I, v256f64:$vz, v256f64:$pt, i32:$vl),
 def : Pat<(int_ve_vl_vminsl_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSLvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
 def : Pat<(int_ve_vl_vminsl_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSLrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
 def : Pat<(int_ve_vl_vminsl_vsvmvl simm7:$I, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VMINSLivml_v (LO7 $I), v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vand_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VANDvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vand_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VANDvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vand_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VANDrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vand_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VANDrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vand_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VANDvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vand_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VANDrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvand_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (PVANDvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvand_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVANDvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvand_vsvl i64:$sy, v256f64:$vz, i32:$vl), (PVANDrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvand_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVANDrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvand_vvvMvl v256f64:$vy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVANDvvml_v v256f64:$vy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvand_vsvMvl i64:$sy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVANDrvml_v i64:$sy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VORvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VORvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VORrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vor_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VORrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vor_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VORvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vor_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VORrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (PVORvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVORvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (PVORrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvor_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVORrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvor_vvvMvl v256f64:$vy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVORvvml_v v256f64:$vy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvor_vsvMvl i64:$sy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVORrvml_v i64:$sy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vxor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (VXORvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vxor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (VXORvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vxor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (VXORrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_vxor_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (VXORrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vxor_vvvmvl v256f64:$vy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VXORvvml_v v256f64:$vy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_vxor_vsvmvl i64:$sy, v256f64:$vz, v256i1:$vm, v256f64:$pt, i32:$vl), (VXORrvml_v i64:$sy, v256f64:$vz, v256i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvxor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (PVXORvvl v256f64:$vy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvxor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVXORvvl_v v256f64:$vy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvxor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (PVXORrvl i64:$sy, v256f64:$vz, i32:$vl)>;
+def : Pat<(int_ve_vl_pvxor_vsvvl i64:$sy, v256f64:$vz, v256f64:$pt, i32:$vl), (PVXORrvl_v i64:$sy, v256f64:$vz, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvxor_vvvMvl v256f64:$vy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVXORvvml_v v256f64:$vy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
+def : Pat<(int_ve_vl_pvxor_vsvMvl i64:$sy, v256f64:$vz, v512i1:$vm, v256f64:$pt, i32:$vl), (PVXORrvml_v i64:$sy, v256f64:$vz, v512i1:$vm, i32:$vl, v256f64:$pt)>;
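Each GCCBuiltin<> mapping above also makes the intrinsic callable from C as a builtin. A minimal usage sketch of the unmasked form, not part of this patch; the `__vr` typedef is an assumption (clang's VE support conventionally wraps <256 x double> this way), and only the builtin name comes from the .td change above:

/* Sketch only: __vr is assumed to model the <256 x double> vector register
 * type; __builtin_ve_vl_vand_vvvl is the builtin declared by the GCCBuiltin
 * entry above. */
typedef double __vr __attribute__((__vector_size__(2048)));

__vr vand_all(__vr vy, __vr vz) {
  /* Bitwise AND of 256 elements; the trailing argument is the vector
   * length, which codegen materializes with lea/lvl before issuing vand. */
  return __builtin_ve_vl_vand_vvvl(vy, vz, 256);
}

The tests below check exactly this lowering: lea/lvl to set the vector length, then the vector instruction itself.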
diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vand.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vand.ll
new file mode 100644
index 00000000000000..17ab5d97b361cf
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vand.ll
@@ -0,0 +1,212 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector and intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VAND*vvl, VAND*vvl_v, VAND*rvl, VAND*rvl_v, VAND*vvml_v,
+;;;   VAND*rvml_v, PVAND*vvl, PVAND*vvl_v, PVAND*rvl, PVAND*rvl_v, PVAND*vvml_v, and
+;;;   PVAND*rvml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vand_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vand_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vand %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vand.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vand.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vand_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vand_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vand %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vand.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vand.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vand_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: vand_vsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vand %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vand.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vand.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vand_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vand_vsvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vand %v1, %s0, %v0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vand.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vand.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vand_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vand_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vand %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vand.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vand.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vand_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vand_vsvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vand %v1, %s0, %v0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vand.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vand.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvand_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: pvand_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvand %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvand.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvand.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvand_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvand_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvand %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvand.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvand.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvand_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: pvand_vsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvand %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvand.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvand.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvand_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvand_vsvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvand %v1, %s0, %v0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvand.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvand.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvand_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvand_vvvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvand %v2, %v0, %v1, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvand.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvand.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvand_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvand_vsvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvand %v1, %s0, %v0, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvand.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvand.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32)
diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vor.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vor.ll
new file mode 100644
index 00000000000000..08959b6c51091c
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vor.ll
@@ -0,0 +1,212 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector or intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VOR*vvl, VOR*vvl_v, VOR*rvl, VOR*rvl_v, VOR*vvml_v, VOR*rvml_v,
+;;;   PVOR*vvl, PVOR*vvl_v, PVOR*rvl, PVOR*rvl_v, PVOR*vvml_v, and PVOR*rvml_v
+;;;   instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vor_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vor_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vor %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vor.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vor.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vor_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vor_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vor %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vor.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vor.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vor_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: vor_vsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vor %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vor.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vor.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vor_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vor_vsvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vor %v1, %s0, %v0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vor.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vor.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vor_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vor_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vor %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vor.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vor.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vor_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vor_vsvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vor %v1, %s0, %v0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vor.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vor.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvor_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: pvor_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvor %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvor.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvor.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvor_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvor_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvor %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvor.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvor.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvor_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: pvor_vsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvor %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvor.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvor.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvor_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvor_vsvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvor %v1, %s0, %v0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvor.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvor.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvor_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvor_vvvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvor %v2, %v0, %v1, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvor.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvor.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvor_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvor_vsvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvor %v1, %s0, %v0, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvor.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvor.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32)
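The *vvvvl tests above exercise the passthrough variants: the operation runs at vl = 128 into the register seeded from %pt, lanes at and beyond the vector length keep the passthrough values, and the expected output then copies the merged register back with a full-length `vor %v0, (0)1, %v2`. A sketch of reaching that form from C, reusing the hypothetical `__vr` typedef from the earlier sketch:

/* Sketch only: lanes [0,128) become vy | vz, lanes [128,256) keep pt. */
__vr vor_first_half(__vr vy, __vr vz, __vr pt) {
  return __builtin_ve_vl_vor_vvvvl(vy, vz, pt, 128);
}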
diff --git a/llvm/test/CodeGen/VE/VELIntrinsics/vxor.ll b/llvm/test/CodeGen/VE/VELIntrinsics/vxor.ll
new file mode 100644
index 00000000000000..1dcb0badcbd6fd
--- /dev/null
+++ b/llvm/test/CodeGen/VE/VELIntrinsics/vxor.ll
@@ -0,0 +1,212 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+;;; Test vector xor intrinsic instructions
+;;;
+;;; Note:
+;;;   We test VXOR*vvl, VXOR*vvl_v, VXOR*rvl, VXOR*rvl_v, VXOR*vvml_v,
+;;;   VXOR*rvml_v, PVXOR*vvl, PVXOR*vvl_v, PVXOR*rvl, PVXOR*rvl_v, PVXOR*vvml_v,
+;;;   and PVXOR*rvml_v instructions.
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vxor_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: vxor_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vxor %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vxor.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vxor.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vxor_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vxor_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vxor %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vxor.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vxor.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vxor_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: vxor_vsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vxor %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.vxor.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vxor.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vxor_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: vxor_vsvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vxor %v1, %s0, %v0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.vxor.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vxor.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vxor_vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vxor_vvvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vxor %v2, %v0, %v1, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vxor.vvvmvl(<256 x double> %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vxor.vvvmvl(<256 x double>, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @vxor_vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: vxor_vsvmvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vxor %v1, %s0, %v0, %vm1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.vxor.vsvmvl(i64 %0, <256 x double> %1, <256 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.vxor.vsvmvl(i64, <256 x double>, <256 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvxor_vvvl(<256 x double> %0, <256 x double> %1) {
+; CHECK-LABEL: pvxor_vvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvxor %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vvvl(<256 x double> %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvxor.vvvl(<256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvxor_vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvxor_vvvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvxor %v2, %v0, %v1
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vvvvl(<256 x double> %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvxor.vvvvl(<256 x double>, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvxor_vsvl(i64 %0, <256 x double> %1) {
+; CHECK-LABEL: pvxor_vsvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvxor %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %3 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vsvl(i64 %0, <256 x double> %1, i32 256)
+  ret <256 x double> %3
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvxor.vsvl(i64, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvxor_vsvvl(i64 %0, <256 x double> %1, <256 x double> %2) {
+; CHECK-LABEL: pvxor_vsvvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvxor %v1, %s0, %v0
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %4 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vsvvl(i64 %0, <256 x double> %1, <256 x double> %2, i32 128)
+  ret <256 x double> %4
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvxor.vsvvl(i64, <256 x double>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvxor_vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvxor_vvvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvxor %v2, %v0, %v1, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v2
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vvvMvl(<256 x double> %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvxor.vvvMvl(<256 x double>, <256 x double>, <512 x i1>, <256 x double>, i32)
+
+; Function Attrs: nounwind readnone
+define fastcc <256 x double> @pvxor_vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3) {
+; CHECK-LABEL: pvxor_vsvMvl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 128
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvxor %v1, %s0, %v0, %vm2
+; CHECK-NEXT:    lea %s16, 256
+; CHECK-NEXT:    lvl %s16
+; CHECK-NEXT:    vor %v0, (0)1, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %5 = tail call fast <256 x double> @llvm.ve.vl.pvxor.vsvMvl(i64 %0, <256 x double> %1, <512 x i1> %2, <256 x double> %3, i32 128)
+  ret <256 x double> %5
+}
+
+; Function Attrs: nounwind readnone
+declare <256 x double> @llvm.ve.vl.pvxor.vsvMvl(i64, <256 x double>, <512 x i1>, <256 x double>, i32)
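Finally, the pv* builtins are the packed forms: each 64-bit element carries two 32-bit lanes, so a single pvand/pvor/pvxor at vl = 256 performs 512 packed 32-bit operations, which is also why the packed-masked signatures above take a v512i1 mask. A last sketch under the same hypothetical `__vr` typedef from the first example:

/* Sketch only: vl counts 64-bit elements, each holding two packed
 * 32-bit lanes, so vl = 256 covers 512 packed XOR operations. */
__vr pvxor_all(__vr vy, __vr vz) {
  return __builtin_ve_vl_pvxor_vvvl(vy, vz, 256);
}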