diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td index 1364b982ecf16..e4f859943266f 100644 --- a/clang/include/clang/Basic/DiagnosticParseKinds.td +++ b/clang/include/clang/Basic/DiagnosticParseKinds.td @@ -358,6 +358,10 @@ def err_invalid_pixel_decl_spec_combination : Error< "'%0' declaration specifier not allowed here">; def err_invalid_vector_bool_decl_spec : Error< "cannot use '%0' with '__vector bool'">; +def err_invalid_vector_long_decl_spec : Error< + "cannot use 'long' with '__vector'">; +def err_invalid_vector_float_decl_spec : Error< + "cannot use 'float' with '__vector'">; def err_invalid_vector_double_decl_spec : Error < "use of 'double' with '__vector' requires VSX support to be enabled " "(available on POWER7 or later)">; diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index 8d606a12d046e..c184df77c37ef 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -104,6 +104,7 @@ LANGOPT(WritableStrings , 1, 0, "writable string support") LANGOPT(ConstStrings , 1, 0, "const-qualified string support") LANGOPT(LaxVectorConversions , 1, 1, "lax vector conversions") LANGOPT(AltiVec , 1, 0, "AltiVec-style vector initializers") +LANGOPT(ZVector , 1, 0, "System z vector extensions") LANGOPT(Exceptions , 1, 0, "exception handling") LANGOPT(ObjCExceptions , 1, 0, "Objective-C exceptions") LANGOPT(CXXExceptions , 1, 0, "C++ exceptions") diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def index 7a91c9f502b72..8333a4ccf8db5 100644 --- a/clang/include/clang/Basic/TokenKinds.def +++ b/clang/include/clang/Basic/TokenKinds.def @@ -239,6 +239,8 @@ PUNCTUATOR(greatergreatergreater, ">>>") // KEYOPENCL - This is a keyword in OpenCL // KEYNOOPENCL - This is a keyword that is not supported in OpenCL // KEYALTIVEC - This is a keyword in AltiVec +// KEYZVECTOR - This is a 
keyword for the System z vector extensions, +// which are heavily based on AltiVec // KEYBORLAND - This is a keyword if Borland extensions are enabled // BOOLSUPPORT - This is a keyword if 'bool' is a built-in type // HALFSUPPORT - This is a keyword if 'half' is a built-in type @@ -501,7 +503,7 @@ ALIAS("write_only", __write_only , KEYOPENCL) ALIAS("read_write", __read_write , KEYOPENCL) // OpenCL builtins KEYWORD(__builtin_astype , KEYOPENCL) -KEYWORD(vec_step , KEYOPENCL|KEYALTIVEC) +KEYWORD(vec_step , KEYOPENCL|KEYALTIVEC|KEYZVECTOR) // OpenMP Type Traits KEYWORD(__builtin_omp_required_simd_align, KEYALL) @@ -510,9 +512,9 @@ KEYWORD(__builtin_omp_required_simd_align, KEYALL) KEYWORD(__pascal , KEYALL) // Altivec Extension. -KEYWORD(__vector , KEYALTIVEC) +KEYWORD(__vector , KEYALTIVEC|KEYZVECTOR) KEYWORD(__pixel , KEYALTIVEC) -KEYWORD(__bool , KEYALTIVEC) +KEYWORD(__bool , KEYALTIVEC|KEYZVECTOR) // ARM NEON extensions. ALIAS("__fp16", half , KEYALL) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 6e5dbf225bb6e..9d3e2cfccdc72 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -1351,6 +1351,13 @@ def mno_altivec : Flag<["-"], "mno-altivec">, Alias; def mvx : Flag<["-"], "mvx">, Group; def mno_vx : Flag<["-"], "mno-vx">, Group; +def fzvector : Flag<["-"], "fzvector">, Group, Flags<[CC1Option]>, + HelpText<"Enable System z vector language extension">; +def fno_zvector : Flag<["-"], "fno-zvector">, Group, + Flags<[CC1Option]>; +def mzvector : Flag<["-"], "mzvector">, Alias; +def mno_zvector : Flag<["-"], "mno-zvector">, Alias; + def mno_warn_nonportable_cfstrings : Flag<["-"], "mno-warn-nonportable-cfstrings">, Group; def mno_omit_leaf_frame_pointer : Flag<["-"], "mno-omit-leaf-frame-pointer">, Group; def momit_leaf_frame_pointer : Flag<["-"], "momit-leaf-frame-pointer">, Group, diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h index 
fb9eb8ff5af88..8719555be9148 100644 --- a/clang/include/clang/Parse/Parser.h +++ b/clang/include/clang/Parse/Parser.h @@ -108,12 +108,13 @@ class Parser : public CodeCompletionHandler { /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; - /// Ident_vector, Ident_pixel, Ident_bool - cached IdentifierInfo's - /// for "vector", "pixel", and "bool" fast comparison. Only present - /// if AltiVec enabled. + /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and + /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; - IdentifierInfo *Ident_pixel; IdentifierInfo *Ident_bool; + /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. + /// Only present if AltiVec enabled. + IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. mutable IdentifierInfo *Ident_instancetype; @@ -605,10 +606,12 @@ class Parser : public CodeCompletionHandler { bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { - if (!getLangOpts().AltiVec || - (Tok.getIdentifierInfo() != Ident_vector && - Tok.getIdentifierInfo() != Ident_pixel && - Tok.getIdentifierInfo() != Ident_bool)) + if (!getLangOpts().AltiVec && !getLangOpts().ZVector) + return false; + + if (Tok.getIdentifierInfo() != Ident_vector && + Tok.getIdentifierInfo() != Ident_bool && + (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); @@ -618,7 +621,7 @@ class Parser : public CodeCompletionHandler { /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. 
bool TryAltiVecVectorToken() { - if (!getLangOpts().AltiVec || + if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index db7b6f954c1b8..ecbcd94b29dd6 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -8363,7 +8363,8 @@ class Sema { /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, - SourceLocation Loc, bool IsCompAssign); + SourceLocation Loc, bool IsCompAssign, + bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); diff --git a/clang/lib/Basic/IdentifierTable.cpp b/clang/lib/Basic/IdentifierTable.cpp index dcb7603bf5abd..7705834d91a05 100644 --- a/clang/lib/Basic/IdentifierTable.cpp +++ b/clang/lib/Basic/IdentifierTable.cpp @@ -110,7 +110,8 @@ namespace { HALFSUPPORT = 0x08000, KEYCONCEPTS = 0x10000, KEYOBJC2 = 0x20000, - KEYALL = (0x3ffff & ~KEYNOMS18 & + KEYZVECTOR = 0x40000, + KEYALL = (0x7ffff & ~KEYNOMS18 & ~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude. 
}; diff --git a/clang/lib/Basic/Module.cpp b/clang/lib/Basic/Module.cpp index 3846fecebf5d0..4314b41eb340f 100644 --- a/clang/lib/Basic/Module.cpp +++ b/clang/lib/Basic/Module.cpp @@ -67,6 +67,7 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, .Case("objc_arc", LangOpts.ObjCAutoRefCount) .Case("opencl", LangOpts.OpenCL) .Case("tls", Target.isTLSSupported()) + .Case("zvector", LangOpts.ZVector) .Default(Target.hasFeature(Feature)); if (!HasFeature) HasFeature = std::find(LangOpts.ModuleFeatures.begin(), diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp index dfe6593371e70..f965ab319457b 100644 --- a/clang/lib/Basic/Targets.cpp +++ b/clang/lib/Basic/Targets.cpp @@ -5725,6 +5725,8 @@ class SystemZTargetInfo : public TargetInfo { Builder.defineMacro("__LONG_DOUBLE_128__"); if (HasTransactionalExecution) Builder.defineMacro("__HTM__"); + if (Opts.ZVector) + Builder.defineMacro("__VEC__", "10301"); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { diff --git a/clang/lib/Driver/Tools.cpp b/clang/lib/Driver/Tools.cpp index d46f4672bf1cd..19ebbb91ffc75 100644 --- a/clang/lib/Driver/Tools.cpp +++ b/clang/lib/Driver/Tools.cpp @@ -4049,9 +4049,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddLastArg(CmdArgs, options::OPT_fstandalone_debug); Args.AddLastArg(CmdArgs, options::OPT_fno_standalone_debug); Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names); - // AltiVec language extensions aren't relevant for assembling. - if (!isa(JA) || Output.getType() != types::TY_PP_Asm) + // AltiVec-like language extensions aren't relevant for assembling. 
+ if (!isa(JA) || Output.getType() != types::TY_PP_Asm) { Args.AddLastArg(CmdArgs, options::OPT_faltivec); + Args.AddLastArg(CmdArgs, options::OPT_fzvector); + } Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_show_template_tree); Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type); @@ -4096,6 +4098,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, << "ppc/ppc64/ppc64le"; } + // -fzvector is incompatible with -faltivec. + if (Arg *A = Args.getLastArg(options::OPT_fzvector)) + if (Args.hasArg(options::OPT_faltivec)) + D.Diag(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args) + << "-faltivec"; + if (getToolChain().SupportsProfiling()) Args.AddLastArg(CmdArgs, options::OPT_pg); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index f325144fd08fa..fbeba09e1cf18 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1260,6 +1260,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK, // OpenCL has some additional defaults. 
if (Opts.OpenCL) { Opts.AltiVec = 0; + Opts.ZVector = 0; Opts.CXXOperatorNames = 1; Opts.LaxVectorConversions = 0; Opts.DefaultFPContract = 1; @@ -1448,6 +1449,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, if (Args.hasArg(OPT_faltivec)) Opts.AltiVec = 1; + if (Args.hasArg(OPT_fzvector)) + Opts.ZVector = 1; + if (Args.hasArg(OPT_pthread)) Opts.POSIXThreads = 1; diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index 87afc60de9960..7de5fbe8be0ec 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -59,6 +59,7 @@ set(files unwind.h vadefs.h varargs.h + vecintrin.h __wmmintrin_aes.h wmmintrin.h __wmmintrin_pclmul.h diff --git a/clang/lib/Headers/module.modulemap b/clang/lib/Headers/module.modulemap index 0fc70a8c9b069..b147e891dceb3 100644 --- a/clang/lib/Headers/module.modulemap +++ b/clang/lib/Headers/module.modulemap @@ -158,6 +158,11 @@ module _Builtin_intrinsics [system] [extern_c] { header "htmintrin.h" header "htmxlintrin.h" } + + explicit module zvector { + requires zvector, vx + header "vecintrin.h" + } } } diff --git a/clang/lib/Headers/s390intrin.h b/clang/lib/Headers/s390intrin.h index b20989552d4d5..d51274c07df5f 100644 --- a/clang/lib/Headers/s390intrin.h +++ b/clang/lib/Headers/s390intrin.h @@ -32,4 +32,8 @@ #include #endif +#ifdef __VEC__ +#include +#endif + #endif /* __S390INTRIN_H*/ diff --git a/clang/lib/Headers/vecintrin.h b/clang/lib/Headers/vecintrin.h new file mode 100644 index 0000000000000..ca7acb4731f9e --- /dev/null +++ b/clang/lib/Headers/vecintrin.h @@ -0,0 +1,8946 @@ +/*===---- vecintrin.h - Vector intrinsics ----------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#if defined(__s390x__) && defined(__VEC__) + +#define __ATTRS_ai __attribute__((__always_inline__)) +#define __ATTRS_o __attribute__((__overloadable__)) +#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__)) + +#define __constant(PARM) \ + __attribute__((__enable_if__ ((PARM) == (PARM), \ + "argument must be a constant integer"))) +#define __constant_range(PARM, LOW, HIGH) \ + __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH), \ + "argument must be a constant integer from " #LOW " to " #HIGH))) +#define __constant_pow2_range(PARM, LOW, HIGH) \ + __attribute__((__enable_if__ ((PARM) >= (LOW) && (PARM) <= (HIGH) && \ + ((PARM) & ((PARM) - 1)) == 0, \ + "argument must be a constant power of 2 from " #LOW " to " #HIGH))) + +/*-- __lcbb -----------------------------------------------------------------*/ + +extern __ATTRS_o unsigned int +__lcbb(const void *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +#define __lcbb(X, Y) ((__typeof__((__lcbb)((X), (Y)))) \ + __builtin_s390_lcbb((X), __builtin_constant_p((Y))? \ + ((Y) == 64 ? 
0 : \ + (Y) == 128 ? 1 : \ + (Y) == 256 ? 2 : \ + (Y) == 512 ? 3 : \ + (Y) == 1024 ? 4 : \ + (Y) == 2048 ? 5 : \ + (Y) == 4096 ? 6 : 0) : 0)) + +/*-- vec_extract ------------------------------------------------------------*/ + +static inline __ATTRS_o_ai signed char +vec_extract(vector signed char __vec, int __index) { + return __vec[__index & 15]; +} + +static inline __ATTRS_o_ai unsigned char +vec_extract(vector bool char __vec, int __index) { + return __vec[__index & 15]; +} + +static inline __ATTRS_o_ai unsigned char +vec_extract(vector unsigned char __vec, int __index) { + return __vec[__index & 15]; +} + +static inline __ATTRS_o_ai signed short +vec_extract(vector signed short __vec, int __index) { + return __vec[__index & 7]; +} + +static inline __ATTRS_o_ai unsigned short +vec_extract(vector bool short __vec, int __index) { + return __vec[__index & 7]; +} + +static inline __ATTRS_o_ai unsigned short +vec_extract(vector unsigned short __vec, int __index) { + return __vec[__index & 7]; +} + +static inline __ATTRS_o_ai signed int +vec_extract(vector signed int __vec, int __index) { + return __vec[__index & 3]; +} + +static inline __ATTRS_o_ai unsigned int +vec_extract(vector bool int __vec, int __index) { + return __vec[__index & 3]; +} + +static inline __ATTRS_o_ai unsigned int +vec_extract(vector unsigned int __vec, int __index) { + return __vec[__index & 3]; +} + +static inline __ATTRS_o_ai signed long long +vec_extract(vector signed long long __vec, int __index) { + return __vec[__index & 1]; +} + +static inline __ATTRS_o_ai unsigned long long +vec_extract(vector bool long long __vec, int __index) { + return __vec[__index & 1]; +} + +static inline __ATTRS_o_ai unsigned long long +vec_extract(vector unsigned long long __vec, int __index) { + return __vec[__index & 1]; +} + +static inline __ATTRS_o_ai double +vec_extract(vector double __vec, int __index) { + return __vec[__index & 1]; +} + +/*-- vec_insert 
-------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_insert(signed char __scalar, vector signed char __vec, int __index) { + __vec[__index & 15] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_insert(unsigned char __scalar, vector bool char __vec, int __index) { + vector unsigned char __newvec = (vector unsigned char)__vec; + __newvec[__index & 15] = (unsigned char)__scalar; + return __newvec; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_insert(unsigned char __scalar, vector unsigned char __vec, int __index) { + __vec[__index & 15] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector signed short +vec_insert(signed short __scalar, vector signed short __vec, int __index) { + __vec[__index & 7] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_insert(unsigned short __scalar, vector bool short __vec, int __index) { + vector unsigned short __newvec = (vector unsigned short)__vec; + __newvec[__index & 7] = (unsigned short)__scalar; + return __newvec; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_insert(unsigned short __scalar, vector unsigned short __vec, int __index) { + __vec[__index & 7] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector signed int +vec_insert(signed int __scalar, vector signed int __vec, int __index) { + __vec[__index & 3] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_insert(unsigned int __scalar, vector bool int __vec, int __index) { + vector unsigned int __newvec = (vector unsigned int)__vec; + __newvec[__index & 3] = __scalar; + return __newvec; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_insert(unsigned int __scalar, vector unsigned int __vec, int __index) { + __vec[__index & 3] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector signed long long +vec_insert(signed long long 
__scalar, vector signed long long __vec, + int __index) { + __vec[__index & 1] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_insert(unsigned long long __scalar, vector bool long long __vec, + int __index) { + vector unsigned long long __newvec = (vector unsigned long long)__vec; + __newvec[__index & 1] = __scalar; + return __newvec; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_insert(unsigned long long __scalar, vector unsigned long long __vec, + int __index) { + __vec[__index & 1] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector double +vec_insert(double __scalar, vector double __vec, int __index) { + __vec[__index & 1] = __scalar; + return __vec; +} + +/*-- vec_promote ------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_promote(signed char __scalar, int __index) { + const vector signed char __zero = (vector signed char)0; + vector signed char __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 15] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_promote(unsigned char __scalar, int __index) { + const vector unsigned char __zero = (vector unsigned char)0; + vector unsigned char __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 15] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector signed short +vec_promote(signed short __scalar, int __index) { + const vector signed short __zero = (vector signed short)0; + vector signed short __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 7] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_promote(unsigned short __scalar, int __index) { + const vector unsigned short __zero = 
(vector unsigned short)0; + vector unsigned short __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1, -1, -1, -1, -1); + __vec[__index & 7] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector signed int +vec_promote(signed int __scalar, int __index) { + const vector signed int __zero = (vector signed int)0; + vector signed int __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1); + __vec[__index & 3] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_promote(unsigned int __scalar, int __index) { + const vector unsigned int __zero = (vector unsigned int)0; + vector unsigned int __vec = __builtin_shufflevector(__zero, __zero, + -1, -1, -1, -1); + __vec[__index & 3] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector signed long long +vec_promote(signed long long __scalar, int __index) { + const vector signed long long __zero = (vector signed long long)0; + vector signed long long __vec = __builtin_shufflevector(__zero, __zero, + -1, -1); + __vec[__index & 1] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_promote(unsigned long long __scalar, int __index) { + const vector unsigned long long __zero = (vector unsigned long long)0; + vector unsigned long long __vec = __builtin_shufflevector(__zero, __zero, + -1, -1); + __vec[__index & 1] = __scalar; + return __vec; +} + +static inline __ATTRS_o_ai vector double +vec_promote(double __scalar, int __index) { + const vector double __zero = (vector double)0; + vector double __vec = __builtin_shufflevector(__zero, __zero, -1, -1); + __vec[__index & 1] = __scalar; + return __vec; +} + +/*-- vec_insert_and_zero ----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_insert_and_zero(const signed char *__ptr) { + vector signed char __vec = (vector signed char)0; + __vec[7] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector 
unsigned char +vec_insert_and_zero(const unsigned char *__ptr) { + vector unsigned char __vec = (vector unsigned char)0; + __vec[7] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector signed short +vec_insert_and_zero(const signed short *__ptr) { + vector signed short __vec = (vector signed short)0; + __vec[3] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_insert_and_zero(const unsigned short *__ptr) { + vector unsigned short __vec = (vector unsigned short)0; + __vec[3] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector signed int +vec_insert_and_zero(const signed int *__ptr) { + vector signed int __vec = (vector signed int)0; + __vec[1] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_insert_and_zero(const unsigned int *__ptr) { + vector unsigned int __vec = (vector unsigned int)0; + __vec[1] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector signed long long +vec_insert_and_zero(const signed long long *__ptr) { + vector signed long long __vec = (vector signed long long)0; + __vec[0] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_insert_and_zero(const unsigned long long *__ptr) { + vector unsigned long long __vec = (vector unsigned long long)0; + __vec[0] = *__ptr; + return __vec; +} + +static inline __ATTRS_o_ai vector double +vec_insert_and_zero(const double *__ptr) { + vector double __vec = (vector double)0; + __vec[0] = *__ptr; + return __vec; +} + +/*-- vec_perm ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_perm(vector signed char __a, vector signed char __b, + vector unsigned char __c) { + return (vector signed char)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_perm(vector unsigned char __a, vector unsigned char __b, + vector 
unsigned char __c) { + return (vector unsigned char)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector bool char +vec_perm(vector bool char __a, vector bool char __b, + vector unsigned char __c) { + return (vector bool char)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector signed short +vec_perm(vector signed short __a, vector signed short __b, + vector unsigned char __c) { + return (vector signed short)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_perm(vector unsigned short __a, vector unsigned short __b, + vector unsigned char __c) { + return (vector unsigned short)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector bool short +vec_perm(vector bool short __a, vector bool short __b, + vector unsigned char __c) { + return (vector bool short)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector signed int +vec_perm(vector signed int __a, vector signed int __b, + vector unsigned char __c) { + return (vector signed int)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_perm(vector unsigned int __a, vector unsigned int __b, + vector unsigned char __c) { + return (vector unsigned int)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector bool int +vec_perm(vector bool int __a, vector bool int __b, + vector unsigned char __c) { + return (vector bool int)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector signed long long +vec_perm(vector signed long long __a, 
vector signed long long __b, + vector unsigned char __c) { + return (vector signed long long)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_perm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c) { + return (vector unsigned long long)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector bool long long +vec_perm(vector bool long long __a, vector bool long long __b, + vector unsigned char __c) { + return (vector bool long long)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +static inline __ATTRS_o_ai vector double +vec_perm(vector double __a, vector double __b, + vector unsigned char __c) { + return (vector double)__builtin_s390_vperm( + (vector unsigned char)__a, (vector unsigned char)__b, __c); +} + +/*-- vec_permi --------------------------------------------------------------*/ + +extern __ATTRS_o vector signed long long +vec_permi(vector signed long long __a, vector signed long long __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o vector unsigned long long +vec_permi(vector unsigned long long __a, vector unsigned long long __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o vector bool long long +vec_permi(vector bool long long __a, vector bool long long __b, int __c) + __constant_range(__c, 0, 3); + +extern __ATTRS_o vector double +vec_permi(vector double __a, vector double __b, int __c) + __constant_range(__c, 0, 3); + +#define vec_permi(X, Y, Z) ((__typeof__((vec_permi)((X), (Y), (Z)))) \ + __builtin_s390_vpdi((vector unsigned long long)(X), \ + (vector unsigned long long)(Y), \ + (((Z) & 2) << 1) | ((Z) & 1))) + +/*-- vec_sel ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_sel(vector signed char __a, vector 
signed char __b, + vector unsigned char __c) { + return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a); +} + +static inline __ATTRS_o_ai vector signed char +vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) { + return ((vector signed char)__c & __b) | (~(vector signed char)__c & __a); +} + +static inline __ATTRS_o_ai vector bool char +vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) { + return ((vector bool char)__c & __b) | (~(vector bool char)__c & __a); +} + +static inline __ATTRS_o_ai vector bool char +vec_sel(vector bool char __a, vector bool char __b, vector bool char __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_sel(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_sel(vector unsigned char __a, vector unsigned char __b, + vector bool char __c) { + return ((vector unsigned char)__c & __b) | (~(vector unsigned char)__c & __a); +} + +static inline __ATTRS_o_ai vector signed short +vec_sel(vector signed short __a, vector signed short __b, + vector unsigned short __c) { + return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a); +} + +static inline __ATTRS_o_ai vector signed short +vec_sel(vector signed short __a, vector signed short __b, + vector bool short __c) { + return ((vector signed short)__c & __b) | (~(vector signed short)__c & __a); +} + +static inline __ATTRS_o_ai vector bool short +vec_sel(vector bool short __a, vector bool short __b, + vector unsigned short __c) { + return ((vector bool short)__c & __b) | (~(vector bool short)__c & __a); +} + +static inline __ATTRS_o_ai vector bool short +vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_sel(vector 
unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_sel(vector unsigned short __a, vector unsigned short __b, + vector bool short __c) { + return (((vector unsigned short)__c & __b) | + (~(vector unsigned short)__c & __a)); +} + +static inline __ATTRS_o_ai vector signed int +vec_sel(vector signed int __a, vector signed int __b, + vector unsigned int __c) { + return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a); +} + +static inline __ATTRS_o_ai vector signed int +vec_sel(vector signed int __a, vector signed int __b, vector bool int __c) { + return ((vector signed int)__c & __b) | (~(vector signed int)__c & __a); +} + +static inline __ATTRS_o_ai vector bool int +vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) { + return ((vector bool int)__c & __b) | (~(vector bool int)__c & __a); +} + +static inline __ATTRS_o_ai vector bool int +vec_sel(vector bool int __a, vector bool int __b, vector bool int __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_sel(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) { + return ((vector unsigned int)__c & __b) | (~(vector unsigned int)__c & __a); +} + +static inline __ATTRS_o_ai vector signed long long +vec_sel(vector signed long long __a, vector signed long long __b, + vector unsigned long long __c) { + return (((vector signed long long)__c & __b) | + (~(vector signed long long)__c & __a)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_sel(vector signed long long __a, vector signed long long __b, + vector bool long long __c) { + return (((vector signed long long)__c & __b) | + (~(vector signed long long)__c & 
__a)); +} + +static inline __ATTRS_o_ai vector bool long long +vec_sel(vector bool long long __a, vector bool long long __b, + vector unsigned long long __c) { + return (((vector bool long long)__c & __b) | + (~(vector bool long long)__c & __a)); +} + +static inline __ATTRS_o_ai vector bool long long +vec_sel(vector bool long long __a, vector bool long long __b, + vector bool long long __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_sel(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned long long __c) { + return (__c & __b) | (~__c & __a); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_sel(vector unsigned long long __a, vector unsigned long long __b, + vector bool long long __c) { + return (((vector unsigned long long)__c & __b) | + (~(vector unsigned long long)__c & __a)); +} + +static inline __ATTRS_o_ai vector double +vec_sel(vector double __a, vector double __b, vector unsigned long long __c) { + return (vector double)((__c & (vector unsigned long long)__b) | + (~__c & (vector unsigned long long)__a)); +} + +static inline __ATTRS_o_ai vector double +vec_sel(vector double __a, vector double __b, vector bool long long __c) { + vector unsigned long long __ac = (vector unsigned long long)__a; + vector unsigned long long __bc = (vector unsigned long long)__b; + vector unsigned long long __cc = (vector unsigned long long)__c; + return (vector double)((__cc & __bc) | (~__cc & __ac)); +} + +/*-- vec_gather_element -----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed int +vec_gather_element(vector signed int __vec, vector unsigned int __offset, + const signed int *__ptr, int __index) + __constant_range(__index, 0, 3) { + __vec[__index] = *(const signed int *)( + (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai vector bool int +vec_gather_element(vector bool 
int __vec, vector unsigned int __offset, + const unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + __vec[__index] = *(const unsigned int *)( + (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_gather_element(vector unsigned int __vec, vector unsigned int __offset, + const unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + __vec[__index] = *(const unsigned int *)( + (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai vector signed long long +vec_gather_element(vector signed long long __vec, + vector unsigned long long __offset, + const signed long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const signed long long *)( + (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai vector bool long long +vec_gather_element(vector bool long long __vec, + vector unsigned long long __offset, + const unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const unsigned long long *)( + (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_gather_element(vector unsigned long long __vec, + vector unsigned long long __offset, + const unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const unsigned long long *)( + (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]); + return __vec; +} + +static inline __ATTRS_o_ai vector double +vec_gather_element(vector double __vec, vector unsigned long long __offset, + const double *__ptr, int __index) + __constant_range(__index, 0, 1) { + __vec[__index] = *(const double *)( + (__INTPTR_TYPE__)__ptr + (__INTPTR_TYPE__)__offset[__index]); + return __vec; +} + +/*-- vec_scatter_element 
----------------------------------------------------*/ + +static inline __ATTRS_o_ai void +vec_scatter_element(vector signed int __vec, vector unsigned int __offset, + signed int *__ptr, int __index) + __constant_range(__index, 0, 3) { + *(signed int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(vector bool int __vec, vector unsigned int __offset, + unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(vector unsigned int __vec, vector unsigned int __offset, + unsigned int *__ptr, int __index) + __constant_range(__index, 0, 3) { + *(unsigned int *)((__INTPTR_TYPE__)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(vector signed long long __vec, + vector unsigned long long __offset, + signed long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(signed long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(vector bool long long __vec, + vector unsigned long long __offset, + unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(vector unsigned long long __vec, + vector unsigned long long __offset, + unsigned long long *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset[__index]) = + __vec[__index]; +} + +static inline __ATTRS_o_ai void +vec_scatter_element(vector double __vec, vector unsigned long long __offset, + double *__ptr, int __index) + __constant_range(__index, 0, 1) { + *(double *)((__INTPTR_TYPE__)__ptr + __offset[__index]) = + 
__vec[__index]; +} + +/*-- vec_xld2 ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_xld2(long __offset, const signed char *__ptr) { + return *(const vector signed char *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_xld2(long __offset, const unsigned char *__ptr) { + return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector signed short +vec_xld2(long __offset, const signed short *__ptr) { + return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_xld2(long __offset, const unsigned short *__ptr) { + return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector signed int +vec_xld2(long __offset, const signed int *__ptr) { + return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_xld2(long __offset, const unsigned int *__ptr) { + return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector signed long long +vec_xld2(long __offset, const signed long long *__ptr) { + return *(const vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_xld2(long __offset, const unsigned long long *__ptr) { + return *(const vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector double +vec_xld2(long __offset, const double *__ptr) { + return *(const vector double *)((__INTPTR_TYPE__)__ptr + __offset); +} + +/*-- vec_xlw4 ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_xlw4(long __offset, const signed char *__ptr) { + return *(const vector signed char 
*)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_xlw4(long __offset, const unsigned char *__ptr) { + return *(const vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector signed short +vec_xlw4(long __offset, const signed short *__ptr) { + return *(const vector signed short *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_xlw4(long __offset, const unsigned short *__ptr) { + return *(const vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector signed int +vec_xlw4(long __offset, const signed int *__ptr) { + return *(const vector signed int *)((__INTPTR_TYPE__)__ptr + __offset); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_xlw4(long __offset, const unsigned int *__ptr) { + return *(const vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset); +} + +/*-- vec_xstd2 --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai void +vec_xstd2(vector signed char __vec, long __offset, signed char *__ptr) { + *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstd2(vector unsigned char __vec, long __offset, unsigned char *__ptr) { + *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstd2(vector signed short __vec, long __offset, signed short *__ptr) { + *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstd2(vector unsigned short __vec, long __offset, unsigned short *__ptr) { + *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstd2(vector signed int __vec, long __offset, signed int *__ptr) { + *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai 
void +vec_xstd2(vector unsigned int __vec, long __offset, unsigned int *__ptr) { + *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstd2(vector signed long long __vec, long __offset, + signed long long *__ptr) { + *(vector signed long long *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstd2(vector unsigned long long __vec, long __offset, + unsigned long long *__ptr) { + *(vector unsigned long long *)((__INTPTR_TYPE__)__ptr + __offset) = + __vec; +} + +static inline __ATTRS_o_ai void +vec_xstd2(vector double __vec, long __offset, double *__ptr) { + *(vector double *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +/*-- vec_xstw4 --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai void +vec_xstw4(vector signed char __vec, long __offset, signed char *__ptr) { + *(vector signed char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstw4(vector unsigned char __vec, long __offset, unsigned char *__ptr) { + *(vector unsigned char *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstw4(vector signed short __vec, long __offset, signed short *__ptr) { + *(vector signed short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstw4(vector unsigned short __vec, long __offset, unsigned short *__ptr) { + *(vector unsigned short *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstw4(vector signed int __vec, long __offset, signed int *__ptr) { + *(vector signed int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +static inline __ATTRS_o_ai void +vec_xstw4(vector unsigned int __vec, long __offset, unsigned int *__ptr) { + *(vector unsigned int *)((__INTPTR_TYPE__)__ptr + __offset) = __vec; +} + +/*-- vec_load_bndry 
---------------------------------------------------------*/ + +extern __ATTRS_o vector signed char +vec_load_bndry(const signed char *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector unsigned char +vec_load_bndry(const unsigned char *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector signed short +vec_load_bndry(const signed short *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector unsigned short +vec_load_bndry(const unsigned short *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector signed int +vec_load_bndry(const signed int *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector unsigned int +vec_load_bndry(const unsigned int *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector signed long long +vec_load_bndry(const signed long long *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector unsigned long long +vec_load_bndry(const unsigned long long *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +extern __ATTRS_o vector double +vec_load_bndry(const double *__ptr, unsigned short __len) + __constant_pow2_range(__len, 64, 4096); + +#define vec_load_bndry(X, Y) ((__typeof__((vec_load_bndry)((X), (Y)))) \ + __builtin_s390_vlbb((X), ((Y) == 64 ? 0 : \ + (Y) == 128 ? 1 : \ + (Y) == 256 ? 2 : \ + (Y) == 512 ? 3 : \ + (Y) == 1024 ? 4 : \ + (Y) == 2048 ? 5 : \ + (Y) == 4096 ? 
6 : -1))) + +/*-- vec_load_len -----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_load_len(const signed char *__ptr, unsigned int __len) { + return (vector signed char)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_load_len(const unsigned char *__ptr, unsigned int __len) { + return (vector unsigned char)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector signed short +vec_load_len(const signed short *__ptr, unsigned int __len) { + return (vector signed short)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_load_len(const unsigned short *__ptr, unsigned int __len) { + return (vector unsigned short)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector signed int +vec_load_len(const signed int *__ptr, unsigned int __len) { + return (vector signed int)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_load_len(const unsigned int *__ptr, unsigned int __len) { + return (vector unsigned int)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector signed long long +vec_load_len(const signed long long *__ptr, unsigned int __len) { + return (vector signed long long)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_load_len(const unsigned long long *__ptr, unsigned int __len) { + return (vector unsigned long long)__builtin_s390_vll(__len, __ptr); +} + +static inline __ATTRS_o_ai vector double +vec_load_len(const double *__ptr, unsigned int __len) { + return (vector double)__builtin_s390_vll(__len, __ptr); +} + +/*-- vec_store_len ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai void +vec_store_len(vector signed char __vec, signed char *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + 
+static inline __ATTRS_o_ai void +vec_store_len(vector unsigned char __vec, unsigned char *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(vector signed short __vec, signed short *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(vector unsigned short __vec, unsigned short *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(vector signed int __vec, signed int *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(vector unsigned int __vec, unsigned int *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(vector signed long long __vec, signed long long *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(vector unsigned long long __vec, unsigned long long *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +static inline __ATTRS_o_ai void +vec_store_len(vector double __vec, double *__ptr, + unsigned int __len) { + __builtin_s390_vstl((vector signed char)__vec, __len, __ptr); +} + +/*-- vec_load_pair ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed long long +vec_load_pair(signed long long __a, signed long long __b) { + return (vector signed long long)(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_load_pair(unsigned long long __a, unsigned long long __b) { + return (vector unsigned long long)(__a, __b); +} + +/*-- vec_genmask 
------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_genmask(unsigned short __mask) + __constant(__mask) { + return (vector unsigned char)( + __mask & 0x8000 ? 0xff : 0, + __mask & 0x4000 ? 0xff : 0, + __mask & 0x2000 ? 0xff : 0, + __mask & 0x1000 ? 0xff : 0, + __mask & 0x0800 ? 0xff : 0, + __mask & 0x0400 ? 0xff : 0, + __mask & 0x0200 ? 0xff : 0, + __mask & 0x0100 ? 0xff : 0, + __mask & 0x0080 ? 0xff : 0, + __mask & 0x0040 ? 0xff : 0, + __mask & 0x0020 ? 0xff : 0, + __mask & 0x0010 ? 0xff : 0, + __mask & 0x0008 ? 0xff : 0, + __mask & 0x0004 ? 0xff : 0, + __mask & 0x0002 ? 0xff : 0, + __mask & 0x0001 ? 0xff : 0); +} + +/*-- vec_genmasks_* ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_genmasks_8(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 7; + unsigned char __bit2 = __last & 7; + unsigned char __mask1 = (unsigned char)(1U << (7 - __bit1) << 1) - 1; + unsigned char __mask2 = (unsigned char)(1U << (7 - __bit2)) - 1; + unsigned char __value = (__bit1 <= __bit2 ? + __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (vector unsigned char)__value; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_genmasks_16(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 15; + unsigned char __bit2 = __last & 15; + unsigned short __mask1 = (unsigned short)(1U << (15 - __bit1) << 1) - 1; + unsigned short __mask2 = (unsigned short)(1U << (15 - __bit2)) - 1; + unsigned short __value = (__bit1 <= __bit2 ? 
+ __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (vector unsigned short)__value; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_genmasks_32(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 31; + unsigned char __bit2 = __last & 31; + unsigned int __mask1 = (1U << (31 - __bit1) << 1) - 1; + unsigned int __mask2 = (1U << (31 - __bit2)) - 1; + unsigned int __value = (__bit1 <= __bit2 ? + __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (vector unsigned int)__value; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_genmasks_64(unsigned char __first, unsigned char __last) + __constant(__first) __constant(__last) { + unsigned char __bit1 = __first & 63; + unsigned char __bit2 = __last & 63; + unsigned long long __mask1 = (1ULL << (63 - __bit1) << 1) - 1; + unsigned long long __mask2 = (1ULL << (63 - __bit2)) - 1; + unsigned long long __value = (__bit1 <= __bit2 ? + __mask1 & ~__mask2 : + __mask1 | ~__mask2); + return (vector unsigned long long)__value; +} + +/*-- vec_splat --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_splat(vector signed char __vec, int __index) + __constant_range(__index, 0, 15) { + return (vector signed char)__vec[__index]; +} + +static inline __ATTRS_o_ai vector bool char +vec_splat(vector bool char __vec, int __index) + __constant_range(__index, 0, 15) { + return (vector bool char)(vector unsigned char)__vec[__index]; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_splat(vector unsigned char __vec, int __index) + __constant_range(__index, 0, 15) { + return (vector unsigned char)__vec[__index]; +} + +static inline __ATTRS_o_ai vector signed short +vec_splat(vector signed short __vec, int __index) + __constant_range(__index, 0, 7) { + return (vector signed short)__vec[__index]; +} + +static inline __ATTRS_o_ai vector bool short +vec_splat(vector bool short __vec, 
int __index) + __constant_range(__index, 0, 7) { + return (vector bool short)(vector unsigned short)__vec[__index]; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_splat(vector unsigned short __vec, int __index) + __constant_range(__index, 0, 7) { + return (vector unsigned short)__vec[__index]; +} + +static inline __ATTRS_o_ai vector signed int +vec_splat(vector signed int __vec, int __index) + __constant_range(__index, 0, 3) { + return (vector signed int)__vec[__index]; +} + +static inline __ATTRS_o_ai vector bool int +vec_splat(vector bool int __vec, int __index) + __constant_range(__index, 0, 3) { + return (vector bool int)(vector unsigned int)__vec[__index]; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_splat(vector unsigned int __vec, int __index) + __constant_range(__index, 0, 3) { + return (vector unsigned int)__vec[__index]; +} + +static inline __ATTRS_o_ai vector signed long long +vec_splat(vector signed long long __vec, int __index) + __constant_range(__index, 0, 1) { + return (vector signed long long)__vec[__index]; +} + +static inline __ATTRS_o_ai vector bool long long +vec_splat(vector bool long long __vec, int __index) + __constant_range(__index, 0, 1) { + return (vector bool long long)(vector unsigned long long)__vec[__index]; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_splat(vector unsigned long long __vec, int __index) + __constant_range(__index, 0, 1) { + return (vector unsigned long long)__vec[__index]; +} + +static inline __ATTRS_o_ai vector double +vec_splat(vector double __vec, int __index) + __constant_range(__index, 0, 1) { + return (vector double)__vec[__index]; +} + +/*-- vec_splat_s* -----------------------------------------------------------*/ + +static inline __ATTRS_ai vector signed char +vec_splat_s8(signed char __scalar) + __constant(__scalar) { + return (vector signed char)__scalar; +} + +static inline __ATTRS_ai vector signed short +vec_splat_s16(signed short __scalar) + 
__constant(__scalar) { + return (vector signed short)__scalar; +} + +static inline __ATTRS_ai vector signed int +vec_splat_s32(signed short __scalar) + __constant(__scalar) { + return (vector signed int)(signed int)__scalar; +} + +static inline __ATTRS_ai vector signed long long +vec_splat_s64(signed short __scalar) + __constant(__scalar) { + return (vector signed long long)(signed long)__scalar; +} + +/*-- vec_splat_u* -----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_splat_u8(unsigned char __scalar) + __constant(__scalar) { + return (vector unsigned char)__scalar; +} + +static inline __ATTRS_ai vector unsigned short +vec_splat_u16(unsigned short __scalar) + __constant(__scalar) { + return (vector unsigned short)__scalar; +} + +static inline __ATTRS_ai vector unsigned int +vec_splat_u32(signed short __scalar) + __constant(__scalar) { + return (vector unsigned int)(signed int)__scalar; +} + +static inline __ATTRS_ai vector unsigned long long +vec_splat_u64(signed short __scalar) + __constant(__scalar) { + return (vector unsigned long long)(signed long long)__scalar; +} + +/*-- vec_splats -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_splats(signed char __scalar) { + return (vector signed char)__scalar; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_splats(unsigned char __scalar) { + return (vector unsigned char)__scalar; +} + +static inline __ATTRS_o_ai vector signed short +vec_splats(signed short __scalar) { + return (vector signed short)__scalar; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_splats(unsigned short __scalar) { + return (vector unsigned short)__scalar; +} + +static inline __ATTRS_o_ai vector signed int +vec_splats(signed int __scalar) { + return (vector signed int)__scalar; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_splats(unsigned int __scalar) { + return (vector unsigned 
int)__scalar; +} + +static inline __ATTRS_o_ai vector signed long long +vec_splats(signed long long __scalar) { + return (vector signed long long)__scalar; +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_splats(unsigned long long __scalar) { + return (vector unsigned long long)__scalar; +} + +static inline __ATTRS_o_ai vector double +vec_splats(double __scalar) { + return (vector double)__scalar; +} + +/*-- vec_extend_s64 ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed long long +vec_extend_s64(vector signed char __a) { + return (vector signed long long)(__a[7], __a[15]); +} + +static inline __ATTRS_o_ai vector signed long long +vec_extend_s64(vector signed short __a) { + return (vector signed long long)(__a[3], __a[7]); +} + +static inline __ATTRS_o_ai vector signed long long +vec_extend_s64(vector signed int __a) { + return (vector signed long long)(__a[1], __a[3]); +} + +/*-- vec_mergeh -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_mergeh(vector signed char __a, vector signed char __b) { + return (vector signed char)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3], + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai vector bool char +vec_mergeh(vector bool char __a, vector bool char __b) { + return (vector bool char)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3], + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_mergeh(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3], + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai vector signed short +vec_mergeh(vector signed short __a, vector signed short __b) { + return 
(vector signed short)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai vector bool short +vec_mergeh(vector bool short __a, vector bool short __b) { + return (vector bool short)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_mergeh(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)( + __a[0], __b[0], __a[1], __b[1], __a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai vector signed int +vec_mergeh(vector signed int __a, vector signed int __b) { + return (vector signed int)(__a[0], __b[0], __a[1], __b[1]); +} + +static inline __ATTRS_o_ai vector bool int +vec_mergeh(vector bool int __a, vector bool int __b) { + return (vector bool int)(__a[0], __b[0], __a[1], __b[1]); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_mergeh(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)(__a[0], __b[0], __a[1], __b[1]); +} + +static inline __ATTRS_o_ai vector signed long long +vec_mergeh(vector signed long long __a, vector signed long long __b) { + return (vector signed long long)(__a[0], __b[0]); +} + +static inline __ATTRS_o_ai vector bool long long +vec_mergeh(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)(__a[0], __b[0]); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)(__a[0], __b[0]); +} + +static inline __ATTRS_o_ai vector double +vec_mergeh(vector double __a, vector double __b) { + return (vector double)(__a[0], __b[0]); +} + +/*-- vec_mergel -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_mergel(vector signed char __a, vector signed char __b) { + return (vector signed char)( + __a[8], __b[8], __a[9], __b[9], __a[10], 
__b[10], __a[11], __b[11], + __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]); +} + +static inline __ATTRS_o_ai vector bool char +vec_mergel(vector bool char __a, vector bool char __b) { + return (vector bool char)( + __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11], + __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_mergel(vector unsigned char __a, vector unsigned char __b) { + return (vector unsigned char)( + __a[8], __b[8], __a[9], __b[9], __a[10], __b[10], __a[11], __b[11], + __a[12], __b[12], __a[13], __b[13], __a[14], __b[14], __a[15], __b[15]); +} + +static inline __ATTRS_o_ai vector signed short +vec_mergel(vector signed short __a, vector signed short __b) { + return (vector signed short)( + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai vector bool short +vec_mergel(vector bool short __a, vector bool short __b) { + return (vector bool short)( + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_mergel(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)( + __a[4], __b[4], __a[5], __b[5], __a[6], __b[6], __a[7], __b[7]); +} + +static inline __ATTRS_o_ai vector signed int +vec_mergel(vector signed int __a, vector signed int __b) { + return (vector signed int)(__a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai vector bool int +vec_mergel(vector bool int __a, vector bool int __b) { + return (vector bool int)(__a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_mergel(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)(__a[2], __b[2], __a[3], __b[3]); +} + +static inline __ATTRS_o_ai vector signed long long +vec_mergel(vector signed long long __a, vector signed long long __b) { + return (vector 
signed long long)(__a[1], __b[1]); +} + +static inline __ATTRS_o_ai vector bool long long +vec_mergel(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)(__a[1], __b[1]); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_mergel(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)(__a[1], __b[1]); +} + +static inline __ATTRS_o_ai vector double +vec_mergel(vector double __a, vector double __b) { + return (vector double)(__a[1], __b[1]); +} + +/*-- vec_pack ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_pack(vector signed short __a, vector signed short __b) { + vector signed char __ac = (vector signed char)__a; + vector signed char __bc = (vector signed char)__b; + return (vector signed char)( + __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15], + __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]); +} + +static inline __ATTRS_o_ai vector bool char +vec_pack(vector bool short __a, vector bool short __b) { + vector bool char __ac = (vector bool char)__a; + vector bool char __bc = (vector bool char)__b; + return (vector bool char)( + __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15], + __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_pack(vector unsigned short __a, vector unsigned short __b) { + vector unsigned char __ac = (vector unsigned char)__a; + vector unsigned char __bc = (vector unsigned char)__b; + return (vector unsigned char)( + __ac[1], __ac[3], __ac[5], __ac[7], __ac[9], __ac[11], __ac[13], __ac[15], + __bc[1], __bc[3], __bc[5], __bc[7], __bc[9], __bc[11], __bc[13], __bc[15]); +} + +static inline __ATTRS_o_ai vector signed short +vec_pack(vector signed int __a, vector signed int __b) { + vector signed short __ac = (vector signed 
short)__a; + vector signed short __bc = (vector signed short)__b; + return (vector signed short)( + __ac[1], __ac[3], __ac[5], __ac[7], + __bc[1], __bc[3], __bc[5], __bc[7]); +} + +static inline __ATTRS_o_ai vector bool short +vec_pack(vector bool int __a, vector bool int __b) { + vector bool short __ac = (vector bool short)__a; + vector bool short __bc = (vector bool short)__b; + return (vector bool short)( + __ac[1], __ac[3], __ac[5], __ac[7], + __bc[1], __bc[3], __bc[5], __bc[7]); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_pack(vector unsigned int __a, vector unsigned int __b) { + vector unsigned short __ac = (vector unsigned short)__a; + vector unsigned short __bc = (vector unsigned short)__b; + return (vector unsigned short)( + __ac[1], __ac[3], __ac[5], __ac[7], + __bc[1], __bc[3], __bc[5], __bc[7]); +} + +static inline __ATTRS_o_ai vector signed int +vec_pack(vector signed long long __a, vector signed long long __b) { + vector signed int __ac = (vector signed int)__a; + vector signed int __bc = (vector signed int)__b; + return (vector signed int)(__ac[1], __ac[3], __bc[1], __bc[3]); +} + +static inline __ATTRS_o_ai vector bool int +vec_pack(vector bool long long __a, vector bool long long __b) { + vector bool int __ac = (vector bool int)__a; + vector bool int __bc = (vector bool int)__b; + return (vector bool int)(__ac[1], __ac[3], __bc[1], __bc[3]); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_pack(vector unsigned long long __a, vector unsigned long long __b) { + vector unsigned int __ac = (vector unsigned int)__a; + vector unsigned int __bc = (vector unsigned int)__b; + return (vector unsigned int)(__ac[1], __ac[3], __bc[1], __bc[3]); +} + +/*-- vec_packs --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_packs(vector signed short __a, vector signed short __b) { + return __builtin_s390_vpksh(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned char 
vec_packs(vector unsigned short __a, vector unsigned short __b) {
  return __builtin_s390_vpklsh(__a, __b);
}

static inline __ATTRS_o_ai vector signed short
vec_packs(vector signed int __a, vector signed int __b) {
  return __builtin_s390_vpksf(__a, __b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_packs(vector unsigned int __a, vector unsigned int __b) {
  return __builtin_s390_vpklsf(__a, __b);
}

static inline __ATTRS_o_ai vector signed int
vec_packs(vector signed long long __a, vector signed long long __b) {
  return __builtin_s390_vpksg(__a, __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
  return __builtin_s390_vpklsg(__a, __b);
}

/*-- vec_packs_cc -----------------------------------------------------------*/

/* Like vec_packs, but uses the *_s ("set condition code") builtins, which
   additionally store a result code through *__cc.  The builtins are opaque
   here; presumably *__cc receives the instruction's condition code --
   confirm against the builtin definitions.  Signed inputs use vpk*s,
   unsigned inputs the "logical" vpkls*s variants.  */

static inline __ATTRS_o_ai vector signed char
vec_packs_cc(vector signed short __a, vector signed short __b, int *__cc) {
  return __builtin_s390_vpkshs(__a, __b, __cc);
}

static inline __ATTRS_o_ai vector unsigned char
vec_packs_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
  return __builtin_s390_vpklshs(__a, __b, __cc);
}

static inline __ATTRS_o_ai vector signed short
vec_packs_cc(vector signed int __a, vector signed int __b, int *__cc) {
  return __builtin_s390_vpksfs(__a, __b, __cc);
}

static inline __ATTRS_o_ai vector unsigned short
vec_packs_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) {
  return __builtin_s390_vpklsfs(__a, __b, __cc);
}

static inline __ATTRS_o_ai vector signed int
vec_packs_cc(vector signed long long __a, vector signed long long __b,
             int *__cc) {
  return __builtin_s390_vpksgs(__a, __b, __cc);
}

static inline __ATTRS_o_ai vector unsigned int
vec_packs_cc(vector unsigned long long __a, vector unsigned long long __b,
             int *__cc) {
  return __builtin_s390_vpklsgs(__a, __b, __cc);
}

/*-- vec_packsu
-------------------------------------------------------------*/

/* Pack with unsigned saturation.  For signed inputs, negative elements are
   masked to zero first: (__a >= __zero) produces an all-ones lane for each
   non-negative element, so the AND keeps non-negative values and zeroes
   negative ones before the unsigned ("logical") saturating pack runs.
   Purely unsigned inputs go straight to the vpkls* builtin.  */

static inline __ATTRS_o_ai vector unsigned char
vec_packsu(vector signed short __a, vector signed short __b) {
  const vector signed short __zero = (vector signed short)0;
  return __builtin_s390_vpklsh(
    (vector unsigned short)(__a >= __zero) & (vector unsigned short)__a,
    (vector unsigned short)(__b >= __zero) & (vector unsigned short)__b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_packsu(vector unsigned short __a, vector unsigned short __b) {
  return __builtin_s390_vpklsh(__a, __b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_packsu(vector signed int __a, vector signed int __b) {
  const vector signed int __zero = (vector signed int)0;
  return __builtin_s390_vpklsf(
    (vector unsigned int)(__a >= __zero) & (vector unsigned int)__a,
    (vector unsigned int)(__b >= __zero) & (vector unsigned int)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_packsu(vector unsigned int __a, vector unsigned int __b) {
  return __builtin_s390_vpklsf(__a, __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_packsu(vector signed long long __a, vector signed long long __b) {
  const vector signed long long __zero = (vector signed long long)0;
  return __builtin_s390_vpklsg(
    (vector unsigned long long)(__a >= __zero) &
    (vector unsigned long long)__a,
    (vector unsigned long long)(__b >= __zero) &
    (vector unsigned long long)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
  return __builtin_s390_vpklsg(__a, __b);
}

/*-- vec_packsu_cc ----------------------------------------------------------*/

/* Unsigned saturating pack that also stores a result code through *__cc.
   NOTE(review): only unsigned-operand variants are provided in this
   section (no signed-input masking overloads as in vec_packsu above).  */

static inline __ATTRS_o_ai vector unsigned char
vec_packsu_cc(vector unsigned short __a, vector unsigned short __b, int *__cc) {
  return __builtin_s390_vpklshs(__a, __b, __cc);
}

static inline __ATTRS_o_ai vector unsigned short
vec_packsu_cc(vector unsigned int
__a, vector unsigned int __b, int *__cc) {
  return __builtin_s390_vpklsfs(__a, __b, __cc);
}

static inline __ATTRS_o_ai vector unsigned int
vec_packsu_cc(vector unsigned long long __a, vector unsigned long long __b,
              int *__cc) {
  return __builtin_s390_vpklsgs(__a, __b, __cc);
}

/*-- vec_unpackh ------------------------------------------------------------*/

/* Unpack the "high" half of the input into the next wider element type.
   Signed variants use the vuph* builtins, unsigned variants the "logical"
   vuplh* builtins (presumably sign- vs. zero-extending -- the builtins are
   opaque here).  Bool variants have no builtin of their own: they route
   through the signed builtin and cast the result back.  */

static inline __ATTRS_o_ai vector signed short
vec_unpackh(vector signed char __a) {
  return __builtin_s390_vuphb(__a);
}

static inline __ATTRS_o_ai vector bool short
vec_unpackh(vector bool char __a) {
  /* Reuse the signed builtin; cast in and out of the bool type.  */
  return (vector bool short)__builtin_s390_vuphb((vector signed char)__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_unpackh(vector unsigned char __a) {
  return __builtin_s390_vuplhb(__a);
}

static inline __ATTRS_o_ai vector signed int
vec_unpackh(vector signed short __a) {
  return __builtin_s390_vuphh(__a);
}

static inline __ATTRS_o_ai vector bool int
vec_unpackh(vector bool short __a) {
  return (vector bool int)__builtin_s390_vuphh((vector signed short)__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_unpackh(vector unsigned short __a) {
  return __builtin_s390_vuplhh(__a);
}

static inline __ATTRS_o_ai vector signed long long
vec_unpackh(vector signed int __a) {
  return __builtin_s390_vuphf(__a);
}

static inline __ATTRS_o_ai vector bool long long
vec_unpackh(vector bool int __a) {
  return (vector bool long long)__builtin_s390_vuphf((vector signed int)__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_unpackh(vector unsigned int __a) {
  return __builtin_s390_vuplhf(__a);
}

/*-- vec_unpackl ------------------------------------------------------------*/

/* As vec_unpackh, but for the "low" half: vupl* (signed) and the "logical"
   vupll* (unsigned) builtins; bool variants cast through the signed one.  */

static inline __ATTRS_o_ai vector signed short
vec_unpackl(vector signed char __a) {
  return __builtin_s390_vuplb(__a);
}

static inline __ATTRS_o_ai vector bool short
vec_unpackl(vector bool char __a) {
  return (vector bool
short)__builtin_s390_vuplb((vector signed char)__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_unpackl(vector unsigned char __a) {
  return __builtin_s390_vupllb(__a);
}

static inline __ATTRS_o_ai vector signed int
vec_unpackl(vector signed short __a) {
  /* Note the builtin is spelled vuplhw, not vuplh -- presumably to avoid
     colliding with the vuplh* "unpack logical high" builtin family used by
     vec_unpackh above; confirm against the builtin definitions.  */
  return __builtin_s390_vuplhw(__a);
}

static inline __ATTRS_o_ai vector bool int
vec_unpackl(vector bool short __a) {
  return (vector bool int)__builtin_s390_vuplhw((vector signed short)__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_unpackl(vector unsigned short __a) {
  return __builtin_s390_vupllh(__a);
}

static inline __ATTRS_o_ai vector signed long long
vec_unpackl(vector signed int __a) {
  return __builtin_s390_vuplf(__a);
}

static inline __ATTRS_o_ai vector bool long long
vec_unpackl(vector bool int __a) {
  return (vector bool long long)__builtin_s390_vuplf((vector signed int)__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_unpackl(vector unsigned int __a) {
  return __builtin_s390_vupllf(__a);
}

/*-- vec_cmpeq --------------------------------------------------------------*/

/* Element-wise equality.  These map directly onto the vector extension's
   infix == operator; the result of each comparison is a boolean vector of
   the same element width as the operands.  */

static inline __ATTRS_o_ai vector bool char
vec_cmpeq(vector bool char __a, vector bool char __b) {
  return (vector bool char)(__a == __b);
}

static inline __ATTRS_o_ai vector bool char
vec_cmpeq(vector signed char __a, vector signed char __b) {
  return (vector bool char)(__a == __b);
}

static inline __ATTRS_o_ai vector bool char
vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
  return (vector bool char)(__a == __b);
}

static inline __ATTRS_o_ai vector bool short
vec_cmpeq(vector bool short __a, vector bool short __b) {
  return (vector bool short)(__a == __b);
}

static inline __ATTRS_o_ai vector bool short
vec_cmpeq(vector signed short __a, vector signed short __b) {
  return (vector bool short)(__a == __b);
}

static inline __ATTRS_o_ai vector bool short
vec_cmpeq(vector unsigned short __a, vector unsigned
short __b) { + return (vector bool short)(__a == __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpeq(vector bool int __a, vector bool int __b) { + return (vector bool int)(__a == __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpeq(vector signed int __a, vector signed int __b) { + return (vector bool int)(__a == __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpeq(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)(__a == __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpeq(vector bool long long __a, vector bool long long __b) { + return (vector bool long long)(__a == __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpeq(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)(__a == __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)(__a == __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpeq(vector double __a, vector double __b) { + return (vector bool long long)(__a == __b); +} + +/*-- vec_cmpge --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmpge(vector signed char __a, vector signed char __b) { + return (vector bool char)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool char +vec_cmpge(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmpge(vector signed short __a, vector signed short __b) { + return (vector bool short)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmpge(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpge(vector signed int __a, vector signed int __b) { + return (vector 
bool int)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpge(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpge(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)(__a >= __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpge(vector double __a, vector double __b) { + return (vector bool long long)(__a >= __b); +} + +/*-- vec_cmpgt --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmpgt(vector signed char __a, vector signed char __b) { + return (vector bool char)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool char +vec_cmpgt(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmpgt(vector signed short __a, vector signed short __b) { + return (vector bool short)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmpgt(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpgt(vector signed int __a, vector signed int __b) { + return (vector bool int)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpgt(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpgt(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool 
long long)(__a > __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmpgt(vector double __a, vector double __b) { + return (vector bool long long)(__a > __b); +} + +/*-- vec_cmple --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmple(vector signed char __a, vector signed char __b) { + return (vector bool char)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool char +vec_cmple(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmple(vector signed short __a, vector signed short __b) { + return (vector bool short)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmple(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmple(vector signed int __a, vector signed int __b) { + return (vector bool int)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmple(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmple(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmple(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)(__a <= __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmple(vector double __a, vector double __b) { + return (vector bool long long)(__a <= __b); +} + +/*-- vec_cmplt --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmplt(vector signed char __a, vector signed char __b) { + return (vector bool char)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool char +vec_cmplt(vector unsigned char 
__a, vector unsigned char __b) { + return (vector bool char)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmplt(vector signed short __a, vector signed short __b) { + return (vector bool short)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmplt(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmplt(vector signed int __a, vector signed int __b) { + return (vector bool int)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmplt(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmplt(vector signed long long __a, vector signed long long __b) { + return (vector bool long long)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) { + return (vector bool long long)(__a < __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_cmplt(vector double __a, vector double __b) { + return (vector bool long long)(__a < __b); +} + +/*-- vec_all_eq -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed char __a, vector signed char __b) { + int __cc; + __builtin_s390_vceqbs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool char __a, vector signed char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector 
signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed short __a, vector signed short __b) { + int __cc; + __builtin_s390_vceqhs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool short __a, vector signed short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool short __a, vector bool short 
__b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed int __a, vector signed int __b) { + int __cc; + __builtin_s390_vceqfs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool int __a, vector signed int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector signed long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool long long __a, vector signed long 
long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector unsigned long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector bool long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_eq(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfcedbs(__a, __b, &__cc); + return __cc == 0; +} + +/*-- vec_all_ne -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed char __a, vector signed char __b) { + int __cc; + __builtin_s390_vceqbs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool char __a, vector signed char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((vector 
signed char)__a, + (vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed short __a, vector signed short __b) { + int __cc; + __builtin_s390_vceqhs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool short __a, vector signed short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool 
short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed int __a, vector signed int __b) { + int __cc; + __builtin_s390_vceqfs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool int __a, vector signed int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector signed long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool long 
long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector unsigned long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector bool long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vceqgs((vector signed long long)__a, + (vector signed long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ne(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfcedbs(__a, __b, &__cc); + return __cc == 3; +} + +/*-- vec_all_ge -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchbs((vector signed char)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned char __a, vector unsigned char __b) { + int __cc; + 
__builtin_s390_vchlbs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__b, + (vector unsigned char)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchhs((vector signed short)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__b, + (vector unsigned short)__a, &__cc); 
+ return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchfs((vector signed int)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__b, + (vector unsigned int)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector signed long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned long 
long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector unsigned long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector bool long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__b, + (vector unsigned long long)__a, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_ge(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__a, __b, &__cc); + return __cc == 0; +} + +/*-- vec_all_gt -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs((vector signed char)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector 
bool char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__a, + (vector unsigned char)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs((vector signed short)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__a, + (vector unsigned short)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed int __a, vector bool int __b) { + int __cc; + 
__builtin_s390_vchfs(__a, (vector signed int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs((vector signed int)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__a, + (vector unsigned int)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector signed long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector unsigned long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, 
&__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector bool long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__a, + (vector unsigned long long)__b, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_gt(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__a, __b, &__cc); + return __cc == 0; +} + +/*-- vec_all_le -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_le(vector signed char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector signed char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchbs(__a, (vector signed char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs((vector signed char)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector unsigned char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector unsigned char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs(__a, (vector unsigned char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__a, + 
(vector unsigned char)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector signed short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector signed short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchhs(__a, (vector signed short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs((vector signed short)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector unsigned short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector unsigned short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs(__a, (vector unsigned short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__a, + (vector unsigned short)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector signed int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector signed int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchfs(__a, (vector signed int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs((vector signed int)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int 
+vec_all_le(vector unsigned int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector unsigned int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs(__a, (vector unsigned int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__a, + (vector unsigned int)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector signed long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector signed long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchgs(__a, (vector signed long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs((vector signed long long)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector unsigned long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector unsigned long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs(__a, (vector unsigned long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__a, __b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector bool long 
long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__a, + (vector unsigned long long)__b, &__cc); + return __cc == 3; +} + +static inline __ATTRS_o_ai int +vec_all_le(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__b, __a, &__cc); + return __cc == 0; +} + +/*-- vec_all_lt -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchbs((vector signed char)__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool char __a, vector signed char __b) { + int __cc; + __builtin_s390_vchbs(__b, (vector signed char)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vchlbs(__b, (vector unsigned char)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool char __a, vector bool char __b) { + int __cc; + __builtin_s390_vchlbs((vector unsigned char)__b, + (vector unsigned char)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed short __a, 
vector bool short __b) { + int __cc; + __builtin_s390_vchhs((vector signed short)__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool short __a, vector signed short __b) { + int __cc; + __builtin_s390_vchhs(__b, (vector signed short)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vchlhs(__b, (vector unsigned short)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool short __a, vector bool short __b) { + int __cc; + __builtin_s390_vchlhs((vector unsigned short)__b, + (vector unsigned short)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchfs((vector signed int)__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool int __a, vector signed int __b) { + int __cc; + __builtin_s390_vchfs(__b, (vector signed int)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__b, __a, &__cc); + 
return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vchlfs(__b, (vector unsigned int)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool int __a, vector bool int __b) { + int __cc; + __builtin_s390_vchlfs((vector unsigned int)__b, + (vector unsigned int)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector signed long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchgs((vector signed long long)__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vchgs(__b, (vector signed long long)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector unsigned long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__b, __a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool long long __a, vector unsigned long long __b) { + int __cc; + __builtin_s390_vchlgs(__b, (vector unsigned long long)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector bool long long __a, vector bool long long __b) { + int __cc; + __builtin_s390_vchlgs((vector unsigned long long)__b, + (vector unsigned long long)__a, &__cc); + return __cc == 0; +} + +static inline __ATTRS_o_ai int +vec_all_lt(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__b, __a, &__cc); 
+ return __cc == 0; +} + +/*-- vec_all_nge ------------------------------------------------------------*/ + +static inline __ATTRS_ai int +vec_all_nge(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__a, __b, &__cc); + return __cc == 3; +} + +/*-- vec_all_ngt ------------------------------------------------------------*/ + +static inline __ATTRS_ai int +vec_all_ngt(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__a, __b, &__cc); + return __cc == 3; +} + +/*-- vec_all_nle ------------------------------------------------------------*/ + +static inline __ATTRS_ai int +vec_all_nle(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchedbs(__b, __a, &__cc); + return __cc == 3; +} + +/*-- vec_all_nlt ------------------------------------------------------------*/ + +static inline __ATTRS_ai int +vec_all_nlt(vector double __a, vector double __b) { + int __cc; + __builtin_s390_vfchdbs(__b, __a, &__cc); + return __cc == 3; +} + +/*-- vec_all_nan ------------------------------------------------------------*/ + +static inline __ATTRS_ai int +vec_all_nan(vector double __a) { + int __cc; + __builtin_s390_vftcidb(__a, 15, &__cc); + return __cc == 0; +} + +/*-- vec_all_numeric --------------------------------------------------------*/ + +static inline __ATTRS_ai int +vec_all_numeric(vector double __a) { + int __cc; + __builtin_s390_vftcidb(__a, 15, &__cc); + return __cc == 3; +} + +/*-- vec_any_eq -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_any_eq(vector signed char __a, vector signed char __b) { + int __cc; + __builtin_s390_vceqbs(__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector signed char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs(__a, (vector signed char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool char __a, vector signed char 
__b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector unsigned char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector unsigned char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool char __a, vector unsigned char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool char __a, vector bool char __b) { + int __cc; + __builtin_s390_vceqbs((vector signed char)__a, + (vector signed char)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector signed short __a, vector signed short __b) { + int __cc; + __builtin_s390_vceqhs(__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector signed short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs(__a, (vector signed short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool short __a, vector signed short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector unsigned short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector unsigned short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai 
int +vec_any_eq(vector bool short __a, vector unsigned short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool short __a, vector bool short __b) { + int __cc; + __builtin_s390_vceqhs((vector signed short)__a, + (vector signed short)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector signed int __a, vector signed int __b) { + int __cc; + __builtin_s390_vceqfs(__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector signed int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs(__a, (vector signed int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool int __a, vector signed int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, __b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector unsigned int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector unsigned int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool int __a, vector unsigned int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector bool int __a, vector bool int __b) { + int __cc; + __builtin_s390_vceqfs((vector signed int)__a, + (vector signed int)__b, &__cc); + return __cc <= 1; +} + +static inline __ATTRS_o_ai int +vec_any_eq(vector signed long long __a, vector signed long long __b) { + int __cc; + __builtin_s390_vceqgs(__a, __b, &__cc); + return __cc <= 1; +} + +static inline 
__ATTRS_o_ai int
vec_any_eq(vector signed long long __a, vector bool long long __b) {
  int __cc;
  __builtin_s390_vceqgs(__a, (vector signed long long)__b, &__cc);
  return __cc <= 1;
}

static inline __ATTRS_o_ai int
vec_any_eq(vector bool long long __a, vector signed long long __b) {
  int __cc;
  __builtin_s390_vceqgs((vector signed long long)__a, __b, &__cc);
  return __cc <= 1;
}

static inline __ATTRS_o_ai int
vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) {
  int __cc;
  __builtin_s390_vceqgs((vector signed long long)__a,
                        (vector signed long long)__b, &__cc);
  return __cc <= 1;
}

static inline __ATTRS_o_ai int
vec_any_eq(vector unsigned long long __a, vector bool long long __b) {
  int __cc;
  __builtin_s390_vceqgs((vector signed long long)__a,
                        (vector signed long long)__b, &__cc);
  return __cc <= 1;
}

static inline __ATTRS_o_ai int
vec_any_eq(vector bool long long __a, vector unsigned long long __b) {
  int __cc;
  __builtin_s390_vceqgs((vector signed long long)__a,
                        (vector signed long long)__b, &__cc);
  return __cc <= 1;
}

static inline __ATTRS_o_ai int
vec_any_eq(vector bool long long __a, vector bool long long __b) {
  int __cc;
  __builtin_s390_vceqgs((vector signed long long)__a,
                        (vector signed long long)__b, &__cc);
  return __cc <= 1;
}

static inline __ATTRS_o_ai int
vec_any_eq(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfcedbs(__a, __b, &__cc);
  return __cc <= 1;
}

/* Helper: stamp out the seven integer overloads of an "any"-style
 * equality predicate NAME for element type ELT (char/short/int/long long).
 * All operands are cast to the signed element type expected by the
 * vector-compare-equal builtin; the cast is a no-op when the operand is
 * already signed.  COND is the condition-code test yielding the result
 * (presumably CC 0 = all equal, 1 = mixed, 3 = none — per z/Architecture;
 * matches the surrounding hand-written overloads byte for byte).  */
#define __zvec_any_eqne(NAME, ELT, BUILTIN, COND)                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector signed ELT __a, vector signed ELT __b) {                           \
  int __cc;                                                                    \
  BUILTIN((vector signed ELT)__a, (vector signed ELT)__b, &__cc);              \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector signed ELT __a, vector bool ELT __b) {                             \
  int __cc;                                                                    \
  BUILTIN((vector signed ELT)__a, (vector signed ELT)__b, &__cc);              \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector bool ELT __a, vector signed ELT __b) {                             \
  int __cc;                                                                    \
  BUILTIN((vector signed ELT)__a, (vector signed ELT)__b, &__cc);              \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector unsigned ELT __a, vector unsigned ELT __b) {                       \
  int __cc;                                                                    \
  BUILTIN((vector signed ELT)__a, (vector signed ELT)__b, &__cc);              \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector unsigned ELT __a, vector bool ELT __b) {                           \
  int __cc;                                                                    \
  BUILTIN((vector signed ELT)__a, (vector signed ELT)__b, &__cc);              \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector bool ELT __a, vector unsigned ELT __b) {                           \
  int __cc;                                                                    \
  BUILTIN((vector signed ELT)__a, (vector signed ELT)__b, &__cc);              \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector bool ELT __a, vector bool ELT __b) {                               \
  int __cc;                                                                    \
  BUILTIN((vector signed ELT)__a, (vector signed ELT)__b, &__cc);              \
  return COND;                                                                 \
}

/* Helper: stamp out the seven integer overloads of an "any"-style
 * ordering predicate NAME for element type ELT.  Signed and mixed
 * signed/bool overloads use SBUILTIN on signed-cast operands; unsigned
 * and bool/unsigned overloads use UBUILTIN on unsigned-cast operands.
 * X and Y name the builtin's first and second operand (__a/__b), which
 * encodes the operand swap that turns compare-high into >=, <, etc.
 * COND is the condition-code test producing the boolean result.  */
#define __zvec_any_icmp(NAME, ELT, SBUILTIN, UBUILTIN, X, Y, COND)             \
static inline __ATTRS_o_ai int                                                 \
NAME(vector signed ELT __a, vector signed ELT __b) {                           \
  int __cc;                                                                    \
  SBUILTIN((vector signed ELT)X, (vector signed ELT)Y, &__cc);                 \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector signed ELT __a, vector bool ELT __b) {                             \
  int __cc;                                                                    \
  SBUILTIN((vector signed ELT)X, (vector signed ELT)Y, &__cc);                 \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector bool ELT __a, vector signed ELT __b) {                             \
  int __cc;                                                                    \
  SBUILTIN((vector signed ELT)X, (vector signed ELT)Y, &__cc);                 \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector unsigned ELT __a, vector unsigned ELT __b) {                       \
  int __cc;                                                                    \
  UBUILTIN((vector unsigned ELT)X, (vector unsigned ELT)Y, &__cc);             \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector unsigned ELT __a, vector bool ELT __b) {                           \
  int __cc;                                                                    \
  UBUILTIN((vector unsigned ELT)X, (vector unsigned ELT)Y, &__cc);             \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector bool ELT __a, vector unsigned ELT __b) {                           \
  int __cc;                                                                    \
  UBUILTIN((vector unsigned ELT)X, (vector unsigned ELT)Y, &__cc);             \
  return COND;                                                                 \
}                                                                              \
static inline __ATTRS_o_ai int                                                 \
NAME(vector bool ELT __a, vector bool ELT __b) {                               \
  int __cc;                                                                    \
  UBUILTIN((vector unsigned ELT)X, (vector unsigned ELT)Y, &__cc);             \
  return COND;                                                                 \
}

/*-- vec_any_ne -------------------------------------------------------------*/

__zvec_any_eqne(vec_any_ne, char, __builtin_s390_vceqbs, __cc != 0)
__zvec_any_eqne(vec_any_ne, short, __builtin_s390_vceqhs, __cc != 0)
__zvec_any_eqne(vec_any_ne, int, __builtin_s390_vceqfs, __cc != 0)
__zvec_any_eqne(vec_any_ne, long long, __builtin_s390_vceqgs, __cc != 0)

static inline __ATTRS_o_ai int
vec_any_ne(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfcedbs(__a, __b, &__cc);
  return __cc != 0;
}

/*-- vec_any_ge -------------------------------------------------------------*/

__zvec_any_icmp(vec_any_ge, char, __builtin_s390_vchbs,
                __builtin_s390_vchlbs, __b, __a, __cc != 0)
__zvec_any_icmp(vec_any_ge, short, __builtin_s390_vchhs,
                __builtin_s390_vchlhs, __b, __a, __cc != 0)
__zvec_any_icmp(vec_any_ge, int, __builtin_s390_vchfs,
                __builtin_s390_vchlfs, __b, __a, __cc != 0)
__zvec_any_icmp(vec_any_ge, long long, __builtin_s390_vchgs,
                __builtin_s390_vchlgs, __b, __a, __cc != 0)

static inline __ATTRS_o_ai int
vec_any_ge(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchedbs(__a, __b, &__cc);
  return __cc <= 1;
}

/*-- vec_any_gt -------------------------------------------------------------*/

__zvec_any_icmp(vec_any_gt, char, __builtin_s390_vchbs,
                __builtin_s390_vchlbs, __a, __b, __cc <= 1)
__zvec_any_icmp(vec_any_gt, short, __builtin_s390_vchhs,
                __builtin_s390_vchlhs, __a, __b, __cc <= 1)
__zvec_any_icmp(vec_any_gt, int, __builtin_s390_vchfs,
                __builtin_s390_vchlfs, __a, __b, __cc <= 1)
__zvec_any_icmp(vec_any_gt, long long, __builtin_s390_vchgs,
                __builtin_s390_vchlgs, __a, __b, __cc <= 1)

static inline __ATTRS_o_ai int
vec_any_gt(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchdbs(__a, __b, &__cc);
  return __cc <= 1;
}

/*-- vec_any_le -------------------------------------------------------------*/

__zvec_any_icmp(vec_any_le, char, __builtin_s390_vchbs,
                __builtin_s390_vchlbs, __a, __b, __cc != 0)
__zvec_any_icmp(vec_any_le, short, __builtin_s390_vchhs,
                __builtin_s390_vchlhs, __a, __b, __cc != 0)
__zvec_any_icmp(vec_any_le, int, __builtin_s390_vchfs,
                __builtin_s390_vchlfs, __a, __b, __cc != 0)
__zvec_any_icmp(vec_any_le, long long, __builtin_s390_vchgs,
                __builtin_s390_vchlgs, __a, __b, __cc != 0)

static inline __ATTRS_o_ai int
vec_any_le(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchedbs(__b, __a, &__cc);
  return __cc <= 1;
}

/*-- vec_any_lt -------------------------------------------------------------*/

__zvec_any_icmp(vec_any_lt, char, __builtin_s390_vchbs,
                __builtin_s390_vchlbs, __b, __a, __cc <= 1)
__zvec_any_icmp(vec_any_lt, short, __builtin_s390_vchhs,
                __builtin_s390_vchlhs, __b, __a, __cc <= 1)
__zvec_any_icmp(vec_any_lt, int, __builtin_s390_vchfs,
                __builtin_s390_vchlfs, __b, __a, __cc <= 1)
__zvec_any_icmp(vec_any_lt, long long, __builtin_s390_vchgs,
                __builtin_s390_vchlgs, __b, __a, __cc <= 1)

static inline __ATTRS_o_ai int
vec_any_lt(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchdbs(__b, __a, &__cc);
  return __cc <= 1;
}

#undef __zvec_any_icmp
#undef __zvec_any_eqne

/*-- vec_any_nge ------------------------------------------------------------*/

static inline __ATTRS_ai int
vec_any_nge(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchedbs(__a, __b, &__cc);
  return __cc != 0;
}

/*-- vec_any_ngt ------------------------------------------------------------*/

static inline __ATTRS_ai int
vec_any_ngt(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchdbs(__a, __b, &__cc);
  return __cc != 0;
}

/*-- vec_any_nle ------------------------------------------------------------*/

static inline __ATTRS_ai int
vec_any_nle(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchedbs(__b, __a, &__cc);
  return __cc != 0;
}

/*-- vec_any_nlt ------------------------------------------------------------*/

static inline __ATTRS_ai int
vec_any_nlt(vector double __a, vector double __b) {
  int __cc;
  __builtin_s390_vfchdbs(__b, __a, &__cc);
  return __cc != 0;
}

/*-- vec_any_nan ------------------------------------------------------------*/

static inline __ATTRS_ai int
vec_any_nan(vector double __a) {
  int __cc;
  __builtin_s390_vftcidb(__a, 15, &__cc);
  return __cc != 3;
}

/*-- vec_any_numeric --------------------------------------------------------*/

static inline __ATTRS_ai int
vec_any_numeric(vector double __a) {
  int __cc;
  __builtin_s390_vftcidb(__a, 15, &__cc);
  return __cc != 0;
}

/*-- vec_andc ---------------------------------------------------------------*/

static inline __ATTRS_o_ai vector bool char
vec_andc(vector bool char __a, vector bool char __b) {
  return __a & ~__b;
}

static
inline __ATTRS_o_ai vector signed char +vec_andc(vector signed char __a, vector signed char __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector signed char +vec_andc(vector bool char __a, vector signed char __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector signed char +vec_andc(vector signed char __a, vector bool char __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_andc(vector unsigned char __a, vector unsigned char __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_andc(vector bool char __a, vector unsigned char __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_andc(vector unsigned char __a, vector bool char __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector bool short +vec_andc(vector bool short __a, vector bool short __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector signed short +vec_andc(vector signed short __a, vector signed short __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector signed short +vec_andc(vector bool short __a, vector signed short __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector signed short +vec_andc(vector signed short __a, vector bool short __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_andc(vector unsigned short __a, vector unsigned short __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_andc(vector bool short __a, vector unsigned short __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_andc(vector unsigned short __a, vector bool short __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector bool int +vec_andc(vector bool int __a, vector bool int __b) { + return __a & ~__b; +} + +static inline __ATTRS_o_ai vector signed int +vec_andc(vector signed int __a, vector signed int __b) { + return __a & ~__b; +} 
/* vec_andc, continued: int, long long, and double overloads.  */
static inline __ATTRS_o_ai vector signed int
vec_andc(vector bool int __a, vector signed int __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector signed int
vec_andc(vector signed int __a, vector bool int __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector unsigned int
vec_andc(vector unsigned int __a, vector unsigned int __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector unsigned int
vec_andc(vector bool int __a, vector unsigned int __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector unsigned int
vec_andc(vector unsigned int __a, vector bool int __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector bool long long
vec_andc(vector bool long long __a, vector bool long long __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector signed long long
vec_andc(vector signed long long __a, vector signed long long __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector signed long long
vec_andc(vector bool long long __a, vector signed long long __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector signed long long
vec_andc(vector signed long long __a, vector bool long long __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector unsigned long long
vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector unsigned long long
vec_andc(vector bool long long __a, vector unsigned long long __b) {
  return __a & ~__b;
}

static inline __ATTRS_o_ai vector unsigned long long
vec_andc(vector unsigned long long __a, vector bool long long __b) {
  return __a & ~__b;
}

/* The double overloads round-trip through unsigned long long lanes because
   bitwise &/~ are not defined on floating-point vectors.  */
static inline __ATTRS_o_ai vector double
vec_andc(vector double __a, vector double __b) {
  return (vector double)((vector unsigned long long)__a &
                         ~(vector unsigned long long)__b);
}

static inline __ATTRS_o_ai vector double
vec_andc(vector bool long long __a, vector double __b) {
  return (vector double)((vector unsigned long long)__a &
                         ~(vector unsigned long long)__b);
}

static inline __ATTRS_o_ai vector double
vec_andc(vector double __a, vector bool long long __b) {
  return (vector double)((vector unsigned long long)__a &
                         ~(vector unsigned long long)__b);
}

/*-- vec_nor ----------------------------------------------------------------*/

/* vec_nor: element-wise NOT (__a OR __b); same overload pattern as vec_andc.  */
static inline __ATTRS_o_ai vector bool char
vec_nor(vector bool char __a, vector bool char __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed char
vec_nor(vector signed char __a, vector signed char __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed char
vec_nor(vector bool char __a, vector signed char __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed char
vec_nor(vector signed char __a, vector bool char __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_nor(vector unsigned char __a, vector unsigned char __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_nor(vector bool char __a, vector unsigned char __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_nor(vector unsigned char __a, vector bool char __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector bool short
vec_nor(vector bool short __a, vector bool short __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed short
vec_nor(vector signed short __a, vector signed short __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed short
vec_nor(vector bool short __a, vector signed short __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed short
vec_nor(vector signed short __a, vector bool short __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_nor(vector unsigned short __a, vector unsigned short __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_nor(vector bool short __a, vector unsigned short __b) {
  return ~(__a | __b);
}
/* vec_nor, continued: short/int/long long/double overloads.  */
static inline __ATTRS_o_ai vector unsigned short
vec_nor(vector unsigned short __a, vector bool short __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector bool int
vec_nor(vector bool int __a, vector bool int __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed int
vec_nor(vector signed int __a, vector signed int __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed int
vec_nor(vector bool int __a, vector signed int __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed int
vec_nor(vector signed int __a, vector bool int __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_nor(vector unsigned int __a, vector unsigned int __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_nor(vector bool int __a, vector unsigned int __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_nor(vector unsigned int __a, vector bool int __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector bool long long
vec_nor(vector bool long long __a, vector bool long long __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed long long
vec_nor(vector signed long long __a, vector signed long long __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed long long
vec_nor(vector bool long long __a, vector signed long long __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector signed long long
vec_nor(vector signed long long __a, vector bool long long __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_nor(vector bool long long __a, vector unsigned long long __b) {
  return ~(__a | __b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_nor(vector unsigned long long __a, vector bool long long __b) {
  return ~(__a | __b);
}

/* Double overloads go through unsigned long long lanes (no bitwise ops on
   floating-point vectors).  */
static inline __ATTRS_o_ai vector double
vec_nor(vector double __a, vector double __b) {
  return (vector double)~((vector unsigned long long)__a |
                          (vector unsigned long long)__b);
}

static inline __ATTRS_o_ai vector double
vec_nor(vector bool long long __a, vector double __b) {
  return (vector double)~((vector unsigned long long)__a |
                          (vector unsigned long long)__b);
}

static inline __ATTRS_o_ai vector double
vec_nor(vector double __a, vector bool long long __b) {
  return (vector double)~((vector unsigned long long)__a |
                          (vector unsigned long long)__b);
}

/*-- vec_cntlz --------------------------------------------------------------*/

/* vec_cntlz: delegates to the per-element-width __builtin_s390_vclz*
   builtins, which are typed on unsigned vectors; signed inputs are cast.
   The result is always the unsigned type of the same width.  */
static inline __ATTRS_o_ai vector unsigned char
vec_cntlz(vector signed char __a) {
  return __builtin_s390_vclzb((vector unsigned char)__a);
}

static inline __ATTRS_o_ai vector unsigned char
vec_cntlz(vector unsigned char __a) {
  return __builtin_s390_vclzb(__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_cntlz(vector signed short __a) {
  return __builtin_s390_vclzh((vector unsigned short)__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_cntlz(vector unsigned short __a) {
  return __builtin_s390_vclzh(__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_cntlz(vector signed int __a) {
  return __builtin_s390_vclzf((vector unsigned int)__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_cntlz(vector unsigned int __a) {
  return __builtin_s390_vclzf(__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_cntlz(vector signed long long __a) {
  return __builtin_s390_vclzg((vector unsigned long long)__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_cntlz(vector unsigned long long __a) {
  return __builtin_s390_vclzg(__a);
}
/*-- vec_cnttz --------------------------------------------------------------*/

/* vec_cnttz: same pattern as vec_cntlz, but via the __builtin_s390_vctz*
   builtins.  */
static inline __ATTRS_o_ai vector unsigned char
vec_cnttz(vector signed char __a) {
  return __builtin_s390_vctzb((vector unsigned char)__a);
}

static inline __ATTRS_o_ai vector unsigned char
vec_cnttz(vector unsigned char __a) {
  return __builtin_s390_vctzb(__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_cnttz(vector signed short __a) {
  return __builtin_s390_vctzh((vector unsigned short)__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_cnttz(vector unsigned short __a) {
  return __builtin_s390_vctzh(__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_cnttz(vector signed int __a) {
  return __builtin_s390_vctzf((vector unsigned int)__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_cnttz(vector unsigned int __a) {
  return __builtin_s390_vctzf(__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_cnttz(vector signed long long __a) {
  return __builtin_s390_vctzg((vector unsigned long long)__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_cnttz(vector unsigned long long __a) {
  return __builtin_s390_vctzg(__a);
}

/*-- vec_popcnt -------------------------------------------------------------*/

/* vec_popcnt: per-element population count via __builtin_s390_vpopct*;
   signed inputs are cast to the unsigned vector the builtin expects.  */
static inline __ATTRS_o_ai vector unsigned char
vec_popcnt(vector signed char __a) {
  return __builtin_s390_vpopctb((vector unsigned char)__a);
}

static inline __ATTRS_o_ai vector unsigned char
vec_popcnt(vector unsigned char __a) {
  return __builtin_s390_vpopctb(__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_popcnt(vector signed short __a) {
  return __builtin_s390_vpopcth((vector unsigned short)__a);
}

static inline __ATTRS_o_ai vector unsigned short
vec_popcnt(vector unsigned short __a) {
  return __builtin_s390_vpopcth(__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_popcnt(vector signed int __a) {
  return __builtin_s390_vpopctf((vector unsigned int)__a);
}

static inline __ATTRS_o_ai vector unsigned int
vec_popcnt(vector unsigned int __a) {
  return __builtin_s390_vpopctf(__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_popcnt(vector signed long long __a) {
  return __builtin_s390_vpopctg((vector unsigned long long)__a);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_popcnt(vector unsigned long long __a) {
  return __builtin_s390_vpopctg(__a);
}

/*-- vec_rl -----------------------------------------------------------------*/

/* vec_rl: element-wise rotate left of __a by the per-element counts in __b,
   via the __builtin_s390_verllv* builtins (unsigned-typed; signed results
   are cast back).  */
static inline __ATTRS_o_ai vector signed char
vec_rl(vector signed char __a, vector unsigned char __b) {
  return (vector signed char)__builtin_s390_verllvb(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_rl(vector unsigned char __a, vector unsigned char __b) {
  return __builtin_s390_verllvb(__a, __b);
}

static inline __ATTRS_o_ai vector signed short
vec_rl(vector signed short __a, vector unsigned short __b) {
  return (vector signed short)__builtin_s390_verllvh(
    (vector unsigned short)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_rl(vector unsigned short __a, vector unsigned short __b) {
  return __builtin_s390_verllvh(__a, __b);
}

static inline __ATTRS_o_ai vector signed int
vec_rl(vector signed int __a, vector unsigned int __b) {
  return (vector signed int)__builtin_s390_verllvf(
    (vector unsigned int)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_rl(vector unsigned int __a, vector unsigned int __b) {
  return __builtin_s390_verllvf(__a, __b);
}

static inline __ATTRS_o_ai vector signed long long
vec_rl(vector signed long long __a, vector unsigned long long __b) {
  return (vector signed long long)__builtin_s390_verllvg(
    (vector unsigned long long)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
  return __builtin_s390_verllvg(__a, __b);
}
/*-- vec_rli ----------------------------------------------------------------*/

/* vec_rli: rotate every element of __a left by the single scalar count __b,
   via __builtin_s390_verll*.  __b is narrowed to int for the builtin.  */
static inline __ATTRS_o_ai vector signed char
vec_rli(vector signed char __a, unsigned long __b) {
  return (vector signed char)__builtin_s390_verllb(
    (vector unsigned char)__a, (int)__b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_rli(vector unsigned char __a, unsigned long __b) {
  return __builtin_s390_verllb(__a, (int)__b);
}

static inline __ATTRS_o_ai vector signed short
vec_rli(vector signed short __a, unsigned long __b) {
  return (vector signed short)__builtin_s390_verllh(
    (vector unsigned short)__a, (int)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_rli(vector unsigned short __a, unsigned long __b) {
  return __builtin_s390_verllh(__a, (int)__b);
}

static inline __ATTRS_o_ai vector signed int
vec_rli(vector signed int __a, unsigned long __b) {
  return (vector signed int)__builtin_s390_verllf(
    (vector unsigned int)__a, (int)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_rli(vector unsigned int __a, unsigned long __b) {
  return __builtin_s390_verllf(__a, (int)__b);
}

static inline __ATTRS_o_ai vector signed long long
vec_rli(vector signed long long __a, unsigned long __b) {
  return (vector signed long long)__builtin_s390_verllg(
    (vector unsigned long long)__a, (int)__b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_rli(vector unsigned long long __a, unsigned long __b) {
  return __builtin_s390_verllg(__a, (int)__b);
}

/*-- vec_rl_mask ------------------------------------------------------------*/

/* vec_rl_mask: rotate-and-insert-under-mask.  The extern declarations exist
   only for overload resolution / __constant(__c) checking; every call site is
   rewritten by the macro below, which dispatches on the element size to the
   matching __builtin_s390_verim* builtin.  */
extern __ATTRS_o vector signed char
vec_rl_mask(vector signed char __a, vector unsigned char __b,
            unsigned char __c) __constant(__c);

extern __ATTRS_o vector unsigned char
vec_rl_mask(vector unsigned char __a, vector unsigned char __b,
            unsigned char __c) __constant(__c);

extern __ATTRS_o vector signed short
vec_rl_mask(vector signed short __a, vector unsigned short __b,
            unsigned char __c) __constant(__c);

extern __ATTRS_o vector unsigned short
vec_rl_mask(vector unsigned short __a, vector unsigned short __b,
            unsigned char __c) __constant(__c);

extern __ATTRS_o vector signed int
vec_rl_mask(vector signed int __a, vector unsigned int __b,
            unsigned char __c) __constant(__c);

extern __ATTRS_o vector unsigned int
vec_rl_mask(vector unsigned int __a, vector unsigned int __b,
            unsigned char __c) __constant(__c);

extern __ATTRS_o vector signed long long
vec_rl_mask(vector signed long long __a, vector unsigned long long __b,
            unsigned char __c) __constant(__c);

extern __ATTRS_o vector unsigned long long
vec_rl_mask(vector unsigned long long __a, vector unsigned long long __b,
            unsigned char __c) __constant(__c);

/* NOTE(review): __x is passed twice to each verim* builtin (as both first and
   second operand) while __y carries the rotate counts — confirm this operand
   order against the verim builtin contract.  */
#define vec_rl_mask(X, Y, Z) ((__typeof__((vec_rl_mask)((X), (Y), (Z)))) \
  __extension__ ({ \
    vector unsigned char __res; \
    vector unsigned char __x = (vector unsigned char)(X); \
    vector unsigned char __y = (vector unsigned char)(Y); \
    switch (sizeof ((X)[0])) { \
    case 1: __res = (vector unsigned char) __builtin_s390_verimb( \
             (vector unsigned char)__x, (vector unsigned char)__x, \
             (vector unsigned char)__y, (Z)); break; \
    case 2: __res = (vector unsigned char) __builtin_s390_verimh( \
             (vector unsigned short)__x, (vector unsigned short)__x, \
             (vector unsigned short)__y, (Z)); break; \
    case 4: __res = (vector unsigned char) __builtin_s390_verimf( \
             (vector unsigned int)__x, (vector unsigned int)__x, \
             (vector unsigned int)__y, (Z)); break; \
    default: __res = (vector unsigned char) __builtin_s390_verimg( \
             (vector unsigned long long)__x, (vector unsigned long long)__x, \
             (vector unsigned long long)__y, (Z)); break; \
    } __res; }))

/*-- vec_sll ----------------------------------------------------------------*/

/* vec_sll: whole-vector shift left via __builtin_s390_vsl, which operates on
   unsigned char lanes; all other element types are cast through it.  The
   shift-amount vector __b may have char, short or int elements.  */
static inline __ATTRS_o_ai vector signed char
vec_sll(vector signed char __a, vector unsigned char __b) {
  return (vector signed char)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}
/* vec_sll, continued: char/short element overloads.  */
static inline __ATTRS_o_ai vector signed char
vec_sll(vector signed char __a, vector unsigned short __b) {
  return (vector signed char)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed char
vec_sll(vector signed char __a, vector unsigned int __b) {
  return (vector signed char)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool char
vec_sll(vector bool char __a, vector unsigned char __b) {
  return (vector bool char)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool char
vec_sll(vector bool char __a, vector unsigned short __b) {
  return (vector bool char)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool char
vec_sll(vector bool char __a, vector unsigned int __b) {
  return (vector bool char)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_sll(vector unsigned char __a, vector unsigned char __b) {
  return __builtin_s390_vsl(__a, __b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_sll(vector unsigned char __a, vector unsigned short __b) {
  return __builtin_s390_vsl(__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_sll(vector unsigned char __a, vector unsigned int __b) {
  return __builtin_s390_vsl(__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed short
vec_sll(vector signed short __a, vector unsigned char __b) {
  return (vector signed short)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector signed short
vec_sll(vector signed short __a, vector unsigned short __b) {
  return (vector signed short)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed short
vec_sll(vector signed short __a, vector unsigned int __b) {
  return (vector signed short)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool short
vec_sll(vector bool short __a, vector unsigned char __b) {
  return (vector bool short)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool short
vec_sll(vector bool short __a, vector unsigned short __b) {
  return (vector bool short)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool short
vec_sll(vector bool short __a, vector unsigned int __b) {
  return (vector bool short)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_sll(vector unsigned short __a, vector unsigned char __b) {
  return (vector unsigned short)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_sll(vector unsigned short __a, vector unsigned short __b) {
  return (vector unsigned short)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_sll(vector unsigned short __a, vector unsigned int __b) {
  return (vector unsigned short)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed int
vec_sll(vector signed int __a, vector unsigned char __b) {
  return (vector signed int)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector signed int
vec_sll(vector signed int __a, vector unsigned short __b) {
  return (vector signed int)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}
/* vec_sll, continued: int/long long element overloads.  */
static inline __ATTRS_o_ai vector signed int
vec_sll(vector signed int __a, vector unsigned int __b) {
  return (vector signed int)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool int
vec_sll(vector bool int __a, vector unsigned char __b) {
  return (vector bool int)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool int
vec_sll(vector bool int __a, vector unsigned short __b) {
  return (vector bool int)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool int
vec_sll(vector bool int __a, vector unsigned int __b) {
  return (vector bool int)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_sll(vector unsigned int __a, vector unsigned char __b) {
  return (vector unsigned int)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_sll(vector unsigned int __a, vector unsigned short __b) {
  return (vector unsigned int)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_sll(vector unsigned int __a, vector unsigned int __b) {
  return (vector unsigned int)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed long long
vec_sll(vector signed long long __a, vector unsigned char __b) {
  return (vector signed long long)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector signed long long
vec_sll(vector signed long long __a, vector unsigned short __b) {
  return (vector signed long long)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed long long
vec_sll(vector signed long long __a, vector unsigned int __b) {
  return (vector signed long long)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool long long
vec_sll(vector bool long long __a, vector unsigned char __b) {
  return (vector bool long long)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool long long
vec_sll(vector bool long long __a, vector unsigned short __b) {
  return (vector bool long long)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool long long
vec_sll(vector bool long long __a, vector unsigned int __b) {
  return (vector bool long long)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_sll(vector unsigned long long __a, vector unsigned char __b) {
  return (vector unsigned long long)__builtin_s390_vsl(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_sll(vector unsigned long long __a, vector unsigned short __b) {
  return (vector unsigned long long)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_sll(vector unsigned long long __a, vector unsigned int __b) {
  return (vector unsigned long long)__builtin_s390_vsl(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

/*-- vec_slb ----------------------------------------------------------------*/

/* vec_slb: shift left by bytes via __builtin_s390_vslb; both operands are
   funneled through unsigned char lanes, and __b may be signed or unsigned
   of the same element width as __a.  */
static inline __ATTRS_o_ai vector signed char
vec_slb(vector signed char __a, vector signed char __b) {
  return (vector signed char)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed char
vec_slb(vector signed char __a, vector unsigned char __b) {
  return (vector signed char)__builtin_s390_vslb(
    (vector unsigned char)__a, __b);
}
/* vec_slb, continued: remaining element-type overloads.  */
static inline __ATTRS_o_ai vector unsigned char
vec_slb(vector unsigned char __a, vector signed char __b) {
  return __builtin_s390_vslb(__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_slb(vector unsigned char __a, vector unsigned char __b) {
  return __builtin_s390_vslb(__a, __b);
}

static inline __ATTRS_o_ai vector signed short
vec_slb(vector signed short __a, vector signed short __b) {
  return (vector signed short)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed short
vec_slb(vector signed short __a, vector unsigned short __b) {
  return (vector signed short)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_slb(vector unsigned short __a, vector signed short __b) {
  return (vector unsigned short)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_slb(vector unsigned short __a, vector unsigned short __b) {
  return (vector unsigned short)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed int
vec_slb(vector signed int __a, vector signed int __b) {
  return (vector signed int)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed int
vec_slb(vector signed int __a, vector unsigned int __b) {
  return (vector signed int)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_slb(vector unsigned int __a, vector signed int __b) {
  return (vector unsigned int)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_slb(vector unsigned int __a, vector unsigned int __b) {
  return (vector unsigned int)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed long long
vec_slb(vector signed long long __a, vector signed long long __b) {
  return (vector signed long long)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed long long
vec_slb(vector signed long long __a, vector unsigned long long __b) {
  return (vector signed long long)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_slb(vector unsigned long long __a, vector signed long long __b) {
  return (vector unsigned long long)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_slb(vector unsigned long long __a, vector unsigned long long __b) {
  return (vector unsigned long long)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector double
vec_slb(vector double __a, vector signed long long __b) {
  return (vector double)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector double
vec_slb(vector double __a, vector unsigned long long __b) {
  return (vector double)__builtin_s390_vslb(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

/*-- vec_sld ----------------------------------------------------------------*/

/* vec_sld: shift-double by the byte count __c (0..15).  The extern
   declarations carry the __constant_range check; calls are rewritten by the
   macro (defined after the last declaration) onto __builtin_s390_vsldb.  */
extern __ATTRS_o vector signed char
vec_sld(vector signed char __a, vector signed char __b, int __c)
  __constant_range(__c, 0, 15);

extern __ATTRS_o vector unsigned char
vec_sld(vector unsigned char __a, vector unsigned char __b, int __c)
  __constant_range(__c, 0, 15);

extern __ATTRS_o vector signed short
vec_sld(vector signed short __a, vector signed short __b, int __c)
  __constant_range(__c, 0, 15);
/* vec_sld, continued: remaining declarations and the dispatch macro.  */
extern __ATTRS_o vector unsigned short
vec_sld(vector unsigned short __a, vector unsigned short __b, int __c)
  __constant_range(__c, 0, 15);

extern __ATTRS_o vector signed int
vec_sld(vector signed int __a, vector signed int __b, int __c)
  __constant_range(__c, 0, 15);

extern __ATTRS_o vector unsigned int
vec_sld(vector unsigned int __a, vector unsigned int __b, int __c)
  __constant_range(__c, 0, 15);

extern __ATTRS_o vector signed long long
vec_sld(vector signed long long __a, vector signed long long __b, int __c)
  __constant_range(__c, 0, 15);

extern __ATTRS_o vector unsigned long long
vec_sld(vector unsigned long long __a, vector unsigned long long __b, int __c)
  __constant_range(__c, 0, 15);

extern __ATTRS_o vector double
vec_sld(vector double __a, vector double __b, int __c)
  __constant_range(__c, 0, 15);

#define vec_sld(X, Y, Z) ((__typeof__((vec_sld)((X), (Y), (Z)))) \
  __builtin_s390_vsldb((vector unsigned char)(X), \
                       (vector unsigned char)(Y), (Z)))

/*-- vec_sldw ---------------------------------------------------------------*/

/* vec_sldw: shift-double by __c words (0..3); the macro scales __c by 4 and
   reuses the byte-granular vsldb builtin.  */
extern __ATTRS_o vector signed char
vec_sldw(vector signed char __a, vector signed char __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector unsigned char
vec_sldw(vector unsigned char __a, vector unsigned char __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector signed short
vec_sldw(vector signed short __a, vector signed short __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector unsigned short
vec_sldw(vector unsigned short __a, vector unsigned short __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector signed int
vec_sldw(vector signed int __a, vector signed int __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector unsigned int
vec_sldw(vector unsigned int __a, vector unsigned int __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector signed long long
vec_sldw(vector signed long long __a, vector signed long long __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector unsigned long long
vec_sldw(vector unsigned long long __a, vector unsigned long long __b, int __c)
  __constant_range(__c, 0, 3);

extern __ATTRS_o vector double
vec_sldw(vector double __a, vector double __b, int __c)
  __constant_range(__c, 0, 3);

#define vec_sldw(X, Y, Z) ((__typeof__((vec_sldw)((X), (Y), (Z)))) \
  __builtin_s390_vsldb((vector unsigned char)(X), \
                       (vector unsigned char)(Y), (Z) * 4))

/*-- vec_sral ---------------------------------------------------------------*/

/* vec_sral: whole-vector shift right arithmetic via __builtin_s390_vsra;
   same unsigned-char funneling and shift-amount overload pattern as
   vec_sll.  */
static inline __ATTRS_o_ai vector signed char
vec_sral(vector signed char __a, vector unsigned char __b) {
  return (vector signed char)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector signed char
vec_sral(vector signed char __a, vector unsigned short __b) {
  return (vector signed char)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed char
vec_sral(vector signed char __a, vector unsigned int __b) {
  return (vector signed char)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool char
vec_sral(vector bool char __a, vector unsigned char __b) {
  return (vector bool char)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool char
vec_sral(vector bool char __a, vector unsigned short __b) {
  return (vector bool char)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool char
vec_sral(vector bool char __a, vector unsigned int __b) {
  return (vector bool char)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_sral(vector unsigned char __a, vector unsigned char __b) {
  return __builtin_s390_vsra(__a, __b);
}
/* vec_sral, continued: short/int/long long overloads.  (The final overload,
   vec_sral(vector unsigned long long, vector unsigned ...), continues past
   this hunk fragment; its declaration is left open below.)  */
static inline __ATTRS_o_ai vector unsigned char
vec_sral(vector unsigned char __a, vector unsigned short __b) {
  return __builtin_s390_vsra(__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned char
vec_sral(vector unsigned char __a, vector unsigned int __b) {
  return __builtin_s390_vsra(__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed short
vec_sral(vector signed short __a, vector unsigned char __b) {
  return (vector signed short)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector signed short
vec_sral(vector signed short __a, vector unsigned short __b) {
  return (vector signed short)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed short
vec_sral(vector signed short __a, vector unsigned int __b) {
  return (vector signed short)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool short
vec_sral(vector bool short __a, vector unsigned char __b) {
  return (vector bool short)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool short
vec_sral(vector bool short __a, vector unsigned short __b) {
  return (vector bool short)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool short
vec_sral(vector bool short __a, vector unsigned int __b) {
  return (vector bool short)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_sral(vector unsigned short __a, vector unsigned char __b) {
  return (vector unsigned short)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_sral(vector unsigned short __a, vector unsigned short __b) {
  return (vector unsigned short)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned short
vec_sral(vector unsigned short __a, vector unsigned int __b) {
  return (vector unsigned short)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed int
vec_sral(vector signed int __a, vector unsigned char __b) {
  return (vector signed int)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector signed int
vec_sral(vector signed int __a, vector unsigned short __b) {
  return (vector signed int)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed int
vec_sral(vector signed int __a, vector unsigned int __b) {
  return (vector signed int)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool int
vec_sral(vector bool int __a, vector unsigned char __b) {
  return (vector bool int)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool int
vec_sral(vector bool int __a, vector unsigned short __b) {
  return (vector bool int)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool int
vec_sral(vector bool int __a, vector unsigned int __b) {
  return (vector bool int)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_sral(vector unsigned int __a, vector unsigned char __b) {
  return (vector unsigned int)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_sral(vector unsigned int __a, vector unsigned short __b) {
  return (vector unsigned int)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned int
vec_sral(vector unsigned int __a, vector unsigned int __b) {
  return (vector unsigned int)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed long long
vec_sral(vector signed long long __a, vector unsigned char __b) {
  return (vector signed long long)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector signed long long
vec_sral(vector signed long long __a, vector unsigned short __b) {
  return (vector signed long long)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector signed long long
vec_sral(vector signed long long __a, vector unsigned int __b) {
  return (vector signed long long)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool long long
vec_sral(vector bool long long __a, vector unsigned char __b) {
  return (vector bool long long)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector bool long long
vec_sral(vector bool long long __a, vector unsigned short __b) {
  return (vector bool long long)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector bool long long
vec_sral(vector bool long long __a, vector unsigned int __b) {
  return (vector bool long long)__builtin_s390_vsra(
    (vector unsigned char)__a, (vector unsigned char)__b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_sral(vector unsigned long long __a, vector unsigned char __b) {
  return (vector unsigned long long)__builtin_s390_vsra(
    (vector unsigned char)__a, __b);
}

static inline __ATTRS_o_ai vector unsigned long long
vec_sral(vector unsigned long long __a, vector unsigned
short __b) { + return (vector unsigned long long)__builtin_s390_vsra( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_sral(vector unsigned long long __a, vector unsigned int __b) { + return (vector unsigned long long)__builtin_s390_vsra( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +/*-- vec_srab ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_srab(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed char +vec_srab(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_s390_vsrab( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_srab(vector unsigned char __a, vector signed char __b) { + return __builtin_s390_vsrab(__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_srab(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vsrab(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_srab(vector signed short __a, vector signed short __b) { + return (vector signed short)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed short +vec_srab(vector signed short __a, vector unsigned short __b) { + return (vector signed short)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_srab(vector unsigned short __a, vector signed short __b) { + return (vector unsigned short)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_srab(vector unsigned short __a, vector unsigned 
short __b) { + return (vector unsigned short)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed int +vec_srab(vector signed int __a, vector signed int __b) { + return (vector signed int)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed int +vec_srab(vector signed int __a, vector unsigned int __b) { + return (vector signed int)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_srab(vector unsigned int __a, vector signed int __b) { + return (vector unsigned int)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_srab(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_srab(vector signed long long __a, vector signed long long __b) { + return (vector signed long long)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_srab(vector signed long long __a, vector unsigned long long __b) { + return (vector signed long long)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_srab(vector unsigned long long __a, vector signed long long __b) { + return (vector unsigned long long)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_srab(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline 
__ATTRS_o_ai vector double +vec_srab(vector double __a, vector signed long long __b) { + return (vector double)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector double +vec_srab(vector double __a, vector unsigned long long __b) { + return (vector double)__builtin_s390_vsrab( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +/*-- vec_srl ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_srl(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector signed char +vec_srl(vector signed char __a, vector unsigned short __b) { + return (vector signed char)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed char +vec_srl(vector signed char __a, vector unsigned int __b) { + return (vector signed char)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool char +vec_srl(vector bool char __a, vector unsigned char __b) { + return (vector bool char)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector bool char +vec_srl(vector bool char __a, vector unsigned short __b) { + return (vector bool char)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool char +vec_srl(vector bool char __a, vector unsigned int __b) { + return (vector bool char)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_srl(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vsrl(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_srl(vector unsigned char __a, vector 
unsigned short __b) { + return __builtin_s390_vsrl(__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_srl(vector unsigned char __a, vector unsigned int __b) { + return __builtin_s390_vsrl(__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed short +vec_srl(vector signed short __a, vector unsigned char __b) { + return (vector signed short)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_srl(vector signed short __a, vector unsigned short __b) { + return (vector signed short)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed short +vec_srl(vector signed short __a, vector unsigned int __b) { + return (vector signed short)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool short +vec_srl(vector bool short __a, vector unsigned char __b) { + return (vector bool short)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector bool short +vec_srl(vector bool short __a, vector unsigned short __b) { + return (vector bool short)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool short +vec_srl(vector bool short __a, vector unsigned int __b) { + return (vector bool short)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_srl(vector unsigned short __a, vector unsigned char __b) { + return (vector unsigned short)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_srl(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline 
__ATTRS_o_ai vector unsigned short +vec_srl(vector unsigned short __a, vector unsigned int __b) { + return (vector unsigned short)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed int +vec_srl(vector signed int __a, vector unsigned char __b) { + return (vector signed int)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_srl(vector signed int __a, vector unsigned short __b) { + return (vector signed int)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed int +vec_srl(vector signed int __a, vector unsigned int __b) { + return (vector signed int)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool int +vec_srl(vector bool int __a, vector unsigned char __b) { + return (vector bool int)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector bool int +vec_srl(vector bool int __a, vector unsigned short __b) { + return (vector bool int)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool int +vec_srl(vector bool int __a, vector unsigned int __b) { + return (vector bool int)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_srl(vector unsigned int __a, vector unsigned char __b) { + return (vector unsigned int)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_srl(vector unsigned int __a, vector unsigned short __b) { + return (vector unsigned int)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_srl(vector unsigned int __a, vector unsigned int __b) { + return 
(vector unsigned int)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_srl(vector signed long long __a, vector unsigned char __b) { + return (vector signed long long)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_srl(vector signed long long __a, vector unsigned short __b) { + return (vector signed long long)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_srl(vector signed long long __a, vector unsigned int __b) { + return (vector signed long long)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_srl(vector bool long long __a, vector unsigned char __b) { + return (vector bool long long)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_srl(vector bool long long __a, vector unsigned short __b) { + return (vector bool long long)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector bool long long +vec_srl(vector bool long long __a, vector unsigned int __b) { + return (vector bool long long)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_srl(vector unsigned long long __a, vector unsigned char __b) { + return (vector unsigned long long)__builtin_s390_vsrl( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_srl(vector unsigned long long __a, vector unsigned short __b) { + return (vector unsigned long long)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_srl(vector unsigned long 
long __a, vector unsigned int __b) { + return (vector unsigned long long)__builtin_s390_vsrl( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +/*-- vec_srb ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_srb(vector signed char __a, vector signed char __b) { + return (vector signed char)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed char +vec_srb(vector signed char __a, vector unsigned char __b) { + return (vector signed char)__builtin_s390_vsrlb( + (vector unsigned char)__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_srb(vector unsigned char __a, vector signed char __b) { + return __builtin_s390_vsrlb(__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_srb(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vsrlb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_srb(vector signed short __a, vector signed short __b) { + return (vector signed short)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed short +vec_srb(vector signed short __a, vector unsigned short __b) { + return (vector signed short)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_srb(vector unsigned short __a, vector signed short __b) { + return (vector unsigned short)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_srb(vector unsigned short __a, vector unsigned short __b) { + return (vector unsigned short)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed int +vec_srb(vector signed int __a, vector signed int 
__b) { + return (vector signed int)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed int +vec_srb(vector signed int __a, vector unsigned int __b) { + return (vector signed int)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_srb(vector unsigned int __a, vector signed int __b) { + return (vector unsigned int)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_srb(vector unsigned int __a, vector unsigned int __b) { + return (vector unsigned int)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_srb(vector signed long long __a, vector signed long long __b) { + return (vector signed long long)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_srb(vector signed long long __a, vector unsigned long long __b) { + return (vector signed long long)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_srb(vector unsigned long long __a, vector signed long long __b) { + return (vector unsigned long long)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_srb(vector unsigned long long __a, vector unsigned long long __b) { + return (vector unsigned long long)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector double +vec_srb(vector double __a, vector signed long long __b) { + return (vector double)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector 
double +vec_srb(vector double __a, vector unsigned long long __b) { + return (vector double)__builtin_s390_vsrlb( + (vector unsigned char)__a, (vector unsigned char)__b); +} + +/*-- vec_abs ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_abs(vector signed char __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed char)0)); +} + +static inline __ATTRS_o_ai vector signed short +vec_abs(vector signed short __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed short)0)); +} + +static inline __ATTRS_o_ai vector signed int +vec_abs(vector signed int __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed int)0)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_abs(vector signed long long __a) { + return vec_sel(__a, -__a, vec_cmplt(__a, (vector signed long long)0)); +} + +static inline __ATTRS_o_ai vector double +vec_abs(vector double __a) { + return __builtin_s390_vflpdb(__a); +} + +/*-- vec_nabs ---------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_nabs(vector double __a) { + return __builtin_s390_vflndb(__a); +} + +/*-- vec_max ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_max(vector signed char __a, vector signed char __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed char +vec_max(vector signed char __a, vector bool char __b) { + vector signed char __bc = (vector signed char)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector signed char +vec_max(vector bool char __a, vector signed char __b) { + vector signed char __ac = (vector signed char)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_max(vector unsigned char __a, vector unsigned char __b) { + return 
vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_max(vector unsigned char __a, vector bool char __b) { + vector unsigned char __bc = (vector unsigned char)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_max(vector bool char __a, vector unsigned char __b) { + vector unsigned char __ac = (vector unsigned char)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector signed short +vec_max(vector signed short __a, vector signed short __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed short +vec_max(vector signed short __a, vector bool short __b) { + vector signed short __bc = (vector signed short)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector signed short +vec_max(vector bool short __a, vector signed short __b) { + vector signed short __ac = (vector signed short)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_max(vector unsigned short __a, vector unsigned short __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_max(vector unsigned short __a, vector bool short __b) { + vector unsigned short __bc = (vector unsigned short)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_max(vector bool short __a, vector unsigned short __b) { + vector unsigned short __ac = (vector unsigned short)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector signed int +vec_max(vector signed int __a, vector signed int __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed int +vec_max(vector signed int __a, vector bool int __b) { + vector signed int __bc = (vector 
signed int)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector signed int +vec_max(vector bool int __a, vector signed int __b) { + vector signed int __ac = (vector signed int)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_max(vector unsigned int __a, vector unsigned int __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_max(vector unsigned int __a, vector bool int __b) { + vector unsigned int __bc = (vector unsigned int)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_max(vector bool int __a, vector unsigned int __b) { + vector unsigned int __ac = (vector unsigned int)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_max(vector signed long long __a, vector signed long long __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_max(vector signed long long __a, vector bool long long __b) { + vector signed long long __bc = (vector signed long long)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_max(vector bool long long __a, vector signed long long __b) { + vector signed long long __ac = (vector signed long long)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_max(vector unsigned long long __a, vector unsigned long long __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_max(vector unsigned long long __a, vector bool long long __b) { + vector unsigned long long __bc = (vector unsigned long long)__b; + return vec_sel(__bc, __a, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned 
long long +vec_max(vector bool long long __a, vector unsigned long long __b) { + vector unsigned long long __ac = (vector unsigned long long)__a; + return vec_sel(__b, __ac, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector double +vec_max(vector double __a, vector double __b) { + return vec_sel(__b, __a, vec_cmpgt(__a, __b)); +} + +/*-- vec_min ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_min(vector signed char __a, vector signed char __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed char +vec_min(vector signed char __a, vector bool char __b) { + vector signed char __bc = (vector signed char)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector signed char +vec_min(vector bool char __a, vector signed char __b) { + vector signed char __ac = (vector signed char)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_min(vector unsigned char __a, vector unsigned char __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_min(vector unsigned char __a, vector bool char __b) { + vector unsigned char __bc = (vector unsigned char)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_min(vector bool char __a, vector unsigned char __b) { + vector unsigned char __ac = (vector unsigned char)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector signed short +vec_min(vector signed short __a, vector signed short __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed short +vec_min(vector signed short __a, vector bool short __b) { + vector signed short __bc = (vector signed short)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); 
+} + +static inline __ATTRS_o_ai vector signed short +vec_min(vector bool short __a, vector signed short __b) { + vector signed short __ac = (vector signed short)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_min(vector unsigned short __a, vector unsigned short __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_min(vector unsigned short __a, vector bool short __b) { + vector unsigned short __bc = (vector unsigned short)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_min(vector bool short __a, vector unsigned short __b) { + vector unsigned short __ac = (vector unsigned short)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector signed int +vec_min(vector signed int __a, vector signed int __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed int +vec_min(vector signed int __a, vector bool int __b) { + vector signed int __bc = (vector signed int)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector signed int +vec_min(vector bool int __a, vector signed int __b) { + vector signed int __ac = (vector signed int)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_min(vector unsigned int __a, vector unsigned int __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_min(vector unsigned int __a, vector bool int __b) { + vector unsigned int __bc = (vector unsigned int)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_min(vector bool int __a, vector unsigned int __b) { + vector unsigned int __ac = (vector unsigned int)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, 
__b)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_min(vector signed long long __a, vector signed long long __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_min(vector signed long long __a, vector bool long long __b) { + vector signed long long __bc = (vector signed long long)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector signed long long +vec_min(vector bool long long __a, vector signed long long __b) { + vector signed long long __ac = (vector signed long long)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_min(vector unsigned long long __a, vector unsigned long long __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_min(vector unsigned long long __a, vector bool long long __b) { + vector unsigned long long __bc = (vector unsigned long long)__b; + return vec_sel(__a, __bc, vec_cmpgt(__a, __bc)); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_min(vector bool long long __a, vector unsigned long long __b) { + vector unsigned long long __ac = (vector unsigned long long)__a; + return vec_sel(__ac, __b, vec_cmpgt(__ac, __b)); +} + +static inline __ATTRS_o_ai vector double +vec_min(vector double __a, vector double __b) { + return vec_sel(__a, __b, vec_cmpgt(__a, __b)); +} + +/*-- vec_add_u128 -----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_add_u128(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vaq(__a, __b); +} + +/*-- vec_addc ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_addc(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vaccb(__a, __b); +} + +static inline __ATTRS_o_ai vector 
unsigned short +vec_addc(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vacch(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_addc(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vaccf(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_addc(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_s390_vaccg(__a, __b); +} + +/*-- vec_addc_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_addc_u128(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vaccq(__a, __b); +} + +/*-- vec_adde_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_adde_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vacq(__a, __b, __c); +} + +/*-- vec_addec_u128 ---------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_addec_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vacccq(__a, __b, __c); +} + +/*-- vec_avg ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_avg(vector signed char __a, vector signed char __b) { + return __builtin_s390_vavgb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_avg(vector signed short __a, vector signed short __b) { + return __builtin_s390_vavgh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_avg(vector signed int __a, vector signed int __b) { + return __builtin_s390_vavgf(__a, __b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_avg(vector signed long long __a, vector signed long long __b) { + return __builtin_s390_vavgg(__a, __b); +} + +static inline __ATTRS_o_ai vector 
unsigned char +vec_avg(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vavglb(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_avg(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vavglh(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_avg(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vavglf(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_avg(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_s390_vavglg(__a, __b); +} + +/*-- vec_checksum -----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned int +vec_checksum(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vcksm(__a, __b); +} + +/*-- vec_gfmsum -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned short +vec_gfmsum(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vgfmb(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_gfmsum(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vgfmh(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_gfmsum(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vgfmf(__a, __b); +} + +/*-- vec_gfmsum_128 ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_gfmsum_128(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_s390_vgfmg(__a, __b); +} + +/*-- vec_gfmsum_accum -------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned short +vec_gfmsum_accum(vector unsigned char __a, vector unsigned char __b, + vector unsigned short __c) { + return __builtin_s390_vgfmab(__a, __b, __c); +} + +static inline __ATTRS_o_ai 
vector unsigned int +vec_gfmsum_accum(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_s390_vgfmah(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_gfmsum_accum(vector unsigned int __a, vector unsigned int __b, + vector unsigned long long __c) { + return __builtin_s390_vgfmaf(__a, __b, __c); +} + +/*-- vec_gfmsum_accum_128 ---------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_gfmsum_accum_128(vector unsigned long long __a, + vector unsigned long long __b, + vector unsigned char __c) { + return __builtin_s390_vgfmag(__a, __b, __c); +} + +/*-- vec_mladd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_mladd(vector signed char __a, vector signed char __b, + vector signed char __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed char +vec_mladd(vector unsigned char __a, vector signed char __b, + vector signed char __c) { + return (vector signed char)__a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed char +vec_mladd(vector signed char __a, vector unsigned char __b, + vector unsigned char __c) { + return __a * (vector signed char)__b + (vector signed char)__c; +} + +static inline __ATTRS_o_ai vector unsigned char +vec_mladd(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed short +vec_mladd(vector signed short __a, vector signed short __b, + vector signed short __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed short +vec_mladd(vector unsigned short __a, vector signed short __b, + vector signed short __c) { + return (vector signed short)__a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed short +vec_mladd(vector signed short __a, vector unsigned short __b, + vector unsigned short __c) 
{ + return __a * (vector signed short)__b + (vector signed short)__c; +} + +static inline __ATTRS_o_ai vector unsigned short +vec_mladd(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed int +vec_mladd(vector signed int __a, vector signed int __b, + vector signed int __c) { + return __a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed int +vec_mladd(vector unsigned int __a, vector signed int __b, + vector signed int __c) { + return (vector signed int)__a * __b + __c; +} + +static inline __ATTRS_o_ai vector signed int +vec_mladd(vector signed int __a, vector unsigned int __b, + vector unsigned int __c) { + return __a * (vector signed int)__b + (vector signed int)__c; +} + +static inline __ATTRS_o_ai vector unsigned int +vec_mladd(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __a * __b + __c; +} + +/*-- vec_mhadd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_mhadd(vector signed char __a, vector signed char __b, + vector signed char __c) { + return __builtin_s390_vmahb(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_mhadd(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vmalhb(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector signed short +vec_mhadd(vector signed short __a, vector signed short __b, + vector signed short __c) { + return __builtin_s390_vmahh(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_mhadd(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __builtin_s390_vmalhh(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector signed int +vec_mhadd(vector signed int __a, vector signed int __b, + vector signed int __c) { + return __builtin_s390_vmahf(__a, __b, __c); +} + 
+static inline __ATTRS_o_ai vector unsigned int +vec_mhadd(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __builtin_s390_vmalhf(__a, __b, __c); +} + +/*-- vec_meadd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed short +vec_meadd(vector signed char __a, vector signed char __b, + vector signed short __c) { + return __builtin_s390_vmaeb(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_meadd(vector unsigned char __a, vector unsigned char __b, + vector unsigned short __c) { + return __builtin_s390_vmaleb(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector signed int +vec_meadd(vector signed short __a, vector signed short __b, + vector signed int __c) { + return __builtin_s390_vmaeh(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_meadd(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_s390_vmaleh(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector signed long long +vec_meadd(vector signed int __a, vector signed int __b, + vector signed long long __c) { + return __builtin_s390_vmaef(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_meadd(vector unsigned int __a, vector unsigned int __b, + vector unsigned long long __c) { + return __builtin_s390_vmalef(__a, __b, __c); +} + +/*-- vec_moadd --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed short +vec_moadd(vector signed char __a, vector signed char __b, + vector signed short __c) { + return __builtin_s390_vmaob(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_moadd(vector unsigned char __a, vector unsigned char __b, + vector unsigned short __c) { + return __builtin_s390_vmalob(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector signed int +vec_moadd(vector signed short __a, vector signed short 
__b, + vector signed int __c) { + return __builtin_s390_vmaoh(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_moadd(vector unsigned short __a, vector unsigned short __b, + vector unsigned int __c) { + return __builtin_s390_vmaloh(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector signed long long +vec_moadd(vector signed int __a, vector signed int __b, + vector signed long long __c) { + return __builtin_s390_vmaof(__a, __b, __c); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_moadd(vector unsigned int __a, vector unsigned int __b, + vector unsigned long long __c) { + return __builtin_s390_vmalof(__a, __b, __c); +} + +/*-- vec_mulh ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_mulh(vector signed char __a, vector signed char __b) { + return __builtin_s390_vmhb(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_mulh(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vmlhb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_mulh(vector signed short __a, vector signed short __b) { + return __builtin_s390_vmhh(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_mulh(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vmlhh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_mulh(vector signed int __a, vector signed int __b) { + return __builtin_s390_vmhf(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_mulh(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vmlhf(__a, __b); +} + +/*-- vec_mule ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed short +vec_mule(vector signed char __a, vector signed char __b) { + return __builtin_s390_vmeb(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_mule(vector unsigned 
char __a, vector unsigned char __b) { + return __builtin_s390_vmleb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_mule(vector signed short __a, vector signed short __b) { + return __builtin_s390_vmeh(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_mule(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vmleh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_mule(vector signed int __a, vector signed int __b) { + return __builtin_s390_vmef(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_mule(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vmlef(__a, __b); +} + +/*-- vec_mulo ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed short +vec_mulo(vector signed char __a, vector signed char __b) { + return __builtin_s390_vmob(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_mulo(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vmlob(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_mulo(vector signed short __a, vector signed short __b) { + return __builtin_s390_vmoh(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_mulo(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vmloh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed long long +vec_mulo(vector signed int __a, vector signed int __b) { + return __builtin_s390_vmof(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_mulo(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vmlof(__a, __b); +} + +/*-- vec_sub_u128 -----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_sub_u128(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vsq(__a, __b); +} + +/*-- vec_subc 
---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_subc(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vscbib(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_subc(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vscbih(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_subc(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vscbif(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_subc(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_s390_vscbig(__a, __b); +} + +/*-- vec_subc_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_subc_u128(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vscbiq(__a, __b); +} + +/*-- vec_sube_u128 ----------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_sube_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vsbiq(__a, __b, __c); +} + +/*-- vec_subec_u128 ---------------------------------------------------------*/ + +static inline __ATTRS_ai vector unsigned char +vec_subec_u128(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vsbcbiq(__a, __b, __c); +} + +/*-- vec_sum2 ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned long long +vec_sum2(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vsumgh(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned long long +vec_sum2(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vsumgf(__a, __b); +} + +/*-- vec_sum_u128 
-----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_sum_u128(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vsumqf(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_sum_u128(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_s390_vsumqg(__a, __b); +} + +/*-- vec_sum4 ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned int +vec_sum4(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vsumb(__a, __b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_sum4(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vsumh(__a, __b); +} + +/*-- vec_test_mask ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai int +vec_test_mask(vector signed char __a, vector unsigned char __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vtm(__a, __b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(vector signed short __a, vector unsigned short __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(vector signed int __a, vector unsigned int __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline 
__ATTRS_o_ai int +vec_test_mask(vector signed long long __a, vector unsigned long long __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(vector unsigned long long __a, vector unsigned long long __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai int +vec_test_mask(vector double __a, vector unsigned long long __b) { + return __builtin_s390_vtm((vector unsigned char)__a, + (vector unsigned char)__b); +} + +/*-- vec_madd ---------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_madd(vector double __a, vector double __b, vector double __c) { + return __builtin_s390_vfmadb(__a, __b, __c); +} + +/*-- vec_msub ---------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_msub(vector double __a, vector double __b, vector double __c) { + return __builtin_s390_vfmsdb(__a, __b, __c); +} + +/*-- vec_sqrt ---------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_sqrt(vector double __a) { + return __builtin_s390_vfsqdb(__a); +} + +/*-- vec_ld2f ---------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_ld2f(const float *__ptr) { + typedef float __v2f32 __attribute__((__vector_size__(8))); + return __builtin_convertvector(*(const __v2f32 *)__ptr, vector double); +} + +/*-- vec_st2f ---------------------------------------------------------------*/ + +static inline __ATTRS_ai void +vec_st2f(vector double __a, float *__ptr) { + typedef float __v2f32 __attribute__((__vector_size__(8))); + *(__v2f32 *)__ptr = __builtin_convertvector(__a, __v2f32); +} + +/*-- vec_ctd ----------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector double +vec_ctd(vector signed long long 
__a, int __b) + __constant_range(__b, 0, 31) { + vector double __conv = __builtin_convertvector(__a, vector double); + __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52); + return __conv; +} + +static inline __ATTRS_o_ai vector double +vec_ctd(vector unsigned long long __a, int __b) + __constant_range(__b, 0, 31) { + vector double __conv = __builtin_convertvector(__a, vector double); + __conv *= (vector double)(vector unsigned long long)((0x3ffULL - __b) << 52); + return __conv; +} + +/*-- vec_ctsl ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed long long +vec_ctsl(vector double __a, int __b) + __constant_range(__b, 0, 31) { + __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52); + return __builtin_convertvector(__a, vector signed long long); +} + +/*-- vec_ctul ---------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned long long +vec_ctul(vector double __a, int __b) + __constant_range(__b, 0, 31) { + __a *= (vector double)(vector unsigned long long)((0x3ffULL + __b) << 52); + return __builtin_convertvector(__a, vector unsigned long long); +} + +/*-- vec_roundp -------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_roundp(vector double __a) { + return __builtin_s390_vfidb(__a, 4, 6); +} + +/*-- vec_ceil ---------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_ceil(vector double __a) { + // On this platform, vec_ceil never triggers the IEEE-inexact exception. 
+ return __builtin_s390_vfidb(__a, 4, 6); +} + +/*-- vec_roundm -------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_roundm(vector double __a) { + return __builtin_s390_vfidb(__a, 4, 7); +} + +/*-- vec_floor --------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_floor(vector double __a) { + // On this platform, vec_floor never triggers the IEEE-inexact exception. + return __builtin_s390_vfidb(__a, 4, 7); +} + +/*-- vec_roundz -------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_roundz(vector double __a) { + return __builtin_s390_vfidb(__a, 4, 5); +} + +/*-- vec_trunc --------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_trunc(vector double __a) { + // On this platform, vec_trunc never triggers the IEEE-inexact exception. + return __builtin_s390_vfidb(__a, 4, 5); +} + +/*-- vec_roundc -------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_roundc(vector double __a) { + return __builtin_s390_vfidb(__a, 4, 0); +} + +/*-- vec_round --------------------------------------------------------------*/ + +static inline __ATTRS_ai vector double +vec_round(vector double __a) { + return __builtin_s390_vfidb(__a, 4, 4); +} + +/*-- vec_fp_test_data_class -------------------------------------------------*/ + +#define vec_fp_test_data_class(X, Y, Z) \ + ((vector bool long long)__builtin_s390_vftcidb((X), (Y), (Z))) + +/*-- vec_cp_until_zero ------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cp_until_zero(vector signed char __a) { + return (vector signed char)__builtin_s390_vistrb((vector unsigned char)__a); +} + +static inline __ATTRS_o_ai vector bool char +vec_cp_until_zero(vector bool char __a) { + return (vector bool 
char)__builtin_s390_vistrb((vector unsigned char)__a); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cp_until_zero(vector unsigned char __a) { + return __builtin_s390_vistrb(__a); +} + +static inline __ATTRS_o_ai vector signed short +vec_cp_until_zero(vector signed short __a) { + return (vector signed short)__builtin_s390_vistrh((vector unsigned short)__a); +} + +static inline __ATTRS_o_ai vector bool short +vec_cp_until_zero(vector bool short __a) { + return (vector bool short)__builtin_s390_vistrh((vector unsigned short)__a); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cp_until_zero(vector unsigned short __a) { + return __builtin_s390_vistrh(__a); +} + +static inline __ATTRS_o_ai vector signed int +vec_cp_until_zero(vector signed int __a) { + return (vector signed int)__builtin_s390_vistrf((vector unsigned int)__a); +} + +static inline __ATTRS_o_ai vector bool int +vec_cp_until_zero(vector bool int __a) { + return (vector bool int)__builtin_s390_vistrf((vector unsigned int)__a); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cp_until_zero(vector unsigned int __a) { + return __builtin_s390_vistrf(__a); +} + +/*-- vec_cp_until_zero_cc ---------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cp_until_zero_cc(vector signed char __a, int *__cc) { + return (vector signed char) + __builtin_s390_vistrbs((vector unsigned char)__a, __cc); +} + +static inline __ATTRS_o_ai vector bool char +vec_cp_until_zero_cc(vector bool char __a, int *__cc) { + return (vector bool char) + __builtin_s390_vistrbs((vector unsigned char)__a, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cp_until_zero_cc(vector unsigned char __a, int *__cc) { + return __builtin_s390_vistrbs(__a, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_cp_until_zero_cc(vector signed short __a, int *__cc) { + return (vector signed short) + __builtin_s390_vistrhs((vector unsigned short)__a, 
__cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_cp_until_zero_cc(vector bool short __a, int *__cc) { + return (vector bool short) + __builtin_s390_vistrhs((vector unsigned short)__a, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cp_until_zero_cc(vector unsigned short __a, int *__cc) { + return __builtin_s390_vistrhs(__a, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_cp_until_zero_cc(vector signed int __a, int *__cc) { + return (vector signed int) + __builtin_s390_vistrfs((vector unsigned int)__a, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_cp_until_zero_cc(vector bool int __a, int *__cc) { + return (vector bool int)__builtin_s390_vistrfs((vector unsigned int)__a, + __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cp_until_zero_cc(vector unsigned int __a, int *__cc) { + return __builtin_s390_vistrfs(__a, __cc); +} + +/*-- vec_cmpeq_idx ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpeq_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfeeb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfeeb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfeeb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpeq_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfeeh((vector unsigned short)__a, + (vector unsigned short)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpeq_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfeeh((vector unsigned short)__a, + 
(vector unsigned short)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpeq_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfeeh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpeq_idx(vector signed int __a, vector signed int __b) { + return (vector signed int) + __builtin_s390_vfeef((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfeef((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfeef(__a, __b); +} + +/*-- vec_cmpeq_idx_cc -------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpeq_idx_cc(vector signed char __a, vector signed char __b, int *__cc) { + return (vector signed char) + __builtin_s390_vfeebs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) { + return __builtin_s390_vfeebs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfeebs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpeq_idx_cc(vector signed short __a, vector signed short __b, int *__cc) { + return (vector signed short) + __builtin_s390_vfeehs((vector unsigned short)__a, + (vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpeq_idx_cc(vector bool short __a, vector bool short __b, int *__cc) { + return __builtin_s390_vfeehs((vector unsigned short)__a, + (vector unsigned short)__b, __cc); 
+} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpeq_idx_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfeehs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpeq_idx_cc(vector signed int __a, vector signed int __b, int *__cc) { + return (vector signed int) + __builtin_s390_vfeefs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) { + return __builtin_s390_vfeefs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) { + return __builtin_s390_vfeefs(__a, __b, __cc); +} + +/*-- vec_cmpeq_or_0_idx -----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpeq_or_0_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfeezb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_or_0_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfeezb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_or_0_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfeezb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpeq_or_0_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfeezh((vector unsigned short)__a, + (vector unsigned short)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpeq_or_0_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfeezh((vector unsigned short)__a, + (vector unsigned short)__b); +} + +static inline 
__ATTRS_o_ai vector unsigned short +vec_cmpeq_or_0_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfeezh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpeq_or_0_idx(vector signed int __a, vector signed int __b) { + return (vector signed int) + __builtin_s390_vfeezf((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_or_0_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfeezf((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_or_0_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfeezf(__a, __b); +} + +/*-- vec_cmpeq_or_0_idx_cc --------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpeq_or_0_idx_cc(vector signed char __a, vector signed char __b, + int *__cc) { + return (vector signed char) + __builtin_s390_vfeezbs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) { + return __builtin_s390_vfeezbs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpeq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfeezbs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpeq_or_0_idx_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector signed short) + __builtin_s390_vfeezhs((vector unsigned short)__a, + (vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpeq_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) { + return __builtin_s390_vfeezhs((vector unsigned short)__a, + (vector unsigned short)__b, 
__cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpeq_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfeezhs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpeq_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) { + return (vector signed int) + __builtin_s390_vfeezfs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) { + return __builtin_s390_vfeezfs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpeq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfeezfs(__a, __b, __cc); +} + +/*-- vec_cmpne_idx ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpne_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfeneb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfeneb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfeneb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpne_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfeneh((vector unsigned short)__a, + (vector unsigned short)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpne_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfeneh((vector unsigned short)__a, + (vector unsigned short)__b); +} + +static 
inline __ATTRS_o_ai vector unsigned short +vec_cmpne_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfeneh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpne_idx(vector signed int __a, vector signed int __b) { + return (vector signed int) + __builtin_s390_vfenef((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfenef((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfenef(__a, __b); +} + +/*-- vec_cmpne_idx_cc -------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpne_idx_cc(vector signed char __a, vector signed char __b, int *__cc) { + return (vector signed char) + __builtin_s390_vfenebs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) { + return __builtin_s390_vfenebs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfenebs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpne_idx_cc(vector signed short __a, vector signed short __b, int *__cc) { + return (vector signed short) + __builtin_s390_vfenehs((vector unsigned short)__a, + (vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpne_idx_cc(vector bool short __a, vector bool short __b, int *__cc) { + return __builtin_s390_vfenehs((vector unsigned short)__a, + (vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai 
vector unsigned short +vec_cmpne_idx_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfenehs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpne_idx_cc(vector signed int __a, vector signed int __b, int *__cc) { + return (vector signed int) + __builtin_s390_vfenefs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) { + return __builtin_s390_vfenefs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_idx_cc(vector unsigned int __a, vector unsigned int __b, int *__cc) { + return __builtin_s390_vfenefs(__a, __b, __cc); +} + +/*-- vec_cmpne_or_0_idx -----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpne_or_0_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfenezb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_or_0_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfenezb((vector unsigned char)__a, + (vector unsigned char)__b); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_or_0_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfenezb(__a, __b); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpne_or_0_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfenezh((vector unsigned short)__a, + (vector unsigned short)__b); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpne_or_0_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfenezh((vector unsigned short)__a, + (vector unsigned short)__b); +} + +static inline __ATTRS_o_ai vector 
unsigned short +vec_cmpne_or_0_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfenezh(__a, __b); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpne_or_0_idx(vector signed int __a, vector signed int __b) { + return (vector signed int) + __builtin_s390_vfenezf((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_or_0_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfenezf((vector unsigned int)__a, + (vector unsigned int)__b); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_or_0_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfenezf(__a, __b); +} + +/*-- vec_cmpne_or_0_idx_cc --------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_cmpne_or_0_idx_cc(vector signed char __a, vector signed char __b, + int *__cc) { + return (vector signed char) + __builtin_s390_vfenezbs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_or_0_idx_cc(vector bool char __a, vector bool char __b, int *__cc) { + return __builtin_s390_vfenezbs((vector unsigned char)__a, + (vector unsigned char)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfenezbs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_cmpne_or_0_idx_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector signed short) + __builtin_s390_vfenezhs((vector unsigned short)__a, + (vector unsigned short)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpne_or_0_idx_cc(vector bool short __a, vector bool short __b, int *__cc) { + return __builtin_s390_vfenezhs((vector unsigned short)__a, + (vector unsigned short)__b, __cc); +} 
+ +static inline __ATTRS_o_ai vector unsigned short +vec_cmpne_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfenezhs(__a, __b, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_cmpne_or_0_idx_cc(vector signed int __a, vector signed int __b, int *__cc) { + return (vector signed int) + __builtin_s390_vfenezfs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_or_0_idx_cc(vector bool int __a, vector bool int __b, int *__cc) { + return __builtin_s390_vfenezfs((vector unsigned int)__a, + (vector unsigned int)__b, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfenezfs(__a, __b, __cc); +} + +/*-- vec_cmprg --------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmprg(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 4); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmprg(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 4); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmprg(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 4); +} + +/*-- vec_cmprg_cc -----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmprg_cc(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c, int *__cc) { + return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmprg_cc(vector unsigned short __a, vector unsigned 
short __b, + vector unsigned short __c, int *__cc) { + return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmprg_cc(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c, int *__cc) { + return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 4, __cc); +} + +/*-- vec_cmprg_idx ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmprg_idx(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vstrcb(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmprg_idx(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __builtin_s390_vstrch(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmprg_idx(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __builtin_s390_vstrcf(__a, __b, __c, 0); +} + +/*-- vec_cmprg_idx_cc -------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmprg_idx_cc(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrcbs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmprg_idx_cc(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrchs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmprg_idx_cc(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrcfs(__a, __b, __c, 0, __cc); +} + +/*-- vec_cmprg_or_0_idx -----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmprg_or_0_idx(vector unsigned char __a, vector unsigned char __b, 
+ vector unsigned char __c) { + return __builtin_s390_vstrczb(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmprg_or_0_idx(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __builtin_s390_vstrczh(__a, __b, __c, 0); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmprg_or_0_idx(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __builtin_s390_vstrczf(__a, __b, __c, 0); +} + +/*-- vec_cmprg_or_0_idx_cc --------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmprg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrczbs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmprg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrczhs(__a, __b, __c, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmprg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrczfs(__a, __b, __c, 0, __cc); +} + +/*-- vec_cmpnrg -------------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmpnrg(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return (vector bool char)__builtin_s390_vstrcb(__a, __b, __c, 12); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmpnrg(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return (vector bool short)__builtin_s390_vstrch(__a, __b, __c, 12); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpnrg(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return (vector bool int)__builtin_s390_vstrcf(__a, __b, __c, 12); +} + +/*-- 
vec_cmpnrg_cc ----------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_cmpnrg_cc(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c, int *__cc) { + return (vector bool char)__builtin_s390_vstrcbs(__a, __b, __c, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_cmpnrg_cc(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c, int *__cc) { + return (vector bool short)__builtin_s390_vstrchs(__a, __b, __c, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_cmpnrg_cc(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c, int *__cc) { + return (vector bool int)__builtin_s390_vstrcfs(__a, __b, __c, 12, __cc); +} + +/*-- vec_cmpnrg_idx ---------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpnrg_idx(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vstrcb(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpnrg_idx(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __builtin_s390_vstrch(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpnrg_idx(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __builtin_s390_vstrcf(__a, __b, __c, 8); +} + +/*-- vec_cmpnrg_idx_cc ------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpnrg_idx_cc(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrcbs(__a, __b, __c, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpnrg_idx_cc(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrchs(__a, __b, __c, 8, __cc); +} 
+ +static inline __ATTRS_o_ai vector unsigned int +vec_cmpnrg_idx_cc(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrcfs(__a, __b, __c, 8, __cc); +} + +/*-- vec_cmpnrg_or_0_idx ----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpnrg_or_0_idx(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c) { + return __builtin_s390_vstrczb(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpnrg_or_0_idx(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c) { + return __builtin_s390_vstrczh(__a, __b, __c, 8); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpnrg_or_0_idx(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c) { + return __builtin_s390_vstrczf(__a, __b, __c, 8); +} + +/*-- vec_cmpnrg_or_0_idx_cc -------------------------------------------------*/ + +static inline __ATTRS_o_ai vector unsigned char +vec_cmpnrg_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b, + vector unsigned char __c, int *__cc) { + return __builtin_s390_vstrczbs(__a, __b, __c, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_cmpnrg_or_0_idx_cc(vector unsigned short __a, vector unsigned short __b, + vector unsigned short __c, int *__cc) { + return __builtin_s390_vstrczhs(__a, __b, __c, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_cmpnrg_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b, + vector unsigned int __c, int *__cc) { + return __builtin_s390_vstrczfs(__a, __b, __c, 8, __cc); +} + +/*-- vec_find_any_eq --------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_find_any_eq(vector signed char __a, vector signed char __b) { + return (vector bool char) + __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 
4); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_eq(vector bool char __a, vector bool char __b) { + return (vector bool char) + __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 4); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_eq(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_s390_vfaeb(__a, __b, 4); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_eq(vector signed short __a, vector signed short __b) { + return (vector bool short) + __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 4); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_eq(vector bool short __a, vector bool short __b) { + return (vector bool short) + __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 4); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_eq(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_s390_vfaeh(__a, __b, 4); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_eq(vector signed int __a, vector signed int __b) { + return (vector bool int) + __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned int)__b, 4); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_eq(vector bool int __a, vector bool int __b) { + return (vector bool int) + __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned int)__b, 4); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_eq(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_s390_vfaef(__a, __b, 4); +} + +/*-- vec_find_any_eq_cc -----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_find_any_eq_cc(vector signed char __a, vector signed char __b, int *__cc) { + return (vector bool char) + __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned 
char)__b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_eq_cc(vector bool char __a, vector bool char __b, int *__cc) { + return (vector bool char) + __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned char)__b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_eq_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return (vector bool char)__builtin_s390_vfaebs(__a, __b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_eq_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector bool short) + __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_eq_cc(vector bool short __a, vector bool short __b, int *__cc) { + return (vector bool short) + __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_eq_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return (vector bool short)__builtin_s390_vfaehs(__a, __b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_eq_cc(vector signed int __a, vector signed int __b, int *__cc) { + return (vector bool int) + __builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_eq_cc(vector bool int __a, vector bool int __b, int *__cc) { + return (vector bool int) + __builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 4, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_eq_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return (vector bool int)__builtin_s390_vfaefs(__a, __b, 4, __cc); +} + +/*-- vec_find_any_eq_idx ----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector 
signed char +vec_find_any_eq_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfaeb(__a, __b, 0); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_eq_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfaeh(__a, __b, 0); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_eq_idx(vector signed int __a, vector signed int __b) { + return (vector signed int) + __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfaef(__a, __b, 0); +} + +/*-- vec_find_any_eq_idx_cc -------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_find_any_eq_idx_cc(vector signed char 
__a, vector signed char __b, + int *__cc) { + return (vector signed char) + __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_idx_cc(vector bool char __a, vector bool char __b, int *__cc) { + return __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfaebs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_eq_idx_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector signed short) + __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_idx_cc(vector bool short __a, vector bool short __b, + int *__cc) { + return __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_idx_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfaehs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_eq_idx_cc(vector signed int __a, vector signed int __b, + int *__cc) { + return (vector signed int) + __builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_idx_cc(vector bool int __a, vector bool int __b, int *__cc) { + return __builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_idx_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfaefs(__a, __b, 0, __cc); +} + +/*-- 
vec_find_any_eq_or_0_idx -----------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_find_any_eq_or_0_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfaezb((vector unsigned char)__a, + (vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_or_0_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfaezb((vector unsigned char)__a, + (vector unsigned char)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_or_0_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfaezb(__a, __b, 0); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_eq_or_0_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfaezh((vector unsigned short)__a, + (vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_or_0_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfaezh((vector unsigned short)__a, + (vector unsigned short)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_or_0_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfaezh(__a, __b, 0); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_eq_or_0_idx(vector signed int __a, vector signed int __b) { + return (vector signed int) + __builtin_s390_vfaezf((vector unsigned int)__a, + (vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_or_0_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfaezf((vector unsigned int)__a, + (vector unsigned int)__b, 0); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_or_0_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfaezf(__a, __b, 0); +} + +/*-- 
vec_find_any_eq_or_0_idx_cc --------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_find_any_eq_or_0_idx_cc(vector signed char __a, vector signed char __b, + int *__cc) { + return (vector signed char) + __builtin_s390_vfaezbs((vector unsigned char)__a, + (vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_or_0_idx_cc(vector bool char __a, vector bool char __b, + int *__cc) { + return __builtin_s390_vfaezbs((vector unsigned char)__a, + (vector unsigned char)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_eq_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfaezbs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_eq_or_0_idx_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector signed short) + __builtin_s390_vfaezhs((vector unsigned short)__a, + (vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_or_0_idx_cc(vector bool short __a, vector bool short __b, + int *__cc) { + return __builtin_s390_vfaezhs((vector unsigned short)__a, + (vector unsigned short)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_eq_or_0_idx_cc(vector unsigned short __a, + vector unsigned short __b, int *__cc) { + return __builtin_s390_vfaezhs(__a, __b, 0, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_eq_or_0_idx_cc(vector signed int __a, vector signed int __b, + int *__cc) { + return (vector signed int) + __builtin_s390_vfaezfs((vector unsigned int)__a, + (vector unsigned int)__b, 0, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_or_0_idx_cc(vector bool int __a, vector bool int __b, + int *__cc) { + return __builtin_s390_vfaezfs((vector unsigned int)__a, + (vector unsigned int)__b, 0, __cc); +} + 
+static inline __ATTRS_o_ai vector unsigned int +vec_find_any_eq_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfaezfs(__a, __b, 0, __cc); +} + +/*-- vec_find_any_ne --------------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_find_any_ne(vector signed char __a, vector signed char __b) { + return (vector bool char) + __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 12); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_ne(vector bool char __a, vector bool char __b) { + return (vector bool char) + __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 12); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_ne(vector unsigned char __a, vector unsigned char __b) { + return (vector bool char)__builtin_s390_vfaeb(__a, __b, 12); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_ne(vector signed short __a, vector signed short __b) { + return (vector bool short) + __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 12); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_ne(vector bool short __a, vector bool short __b) { + return (vector bool short) + __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 12); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_ne(vector unsigned short __a, vector unsigned short __b) { + return (vector bool short)__builtin_s390_vfaeh(__a, __b, 12); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_ne(vector signed int __a, vector signed int __b) { + return (vector bool int) + __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned int)__b, 12); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_ne(vector bool int __a, vector bool int __b) { + return (vector bool int) + __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned 
int)__b, 12); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_ne(vector unsigned int __a, vector unsigned int __b) { + return (vector bool int)__builtin_s390_vfaef(__a, __b, 12); +} + +/*-- vec_find_any_ne_cc -----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector bool char +vec_find_any_ne_cc(vector signed char __a, vector signed char __b, int *__cc) { + return (vector bool char) + __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned char)__b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_ne_cc(vector bool char __a, vector bool char __b, int *__cc) { + return (vector bool char) + __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned char)__b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool char +vec_find_any_ne_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return (vector bool char)__builtin_s390_vfaebs(__a, __b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_ne_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector bool short) + __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_ne_cc(vector bool short __a, vector bool short __b, int *__cc) { + return (vector bool short) + __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool short +vec_find_any_ne_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return (vector bool short)__builtin_s390_vfaehs(__a, __b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_ne_cc(vector signed int __a, vector signed int __b, int *__cc) { + return (vector bool int) + __builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool int 
+vec_find_any_ne_cc(vector bool int __a, vector bool int __b, int *__cc) { + return (vector bool int) + __builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 12, __cc); +} + +static inline __ATTRS_o_ai vector bool int +vec_find_any_ne_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return (vector bool int)__builtin_s390_vfaefs(__a, __b, 12, __cc); +} + +/*-- vec_find_any_ne_idx ----------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_find_any_ne_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfaeb((vector unsigned char)__a, + (vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfaeb(__a, __b, 8); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_ne_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfaeh((vector unsigned short)__a, + (vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfaeh(__a, __b, 8); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_ne_idx(vector signed int __a, vector signed int __b) { + return (vector signed int) + __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned int)__b, 8); +} + +static inline __ATTRS_o_ai 
vector unsigned int +vec_find_any_ne_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfaef((vector unsigned int)__a, + (vector unsigned int)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_ne_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfaef(__a, __b, 8); +} + +/*-- vec_find_any_ne_idx_cc -------------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_find_any_ne_idx_cc(vector signed char __a, vector signed char __b, + int *__cc) { + return (vector signed char) + __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_idx_cc(vector bool char __a, vector bool char __b, int *__cc) { + return __builtin_s390_vfaebs((vector unsigned char)__a, + (vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfaebs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_ne_idx_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector signed short) + __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_idx_cc(vector bool short __a, vector bool short __b, + int *__cc) { + return __builtin_s390_vfaehs((vector unsigned short)__a, + (vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_idx_cc(vector unsigned short __a, vector unsigned short __b, + int *__cc) { + return __builtin_s390_vfaehs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_ne_idx_cc(vector signed int __a, vector signed int __b, + int *__cc) { + return (vector signed int) + 
__builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_ne_idx_cc(vector bool int __a, vector bool int __b, int *__cc) { + return __builtin_s390_vfaefs((vector unsigned int)__a, + (vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_ne_idx_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfaefs(__a, __b, 8, __cc); +} + +/*-- vec_find_any_ne_or_0_idx -----------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_find_any_ne_or_0_idx(vector signed char __a, vector signed char __b) { + return (vector signed char) + __builtin_s390_vfaezb((vector unsigned char)__a, + (vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_or_0_idx(vector bool char __a, vector bool char __b) { + return __builtin_s390_vfaezb((vector unsigned char)__a, + (vector unsigned char)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_or_0_idx(vector unsigned char __a, vector unsigned char __b) { + return __builtin_s390_vfaezb(__a, __b, 8); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_ne_or_0_idx(vector signed short __a, vector signed short __b) { + return (vector signed short) + __builtin_s390_vfaezh((vector unsigned short)__a, + (vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_or_0_idx(vector bool short __a, vector bool short __b) { + return __builtin_s390_vfaezh((vector unsigned short)__a, + (vector unsigned short)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_or_0_idx(vector unsigned short __a, vector unsigned short __b) { + return __builtin_s390_vfaezh(__a, __b, 8); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_ne_or_0_idx(vector signed int __a, vector signed int 
__b) { + return (vector signed int) + __builtin_s390_vfaezf((vector unsigned int)__a, + (vector unsigned int)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_ne_or_0_idx(vector bool int __a, vector bool int __b) { + return __builtin_s390_vfaezf((vector unsigned int)__a, + (vector unsigned int)__b, 8); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_ne_or_0_idx(vector unsigned int __a, vector unsigned int __b) { + return __builtin_s390_vfaezf(__a, __b, 8); +} + +/*-- vec_find_any_ne_or_0_idx_cc --------------------------------------------*/ + +static inline __ATTRS_o_ai vector signed char +vec_find_any_ne_or_0_idx_cc(vector signed char __a, vector signed char __b, + int *__cc) { + return (vector signed char) + __builtin_s390_vfaezbs((vector unsigned char)__a, + (vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_or_0_idx_cc(vector bool char __a, vector bool char __b, + int *__cc) { + return __builtin_s390_vfaezbs((vector unsigned char)__a, + (vector unsigned char)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned char +vec_find_any_ne_or_0_idx_cc(vector unsigned char __a, vector unsigned char __b, + int *__cc) { + return __builtin_s390_vfaezbs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai vector signed short +vec_find_any_ne_or_0_idx_cc(vector signed short __a, vector signed short __b, + int *__cc) { + return (vector signed short) + __builtin_s390_vfaezhs((vector unsigned short)__a, + (vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_or_0_idx_cc(vector bool short __a, vector bool short __b, + int *__cc) { + return __builtin_s390_vfaezhs((vector unsigned short)__a, + (vector unsigned short)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned short +vec_find_any_ne_or_0_idx_cc(vector unsigned short __a, + vector unsigned short __b, int *__cc) { + return 
__builtin_s390_vfaezhs(__a, __b, 8, __cc); +} + +static inline __ATTRS_o_ai vector signed int +vec_find_any_ne_or_0_idx_cc(vector signed int __a, vector signed int __b, + int *__cc) { + return (vector signed int) + __builtin_s390_vfaezfs((vector unsigned int)__a, + (vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_ne_or_0_idx_cc(vector bool int __a, vector bool int __b, + int *__cc) { + return __builtin_s390_vfaezfs((vector unsigned int)__a, + (vector unsigned int)__b, 8, __cc); +} + +static inline __ATTRS_o_ai vector unsigned int +vec_find_any_ne_or_0_idx_cc(vector unsigned int __a, vector unsigned int __b, + int *__cc) { + return __builtin_s390_vfaezfs(__a, __b, 8, __cc); +} + +#undef __constant_pow2_range +#undef __constant_range +#undef __constant +#undef __ATTRS_o +#undef __ATTRS_o_ai +#undef __ATTRS_ai + +#else + +#error "Use -fzvector to enable vector extensions" + +#endif diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp index e76f767786f0c..0574a636337f3 100644 --- a/clang/lib/Parse/Parser.cpp +++ b/clang/lib/Parse/Parser.cpp @@ -476,11 +476,15 @@ void Parser::Initialize() { Ident_super = &PP.getIdentifierTable().get("super"); - if (getLangOpts().AltiVec) { + Ident_vector = nullptr; + Ident_bool = nullptr; + Ident_pixel = nullptr; + if (getLangOpts().AltiVec || getLangOpts().ZVector) { Ident_vector = &PP.getIdentifierTable().get("vector"); - Ident_pixel = &PP.getIdentifierTable().get("pixel"); Ident_bool = &PP.getIdentifierTable().get("bool"); } + if (getLangOpts().AltiVec) + Ident_pixel = &PP.getIdentifierTable().get("pixel"); Ident_introduced = nullptr; Ident_deprecated = nullptr; diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp index ea3872f42700a..4adbb2b6af2a5 100644 --- a/clang/lib/Sema/DeclSpec.cpp +++ b/clang/lib/Sema/DeclSpec.cpp @@ -987,10 +987,11 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli Diag(D, TSWLoc, 
diag::err_invalid_vector_bool_decl_spec) << getSpecifierName((TSW)TypeSpecWidth); - // vector bool long long requires VSX support. + // vector bool long long requires VSX support or ZVector. if ((TypeSpecWidth == TSW_longlong) && (!PP.getTargetInfo().hasFeature("vsx")) && - (!PP.getTargetInfo().hasFeature("power8-vector"))) + (!PP.getTargetInfo().hasFeature("power8-vector")) && + !PP.getLangOpts().ZVector) Diag(D, TSTLoc, diag::err_invalid_vector_long_long_decl_spec); // Elements of vector bool are interpreted as unsigned. (PIM 2.1) @@ -999,14 +1000,23 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli TypeSpecSign = TSS_unsigned; } else if (TypeSpecType == TST_double) { // vector long double and vector long long double are never allowed. - // vector double is OK for Power7 and later. + // vector double is OK for Power7 and later, and ZVector. if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong) Diag(D, TSWLoc, diag::err_invalid_vector_long_double_decl_spec); - else if (!PP.getTargetInfo().hasFeature("vsx")) + else if (!PP.getTargetInfo().hasFeature("vsx") && + !PP.getLangOpts().ZVector) Diag(D, TSTLoc, diag::err_invalid_vector_double_decl_spec); + } else if (TypeSpecType == TST_float) { + // vector float is unsupported for ZVector. + if (PP.getLangOpts().ZVector) + Diag(D, TSTLoc, diag::err_invalid_vector_float_decl_spec); } else if (TypeSpecWidth == TSW_long) { - Diag(D, TSWLoc, diag::warn_vector_long_decl_spec_combination) - << getSpecifierName((TST)TypeSpecType, Policy); + // vector long is unsupported for ZVector and deprecated for AltiVec. 
+ if (PP.getLangOpts().ZVector) + Diag(D, TSWLoc, diag::err_invalid_vector_long_decl_spec); + else + Diag(D, TSWLoc, diag::warn_vector_long_decl_spec_combination) + << getSpecifierName((TST)TypeSpecType, Policy); } if (TypeAltiVecPixel) { diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index 1ae983cad227a..617b06af16b4c 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -5518,7 +5518,7 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc, // i.e. all the elements are integer constants. ParenExpr *PE = dyn_cast(CastExpr); ParenListExpr *PLE = dyn_cast(CastExpr); - if ((getLangOpts().AltiVec || getLangOpts().OpenCL) + if ((getLangOpts().AltiVec || getLangOpts().ZVector || getLangOpts().OpenCL) && castType->isVectorType() && (PE || PLE)) { if (PLE && PLE->getNumExprs() == 0) { Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer); @@ -6075,7 +6075,9 @@ OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond, if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc, - /*isCompAssign*/false); + /*isCompAssign*/false, + /*AllowBothBool*/true, + /*AllowBoolConversions*/false); if (VecResTy.isNull()) return QualType(); // The result type must match the condition type as specified in // OpenCL v1.1 s6.11.6. @@ -6126,7 +6128,9 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, // Now check the two expressions. 
if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) - return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false); + return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, + /*AllowBothBool*/true, + /*AllowBoolConversions*/false); QualType ResTy = UsualArithmeticConversions(LHS, RHS); if (LHS.isInvalid() || RHS.isInvalid()) @@ -7267,7 +7271,9 @@ static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar, } QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, - SourceLocation Loc, bool IsCompAssign) { + SourceLocation Loc, bool IsCompAssign, + bool AllowBothBool, + bool AllowBoolConversions) { if (!IsCompAssign) { LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) @@ -7282,14 +7288,21 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, QualType LHSType = LHS.get()->getType().getUnqualifiedType(); QualType RHSType = RHS.get()->getType().getUnqualifiedType(); - // If the vector types are identical, return. - if (Context.hasSameType(LHSType, RHSType)) - return LHSType; - const VectorType *LHSVecType = LHSType->getAs(); const VectorType *RHSVecType = RHSType->getAs(); assert(LHSVecType || RHSVecType); + // AltiVec-style "vector bool op vector bool" combinations are allowed + // for some operators but not others. + if (!AllowBothBool && + LHSVecType && LHSVecType->getVectorKind() == VectorType::AltiVecBool && + RHSVecType && RHSVecType->getVectorKind() == VectorType::AltiVecBool) + return InvalidOperands(Loc, LHS, RHS); + + // If the vector types are identical, return. + if (Context.hasSameType(LHSType, RHSType)) + return LHSType; + // If we have compatible AltiVec and GCC vector types, use the AltiVec type. 
if (LHSVecType && RHSVecType && Context.areCompatibleVectorTypes(LHSType, RHSType)) { @@ -7303,6 +7316,28 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, return RHSType; } + // AllowBoolConversions says that bool and non-bool AltiVec vectors + // can be mixed, with the result being the non-bool type. The non-bool + // operand must have integer element type. + if (AllowBoolConversions && LHSVecType && RHSVecType && + LHSVecType->getNumElements() == RHSVecType->getNumElements() && + (Context.getTypeSize(LHSVecType->getElementType()) == + Context.getTypeSize(RHSVecType->getElementType()))) { + if (LHSVecType->getVectorKind() == VectorType::AltiVecVector && + LHSVecType->getElementType()->isIntegerType() && + RHSVecType->getVectorKind() == VectorType::AltiVecBool) { + RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); + return LHSType; + } + if (!IsCompAssign && + LHSVecType->getVectorKind() == VectorType::AltiVecBool && + RHSVecType->getVectorKind() == VectorType::AltiVecVector && + RHSVecType->getElementType()->isIntegerType()) { + LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast); + return RHSType; + } + } + // If there's an ext-vector type and a scalar, try to convert the scalar to // the vector element type and splat. 
if (!RHSVecType && isa(LHSVecType)) { @@ -7391,7 +7426,9 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS, if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) - return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign); + return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, + /*AllowBothBool*/getLangOpts().AltiVec, + /*AllowBoolConversions*/false); QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign); if (LHS.isInvalid() || RHS.isInvalid()) @@ -7420,7 +7457,9 @@ QualType Sema::CheckRemainderOperands( RHS.get()->getType()->isVectorType()) { if (LHS.get()->getType()->hasIntegerRepresentation() && RHS.get()->getType()->hasIntegerRepresentation()) - return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign); + return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, + /*AllowBothBool*/getLangOpts().AltiVec, + /*AllowBoolConversions*/false); return InvalidOperands(Loc, LHS, RHS); } @@ -7706,7 +7745,10 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6 if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { - QualType compType = CheckVectorOperands(LHS, RHS, Loc, CompLHSTy); + QualType compType = CheckVectorOperands( + LHS, RHS, Loc, CompLHSTy, + /*AllowBothBool*/getLangOpts().AltiVec, + /*AllowBoolConversions*/getLangOpts().ZVector); if (CompLHSTy) *CompLHSTy = compType; return compType; } @@ -7781,7 +7823,10 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS, if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { - QualType compType = CheckVectorOperands(LHS, RHS, Loc, CompLHSTy); + QualType compType = CheckVectorOperands( + LHS, RHS, Loc, CompLHSTy, + /*AllowBothBool*/getLangOpts().AltiVec, + /*AllowBoolConversions*/getLangOpts().ZVector); if (CompLHSTy) *CompLHSTy = compType; return compType; } @@ -8023,7 +8068,21 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS, 
RHS.get()->getType()->isVectorType()) { if (LangOpts.OpenCL) return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign); - return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign); + if (LangOpts.ZVector) { + // The shift operators for the z vector extensions work basically + // like OpenCL shifts, except that neither the LHS nor the RHS is + // allowed to be a "vector bool". + if (auto LHSVecType = LHS.get()->getType()->getAs()) + if (LHSVecType->getVectorKind() == VectorType::AltiVecBool) + return InvalidOperands(Loc, LHS, RHS); + if (auto RHSVecType = RHS.get()->getType()->getAs()) + if (RHSVecType->getVectorKind() == VectorType::AltiVecBool) + return InvalidOperands(Loc, LHS, RHS); + return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign); + } + return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, + /*AllowBothBool*/true, + /*AllowBoolConversions*/false); } // Shifts don't perform usual arithmetic conversions, they just do integer @@ -8797,7 +8856,9 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, bool IsRelational) { // Check to make sure we're operating on vectors of the same type and width, // Allowing one side to be a scalar of element type. - QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false); + QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false, + /*AllowBothBool*/true, + /*AllowBoolConversions*/getLangOpts().ZVector); if (vType.isNull()) return vType; @@ -8805,7 +8866,8 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, // If AltiVec, the comparison results in a numeric type, i.e. 
// bool for C++, int for C - if (vType->getAs()->getVectorKind() == VectorType::AltiVecVector) + if (getLangOpts().AltiVec && + vType->getAs()->getVectorKind() == VectorType::AltiVecVector) return Context.getLogicalOperationType(); // For non-floating point types, check for self-comparisons of the form @@ -8839,7 +8901,9 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { // Ensure that either both operands are of the same vector type, or // one operand is of a vector type and the other is of its element type. - QualType vType = CheckVectorOperands(LHS, RHS, Loc, false); + QualType vType = CheckVectorOperands(LHS, RHS, Loc, false, + /*AllowBothBool*/true, + /*AllowBoolConversions*/false); if (vType.isNull()) return InvalidOperands(Loc, LHS, RHS); if (getLangOpts().OpenCL && getLangOpts().OpenCLVersion < 120 && @@ -8857,8 +8921,9 @@ inline QualType Sema::CheckBitwiseOperands( RHS.get()->getType()->isVectorType()) { if (LHS.get()->getType()->hasIntegerRepresentation() && RHS.get()->getType()->hasIntegerRepresentation()) - return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign); - + return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, + /*AllowBothBool*/true, + /*AllowBoolConversions*/getLangOpts().ZVector); return InvalidOperands(Loc, LHS, RHS); } @@ -9472,6 +9537,10 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op, IsInc, IsPrefix); } else if (S.getLangOpts().AltiVec && ResType->isVectorType()) { // OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 ) + } else if (S.getLangOpts().ZVector && ResType->isVectorType() && + (ResType->getAs()->getVectorKind() != + VectorType::AltiVecBool)) { + // The z vector extensions allow ++ and -- for non-bool vectors. } else if(S.getLangOpts().OpenCL && ResType->isVectorType() && ResType->getAs()->getElementType()->isIntegerType()) { // OpenCL V1.2 6.3 says dec/inc ops operate on integer vector types. 
@@ -10552,8 +10621,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc, resultType = Input.get()->getType(); if (resultType->isDependentType()) break; - if (resultType->isArithmeticType() || // C99 6.5.3.3p1 - resultType->isVectorType()) + if (resultType->isArithmeticType()) // C99 6.5.3.3p1 + break; + else if (resultType->isVectorType() && + // The z vector extensions don't allow + or - with bool vectors. + (!Context.getLangOpts().ZVector || + resultType->getAs()->getVectorKind() != + VectorType::AltiVecBool)) break; else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6 Opc == UO_Plus && diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index 6608d7c1f061a..01966d569a11e 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -4950,7 +4950,9 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, // Extension: conditional operator involving vector types. if (LTy->isVectorType() || RTy->isVectorType()) - return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false); + return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, + /*AllowBothBool*/true, + /*AllowBoolConversions*/false); // -- The second and third operands have arithmetic or enumeration type; // the usual arithmetic conversions are performed to bring them to a diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp index 3fd1f21ba3fdf..2e7f891915809 100644 --- a/clang/lib/Sema/SemaLookup.cpp +++ b/clang/lib/Sema/SemaLookup.cpp @@ -4245,7 +4245,7 @@ std::unique_ptr Sema::makeTypoCorrectionConsumer( // Don't try to correct the identifier "vector" when in AltiVec mode. // TODO: Figure out why typo correction misbehaves in this case, fix it, and // remove this workaround. 
- if (getLangOpts().AltiVec && Typo->isStr("vector")) + if ((getLangOpts().AltiVec || getLangOpts().ZVector) && Typo->isStr("vector")) return nullptr; // Provide a stop gap for files that are just seriously broken. Trying diff --git a/clang/test/CodeGen/builtins-systemz-zvector-error.c b/clang/test/CodeGen/builtins-systemz-zvector-error.c new file mode 100644 index 0000000000000..8d5380dac1612 --- /dev/null +++ b/clang/test/CodeGen/builtins-systemz-zvector-error.c @@ -0,0 +1,576 @@ +// REQUIRES: systemz-registered-target +// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \ +// RUN: -fzvector -fno-lax-vector-conversions \ +// RUN: -Wall -Wno-unused -Werror -fsyntax-only -verify %s + +#include + +volatile vector signed char vsc; +volatile vector signed short vss; +volatile vector signed int vsi; +volatile vector signed long long vsl; +volatile vector unsigned char vuc; +volatile vector unsigned short vus; +volatile vector unsigned int vui; +volatile vector unsigned long long vul; +volatile vector bool char vbc; +volatile vector bool short vbs; +volatile vector bool int vbi; +volatile vector bool long long vbl; +volatile vector double vd; + +volatile signed char sc; +volatile signed short ss; +volatile signed int si; +volatile signed long long sl; +volatile unsigned char uc; +volatile unsigned short us; +volatile unsigned int ui; +volatile unsigned long long ul; +volatile double d; + +const void * volatile cptr; +const signed char * volatile cptrsc; +const signed short * volatile cptrss; +const signed int * volatile cptrsi; +const signed long long * volatile cptrsl; +const unsigned char * volatile cptruc; +const unsigned short * volatile cptrus; +const unsigned int * volatile cptrui; +const unsigned long long * volatile cptrul; +const float * volatile cptrf; +const double * volatile cptrd; + +void * volatile ptr; +signed char * volatile ptrsc; +signed short * volatile ptrss; +signed int * volatile ptrsi; +signed long long * volatile ptrsl; +unsigned char * 
volatile ptruc; +unsigned short * volatile ptrus; +unsigned int * volatile ptrui; +unsigned long long * volatile ptrul; +float * volatile ptrf; +double * volatile ptrd; + +volatile unsigned int len; +volatile int idx; +int cc; + +void test_core(void) { + len = __lcbb(cptr, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}} + len = __lcbb(cptr, 200); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}} + len = __lcbb(cptr, 32); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}} + len = __lcbb(cptr, 8192); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant power of 2 from 64 to 4096}} + + vsl = vec_permi(vsl, vsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 3 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsl = vec_permi(vsl, vsl, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 3 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsl = vec_permi(vsl, vsl, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 3 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vul = vec_permi(vul, vul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 2 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vul = vec_permi(vul, vul, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 2 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vul = 
vec_permi(vul, vul, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 2 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbl = vec_permi(vbl, vbl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 2 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbl = vec_permi(vbl, vbl, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 2 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbl = vec_permi(vbl, vbl, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 2 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vd = vec_permi(vd, vd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 3 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vd = vec_permi(vd, vd, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 3 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vd = vec_permi(vd, vd, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 3 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + + vsi = vec_gather_element(vsi, vui, cptrsi, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsi = vec_gather_element(vsi, vui, cptrsi, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a 
constant integer from 0 to 3}} + vsi = vec_gather_element(vsi, vui, cptrsi, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vui = vec_gather_element(vui, vui, cptrui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vui = vec_gather_element(vui, vui, cptrui, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vui = vec_gather_element(vui, vui, cptrui, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbi = vec_gather_element(vbi, vui, cptrui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbi = vec_gather_element(vbi, vui, cptrui, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbi = vec_gather_element(vbi, vui, cptrui, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vsl = vec_gather_element(vsl, vul, cptrsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vsl = vec_gather_element(vsl, vul, cptrsl, 
-1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vsl = vec_gather_element(vsl, vul, cptrsl, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vul = vec_gather_element(vul, vul, cptrul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vul = vec_gather_element(vul, vul, cptrul, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vul = vec_gather_element(vul, vul, cptrul, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vbl = vec_gather_element(vbl, vul, cptrul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vbl = vec_gather_element(vbl, vul, cptrul, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vbl = vec_gather_element(vbl, vul, cptrul, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vd = vec_gather_element(vd, vul, cptrd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 
{{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vd = vec_gather_element(vd, vul, cptrd, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vd = vec_gather_element(vd, vul, cptrd, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + + vec_scatter_element(vsi, vui, ptrsi, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vsi, vui, ptrsi, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vsi, vui, ptrsi, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vui, vui, ptrui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vui, vui, ptrui, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vui, vui, ptrui, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + 
vec_scatter_element(vbi, vui, ptrui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vbi, vui, ptrui, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vbi, vui, ptrui, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vec_scatter_element(vsl, vul, ptrsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vsl, vul, ptrsl, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vsl, vul, ptrsl, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vul, vul, ptrul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vul, vul, ptrul, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vul, vul, ptrul, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 
{{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vbl, vul, ptrul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vbl, vul, ptrul, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vbl, vul, ptrul, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 5 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vd, vul, ptrd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vd, vul, ptrd, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vec_scatter_element(vd, vul, ptrd, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 6 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + + vsc = vec_load_bndry(cptrsc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vsc = vec_load_bndry(cptrsc, 200); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vsc = 
vec_load_bndry(cptrsc, 32); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vsc = vec_load_bndry(cptrsc, 8192); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vuc = vec_load_bndry(cptruc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vss = vec_load_bndry(cptrss, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vus = vec_load_bndry(cptrus, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vsi = vec_load_bndry(cptrsi, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vui = vec_load_bndry(cptrui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vsl = vec_load_bndry(cptrsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + vul = vec_load_bndry(cptrul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function 
not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant power of 2 from 64 to 4096}} + + vuc = vec_genmask(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + + vuc = vec_genmasks_8(0, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vuc = vec_genmasks_8(idx, 0); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vuc = vec_genmasks_8(idx, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vus = vec_genmasks_16(0, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vus = vec_genmasks_16(idx, 0); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vus = vec_genmasks_16(idx, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vui = vec_genmasks_32(0, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vui = vec_genmasks_32(idx, 0); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vui = vec_genmasks_32(idx, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vul = vec_genmasks_64(0, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vul = vec_genmasks_64(idx, 0); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vul = vec_genmasks_64(idx, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + + vsc = vec_splat(vsc, idx); // expected-error {{no matching function}} + // 
expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vsc = vec_splat(vsc, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vsc = vec_splat(vsc, 16); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vuc = vec_splat(vuc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}} + vuc = vec_splat(vuc, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}} + vuc = vec_splat(vuc, 16); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}} + vbc = vec_splat(vbc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}} + vbc = vec_splat(vbc, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}} + vbc = vec_splat(vbc, 16); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 15}} + vss = vec_splat(vss, idx); // expected-error {{no matching function}} + // 
expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}} + vss = vec_splat(vss, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}} + vss = vec_splat(vss, 8); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 7}} + vus = vec_splat(vus, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}} + vus = vec_splat(vus, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}} + vus = vec_splat(vus, 8); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}} + vbs = vec_splat(vbs, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}} + vbs = vec_splat(vbs, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}} + vbs = vec_splat(vbs, 8); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 7}} + vsi = vec_splat(vsi, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 
12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsi = vec_splat(vsi, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsi = vec_splat(vsi, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vui = vec_splat(vui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vui = vec_splat(vui, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vui = vec_splat(vui, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbi = vec_splat(vbi, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbi = vec_splat(vbi, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vbi = vec_splat(vbi, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 3}} + vsl = vec_splat(vsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not 
viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vsl = vec_splat(vsl, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vsl = vec_splat(vsl, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vul = vec_splat(vul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vul = vec_splat(vul, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vul = vec_splat(vul, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vbl = vec_splat(vbl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vbl = vec_splat(vbl, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vbl = vec_splat(vbl, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 11 {{candidate function not viable}} + // expected-note@vecintrin.h:* 2 {{must be a constant integer from 0 to 1}} + vd = vec_splat(vd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // 
expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vd = vec_splat(vd, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + vd = vec_splat(vd, 2); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 12 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 1}} + + vsc = vec_splat_s8(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vuc = vec_splat_u8(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vss = vec_splat_s16(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vus = vec_splat_u16(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vsi = vec_splat_s32(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vui = vec_splat_u32(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vsl = vec_splat_s64(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} + vul = vec_splat_u64(idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* {{must be a constant integer}} +} + +void test_integer(void) { + vsc = vec_rl_mask(vsc, vuc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer}} + vuc = vec_rl_mask(vuc, vuc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // 
expected-note@vecintrin.h:* 1 {{must be a constant integer}} + vss = vec_rl_mask(vss, vus, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer}} + vus = vec_rl_mask(vus, vus, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer}} + vsi = vec_rl_mask(vsi, vui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer}} + vui = vec_rl_mask(vui, vui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer}} + vsl = vec_rl_mask(vsl, vul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer}} + vul = vec_rl_mask(vul, vul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 7 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer}} + + vsc = vec_sld(vsc, vsc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vsc = vec_sld(vsc, vsc, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vsc = vec_sld(vsc, vsc, 16); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer 
from 0 to 15}} + vuc = vec_sld(vuc, vuc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vuc = vec_sld(vuc, vuc, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vuc = vec_sld(vuc, vuc, 16); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vss = vec_sld(vss, vss, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vus = vec_sld(vus, vus, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vsi = vec_sld(vsi, vsi, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vui = vec_sld(vui, vui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vsl = vec_sld(vsl, vsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + vul = vec_sld(vul, vul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant 
integer from 0 to 15}} + vd = vec_sld(vd, vd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 15}} + + vsc = vec_sldw(vsc, vsc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsc = vec_sldw(vsc, vsc, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsc = vec_sldw(vsc, vsc, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vuc = vec_sldw(vuc, vuc, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vuc = vec_sldw(vuc, vuc, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vuc = vec_sldw(vuc, vuc, 4); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vss = vec_sldw(vss, vss, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vus = vec_sldw(vus, vus, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a 
constant integer from 0 to 3}} + vsi = vec_sldw(vsi, vsi, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vui = vec_sldw(vui, vui, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vsl = vec_sldw(vsl, vsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vul = vec_sldw(vul, vul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} + vd = vec_sldw(vd, vd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 8 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 3}} +} + +void test_float(void) { + vd = vec_ctd(vsl, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vd = vec_ctd(vsl, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vd = vec_ctd(vsl, 32); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vd = vec_ctd(vul, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 
{{must be a constant integer from 0 to 31}} + vd = vec_ctd(vul, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vd = vec_ctd(vul, 32); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{candidate function not viable}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + + vsl = vec_ctsl(vd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vsl = vec_ctsl(vd, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vsl = vec_ctsl(vd, 32); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vul = vec_ctul(vd, idx); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vul = vec_ctul(vd, -1); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + vul = vec_ctul(vd, 32); // expected-error {{no matching function}} + // expected-note@vecintrin.h:* 1 {{must be a constant integer from 0 to 31}} + + vbl = vec_fp_test_data_class(vd, idx, &cc); // expected-error {{must be a constant integer}} + vbl = vec_fp_test_data_class(vd, -1, &cc); // expected-error {{should be a value from 0 to 4095}} + vbl = vec_fp_test_data_class(vd, 4096, &cc); // expected-error {{should be a value from 0 to 4095}} +} diff --git a/clang/test/CodeGen/builtins-systemz-zvector.c b/clang/test/CodeGen/builtins-systemz-zvector.c new file mode 100644 index 0000000000000..6d554af44e93a --- /dev/null +++ b/clang/test/CodeGen/builtins-systemz-zvector.c @@ -0,0 +1,2967 @@ +// REQUIRES: systemz-registered-target +// RUN: %clang_cc1 -target-cpu z13 
-triple s390x-linux-gnu \ +// RUN: -O -fzvector -fno-lax-vector-conversions \ +// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s + +#include + +volatile vector signed char vsc; +volatile vector signed short vss; +volatile vector signed int vsi; +volatile vector signed long long vsl; +volatile vector unsigned char vuc; +volatile vector unsigned short vus; +volatile vector unsigned int vui; +volatile vector unsigned long long vul; +volatile vector bool char vbc; +volatile vector bool short vbs; +volatile vector bool int vbi; +volatile vector bool long long vbl; +volatile vector double vd; + +volatile signed char sc; +volatile signed short ss; +volatile signed int si; +volatile signed long long sl; +volatile unsigned char uc; +volatile unsigned short us; +volatile unsigned int ui; +volatile unsigned long long ul; +volatile double d; + +const void * volatile cptr; +const signed char * volatile cptrsc; +const signed short * volatile cptrss; +const signed int * volatile cptrsi; +const signed long long * volatile cptrsl; +const unsigned char * volatile cptruc; +const unsigned short * volatile cptrus; +const unsigned int * volatile cptrui; +const unsigned long long * volatile cptrul; +const float * volatile cptrf; +const double * volatile cptrd; + +void * volatile ptr; +signed char * volatile ptrsc; +signed short * volatile ptrss; +signed int * volatile ptrsi; +signed long long * volatile ptrsl; +unsigned char * volatile ptruc; +unsigned short * volatile ptrus; +unsigned int * volatile ptrui; +unsigned long long * volatile ptrul; +float * volatile ptrf; +double * volatile ptrd; + +volatile unsigned int len; +volatile int idx; +int cc; + +void test_core(void) { + len = __lcbb(cptr, 64); + // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 0) + len = __lcbb(cptr, 128); + // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 1) + len = __lcbb(cptr, 256); + // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 2) + len = __lcbb(cptr, 512); + // CHECK: call i32 
@llvm.s390.lcbb(i8* %{{.*}}, i32 3) + len = __lcbb(cptr, 1024); + // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 4) + len = __lcbb(cptr, 2048); + // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 5) + len = __lcbb(cptr, 4096); + // CHECK: call i32 @llvm.s390.lcbb(i8* %{{.*}}, i32 6) + + sc = vec_extract(vsc, idx); + // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}} + uc = vec_extract(vuc, idx); + // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}} + uc = vec_extract(vbc, idx); + // CHECK: extractelement <16 x i8> %{{.*}}, i32 %{{.*}} + ss = vec_extract(vss, idx); + // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}} + us = vec_extract(vus, idx); + // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}} + us = vec_extract(vbs, idx); + // CHECK: extractelement <8 x i16> %{{.*}}, i32 %{{.*}} + si = vec_extract(vsi, idx); + // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}} + ui = vec_extract(vui, idx); + // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}} + ui = vec_extract(vbi, idx); + // CHECK: extractelement <4 x i32> %{{.*}}, i32 %{{.*}} + sl = vec_extract(vsl, idx); + // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}} + ul = vec_extract(vul, idx); + // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}} + ul = vec_extract(vbl, idx); + // CHECK: extractelement <2 x i64> %{{.*}}, i32 %{{.*}} + d = vec_extract(vd, idx); + // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}} + + vsc = vec_insert(sc, vsc, idx); + // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}} + vuc = vec_insert(uc, vuc, idx); + // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}} + vuc = vec_insert(uc, vbc, idx); + // CHECK: insertelement <16 x i8> %{{.*}}, i8 %{{.*}}, i32 %{{.*}} + vss = vec_insert(ss, vss, idx); + // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}} + vus = vec_insert(us, vus, idx); + // CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}} + vus = vec_insert(us, vbs, idx); + // 
CHECK: insertelement <8 x i16> %{{.*}}, i16 %{{.*}}, i32 %{{.*}} + vsi = vec_insert(si, vsi, idx); + // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}} + vui = vec_insert(ui, vui, idx); + // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}} + vui = vec_insert(ui, vbi, idx); + // CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}} + vsl = vec_insert(sl, vsl, idx); + // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}} + vul = vec_insert(ul, vul, idx); + // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}} + vul = vec_insert(ul, vbl, idx); + // CHECK: insertelement <2 x i64> %{{.*}}, i64 %{{.*}}, i32 %{{.*}} + vd = vec_insert(d, vd, idx); + // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}} + + vsc = vec_promote(sc, idx); + // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}} + vuc = vec_promote(uc, idx); + // CHECK: insertelement <16 x i8> undef, i8 %{{.*}}, i32 %{{.*}} + vss = vec_promote(ss, idx); + // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}} + vus = vec_promote(us, idx); + // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 %{{.*}} + vsi = vec_promote(si, idx); + // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}} + vui = vec_promote(ui, idx); + // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 %{{.*}} + vsl = vec_promote(sl, idx); + // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}} + vul = vec_promote(ul, idx); + // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 %{{.*}} + vd = vec_promote(d, idx); + // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 %{{.*}} + + vsc = vec_insert_and_zero(cptrsc); + // CHECK: insertelement <16 x i8> , i8 %{{.*}}, i32 7 + vuc = vec_insert_and_zero(cptruc); + // CHECK: insertelement <16 x i8> , i8 %{{.*}}, i32 7 + vss = vec_insert_and_zero(cptrss); + // CHECK: insertelement <8 x i16> , i16 %{{.*}}, i32 3 + vus = 
vec_insert_and_zero(cptrus); + // CHECK: insertelement <8 x i16> , i16 %{{.*}}, i32 3 + vsi = vec_insert_and_zero(cptrsi); + // CHECK: insertelement <4 x i32> , i32 %{{.*}}, i32 1 + vui = vec_insert_and_zero(cptrui); + // CHECK: insertelement <4 x i32> , i32 %{{.*}}, i32 1 + vsl = vec_insert_and_zero(cptrsl); + // CHECK: insertelement <2 x i64> , i64 %{{.*}}, i32 0 + vul = vec_insert_and_zero(cptrul); + // CHECK: insertelement <2 x i64> , i64 %{{.*}}, i32 0 + vd = vec_insert_and_zero(cptrd); + // CHECK: insertelement <2 x double> , double %{{.*}}, i32 0 + + vsc = vec_perm(vsc, vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_perm(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_perm(vbc, vbc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_perm(vss, vss, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_perm(vus, vus, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_perm(vbs, vbs, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_perm(vsi, vsi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_perm(vui, vui, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_perm(vbi, vbi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_perm(vsl, vsl, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_perm(vul, vul, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, 
<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_perm(vbl, vbl, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vd = vec_perm(vd, vd, vuc); + // CHECK: call <16 x i8> @llvm.s390.vperm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsl = vec_permi(vsl, vsl, 0); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0) + vsl = vec_permi(vsl, vsl, 1); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1) + vsl = vec_permi(vsl, vsl, 2); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4) + vsl = vec_permi(vsl, vsl, 3); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5) + vul = vec_permi(vul, vul, 0); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0) + vul = vec_permi(vul, vul, 1); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1) + vul = vec_permi(vul, vul, 2); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4) + vul = vec_permi(vul, vul, 3); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5) + vbl = vec_permi(vbl, vbl, 0); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0) + vbl = vec_permi(vbl, vbl, 1); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1) + vbl = vec_permi(vbl, vbl, 2); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4) + vbl = vec_permi(vbl, vbl, 3); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5) + vd = vec_permi(vd, vd, 0); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0) + vd = vec_permi(vd, vd, 1); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 1) + vd = vec_permi(vd, 
vd, 2); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 4) + vd = vec_permi(vd, vd, 3); + // CHECK: call <2 x i64> @llvm.s390.vpdi(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 5) + + vsc = vec_sel(vsc, vsc, vuc); + vsc = vec_sel(vsc, vsc, vbc); + vuc = vec_sel(vuc, vuc, vuc); + vuc = vec_sel(vuc, vuc, vbc); + vbc = vec_sel(vbc, vbc, vuc); + vbc = vec_sel(vbc, vbc, vbc); + vss = vec_sel(vss, vss, vus); + vss = vec_sel(vss, vss, vbs); + vus = vec_sel(vus, vus, vus); + vus = vec_sel(vus, vus, vbs); + vbs = vec_sel(vbs, vbs, vus); + vbs = vec_sel(vbs, vbs, vbs); + vsi = vec_sel(vsi, vsi, vui); + vsi = vec_sel(vsi, vsi, vbi); + vui = vec_sel(vui, vui, vui); + vui = vec_sel(vui, vui, vbi); + vbi = vec_sel(vbi, vbi, vui); + vbi = vec_sel(vbi, vbi, vbi); + vsl = vec_sel(vsl, vsl, vul); + vsl = vec_sel(vsl, vsl, vbl); + vul = vec_sel(vul, vul, vul); + vul = vec_sel(vul, vul, vbl); + vbl = vec_sel(vbl, vbl, vul); + vbl = vec_sel(vbl, vbl, vbl); + vd = vec_sel(vd, vd, vul); + vd = vec_sel(vd, vd, vbl); + + vsi = vec_gather_element(vsi, vui, cptrsi, 0); + vsi = vec_gather_element(vsi, vui, cptrsi, 1); + vsi = vec_gather_element(vsi, vui, cptrsi, 2); + vsi = vec_gather_element(vsi, vui, cptrsi, 3); + vui = vec_gather_element(vui, vui, cptrui, 0); + vui = vec_gather_element(vui, vui, cptrui, 1); + vui = vec_gather_element(vui, vui, cptrui, 2); + vui = vec_gather_element(vui, vui, cptrui, 3); + vbi = vec_gather_element(vbi, vui, cptrui, 0); + vbi = vec_gather_element(vbi, vui, cptrui, 1); + vbi = vec_gather_element(vbi, vui, cptrui, 2); + vbi = vec_gather_element(vbi, vui, cptrui, 3); + vsl = vec_gather_element(vsl, vul, cptrsl, 0); + vsl = vec_gather_element(vsl, vul, cptrsl, 1); + vul = vec_gather_element(vul, vul, cptrul, 0); + vul = vec_gather_element(vul, vul, cptrul, 1); + vbl = vec_gather_element(vbl, vul, cptrul, 0); + vbl = vec_gather_element(vbl, vul, cptrul, 1); + vd = vec_gather_element(vd, vul, cptrd, 0); + vd = 
vec_gather_element(vd, vul, cptrd, 1); + + vec_scatter_element(vsi, vui, ptrsi, 0); + vec_scatter_element(vsi, vui, ptrsi, 1); + vec_scatter_element(vsi, vui, ptrsi, 2); + vec_scatter_element(vsi, vui, ptrsi, 3); + vec_scatter_element(vui, vui, ptrui, 0); + vec_scatter_element(vui, vui, ptrui, 1); + vec_scatter_element(vui, vui, ptrui, 2); + vec_scatter_element(vui, vui, ptrui, 3); + vec_scatter_element(vbi, vui, ptrui, 0); + vec_scatter_element(vbi, vui, ptrui, 1); + vec_scatter_element(vbi, vui, ptrui, 2); + vec_scatter_element(vbi, vui, ptrui, 3); + vec_scatter_element(vsl, vul, ptrsl, 0); + vec_scatter_element(vsl, vul, ptrsl, 1); + vec_scatter_element(vul, vul, ptrul, 0); + vec_scatter_element(vul, vul, ptrul, 1); + vec_scatter_element(vbl, vul, ptrul, 0); + vec_scatter_element(vbl, vul, ptrul, 1); + vec_scatter_element(vd, vul, ptrd, 0); + vec_scatter_element(vd, vul, ptrd, 1); + + vsc = vec_xld2(idx, cptrsc); + vuc = vec_xld2(idx, cptruc); + vss = vec_xld2(idx, cptrss); + vus = vec_xld2(idx, cptrus); + vsi = vec_xld2(idx, cptrsi); + vui = vec_xld2(idx, cptrui); + vsl = vec_xld2(idx, cptrsl); + vul = vec_xld2(idx, cptrul); + vd = vec_xld2(idx, cptrd); + + vsc = vec_xlw4(idx, cptrsc); + vuc = vec_xlw4(idx, cptruc); + vss = vec_xlw4(idx, cptrss); + vus = vec_xlw4(idx, cptrus); + vsi = vec_xlw4(idx, cptrsi); + vui = vec_xlw4(idx, cptrui); + + vec_xstd2(vsc, idx, ptrsc); + vec_xstd2(vuc, idx, ptruc); + vec_xstd2(vss, idx, ptrss); + vec_xstd2(vus, idx, ptrus); + vec_xstd2(vsi, idx, ptrsi); + vec_xstd2(vui, idx, ptrui); + vec_xstd2(vsl, idx, ptrsl); + vec_xstd2(vul, idx, ptrul); + vec_xstd2(vd, idx, ptrd); + + vec_xstw4(vsc, idx, ptrsc); + vec_xstw4(vuc, idx, ptruc); + vec_xstw4(vss, idx, ptrss); + vec_xstw4(vus, idx, ptrus); + vec_xstw4(vsi, idx, ptrsi); + vec_xstw4(vui, idx, ptrui); + + vsc = vec_load_bndry(cptrsc, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vuc = vec_load_bndry(cptruc, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* 
%{{.*}}, i32 0) + vss = vec_load_bndry(cptrss, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vus = vec_load_bndry(cptrus, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vsi = vec_load_bndry(cptrsi, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vui = vec_load_bndry(cptrui, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vsl = vec_load_bndry(cptrsl, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vul = vec_load_bndry(cptrul, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vd = vec_load_bndry(cptrd, 64); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 0) + vsc = vec_load_bndry(cptrsc, 128); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 1) + vsc = vec_load_bndry(cptrsc, 256); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 2) + vsc = vec_load_bndry(cptrsc, 512); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 3) + vsc = vec_load_bndry(cptrsc, 1024); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 4) + vsc = vec_load_bndry(cptrsc, 2048); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 5) + vsc = vec_load_bndry(cptrsc, 4096); + // CHECK: call <16 x i8> @llvm.s390.vlbb(i8* %{{.*}}, i32 6) + + vsc = vec_load_len(cptrsc, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vuc = vec_load_len(cptruc, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vss = vec_load_len(cptrss, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vus = vec_load_len(cptrus, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vsi = vec_load_len(cptrsi, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vui = vec_load_len(cptrui, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vsl = vec_load_len(cptrsl, idx); + // CHECK: call <16 x i8> 
@llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vul = vec_load_len(cptrul, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + vd = vec_load_len(cptrd, idx); + // CHECK: call <16 x i8> @llvm.s390.vll(i32 %{{.*}}, i8* %{{.*}}) + + vec_store_len(vsc, ptrsc, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vuc, ptruc, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vss, ptrss, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vus, ptrus, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vsi, ptrsi, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vui, ptrui, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vsl, ptrsl, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vul, ptrul, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + vec_store_len(vd, ptrd, idx); + // CHECK: call void @llvm.s390.vstl(<16 x i8> %{{.*}}, i32 %{{.*}}, i8* %{{.*}}) + + vsl = vec_load_pair(sl, sl); + vul = vec_load_pair(ul, ul); + + vuc = vec_genmask(0); + // CHECK: <16 x i8> zeroinitializer + vuc = vec_genmask(0x8000); + // CHECK: <16 x i8> + vuc = vec_genmask(0xffff); + // CHECK: <16 x i8> + + vuc = vec_genmasks_8(0, 7); + // CHECK: <16 x i8> + vuc = vec_genmasks_8(1, 4); + // CHECK: <16 x i8> + vuc = vec_genmasks_8(6, 2); + // CHECK: <16 x i8> + vus = vec_genmasks_16(0, 15); + // CHECK: <8 x i16> + vus = vec_genmasks_16(2, 11); + // CHECK: <8 x i16> + vus = vec_genmasks_16(9, 2); + // CHECK: <8 x i16> + vui = vec_genmasks_32(0, 31); + // CHECK: <4 x i32> + vui = vec_genmasks_32(7, 20); + // CHECK: <4 x i32> + vui = vec_genmasks_32(25, 4); + // CHECK: 
<4 x i32> + vul = vec_genmasks_64(0, 63); + // CHECK: <2 x i64> + vul = vec_genmasks_64(3, 40); + // CHECK: <2 x i64> + vul = vec_genmasks_64(30, 11); + // CHECK: <2 x i64> + + vsc = vec_splat(vsc, 0); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer + vsc = vec_splat(vsc, 15); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> + vuc = vec_splat(vuc, 0); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer + vuc = vec_splat(vuc, 15); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> + vbc = vec_splat(vbc, 0); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer + vbc = vec_splat(vbc, 15); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> + vss = vec_splat(vss, 0); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer + vss = vec_splat(vss, 7); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> + vus = vec_splat(vus, 0); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer + vus = vec_splat(vus, 7); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> + vbs = vec_splat(vbs, 0); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer + vbs = vec_splat(vbs, 7); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> + vsi = vec_splat(vsi, 0); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer + vsi = vec_splat(vsi, 3); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> + vui = vec_splat(vui, 0); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer + vui = vec_splat(vui, 3); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> + vbi = vec_splat(vbi, 0); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> 
zeroinitializer + vbi = vec_splat(vbi, 3); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> + vsl = vec_splat(vsl, 0); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer + vsl = vec_splat(vsl, 1); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> + vul = vec_splat(vul, 0); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer + vul = vec_splat(vul, 1); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> + vbl = vec_splat(vbl, 0); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer + vbl = vec_splat(vbl, 1); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> + vd = vec_splat(vd, 0); + // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer + vd = vec_splat(vd, 1); + // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> + + vsc = vec_splat_s8(-128); + // CHECK: <16 x i8> + vsc = vec_splat_s8(127); + // CHECK: <16 x i8> + vuc = vec_splat_u8(1); + // CHECK: <16 x i8> + vuc = vec_splat_u8(254); + // CHECK: <16 x i8> + vss = vec_splat_s16(-32768); + // CHECK: <8 x i16> + vss = vec_splat_s16(32767); + // CHECK: <8 x i16> + vus = vec_splat_u16(1); + // CHECK: <8 x i16> + vus = vec_splat_u16(65534); + // CHECK: <8 x i16> + vsi = vec_splat_s32(-32768); + // CHECK: <4 x i32> + vsi = vec_splat_s32(32767); + // CHECK: <4 x i32> + vui = vec_splat_u32(-32768); + // CHECK: <4 x i32> + vui = vec_splat_u32(32767); + // CHECK: <4 x i32> + vsl = vec_splat_s64(-32768); + // CHECK: <2 x i64> + vsl = vec_splat_s64(32767); + // CHECK: <2 x i64> + vul = vec_splat_u64(-32768); + // CHECK: <2 x i64> + vul = vec_splat_u64(32767); + // CHECK: <2 x i64> + + vsc = vec_splats(sc); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> undef, <16 x i32> zeroinitializer + vuc = vec_splats(uc); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> 
undef, <16 x i32> zeroinitializer + vss = vec_splats(ss); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer + vus = vec_splats(us); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <8 x i32> zeroinitializer + vsi = vec_splats(si); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer + vui = vec_splats(ui); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> undef, <4 x i32> zeroinitializer + vsl = vec_splats(sl); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer + vul = vec_splats(ul); + // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> undef, <2 x i32> zeroinitializer + vd = vec_splats(d); + // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> undef, <2 x i32> zeroinitializer + + vsl = vec_extend_s64(vsc); + vsl = vec_extend_s64(vss); + vsl = vec_extend_s64(vsi); + + vsc = vec_mergeh(vsc, vsc); + // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> + vuc = vec_mergeh(vuc, vuc); + // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> + vbc = vec_mergeh(vbc, vbc); + // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> + vss = vec_mergeh(vss, vss); + // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vus = vec_mergeh(vus, vus); + // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vbs = vec_mergeh(vbs, vbs); + // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vsi = vec_mergeh(vsi, vsi); + // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> + vui = vec_mergeh(vui, vui); + // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> + vbi = vec_mergeh(vbi, vbi); + // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> + vsl = vec_mergeh(vsl, vsl); + // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> + vul = vec_mergeh(vul, vul); + // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> + vbl = 
vec_mergeh(vbl, vbl); + // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i32> + vd = vec_mergeh(vd, vd); + // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> + + vsc = vec_mergel(vsc, vsc); + // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %a, <16 x i8> %b, <16 x i32> + vuc = vec_mergel(vuc, vuc); + // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %a, <16 x i8> %b, <16 x i32> + vbc = vec_mergel(vbc, vbc); + // shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %a, <16 x i8> %b, <16 x i32> + vss = vec_mergel(vss, vss); + // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vus = vec_mergel(vus, vus); + // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vbs = vec_mergel(vbs, vbs); + // shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vsi = vec_mergel(vsi, vsi); + // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, + vui = vec_mergel(vui, vui); + // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, + vbi = vec_mergel(vbi, vbi); + // shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, + vsl = vec_mergel(vsl, vsl); + // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, + vul = vec_mergel(vul, vul); + // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, + vbl = vec_mergel(vbl, vbl); + // shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, + vd = vec_mergel(vd, vd); + // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, + + vsc = vec_pack(vss, vss); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> + vuc = vec_pack(vus, vus); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> + vbc = vec_pack(vbs, vbs); + // CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i32> + vss = vec_pack(vsi, vsi); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vus = vec_pack(vui, vui); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vbs = 
vec_pack(vbi, vbi); + // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i32> + vsi = vec_pack(vsl, vsl); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> + vui = vec_pack(vul, vul); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> + vbi = vec_pack(vbl, vbl); + // CHECK: shufflevector <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> + + vsc = vec_packs(vss, vss); + // CHECK: call <16 x i8> @llvm.s390.vpksh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vuc = vec_packs(vus, vus); + // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vss = vec_packs(vsi, vsi); + // CHECK: call <8 x i16> @llvm.s390.vpksf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vus = vec_packs(vui, vui); + // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vsi = vec_packs(vsl, vsl); + // CHECK: call <4 x i32> @llvm.s390.vpksg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + vui = vec_packs(vul, vul); + // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vsc = vec_packs_cc(vss, vss, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpkshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vuc = vec_packs_cc(vus, vus, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vss = vec_packs_cc(vsi, vsi, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpksfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vus = vec_packs_cc(vui, vui, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vsi = vec_packs_cc(vsl, vsl, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpksgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + vui = vec_packs_cc(vul, vul, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vuc = vec_packsu(vss, vss); + // CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vuc = vec_packsu(vus, vus); + // 
CHECK: call <16 x i8> @llvm.s390.vpklsh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_packsu(vsi, vsi); + // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vus = vec_packsu(vui, vui); + // CHECK: call <8 x i16> @llvm.s390.vpklsf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_packsu(vsl, vsl); + // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + vui = vec_packsu(vul, vul); + // CHECK: call <4 x i32> @llvm.s390.vpklsg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vuc = vec_packsu_cc(vus, vus, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vpklshs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_packsu_cc(vui, vui, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vpklsfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_packsu_cc(vul, vul, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vpklsgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vss = vec_unpackh(vsc); + // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}}) + vus = vec_unpackh(vuc); + // CHECK: call <8 x i16> @llvm.s390.vuplhb(<16 x i8> %{{.*}}) + vbs = vec_unpackh(vbc); + // CHECK: call <8 x i16> @llvm.s390.vuphb(<16 x i8> %{{.*}}) + vsi = vec_unpackh(vss); + // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}}) + vui = vec_unpackh(vus); + // CHECK: call <4 x i32> @llvm.s390.vuplhh(<8 x i16> %{{.*}}) + vbi = vec_unpackh(vbs); + // CHECK: call <4 x i32> @llvm.s390.vuphh(<8 x i16> %{{.*}}) + vsl = vec_unpackh(vsi); + // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}}) + vul = vec_unpackh(vui); + // CHECK: call <2 x i64> @llvm.s390.vuplhf(<4 x i32> %{{.*}}) + vbl = vec_unpackh(vbi); + // CHECK: call <2 x i64> @llvm.s390.vuphf(<4 x i32> %{{.*}}) + + vss = vec_unpackl(vsc); + // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> %{{.*}}) + vus = vec_unpackl(vuc); + // CHECK: call <8 x i16> @llvm.s390.vupllb(<16 x i8> %{{.*}}) + vbs = vec_unpackl(vbc); + // CHECK: call <8 x i16> @llvm.s390.vuplb(<16 x i8> 
%{{.*}}) + vsi = vec_unpackl(vss); + // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}}) + vui = vec_unpackl(vus); + // CHECK: call <4 x i32> @llvm.s390.vupllh(<8 x i16> %{{.*}}) + vbi = vec_unpackl(vbs); + // CHECK: call <4 x i32> @llvm.s390.vuplhw(<8 x i16> %{{.*}}) + vsl = vec_unpackl(vsi); + // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}}) + vul = vec_unpackl(vui); + // CHECK: call <2 x i64> @llvm.s390.vupllf(<4 x i32> %{{.*}}) + vbl = vec_unpackl(vbi); + // CHECK: call <2 x i64> @llvm.s390.vuplf(<4 x i32> %{{.*}}) +} + +void test_compare(void) { + vbc = vec_cmpeq(vsc, vsc); + // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}} + vbc = vec_cmpeq(vuc, vuc); + // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}} + vbc = vec_cmpeq(vbc, vbc); + // CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}} + vbs = vec_cmpeq(vss, vss); + // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}} + vbs = vec_cmpeq(vus, vus); + // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}} + vbs = vec_cmpeq(vbs, vbs); + // CHECK: icmp eq <8 x i16> %{{.*}}, %{{.*}} + vbi = vec_cmpeq(vsi, vsi); + // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}} + vbi = vec_cmpeq(vui, vui); + // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}} + vbi = vec_cmpeq(vbi, vbi); + // CHECK: icmp eq <4 x i32> %{{.*}}, %{{.*}} + vbl = vec_cmpeq(vsl, vsl); + // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmpeq(vul, vul); + // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmpeq(vbl, vbl); + // CHECK: icmp eq <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmpeq(vd, vd); + // CHECK: fcmp oeq <2 x double> %{{.*}}, %{{.*}} + + vbc = vec_cmpge(vsc, vsc); + // CHECK: icmp sge <16 x i8> %{{.*}}, %{{.*}} + vbc = vec_cmpge(vuc, vuc); + // CHECK: icmp uge <16 x i8> %{{.*}}, %{{.*}} + vbs = vec_cmpge(vss, vss); + // CHECK: icmp sge <8 x i16> %{{.*}}, %{{.*}} + vbs = vec_cmpge(vus, vus); + // CHECK: icmp uge <8 x i16> %{{.*}}, %{{.*}} + vbi = vec_cmpge(vsi, vsi); + // CHECK: icmp sge <4 x i32> %{{.*}}, %{{.*}} + vbi = vec_cmpge(vui, vui); + // CHECK: 
icmp uge <4 x i32> %{{.*}}, %{{.*}} + vbl = vec_cmpge(vsl, vsl); + // CHECK: icmp sge <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmpge(vul, vul); + // CHECK: icmp uge <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmpge(vd, vd); + // CHECK: fcmp oge <2 x double> %{{.*}}, %{{.*}} + + vbc = vec_cmpgt(vsc, vsc); + // CHECK: icmp sgt <16 x i8> %{{.*}}, %{{.*}} + vbc = vec_cmpgt(vuc, vuc); + // CHECK: icmp ugt <16 x i8> %{{.*}}, %{{.*}} + vbs = vec_cmpgt(vss, vss); + // CHECK: icmp sgt <8 x i16> %{{.*}}, %{{.*}} + vbs = vec_cmpgt(vus, vus); + // CHECK: icmp ugt <8 x i16> %{{.*}}, %{{.*}} + vbi = vec_cmpgt(vsi, vsi); + // CHECK: icmp sgt <4 x i32> %{{.*}}, %{{.*}} + vbi = vec_cmpgt(vui, vui); + // CHECK: icmp ugt <4 x i32> %{{.*}}, %{{.*}} + vbl = vec_cmpgt(vsl, vsl); + // CHECK: icmp sgt <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmpgt(vul, vul); + // CHECK: icmp ugt <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmpgt(vd, vd); + // CHECK: fcmp ogt <2 x double> %{{.*}}, %{{.*}} + + vbc = vec_cmple(vsc, vsc); + // CHECK: icmp sle <16 x i8> %{{.*}}, %{{.*}} + vbc = vec_cmple(vuc, vuc); + // CHECK: icmp ule <16 x i8> %{{.*}}, %{{.*}} + vbs = vec_cmple(vss, vss); + // CHECK: icmp sle <8 x i16> %{{.*}}, %{{.*}} + vbs = vec_cmple(vus, vus); + // CHECK: icmp ule <8 x i16> %{{.*}}, %{{.*}} + vbi = vec_cmple(vsi, vsi); + // CHECK: icmp sle <4 x i32> %{{.*}}, %{{.*}} + vbi = vec_cmple(vui, vui); + // CHECK: icmp ule <4 x i32> %{{.*}}, %{{.*}} + vbl = vec_cmple(vsl, vsl); + // CHECK: icmp sle <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmple(vul, vul); + // CHECK: icmp ule <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmple(vd, vd); + // CHECK: fcmp ole <2 x double> %{{.*}}, %{{.*}} + + vbc = vec_cmplt(vsc, vsc); + // CHECK: icmp slt <16 x i8> %{{.*}}, %{{.*}} + vbc = vec_cmplt(vuc, vuc); + // CHECK: icmp ult <16 x i8> %{{.*}}, %{{.*}} + vbs = vec_cmplt(vss, vss); + // CHECK: icmp slt <8 x i16> %{{.*}}, %{{.*}} + vbs = vec_cmplt(vus, vus); + // CHECK: icmp ult <8 x i16> %{{.*}}, %{{.*}} + vbi = vec_cmplt(vsi, vsi); 
+ // CHECK: icmp slt <4 x i32> %{{.*}}, %{{.*}} + vbi = vec_cmplt(vui, vui); + // CHECK: icmp ult <4 x i32> %{{.*}}, %{{.*}} + vbl = vec_cmplt(vsl, vsl); + // CHECK: icmp slt <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmplt(vul, vul); + // CHECK: icmp ult <2 x i64> %{{.*}}, %{{.*}} + vbl = vec_cmplt(vd, vd); + // CHECK: fcmp olt <2 x double> %{{.*}}, %{{.*}} + + idx = vec_all_eq(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_eq(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_eq(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_eq(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_eq(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_eq(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_eq(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_eq(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_eq(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_eq(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_eq(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_eq(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_eq(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_eq(vbs, vbs); + // CHECK: call { <8 x 
i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_eq(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_eq(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_eq(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_eq(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_eq(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_eq(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_eq(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_eq(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_eq(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_eq(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_eq(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_eq(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_eq(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_eq(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_eq(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_all_ne(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> 
%{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ne(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ne(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ne(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ne(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ne(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ne(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ne(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ne(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ne(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ne(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ne(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ne(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ne(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ne(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ne(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ne(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = 
vec_all_ne(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ne(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ne(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ne(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ne(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ne(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ne(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ne(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ne(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ne(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ne(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ne(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_all_ge(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ge(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ge(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ge(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ge(vuc, vbc); + // CHECK: call { 
<16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ge(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ge(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_ge(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ge(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ge(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ge(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ge(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ge(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ge(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_ge(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ge(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ge(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ge(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ge(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ge(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_ge(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, 
<4 x i32> %{{.*}}) + idx = vec_all_ge(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ge(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ge(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ge(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ge(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ge(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ge(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_ge(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_all_gt(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_gt(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_gt(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_gt(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_gt(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_gt(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_gt(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_gt(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_gt(vss, 
vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_gt(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_gt(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_gt(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_gt(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_gt(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_gt(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_gt(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_gt(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_gt(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_gt(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_gt(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_gt(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_gt(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_gt(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_gt(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_gt(vul, vul); + // CHECK: call { <2 x i64>, i32 } 
@llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_gt(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_gt(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_gt(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_gt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_all_le(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_le(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_le(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_le(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_le(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_le(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_le(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_le(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_le(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_le(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_le(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_le(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> 
%{{.*}}) + idx = vec_all_le(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_le(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_le(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_le(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_le(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_le(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_le(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_le(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_le(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_le(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_le(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_le(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_le(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_le(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_le(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_le(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_le(vd, vd); + // CHECK: call 
{ <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_all_lt(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_lt(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_lt(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_lt(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_lt(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_lt(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_lt(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_all_lt(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_lt(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_lt(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_lt(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_lt(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_lt(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_lt(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_all_lt(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_lt(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x 
i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_lt(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_lt(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_lt(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_lt(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_lt(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_all_lt(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_lt(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_lt(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_lt(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_lt(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_lt(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_lt(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_all_lt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_all_nge(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + idx = vec_all_ngt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + idx = vec_all_nle(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> 
%{{.*}}) + idx = vec_all_nlt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_all_nan(vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15) + idx = vec_all_numeric(vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15) + + idx = vec_any_eq(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_eq(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_eq(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_eq(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_eq(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_eq(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_eq(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_eq(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_eq(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_eq(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_eq(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_eq(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_eq(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_eq(vbs, vbs); + // CHECK: call { 
<8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_eq(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_eq(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_eq(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_eq(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_eq(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_eq(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_eq(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_eq(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_eq(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_eq(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_eq(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_eq(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_eq(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_eq(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_eq(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_any_ne(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x 
i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ne(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ne(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ne(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ne(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ne(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ne(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vceqbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ne(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ne(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ne(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ne(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ne(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ne(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ne(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vceqhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ne(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ne(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ne(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = 
vec_any_ne(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ne(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ne(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ne(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vceqfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ne(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ne(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ne(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ne(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ne(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ne(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ne(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vceqgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ne(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_any_ge(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ge(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ge(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ge(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ge(vuc, vbc); + // CHECK: call { 
<16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ge(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ge(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_ge(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ge(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ge(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ge(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ge(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ge(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ge(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_ge(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ge(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ge(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ge(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ge(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ge(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_ge(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, 
<4 x i32> %{{.*}}) + idx = vec_any_ge(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ge(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ge(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ge(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ge(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ge(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ge(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_ge(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_any_gt(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_gt(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_gt(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_gt(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_gt(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_gt(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_gt(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_gt(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_gt(vss, 
vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_gt(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_gt(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_gt(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_gt(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_gt(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_gt(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_gt(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_gt(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_gt(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_gt(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_gt(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_gt(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_gt(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_gt(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_gt(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_gt(vul, vul); + // CHECK: call { <2 x i64>, i32 } 
@llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_gt(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_gt(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_gt(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_gt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_any_le(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_le(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_le(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_le(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_le(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_le(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_le(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_le(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_le(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_le(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_le(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_le(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> 
%{{.*}}) + idx = vec_any_le(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_le(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_le(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_le(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_le(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_le(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_le(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_le(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_le(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_le(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_le(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_le(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_le(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_le(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_le(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_le(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_le(vd, vd); + // CHECK: call 
{ <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_any_lt(vsc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_lt(vsc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_lt(vbc, vsc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_lt(vuc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_lt(vuc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_lt(vbc, vuc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_lt(vbc, vbc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vchlbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_any_lt(vss, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_lt(vss, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_lt(vbs, vss); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_lt(vus, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_lt(vus, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_lt(vbs, vus); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_lt(vbs, vbs); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vchlhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + idx = vec_any_lt(vsi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_lt(vsi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x 
i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_lt(vbi, vsi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_lt(vui, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_lt(vui, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_lt(vbi, vui); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_lt(vbi, vbi); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vchlfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + idx = vec_any_lt(vsl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_lt(vsl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_lt(vbl, vsl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_lt(vul, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_lt(vul, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_lt(vbl, vul); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_lt(vbl, vbl); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vchlgs(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + idx = vec_any_lt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_any_nge(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + idx = vec_any_ngt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + idx = vec_any_nle(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> 
%{{.*}}) + idx = vec_any_nlt(vd, vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}}) + + idx = vec_any_nan(vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15) + idx = vec_any_numeric(vd); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15) +} + +void test_integer(void) { + vsc = vec_andc(vsc, vsc); + vsc = vec_andc(vsc, vbc); + vsc = vec_andc(vbc, vsc); + vuc = vec_andc(vuc, vuc); + vuc = vec_andc(vuc, vbc); + vuc = vec_andc(vbc, vuc); + vbc = vec_andc(vbc, vbc); + vss = vec_andc(vss, vss); + vss = vec_andc(vss, vbs); + vss = vec_andc(vbs, vss); + vus = vec_andc(vus, vus); + vus = vec_andc(vus, vbs); + vus = vec_andc(vbs, vus); + vbs = vec_andc(vbs, vbs); + vsi = vec_andc(vsi, vsi); + vsi = vec_andc(vsi, vbi); + vsi = vec_andc(vbi, vsi); + vui = vec_andc(vui, vui); + vui = vec_andc(vui, vbi); + vui = vec_andc(vbi, vui); + vbi = vec_andc(vbi, vbi); + vsl = vec_andc(vsl, vsl); + vsl = vec_andc(vsl, vbl); + vsl = vec_andc(vbl, vsl); + vul = vec_andc(vul, vul); + vul = vec_andc(vul, vbl); + vul = vec_andc(vbl, vul); + vbl = vec_andc(vbl, vbl); + vd = vec_andc(vd, vd); + vd = vec_andc(vd, vbl); + vd = vec_andc(vbl, vd); + + vsc = vec_nor(vsc, vsc); + vsc = vec_nor(vsc, vbc); + vsc = vec_nor(vbc, vsc); + vuc = vec_nor(vuc, vuc); + vuc = vec_nor(vuc, vbc); + vuc = vec_nor(vbc, vuc); + vbc = vec_nor(vbc, vbc); + vss = vec_nor(vss, vss); + vss = vec_nor(vss, vbs); + vss = vec_nor(vbs, vss); + vus = vec_nor(vus, vus); + vus = vec_nor(vus, vbs); + vus = vec_nor(vbs, vus); + vbs = vec_nor(vbs, vbs); + vsi = vec_nor(vsi, vsi); + vsi = vec_nor(vsi, vbi); + vsi = vec_nor(vbi, vsi); + vui = vec_nor(vui, vui); + vui = vec_nor(vui, vbi); + vui = vec_nor(vbi, vui); + vbi = vec_nor(vbi, vbi); + vsl = vec_nor(vsl, vsl); + vsl = vec_nor(vsl, vbl); + vsl = vec_nor(vbl, vsl); + vul = vec_nor(vul, vul); + vul = vec_nor(vul, vbl); + vul = vec_nor(vbl, vul); + vbl = 
vec_nor(vbl, vbl); + vd = vec_nor(vd, vd); + vd = vec_nor(vd, vbl); + vd = vec_nor(vbl, vd); + + vuc = vec_cntlz(vsc); + // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false) + vuc = vec_cntlz(vuc); + // CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.*}}, i1 false) + vus = vec_cntlz(vss); + // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false) + vus = vec_cntlz(vus); + // CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.*}}, i1 false) + vui = vec_cntlz(vsi); + // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false) + vui = vec_cntlz(vui); + // CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.*}}, i1 false) + vul = vec_cntlz(vsl); + // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false) + vul = vec_cntlz(vul); + // CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.*}}, i1 false) + + vuc = vec_cnttz(vsc); + // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false) + vuc = vec_cnttz(vuc); + // CHECK: call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %{{.*}}, i1 false) + vus = vec_cnttz(vss); + // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false) + vus = vec_cnttz(vus); + // CHECK: call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %{{.*}}, i1 false) + vui = vec_cnttz(vsi); + // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false) + vui = vec_cnttz(vui); + // CHECK: call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %{{.*}}, i1 false) + vul = vec_cnttz(vsl); + // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false) + vul = vec_cnttz(vul); + // CHECK: call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %{{.*}}, i1 false) + + vuc = vec_popcnt(vsc); + // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}}) + vuc = vec_popcnt(vuc); + // CHECK: call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %{{.*}}) + vus = vec_popcnt(vss); + // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %{{.*}}) + vus = vec_popcnt(vus); + // CHECK: call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> 
%{{.*}}) + vui = vec_popcnt(vsi); + // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}}) + vui = vec_popcnt(vui); + // CHECK: call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %{{.*}}) + vul = vec_popcnt(vsl); + // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}}) + vul = vec_popcnt(vul); + // CHECK: call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %{{.*}}) + + vsc = vec_rl(vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_rl(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.verllvb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_rl(vss, vus); + // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_rl(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.verllvh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_rl(vsi, vui); + // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_rl(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.verllvf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vsl = vec_rl(vsl, vul); + // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + vul = vec_rl(vul, vul); + // CHECK: call <2 x i64> @llvm.s390.verllvg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vsc = vec_rli(vsc, ul); + // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}}) + vuc = vec_rli(vuc, ul); + // CHECK: call <16 x i8> @llvm.s390.verllb(<16 x i8> %{{.*}}, i32 %{{.*}}) + vss = vec_rli(vss, ul); + // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}}) + vus = vec_rli(vus, ul); + // CHECK: call <8 x i16> @llvm.s390.verllh(<8 x i16> %{{.*}}, i32 %{{.*}}) + vsi = vec_rli(vsi, ul); + // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}}) + vui = vec_rli(vui, ul); + // CHECK: call <4 x i32> @llvm.s390.verllf(<4 x i32> %{{.*}}, i32 %{{.*}}) + vsl = vec_rli(vsl, ul); + // CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}}) + vul = vec_rli(vul, ul); + 
// CHECK: call <2 x i64> @llvm.s390.verllg(<2 x i64> %{{.*}}, i32 %{{.*}}) + + vsc = vec_rl_mask(vsc, vuc, 0); + // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vsc = vec_rl_mask(vsc, vuc, 255); + // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255) + vuc = vec_rl_mask(vuc, vuc, 0); + // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_rl_mask(vuc, vuc, 255); + // CHECK: call <16 x i8> @llvm.s390.verimb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 255) + vss = vec_rl_mask(vss, vus, 0); + // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vss = vec_rl_mask(vss, vus, 255); + // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255) + vus = vec_rl_mask(vus, vus, 0); + // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_rl_mask(vus, vus, 255); + // CHECK: call <8 x i16> @llvm.s390.verimh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 255) + vsi = vec_rl_mask(vsi, vui, 0); + // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vsi = vec_rl_mask(vsi, vui, 255); + // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255) + vui = vec_rl_mask(vui, vui, 0); + // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_rl_mask(vui, vui, 255); + // CHECK: call <4 x i32> @llvm.s390.verimf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 255) + vsl = vec_rl_mask(vsl, vul, 0); + // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0) + vsl = vec_rl_mask(vsl, vul, 255); + 
// CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255) + vul = vec_rl_mask(vul, vul, 0); + // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 0) + vul = vec_rl_mask(vul, vul, 255); + // CHECK: call <2 x i64> @llvm.s390.verimg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, i32 255) + + vsc = vec_sll(vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_sll(vsc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_sll(vsc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_sll(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_sll(vuc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_sll(vuc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_sll(vbc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_sll(vbc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_sll(vbc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_sll(vss, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_sll(vss, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_sll(vss, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_sll(vus, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_sll(vus, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_sll(vus, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> 
%{{.*}}) + vbs = vec_sll(vbs, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_sll(vbs, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_sll(vbs, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_sll(vsi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_sll(vsi, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_sll(vsi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_sll(vui, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_sll(vui, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_sll(vui, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_sll(vbi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_sll(vbi, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_sll(vbi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_sll(vsl, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_sll(vsl, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_sll(vsl, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_sll(vul, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_sll(vul, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_sll(vul, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_sll(vbl, vuc); + // CHECK: call <16 x i8> 
@llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_sll(vbl, vus); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_sll(vbl, vui); + // CHECK: call <16 x i8> @llvm.s390.vsl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_slb(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_slb(vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_slb(vuc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_slb(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_slb(vss, vss); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_slb(vss, vus); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_slb(vus, vss); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_slb(vus, vus); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_slb(vsi, vsi); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_slb(vsi, vui); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_slb(vui, vsi); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_slb(vui, vui); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_slb(vsl, vsl); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_slb(vsl, vul); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_slb(vul, vsl); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_slb(vul, vul); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + 
vd = vec_slb(vd, vsl); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vd = vec_slb(vd, vul); + // CHECK: call <16 x i8> @llvm.s390.vslb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_sld(vsc, vsc, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vsc = vec_sld(vsc, vsc, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vuc = vec_sld(vuc, vuc, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_sld(vuc, vuc, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vss = vec_sld(vss, vss, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vss = vec_sld(vss, vss, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vus = vec_sld(vus, vus, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vus = vec_sld(vus, vus, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vsi = vec_sld(vsi, vsi, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vsi = vec_sld(vsi, vsi, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vui = vec_sld(vui, vui, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vui = vec_sld(vui, vui, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vsl = vec_sld(vsl, vsl, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vsl = vec_sld(vsl, vsl, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vul = vec_sld(vul, vul, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> 
%{{.*}}, i32 0) + vul = vec_sld(vul, vul, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + vd = vec_sld(vd, vd, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vd = vec_sld(vd, vd, 15); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 15) + + vsc = vec_sldw(vsc, vsc, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vsc = vec_sldw(vsc, vsc, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vuc = vec_sldw(vuc, vuc, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_sldw(vuc, vuc, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vss = vec_sldw(vss, vss, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vss = vec_sldw(vss, vss, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vus = vec_sldw(vus, vus, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vus = vec_sldw(vus, vus, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vsi = vec_sldw(vsi, vsi, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vsi = vec_sldw(vsi, vsi, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vui = vec_sldw(vui, vui, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vui = vec_sldw(vui, vui, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vsl = vec_sldw(vsl, vsl, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vsl = vec_sldw(vsl, vsl, 3); + // CHECK: call <16 x i8> 
@llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vul = vec_sldw(vul, vul, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vul = vec_sldw(vul, vul, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vd = vec_sldw(vd, vd, 0); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vd = vec_sldw(vd, vd, 3); + // CHECK: call <16 x i8> @llvm.s390.vsldb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + + vsc = vec_sral(vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_sral(vsc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_sral(vsc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_sral(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_sral(vuc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_sral(vuc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_sral(vbc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_sral(vbc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_sral(vbc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_sral(vss, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_sral(vss, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_sral(vss, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_sral(vus, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_sral(vus, vus); + // CHECK: 
call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_sral(vus, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_sral(vbs, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_sral(vbs, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_sral(vbs, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_sral(vsi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_sral(vsi, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_sral(vsi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_sral(vui, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_sral(vui, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_sral(vui, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_sral(vbi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_sral(vbi, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_sral(vbi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_sral(vsl, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_sral(vsl, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_sral(vsl, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_sral(vul, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_sral(vul, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x 
i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_sral(vul, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_sral(vbl, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_sral(vbl, vus); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_sral(vbl, vui); + // CHECK: call <16 x i8> @llvm.s390.vsra(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_srab(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_srab(vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_srab(vuc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_srab(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_srab(vss, vss); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_srab(vss, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_srab(vus, vss); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_srab(vus, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_srab(vsi, vsi); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_srab(vsi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_srab(vui, vsi); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_srab(vui, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_srab(vsl, vsl); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_srab(vsl, vul); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x 
i8> %{{.*}}) + vul = vec_srab(vul, vsl); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_srab(vul, vul); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vd = vec_srab(vd, vsl); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vd = vec_srab(vd, vul); + // CHECK: call <16 x i8> @llvm.s390.vsrab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_srl(vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_srl(vsc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_srl(vsc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_srl(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_srl(vuc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_srl(vuc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_srl(vbc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_srl(vbc, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbc = vec_srl(vbc, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_srl(vss, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_srl(vss, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_srl(vss, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_srl(vus, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_srl(vus, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_srl(vus, vui); + // 
CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_srl(vbs, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_srl(vbs, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbs = vec_srl(vbs, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_srl(vsi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_srl(vsi, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_srl(vsi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_srl(vui, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_srl(vui, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_srl(vui, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_srl(vbi, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_srl(vbi, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbi = vec_srl(vbi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_srl(vsl, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_srl(vsl, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_srl(vsl, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_srl(vul, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_srl(vul, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_srl(vul, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> 
%{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_srl(vbl, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_srl(vbl, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vbl = vec_srl(vbl, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrl(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_srb(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsc = vec_srb(vsc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_srb(vuc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_srb(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_srb(vss, vss); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_srb(vss, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_srb(vus, vss); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_srb(vus, vus); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_srb(vsi, vsi); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_srb(vsi, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_srb(vui, vsi); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_srb(vui, vui); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_srb(vsl, vsl); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsl = vec_srb(vsl, vul); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = vec_srb(vul, vsl); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vul = 
vec_srb(vul, vul); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vd = vec_srb(vd, vsl); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vd = vec_srb(vd, vul); + // CHECK: call <16 x i8> @llvm.s390.vsrlb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_abs(vsc); + vss = vec_abs(vss); + vsi = vec_abs(vsi); + vsl = vec_abs(vsl); + + vsc = vec_max(vsc, vsc); + vsc = vec_max(vsc, vbc); + vsc = vec_max(vbc, vsc); + vuc = vec_max(vuc, vuc); + vuc = vec_max(vuc, vbc); + vuc = vec_max(vbc, vuc); + vss = vec_max(vss, vss); + vss = vec_max(vss, vbs); + vss = vec_max(vbs, vss); + vus = vec_max(vus, vus); + vus = vec_max(vus, vbs); + vus = vec_max(vbs, vus); + vsi = vec_max(vsi, vsi); + vsi = vec_max(vsi, vbi); + vsi = vec_max(vbi, vsi); + vui = vec_max(vui, vui); + vui = vec_max(vui, vbi); + vui = vec_max(vbi, vui); + vsl = vec_max(vsl, vsl); + vsl = vec_max(vsl, vbl); + vsl = vec_max(vbl, vsl); + vul = vec_max(vul, vul); + vul = vec_max(vul, vbl); + vul = vec_max(vbl, vul); + vd = vec_max(vd, vd); + + vsc = vec_min(vsc, vsc); + vsc = vec_min(vsc, vbc); + vsc = vec_min(vbc, vsc); + vuc = vec_min(vuc, vuc); + vuc = vec_min(vuc, vbc); + vuc = vec_min(vbc, vuc); + vss = vec_min(vss, vss); + vss = vec_min(vss, vbs); + vss = vec_min(vbs, vss); + vus = vec_min(vus, vus); + vus = vec_min(vus, vbs); + vus = vec_min(vbs, vus); + vsi = vec_min(vsi, vsi); + vsi = vec_min(vsi, vbi); + vsi = vec_min(vbi, vsi); + vui = vec_min(vui, vui); + vui = vec_min(vui, vbi); + vui = vec_min(vbi, vui); + vsl = vec_min(vsl, vsl); + vsl = vec_min(vsl, vbl); + vsl = vec_min(vbl, vsl); + vul = vec_min(vul, vul); + vul = vec_min(vul, vbl); + vul = vec_min(vbl, vul); + vd = vec_min(vd, vd); + + vuc = vec_addc(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vaccb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_addc(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vacch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vui = vec_addc(vui, 
vui); + // CHECK: call <4 x i32> @llvm.s390.vaccf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vul = vec_addc(vul, vul); + // CHECK: call <2 x i64> @llvm.s390.vaccg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vuc = vec_add_u128(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vaq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_addc_u128(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vaccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_adde_u128(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vacq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_addec_u128(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vacccq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_avg(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vavgb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_avg(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vavglb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_avg(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vavgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_avg(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vavglh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_avg(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vavgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_avg(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vavglf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vsl = vec_avg(vsl, vsl); + // CHECK: call <2 x i64> @llvm.s390.vavgg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + vul = vec_avg(vul, vul); + // CHECK: call <2 x i64> @llvm.s390.vavglg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vui = vec_checksum(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vcksm(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vus = vec_gfmsum(vuc, vuc); + // CHECK: call <8 x i16> @llvm.s390.vgfmb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_gfmsum(vus, vus); + // CHECK: call <4 x i32> @llvm.s390.vgfmh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vul = vec_gfmsum(vui, vui); + // CHECK: call <2 x i64> 
@llvm.s390.vgfmf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vuc = vec_gfmsum_128(vul, vul); + // CHECK: call <16 x i8> @llvm.s390.vgfmg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vus = vec_gfmsum_accum(vuc, vuc, vus); + // CHECK: call <8 x i16> @llvm.s390.vgfmab(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}}) + vui = vec_gfmsum_accum(vus, vus, vui); + // CHECK: call <4 x i32> @llvm.s390.vgfmah(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}}) + vul = vec_gfmsum_accum(vui, vui, vul); + // CHECK: call <2 x i64> @llvm.s390.vgfmaf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}}) + vuc = vec_gfmsum_accum_128(vul, vul, vuc); + // CHECK: call <16 x i8> @llvm.s390.vgfmag(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <16 x i8> %{{.*}}) + + vsc = vec_mladd(vsc, vsc, vsc); + vsc = vec_mladd(vuc, vsc, vsc); + vsc = vec_mladd(vsc, vuc, vuc); + vuc = vec_mladd(vuc, vuc, vuc); + vss = vec_mladd(vss, vss, vss); + vss = vec_mladd(vus, vss, vss); + vss = vec_mladd(vss, vus, vus); + vus = vec_mladd(vus, vus, vus); + vsi = vec_mladd(vsi, vsi, vsi); + vsi = vec_mladd(vui, vsi, vsi); + vsi = vec_mladd(vsi, vui, vui); + vui = vec_mladd(vui, vui, vui); + + vsc = vec_mhadd(vsc, vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vmahb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_mhadd(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vmalhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_mhadd(vss, vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vmahh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_mhadd(vus, vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vmalhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_mhadd(vsi, vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vmahf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_mhadd(vui, vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vmalhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vss = vec_meadd(vsc, 
vsc, vss); + // CHECK: call <8 x i16> @llvm.s390.vmaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_meadd(vuc, vuc, vus); + // CHECK: call <8 x i16> @llvm.s390.vmaleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_meadd(vss, vss, vsi); + // CHECK: call <4 x i32> @llvm.s390.vmaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_meadd(vus, vus, vui); + // CHECK: call <4 x i32> @llvm.s390.vmaleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}}) + vsl = vec_meadd(vsi, vsi, vsl); + // CHECK: call <2 x i64> @llvm.s390.vmaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}}) + vul = vec_meadd(vui, vui, vul); + // CHECK: call <2 x i64> @llvm.s390.vmalef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}}) + + vss = vec_moadd(vsc, vsc, vss); + // CHECK: call <8 x i16> @llvm.s390.vmaob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_moadd(vuc, vuc, vus); + // CHECK: call <8 x i16> @llvm.s390.vmalob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_moadd(vss, vss, vsi); + // CHECK: call <4 x i32> @llvm.s390.vmaoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_moadd(vus, vus, vui); + // CHECK: call <4 x i32> @llvm.s390.vmaloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <4 x i32> %{{.*}}) + vsl = vec_moadd(vsi, vsi, vsl); + // CHECK: call <2 x i64> @llvm.s390.vmaof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}}) + vul = vec_moadd(vui, vui, vul); + // CHECK: call <2 x i64> @llvm.s390.vmalof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <2 x i64> %{{.*}}) + + vsc = vec_mulh(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vmhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_mulh(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vmlhb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_mulh(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vmhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_mulh(vus, vus); + // CHECK: call <8 x i16> 
@llvm.s390.vmlhh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_mulh(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vmhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_mulh(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vmlhf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vss = vec_mule(vsc, vsc); + // CHECK: call <8 x i16> @llvm.s390.vmeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_mule(vuc, vuc); + // CHECK: call <8 x i16> @llvm.s390.vmleb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_mule(vss, vss); + // CHECK: call <4 x i32> @llvm.s390.vmeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vui = vec_mule(vus, vus); + // CHECK: call <4 x i32> @llvm.s390.vmleh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsl = vec_mule(vsi, vsi); + // CHECK: call <2 x i64> @llvm.s390.vmef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vul = vec_mule(vui, vui); + // CHECK: call <2 x i64> @llvm.s390.vmlef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vss = vec_mulo(vsc, vsc); + // CHECK: call <8 x i16> @llvm.s390.vmob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_mulo(vuc, vuc); + // CHECK: call <8 x i16> @llvm.s390.vmlob(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vsi = vec_mulo(vss, vss); + // CHECK: call <4 x i32> @llvm.s390.vmoh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vui = vec_mulo(vus, vus); + // CHECK: call <4 x i32> @llvm.s390.vmloh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsl = vec_mulo(vsi, vsi); + // CHECK: call <2 x i64> @llvm.s390.vmof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vul = vec_mulo(vui, vui); + // CHECK: call <2 x i64> @llvm.s390.vmlof(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vuc = vec_subc(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vscbib(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vus = vec_subc(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vscbih(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vui = vec_subc(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vscbif(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vul = vec_subc(vul, vul); + // CHECK: call <2 x i64> 
@llvm.s390.vscbig(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + vuc = vec_sub_u128(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_subc_u128(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vscbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_sube_u128(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_subec_u128(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vsbcbiq(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + + vui = vec_sum4(vuc, vuc); + // CHECK: call <4 x i32> @llvm.s390.vsumb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vui = vec_sum4(vus, vus); + // CHECK: call <4 x i32> @llvm.s390.vsumh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vul = vec_sum2(vus, vus); + // CHECK: call <2 x i64> @llvm.s390.vsumgh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vul = vec_sum2(vui, vui); + // CHECK: call <2 x i64> @llvm.s390.vsumgf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vuc = vec_sum_u128(vui, vui); + // CHECK: call <16 x i8> @llvm.s390.vsumqf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vuc = vec_sum_u128(vul, vul); + // CHECK: call <16 x i8> @llvm.s390.vsumqg(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) + + idx = vec_test_mask(vsc, vuc); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_test_mask(vuc, vuc); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_test_mask(vss, vus); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_test_mask(vus, vus); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_test_mask(vsi, vui); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_test_mask(vui, vui); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_test_mask(vsl, vul); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = 
vec_test_mask(vul, vul); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + idx = vec_test_mask(vd, vul); + // CHECK: call i32 @llvm.s390.vtm(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) +} + +void test_string(void) { + vsc = vec_cp_until_zero(vsc); + // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}}) + vuc = vec_cp_until_zero(vuc); + // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}}) + vbc = vec_cp_until_zero(vbc); + // CHECK: call <16 x i8> @llvm.s390.vistrb(<16 x i8> %{{.*}}) + vss = vec_cp_until_zero(vss); + // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}}) + vus = vec_cp_until_zero(vus); + // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}}) + vbs = vec_cp_until_zero(vbs); + // CHECK: call <8 x i16> @llvm.s390.vistrh(<8 x i16> %{{.*}}) + vsi = vec_cp_until_zero(vsi); + // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}}) + vui = vec_cp_until_zero(vui); + // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}}) + vbi = vec_cp_until_zero(vbi); + // CHECK: call <4 x i32> @llvm.s390.vistrf(<4 x i32> %{{.*}}) + + vsc = vec_cp_until_zero_cc(vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}}) + vuc = vec_cp_until_zero_cc(vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}}) + vbc = vec_cp_until_zero_cc(vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vistrbs(<16 x i8> %{{.*}}) + vss = vec_cp_until_zero_cc(vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}}) + vus = vec_cp_until_zero_cc(vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}}) + vbs = vec_cp_until_zero_cc(vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vistrhs(<8 x i16> %{{.*}}) + vsi = vec_cp_until_zero_cc(vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}}) + vui = vec_cp_until_zero_cc(vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x 
i32> %{{.*}}) + vbi = vec_cp_until_zero_cc(vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vistrfs(<4 x i32> %{{.*}}) + + vsc = vec_cmpeq_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfeeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpeq_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpeq_idx(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpeq_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfeeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpeq_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_idx(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfeef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vsc = vec_cmpeq_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpeq_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpeq_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpeq_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpeq_idx_cc(vsi, vsi, 
&cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vsc = vec_cmpeq_or_0_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_or_0_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_or_0_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfeezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpeq_or_0_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpeq_or_0_idx(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpeq_or_0_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfeezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpeq_or_0_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_or_0_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_or_0_idx(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfeezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vsc = vec_cmpeq_or_0_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_or_0_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpeq_or_0_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfeezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpeq_or_0_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x 
i16> %{{.*}}) + vus = vec_cmpeq_or_0_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpeq_or_0_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfeezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpeq_or_0_idx_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_or_0_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpeq_or_0_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfeezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vsc = vec_cmpne_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfeneb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpne_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_idx(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfeneh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpne_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_idx(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfenef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vsc = vec_cmpne_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> 
%{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpne_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpne_idx_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vsc = vec_cmpne_or_0_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_or_0_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_or_0_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfenezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpne_or_0_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_or_0_idx(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_or_0_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfenezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpne_or_0_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_or_0_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_or_0_idx(vbi, vbi); + // CHECK: call 
<4 x i32> @llvm.s390.vfenezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vsc = vec_cmpne_or_0_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_or_0_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vuc = vec_cmpne_or_0_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfenezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}) + vss = vec_cmpne_or_0_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_or_0_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vus = vec_cmpne_or_0_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfenezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}) + vsi = vec_cmpne_or_0_idx_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_or_0_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + vui = vec_cmpne_or_0_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfenezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) + + vbc = vec_cmprg(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbs = vec_cmprg(vus, vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbi = vec_cmprg(vui, vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4) + + vbc = vec_cmprg_cc(vuc, vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbs = vec_cmprg_cc(vus, vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 
} @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbi = vec_cmprg_cc(vui, vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4) + + vuc = vec_cmprg_idx(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vus = vec_cmprg_idx(vus, vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vui = vec_cmprg_idx(vui, vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vuc = vec_cmprg_idx_cc(vuc, vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vus = vec_cmprg_idx_cc(vus, vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vui = vec_cmprg_idx_cc(vui, vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vuc = vec_cmprg_or_0_idx(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vus = vec_cmprg_or_0_idx(vus, vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vui = vec_cmprg_or_0_idx(vui, vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vuc = vec_cmprg_or_0_idx_cc(vuc, vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vus = vec_cmprg_or_0_idx_cc(vus, vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vui = 
vec_cmprg_or_0_idx_cc(vui, vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vbc = vec_cmpnrg(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbs = vec_cmpnrg(vus, vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbi = vec_cmpnrg(vui, vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + + vbc = vec_cmpnrg_cc(vuc, vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbs = vec_cmpnrg_cc(vus, vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbi = vec_cmpnrg_cc(vui, vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + + vuc = vec_cmpnrg_idx(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vstrcb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vus = vec_cmpnrg_idx(vus, vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vstrch(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vui = vec_cmpnrg_idx(vui, vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vstrcf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + + vuc = vec_cmpnrg_idx_cc(vuc, vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrcbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vus = vec_cmpnrg_idx_cc(vus, vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrchs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vui = vec_cmpnrg_idx_cc(vui, vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrcfs(<4 x i32> %{{.*}}, <4 x 
i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + + vuc = vec_cmpnrg_or_0_idx(vuc, vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vstrczb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vus = vec_cmpnrg_or_0_idx(vus, vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vstrczh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vui = vec_cmpnrg_or_0_idx(vui, vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vstrczf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + + vuc = vec_cmpnrg_or_0_idx_cc(vuc, vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vstrczbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vus = vec_cmpnrg_or_0_idx_cc(vus, vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vstrczhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vui = vec_cmpnrg_or_0_idx_cc(vui, vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vstrczfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + + vbc = vec_find_any_eq(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbc = vec_find_any_eq(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbc = vec_find_any_eq(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbs = vec_find_any_eq(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbs = vec_find_any_eq(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbs = vec_find_any_eq(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbi = vec_find_any_eq(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4) + vbi = vec_find_any_eq(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> 
%{{.*}}, i32 4) + vbi = vec_find_any_eq(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4) + + vbc = vec_find_any_eq_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbc = vec_find_any_eq_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbc = vec_find_any_eq_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 4) + vbs = vec_find_any_eq_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbs = vec_find_any_eq_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbs = vec_find_any_eq_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 4) + vbi = vec_find_any_eq_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4) + vbi = vec_find_any_eq_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4) + vbi = vec_find_any_eq_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 4) + + vsc = vec_find_any_eq_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vss = vec_find_any_eq_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_idx(vus, vus); + // CHECK: call <8 x i16> 
@llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vsi = vec_find_any_eq_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_idx(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vsc = vec_find_any_eq_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vss = vec_find_any_eq_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vsi = vec_find_any_eq_idx_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vsc = vec_find_any_eq_or_0_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, 
<16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_or_0_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_or_0_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vss = vec_find_any_eq_or_0_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_or_0_idx(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_or_0_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vsi = vec_find_any_eq_or_0_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_or_0_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_or_0_idx(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vsc = vec_find_any_eq_or_0_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_or_0_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vuc = vec_find_any_eq_or_0_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 0) + vss = vec_find_any_eq_or_0_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_or_0_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 0) + vus = vec_find_any_eq_or_0_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> 
%{{.*}}, i32 0) + vsi = vec_find_any_eq_or_0_idx_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_or_0_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + vui = vec_find_any_eq_or_0_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 0) + + vbc = vec_find_any_ne(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbc = vec_find_any_ne(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbc = vec_find_any_ne(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbs = vec_find_any_ne(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbs = vec_find_any_ne(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbs = vec_find_any_ne(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbi = vec_find_any_ne(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + vbi = vec_find_any_ne(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + vbi = vec_find_any_ne(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + + vbc = vec_find_any_ne_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbc = vec_find_any_ne_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbc = vec_find_any_ne_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> 
%{{.*}}, <16 x i8> %{{.*}}, i32 12) + vbs = vec_find_any_ne_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbs = vec_find_any_ne_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbs = vec_find_any_ne_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 12) + vbi = vec_find_any_ne_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + vbi = vec_find_any_ne_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + vbi = vec_find_any_ne_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 12) + + vsc = vec_find_any_ne_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfaeb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vss = vec_find_any_ne_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_idx(vus, vus); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfaeh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vsi = vec_find_any_ne_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_idx(vbi, vbi); + // CHECK: call <4 x i32> 
@llvm.s390.vfaef(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + + vsc = vec_find_any_ne_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaebs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vss = vec_find_any_ne_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaehs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vsi = vec_find_any_ne_idx_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaefs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + + vsc = vec_find_any_ne_or_0_idx(vsc, vsc); + // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_or_0_idx(vuc, vuc); + // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_or_0_idx(vbc, vbc); + // CHECK: call <16 x i8> @llvm.s390.vfaezb(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vss = vec_find_any_ne_or_0_idx(vss, vss); + // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_or_0_idx(vus, vus); + // CHECK: call <8 x i16> 
@llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_or_0_idx(vbs, vbs); + // CHECK: call <8 x i16> @llvm.s390.vfaezh(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vsi = vec_find_any_ne_or_0_idx(vsi, vsi); + // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_or_0_idx(vui, vui); + // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_or_0_idx(vbi, vbi); + // CHECK: call <4 x i32> @llvm.s390.vfaezf(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + + vsc = vec_find_any_ne_or_0_idx_cc(vsc, vsc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_or_0_idx_cc(vuc, vuc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vuc = vec_find_any_ne_or_0_idx_cc(vbc, vbc, &cc); + // CHECK: call { <16 x i8>, i32 } @llvm.s390.vfaezbs(<16 x i8> %{{.*}}, <16 x i8> %{{.*}}, i32 8) + vss = vec_find_any_ne_or_0_idx_cc(vss, vss, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_or_0_idx_cc(vus, vus, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vus = vec_find_any_ne_or_0_idx_cc(vbs, vbs, &cc); + // CHECK: call { <8 x i16>, i32 } @llvm.s390.vfaezhs(<8 x i16> %{{.*}}, <8 x i16> %{{.*}}, i32 8) + vsi = vec_find_any_ne_or_0_idx_cc(vsi, vsi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_or_0_idx_cc(vui, vui, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) + vui = vec_find_any_ne_or_0_idx_cc(vbi, vbi, &cc); + // CHECK: call { <4 x i32>, i32 } @llvm.s390.vfaezfs(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}, i32 8) +} + +void test_float(void) { 
+ vd = vec_abs(vd); + // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}}) + + vd = vec_nabs(vd); + // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}}) + // CHECK-NEXT: fsub <2 x double> , [[ABS]] + + vd = vec_madd(vd, vd, vd); + // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}) + vd = vec_msub(vd, vd, vd); + // CHECK: [[NEG:%[^ ]+]] = fsub <2 x double> , %{{.*}} + // CHECK: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]]) + vd = vec_sqrt(vd); + // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}}) + + vd = vec_ld2f(cptrf); + // CHECK: [[VAL:%[^ ]+]] = load <2 x float>, <2 x float>* %{{.*}} + // CHECK: fpext <2 x float> [[VAL]] to <2 x double> + vec_st2f(vd, ptrf); + // CHECK: [[VAL:%[^ ]+]] = fptrunc <2 x double> %{{.*}} to <2 x float> + // CHECK: store <2 x float> [[VAL]], <2 x float>* %{{.*}} + + vd = vec_ctd(vsl, 0); + // CHECK: sitofp <2 x i64> %{{.*}} to <2 x double> + vd = vec_ctd(vul, 0); + // CHECK: uitofp <2 x i64> %{{.*}} to <2 x double> + vd = vec_ctd(vsl, 1); + // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double> + // CHECK: fmul <2 x double> [[VAL]], + vd = vec_ctd(vul, 1); + // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double> + // CHECK: fmul <2 x double> [[VAL]], + vd = vec_ctd(vsl, 31); + // CHECK: [[VAL:%[^ ]+]] = sitofp <2 x i64> %{{.*}} to <2 x double> + // CHECK: fmul <2 x double> [[VAL]], + vd = vec_ctd(vul, 31); + // CHECK: [[VAL:%[^ ]+]] = uitofp <2 x i64> %{{.*}} to <2 x double> + // CHECK: fmul <2 x double> [[VAL]], + + vsl = vec_ctsl(vd, 0); + // CHECK: fptosi <2 x double> %{{.*}} to <2 x i64> + vul = vec_ctul(vd, 0); + // CHECK: fptoui <2 x double> %{{.*}} to <2 x i64> + vsl = vec_ctsl(vd, 1); + // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, + // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64> + vul = vec_ctul(vd, 1); + // 
CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, + // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64> + vsl = vec_ctsl(vd, 31); + // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, + // CHECK: fptosi <2 x double> [[VAL]] to <2 x i64> + vul = vec_ctul(vd, 31); + // CHECK: [[VAL:%[^ ]+]] = fmul <2 x double> %{{.*}}, + // CHECK: fptoui <2 x double> [[VAL]] to <2 x i64> + + vd = vec_roundp(vd); + // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}}) + vd = vec_ceil(vd); + // CHECK: call <2 x double> @llvm.ceil.v2f64(<2 x double> %{{.*}}) + vd = vec_roundm(vd); + // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}}) + vd = vec_floor(vd); + // CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> %{{.*}}) + vd = vec_roundz(vd); + // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) + vd = vec_trunc(vd); + // CHECK: call <2 x double> @llvm.trunc.v2f64(<2 x double> %{{.*}}) + vd = vec_roundc(vd); + // CHECK: call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %{{.*}}) + vd = vec_round(vd); + // CHECK: call <2 x double> @llvm.s390.vfidb(<2 x double> %{{.*}}, i32 4, i32 4) + + vbl = vec_fp_test_data_class(vd, 0, &cc); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 0) + vbl = vec_fp_test_data_class(vd, 4095, &cc); + // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 4095) +} diff --git a/clang/test/CodeGen/zvector.c b/clang/test/CodeGen/zvector.c new file mode 100644 index 0000000000000..ebe7e415e1db8 --- /dev/null +++ b/clang/test/CodeGen/zvector.c @@ -0,0 +1,2798 @@ +// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector \ +// RUN: -O -emit-llvm -o - -W -Wall -Werror %s | FileCheck %s + +volatile vector signed char sc, sc2; +volatile vector unsigned char uc, uc2; +volatile vector bool char bc, bc2; + +volatile vector signed short ss, ss2; +volatile vector unsigned short us, us2; +volatile vector bool short bs, bs2; + +volatile vector signed int si, 
si2; +volatile vector unsigned int ui, ui2; +volatile vector bool int bi, bi2; + +volatile vector signed long long sl, sl2; +volatile vector unsigned long long ul, ul2; +volatile vector bool long long bl, bl2; + +volatile vector double fd, fd2; + +volatile int cnt; + +void test_assign (void) +{ +// CHECK-LABEL: test_assign + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc + sc = sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc + uc = uc2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss + ss = ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us + us = us2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si + si = si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui + ui = ui2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl + sl = sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul + ul = ul2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd + fd = fd2; +} + +void test_pos (void) +{ +// CHECK-LABEL: test_pos + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @sc + sc = +sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: store volatile <16 x i8> [[VAL]], <16 x i8>* @uc + uc = +uc2; + +// CHECK: [[VAL:%[^ ]+]] = load 
volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @ss + ss = +ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: store volatile <8 x i16> [[VAL]], <8 x i16>* @us + us = +us2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @si + si = +si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: store volatile <4 x i32> [[VAL]], <4 x i32>* @ui + ui = +ui2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @sl + sl = +sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: store volatile <2 x i64> [[VAL]], <2 x i64>* @ul + ul = +ul2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: store volatile <2 x double> [[VAL]], <2 x double>* @fd + fd = +fd2; +} + +void test_neg (void) +{ +// CHECK-LABEL: test_neg + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = sub <16 x i8> zeroinitializer, [[VAL]] + sc = -sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = sub <8 x i16> zeroinitializer, [[VAL]] + ss = -ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = sub <4 x i32> zeroinitializer, [[VAL]] + si = -si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = sub <2 x i64> zeroinitializer, [[VAL]] + sl = -sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fsub <2 x double> , [[VAL]] + fd = -fd2; +} + +void test_preinc (void) +{ +// CHECK-LABEL: test_preinc + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + ++sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 
+// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + ++uc2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + ++ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + ++us2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + ++si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + ++ui2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + ++sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + ++ul2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], + ++fd2; +} + +void test_postinc (void) +{ +// CHECK-LABEL: test_postinc + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + sc2++; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + uc2++; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + ss2++; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + us2++; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + si2++; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + ui2++; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + sl2++; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + ul2++; + +// CHECK: [[VAL:%[^ 
]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], + fd2++; +} + +void test_predec (void) +{ +// CHECK-LABEL: test_predec + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + --sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + --uc2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + --ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + --us2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + --si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + --ui2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + --sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + --ul2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], + --fd2; +} + +void test_postdec (void) +{ +// CHECK-LABEL: test_postdec + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + sc2--; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL]], + uc2--; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + ss2--; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL]], + us2--; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + si2--; +// CHECK: [[VAL:%[^ ]+]] = 
load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL]], + ui2--; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + sl2--; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL]], + ul2--; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fadd <2 x double> [[VAL]], + fd2--; +} + +void test_add (void) +{ +// CHECK-LABEL: test_add + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]] + sc = sc + sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]] + sc = sc + bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]] + sc = bc + sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]] + uc = uc + uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]] + uc = uc + bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = add <16 x i8> [[VAL2]], [[VAL1]] + uc = bc + uc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]] + ss = ss + 
ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]] + ss = ss + bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]] + ss = bs + ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]] + us = us + us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]] + us = us + bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = add <8 x i16> [[VAL2]], [[VAL1]] + us = bs + us2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]] + si = si + si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]] + si = si + bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]] + si = bi + si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]] + ui = ui + ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load 
volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]] + ui = ui + bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = add <4 x i32> [[VAL2]], [[VAL1]] + ui = bi + ui2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]] + sl = sl + sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]] + sl = sl + bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]] + sl = bl + sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]] + ul = ul + ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]] + ul = ul + bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = add <2 x i64> [[VAL2]], [[VAL1]] + ul = bl + ul2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fadd <2 x double> [[VAL1]], [[VAL2]] + fd = fd + fd2; +} + +void test_add_assign (void) +{ +// CHECK-LABEL: test_add_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x 
i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]] + sc += sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]] + sc += bc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]] + uc += uc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = add <16 x i8> [[VAL1]], [[VAL2]] + uc += bc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]] + ss += ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]] + ss += bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]] + us += us2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = add <8 x i16> [[VAL1]], [[VAL2]] + us += bs2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]] + si += si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]] + si += bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, 
<4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]] + ui += ui2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = add <4 x i32> [[VAL1]], [[VAL2]] + ui += bi2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]] + sl += sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]] + sl += bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]] + ul += ul2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = add <2 x i64> [[VAL1]], [[VAL2]] + ul += bl2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: %{{.*}} = fadd <2 x double> [[VAL2]], [[VAL1]] + fd += fd2; +} + +void test_sub (void) +{ +// CHECK-LABEL: test_sub + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + sc = sc - sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + sc = sc - bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile 
<16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + sc = bc - sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + uc = uc - uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + uc = uc - bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + uc = bc - uc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + ss = ss - ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + ss = ss - bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + ss = bs - ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + us = us - us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + us = us - bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + us = bs - us2; + +// 
CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + si = si - si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + si = si - bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + si = bi - si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + ui = ui - ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + ui = ui - bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + ui = bi - ui2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + sl = sl - sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + sl = sl - bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + sl = bl - sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile 
<2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + ul = ul - ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + ul = ul - bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + ul = bl - ul2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]] + fd = fd - fd2; +} + +void test_sub_assign (void) +{ +// CHECK-LABEL: test_sub_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + sc -= sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + sc -= bc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + uc -= uc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = sub <16 x i8> [[VAL1]], [[VAL2]] + uc -= bc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + ss -= ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: 
%{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + ss -= bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + us -= us2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = sub <8 x i16> [[VAL1]], [[VAL2]] + us -= bs2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + si -= si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + si -= bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + ui -= ui2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = sub <4 x i32> [[VAL1]], [[VAL2]] + ui -= bi2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + sl -= sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + sl -= bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + ul -= ul2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: 
[[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = sub <2 x i64> [[VAL1]], [[VAL2]] + ul -= bl2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: %{{.*}} = fsub <2 x double> [[VAL1]], [[VAL2]] + fd -= fd2; +} + +void test_mul (void) +{ +// CHECK-LABEL: test_mul + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]] + sc = sc * sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = mul <16 x i8> [[VAL2]], [[VAL1]] + uc = uc * uc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]] + ss = ss * ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = mul <8 x i16> [[VAL2]], [[VAL1]] + us = us * us2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]] + si = si * si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = mul <4 x i32> [[VAL2]], [[VAL1]] + ui = ui * ui2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]] + sl = sl * sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x 
i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = mul <2 x i64> [[VAL2]], [[VAL1]] + ul = ul * ul2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fmul <2 x double> [[VAL1]], [[VAL2]] + fd = fd * fd2; +} + +void test_mul_assign (void) +{ +// CHECK-LABEL: test_mul_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]] + sc *= sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = mul <16 x i8> [[VAL1]], [[VAL2]] + uc *= uc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]] + ss *= ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = mul <8 x i16> [[VAL1]], [[VAL2]] + us *= us2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]] + si *= si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = mul <4 x i32> [[VAL1]], [[VAL2]] + ui *= ui2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = mul <2 x i64> [[VAL1]], [[VAL2]] + sl *= sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} 
= mul <2 x i64> [[VAL1]], [[VAL2]] + ul *= ul2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: %{{.*}} = fmul <2 x double> [[VAL2]], [[VAL1]] + fd *= fd2; +} + +void test_div (void) +{ +// CHECK-LABEL: test_div + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]] + sc = sc / sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]] + uc = uc / uc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]] + ss = ss / ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]] + us = us / us2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]] + si = si / si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]] + ui = ui / ui2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]] + sl = sl / sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], 
[[VAL2]] + ul = ul / ul2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]] + fd = fd / fd2; +} + +void test_div_assign (void) +{ +// CHECK-LABEL: test_div_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = sdiv <16 x i8> [[VAL1]], [[VAL2]] + sc /= sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = udiv <16 x i8> [[VAL1]], [[VAL2]] + uc /= uc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = sdiv <8 x i16> [[VAL1]], [[VAL2]] + ss /= ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = udiv <8 x i16> [[VAL1]], [[VAL2]] + us /= us2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = sdiv <4 x i32> [[VAL1]], [[VAL2]] + si /= si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = udiv <4 x i32> [[VAL1]], [[VAL2]] + ui /= ui2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = sdiv <2 x i64> [[VAL1]], [[VAL2]] + sl /= sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = udiv <2 x i64> [[VAL1]], [[VAL2]] + ul /= ul2; + +// 
CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: %{{.*}} = fdiv <2 x double> [[VAL1]], [[VAL2]] + fd /= fd2; +} + +void test_rem (void) +{ +// CHECK-LABEL: test_rem + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]] + sc = sc % sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]] + uc = uc % uc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]] + ss = ss % ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]] + us = us % us2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]] + si = si % si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]] + ui = ui % ui2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]] + sl = sl % sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]] + ul = ul % ul2; +} + +void test_rem_assign 
(void) +{ +// CHECK-LABEL: test_rem_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = srem <16 x i8> [[VAL1]], [[VAL2]] + sc %= sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = urem <16 x i8> [[VAL1]], [[VAL2]] + uc %= uc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = srem <8 x i16> [[VAL1]], [[VAL2]] + ss %= ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = urem <8 x i16> [[VAL1]], [[VAL2]] + us %= us2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = srem <4 x i32> [[VAL1]], [[VAL2]] + si %= si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = urem <4 x i32> [[VAL1]], [[VAL2]] + ui %= ui2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = srem <2 x i64> [[VAL1]], [[VAL2]] + sl %= sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = urem <2 x i64> [[VAL1]], [[VAL2]] + ul %= ul2; +} + +void test_not (void) +{ +// CHECK-LABEL: test_not + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], + sc = ~sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = xor <16 x i8> 
[[VAL]], + uc = ~uc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL]], + bc = ~bc2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], + ss = ~ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], + us = ~us2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL]], + bs = ~bs2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], + si = ~si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], + ui = ~ui2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL]], + bi = ~bi2; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], + sl = ~sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], + ul = ~ul2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL]], + bl = ~bl2; +} + +void test_and (void) +{ +// CHECK-LABEL: test_and + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]] + sc = sc & sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]] + sc = sc & bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]] + sc = bc & sc2; +// CHECK: [[VAL1:%[^ 
]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]] + uc = uc & uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]] + uc = uc & bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]] + uc = bc & uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = and <16 x i8> [[VAL2]], [[VAL1]] + bc = bc & bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]] + ss = ss & ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]] + ss = ss & bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]] + ss = bs & ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]] + us = us & us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]] + us = us & bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x 
i16>* @us2 +// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]] + us = bs & us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = and <8 x i16> [[VAL2]], [[VAL1]] + bs = bs & bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]] + si = si & si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]] + si = si & bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]] + si = bi & si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]] + ui = ui & ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]] + ui = ui & bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]] + ui = bi & ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = and <4 x i32> [[VAL2]], [[VAL1]] + bi = bi & bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]] + sl = sl & sl2; +// CHECK: [[VAL1:%[^ 
]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]] + sl = sl & bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]] + sl = bl & sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]] + ul = ul & ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]] + ul = ul & bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]] + ul = bl & ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = and <2 x i64> [[VAL2]], [[VAL1]] + bl = bl & bl2; +} + +void test_and_assign (void) +{ +// CHECK-LABEL: test_and_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]] + sc &= sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]] + sc &= bc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]] + uc &= uc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 
+// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]] + uc &= bc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: %{{.*}} = and <16 x i8> [[VAL1]], [[VAL2]] + bc &= bc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]] + ss &= ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]] + ss &= bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]] + us &= us2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]] + us &= bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: %{{.*}} = and <8 x i16> [[VAL1]], [[VAL2]] + bs &= bs2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]] + si &= si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]] + si &= bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]] + ui &= ui2; +// 
CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]] + ui &= bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: %{{.*}} = and <4 x i32> [[VAL1]], [[VAL2]] + bi &= bi2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]] + sl &= sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]] + sl &= bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]] + ul &= ul2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]] + ul &= bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: %{{.*}} = and <2 x i64> [[VAL1]], [[VAL2]] + bl &= bl2; +} + +void test_or (void) +{ +// CHECK-LABEL: test_or + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]] + sc = sc | sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]] + sc = sc | bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: 
[[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]] + sc = bc | sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]] + uc = uc | uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]] + uc = uc | bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]] + uc = bc | uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = or <16 x i8> [[VAL2]], [[VAL1]] + bc = bc | bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]] + ss = ss | ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]] + ss = ss | bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]] + ss = bs | ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]] + us = us | us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]] + us 
= us | bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]] + us = bs | us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = or <8 x i16> [[VAL2]], [[VAL1]] + bs = bs | bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]] + si = si | si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]] + si = si | bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]] + si = bi | si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]] + ui = ui | ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]] + ui = ui | bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]] + ui = bi | ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = or <4 x i32> [[VAL2]], [[VAL1]] + bi = bi | bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load 
volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]] + sl = sl | sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]] + sl = sl | bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]] + sl = bl | sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]] + ul = ul | ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]] + ul = ul | bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]] + ul = bl | ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = or <2 x i64> [[VAL2]], [[VAL1]] + bl = bl | bl2; +} + +void test_or_assign (void) +{ +// CHECK-LABEL: test_or_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]] + sc |= sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]] + sc |= bc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = 
or <16 x i8> [[VAL1]], [[VAL2]] + uc |= uc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]] + uc |= bc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: %{{.*}} = or <16 x i8> [[VAL1]], [[VAL2]] + bc |= bc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]] + ss |= ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]] + ss |= bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]] + us |= us2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]] + us |= bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: %{{.*}} = or <8 x i16> [[VAL1]], [[VAL2]] + bs |= bs2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]] + si |= si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]] + si |= bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load 
volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]] + ui |= ui2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]] + ui |= bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: %{{.*}} = or <4 x i32> [[VAL1]], [[VAL2]] + bi |= bi2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]] + sl |= sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]] + sl |= bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]] + ul |= ul2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]] + ul |= bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: %{{.*}} = or <2 x i64> [[VAL1]], [[VAL2]] + bl |= bl2; +} + +void test_xor (void) +{ +// CHECK-LABEL: test_xor + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]] + sc = sc ^ sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]] 
+ sc = sc ^ bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]] + sc = bc ^ sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]] + uc = uc ^ uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]] + uc = uc ^ bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]] + uc = bc ^ uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: %{{.*}} = xor <16 x i8> [[VAL1]], [[VAL2]] + bc = bc ^ bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]] + ss = ss ^ ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]] + ss = ss ^ bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]] + ss = bs ^ ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]] + us = us ^ us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ 
]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]] + us = us ^ bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]] + us = bs ^ us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: %{{.*}} = xor <8 x i16> [[VAL1]], [[VAL2]] + bs = bs ^ bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]] + si = si ^ si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]] + si = si ^ bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]] + si = bi ^ si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]] + ui = ui ^ ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]] + ui = ui ^ bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]] + ui = bi ^ ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: %{{.*}} = xor <4 x i32> [[VAL1]], [[VAL2]] + bi 
= bi ^ bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]] + sl = sl ^ sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]] + sl = sl ^ bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]] + sl = bl ^ sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]] + ul = ul ^ ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]] + ul = ul ^ bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]] + ul = bl ^ ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: %{{.*}} = xor <2 x i64> [[VAL1]], [[VAL2]] + bl = bl ^ bl2; +} + +void test_xor_assign (void) +{ +// CHECK-LABEL: test_xor_assign + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]] + sc ^= sc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]] + sc ^= bc2; +// CHECK: [[VAL2:%[^ ]+]] = 
load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]] + uc ^= uc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]] + uc ^= bc2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: %{{.*}} = xor <16 x i8> [[VAL2]], [[VAL1]] + bc ^= bc2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]] + ss ^= ss2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]] + ss ^= bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]] + us ^= us2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]] + us ^= bs2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: %{{.*}} = xor <8 x i16> [[VAL2]], [[VAL1]] + bs ^= bs2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]] + si ^= si2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = xor <4 x 
i32> [[VAL2]], [[VAL1]] + si ^= bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]] + ui ^= ui2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]] + ui ^= bi2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: %{{.*}} = xor <4 x i32> [[VAL2]], [[VAL1]] + bi ^= bi2; + +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]] + sl ^= sl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]] + sl ^= bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]] + ul ^= ul2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]] + ul ^= bl2; +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: %{{.*}} = xor <2 x i64> [[VAL2]], [[VAL1]] + bl ^= bl2; +} + +void test_sl (void) +{ +// CHECK-LABEL: test_sl + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + sc = sc << sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x 
i8>, <16 x i8>* @sc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + sc = sc << uc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + sc = sc << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], + sc = sc << 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + uc = uc << sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + uc = uc << uc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + uc = uc << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], + uc = uc << 5; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + ss = ss << ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: 
[[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + ss = ss << us2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + ss = ss << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], + ss = ss << 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + us = us << ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + us = us << us2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + us = us << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], + us = us << 5; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + si = si << si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, 
<4 x i32>* @ui2 +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + si = si << ui2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + si = si << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], + si = si << 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + ui = ui << si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + ui = ui << ui2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + ui = ui << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], + ui = ui << 5; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + sl = sl << sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + sl = sl << ul2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: 
[[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + sl = sl << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], + sl = sl << 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + ul = ul << sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + ul = ul << ul2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + ul = ul << cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], + ul = ul << 5; +} + +void test_sl_assign (void) +{ +// CHECK-LABEL: test_sl_assign + +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + sc <<= sc2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + sc <<= uc2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// 
CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + sc <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], + sc <<= 5; +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + uc <<= sc2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + uc <<= uc2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], [[CNT]] + uc <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = shl <16 x i8> [[VAL]], + uc <<= 5; + +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + ss <<= ss2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + ss <<= us2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 
[[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + ss <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], + ss <<= 5; +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + us <<= ss2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + us <<= us2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], [[CNT]] + us <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = shl <8 x i16> [[VAL]], + us <<= 5; + +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + si <<= si2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + si <<= ui2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], 
<4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + si <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], + si <<= 5; +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + ui <<= si2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + ui <<= ui2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], [[CNT]] + ui <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = shl <4 x i32> [[VAL]], + ui <<= 5; + +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + sl <<= sl2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + sl <<= ul2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = shl 
<2 x i64> [[VAL]], [[CNT]] + sl <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], + sl <<= 5; +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + ul <<= sl2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + ul <<= ul2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], [[CNT]] + ul <<= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = shl <2 x i64> [[VAL]], + ul <<= 5; +} + +void test_sr (void) +{ +// CHECK-LABEL: test_sr + +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]] + sc = sc >> sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]] + sc = sc >> uc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = ashr <16 
x i8> [[VAL]], [[CNT]] + sc = sc >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], + sc = sc >> 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]] + uc = uc >> sc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]] + uc = uc >> uc2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]] + uc = uc >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], + uc = uc >> 5; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]] + ss = ss >> ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]] + ss = ss >> us2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]] + ss 
= ss >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], + ss = ss >> 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]] + us = us >> ss2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]] + us = us >> us2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]] + us = us >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], + us = us >> 5; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]] + si = si >> si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]] + si = si >> ui2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]] + si = si >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: 
%{{.*}} = ashr <4 x i32> [[VAL]], + si = si >> 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]] + ui = ui >> si2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]] + ui = ui >> ui2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]] + ui = ui >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], + ui = ui >> 5; + +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]] + sl = sl >> sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]] + sl = sl >> ul2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]] + sl = sl >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], + sl = sl >> 5; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x 
i64>, <2 x i64>* @ul +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]] + ul = ul >> sl2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]] + ul = ul >> ul2; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]] + ul = ul >> cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], + ul = ul >> 5; +} + +void test_sr_assign (void) +{ +// CHECK-LABEL: test_sr_assign + +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]] + sc >>= sc2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]] + sc >>= uc2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], [[CNT]] + sc >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: %{{.*}} = ashr <16 x i8> [[VAL]], + sc >>= 5; +// CHECK: [[CNT:%[^ 
]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]] + uc >>= sc2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]] + uc >>= uc2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <16 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <16 x i32> [[T2]], <16 x i32> undef, <16 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[CNT:%[^ ]+]] = trunc <16 x i32> [[T3]] to <16 x i8> +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], [[CNT]] + uc >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: %{{.*}} = lshr <16 x i8> [[VAL]], + uc >>= 5; + +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]] + ss >>= ss2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]] + ss >>= us2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], [[CNT]] + ss >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: %{{.*}} = ashr <8 x i16> [[VAL]], + ss >>= 5; +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: 
[[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]] + us >>= ss2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]] + us >>= us2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <8 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <8 x i32> [[T2]], <8 x i32> undef, <8 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[CNT:%[^ ]+]] = trunc <8 x i32> [[T3]] to <8 x i16> +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], [[CNT]] + us >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: %{{.*}} = lshr <8 x i16> [[VAL]], + us >>= 5; + +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]] + si >>= si2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]] + si >>= ui2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], [[CNT]] + si >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: %{{.*}} = ashr <4 x i32> [[VAL]], + si >>= 5; +// CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]] + ui >>= si2; +// 
CHECK: [[CNT:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]] + ui >>= ui2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T3:%[^ ]+]] = insertelement <4 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[CNT:%[^ ]+]] = shufflevector <4 x i32> [[T3]], <4 x i32> undef, <4 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], [[CNT]] + ui >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: %{{.*}} = lshr <4 x i32> [[VAL]], + ui >>= 5; + +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]] + sl >>= sl2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]] + sl >>= ul2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], [[CNT]] + sl >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: %{{.*}} = ashr <2 x i64> [[VAL]], + sl >>= 5; +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]] + ul >>= sl2; +// CHECK: [[CNT:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* 
@ul +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]] + ul >>= ul2; +// CHECK: [[T1:%[^ ]+]] = load volatile i32, i32* @cnt +// CHECK: [[T2:%[^ ]+]] = insertelement <2 x i32> undef, i32 [[T1]], i32 0 +// CHECK: [[T3:%[^ ]+]] = shufflevector <2 x i32> [[T2]], <2 x i32> undef, <2 x i32> zeroinitializer +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[CNT:%[^ ]+]] = zext <2 x i32> [[T3]] to <2 x i64> +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], [[CNT]] + ul >>= cnt; +// CHECK: [[VAL:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: %{{.*}} = lshr <2 x i64> [[VAL]], + ul >>= 5; +} + + +void test_cmpeq (void) +{ +// CHECK-LABEL: test_cmpeq + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc == sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc == bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc == sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc == uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> 
[[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc == bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc == uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc == bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss == ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss == bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs == ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = us == us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 
x i1> [[CMP]] to <8 x i16> + bs = us == bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs == us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs == bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si == si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si == bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi == si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui == ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui == bi2; +// 
CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi == ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi == bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl == sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl == bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl == sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul == ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul == bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, 
<2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl == ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp eq <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl == bl2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[CMP:%[^ ]+]] = fcmp oeq <2 x double> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = fd == fd2; +} + +void test_cmpne (void) +{ +// CHECK-LABEL: test_cmpne + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc != sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc != bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc != sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc != uc2; +// CHECK: [[VAL1:%[^ 
]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc != bc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc != uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc != bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss != ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss != bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs != ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = us != us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us 
+// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = us != bs2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs != us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs != bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si != si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si != bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi != si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui != ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x 
i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui != bi2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi != ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi != bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl != sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl != bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl != sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul != ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = 
icmp ne <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul != bl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl != ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp ne <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl != bl2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[CMP:%[^ ]+]] = fcmp une <2 x double> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = fd != fd2; +} + +void test_cmpge (void) +{ +// CHECK-LABEL: test_cmpge + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp sge <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc >= sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc >= uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp uge <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc >= bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x 
i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp sge <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss >= ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = us >= us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp uge <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs >= bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[CMP:%[^ ]+]] = icmp sge <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si >= si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui >= ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp uge <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi >= bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp sge <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl >= sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = 
icmp uge <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul >= ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp uge <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl >= bl2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[CMP:%[^ ]+]] = fcmp oge <2 x double> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = fd >= fd2; +} + +void test_cmpgt (void) +{ +// CHECK-LABEL: test_cmpgt + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp sgt <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc > sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc > uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc > bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp sgt <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss > ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* 
@us2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = us > us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs > bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[CMP:%[^ ]+]] = icmp sgt <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si > si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui > ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi > bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp sgt <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl > sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul > ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp ugt <2 x 
i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl > bl2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[CMP:%[^ ]+]] = fcmp ogt <2 x double> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = fd > fd2; +} + +void test_cmple (void) +{ +// CHECK-LABEL: test_cmple + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp sle <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc <= sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc <= uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ule <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc <= bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp sle <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss <= ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = us <= us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// 
CHECK: [[CMP:%[^ ]+]] = icmp ule <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs <= bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: [[CMP:%[^ ]+]] = icmp sle <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si <= si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui <= ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp ule <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi <= bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp sle <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl <= sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul <= ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp ule <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl <= bl2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[CMP:%[^ ]+]] = fcmp 
ole <2 x double> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = fd <= fd2; +} + +void test_cmplt (void) +{ +// CHECK-LABEL: test_cmplt + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @sc2 +// CHECK: [[CMP:%[^ ]+]] = icmp slt <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = sc < sc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @uc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = uc < uc2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc +// CHECK: [[VAL2:%[^ ]+]] = load volatile <16 x i8>, <16 x i8>* @bc2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <16 x i8> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <16 x i1> [[CMP]] to <16 x i8> + bc = bc < bc2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @ss2 +// CHECK: [[CMP:%[^ ]+]] = icmp slt <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = ss < ss2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @us2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = us < us2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs +// CHECK: [[VAL2:%[^ ]+]] = load volatile <8 x i16>, <8 x i16>* @bs2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <8 x i16> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <8 x i1> [[CMP]] to <8 x i16> + bs = bs < bs2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @si2 +// CHECK: 
[[CMP:%[^ ]+]] = icmp slt <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = si < si2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @ui2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = ui < ui2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi +// CHECK: [[VAL2:%[^ ]+]] = load volatile <4 x i32>, <4 x i32>* @bi2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <4 x i32> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <4 x i1> [[CMP]] to <4 x i32> + bi = bi < bi2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @sl2 +// CHECK: [[CMP:%[^ ]+]] = icmp slt <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = sl < sl2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @ul2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = ul < ul2; +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x i64>, <2 x i64>* @bl2 +// CHECK: [[CMP:%[^ ]+]] = icmp ult <2 x i64> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = bl < bl2; + +// CHECK: [[VAL1:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd +// CHECK: [[VAL2:%[^ ]+]] = load volatile <2 x double>, <2 x double>* @fd2 +// CHECK: [[CMP:%[^ ]+]] = fcmp olt <2 x double> [[VAL1]], [[VAL2]] +// CHECK: %{{.*}} = sext <2 x i1> [[CMP]] to <2 x i64> + bl = fd < fd2; +} + diff --git a/clang/test/Preprocessor/predefined-arch-macros.c b/clang/test/Preprocessor/predefined-arch-macros.c index 5126ef3d94cfe..14fc49478af5c 100644 --- 
a/clang/test/Preprocessor/predefined-arch-macros.c +++ b/clang/test/Preprocessor/predefined-arch-macros.c @@ -1748,3 +1748,12 @@ // RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_HTM // // CHECK_SYSTEMZ_HTM: #define __HTM__ 1 +// +// RUN: %clang -fzvector -E -dM %s -o - 2>&1 \ +// RUN: -target s390x-unknown-linux \ +// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_ZVECTOR +// RUN: %clang -mzvector -E -dM %s -o - 2>&1 \ +// RUN: -target s390x-unknown-linux \ +// RUN: | FileCheck %s -check-prefix=CHECK_SYSTEMZ_ZVECTOR +// +// CHECK_SYSTEMZ_ZVECTOR: #define __VEC__ 10301 diff --git a/clang/test/Sema/zvector.c b/clang/test/Sema/zvector.c new file mode 100644 index 0000000000000..5220a629cfe10 --- /dev/null +++ b/clang/test/Sema/zvector.c @@ -0,0 +1,1009 @@ +// RUN: %clang_cc1 -triple s390x-linux-gnu -fzvector \ +// RUN: -fno-lax-vector-conversions -W -Wall -Wconversion \ +// RUN: -Werror -fsyntax-only -verify %s + +vector signed char sc, sc2; +vector unsigned char uc, uc2; +vector bool char bc, bc2; + +vector signed short ss, ss2; +vector unsigned short us, us2; +vector bool short bs, bs2; + +vector signed int si, si2; +vector unsigned int ui, ui2; +vector bool int bi, bi2; + +vector signed long long sl, sl2; +vector unsigned long long ul, ul2; +vector bool long long bl, bl2; + +vector double fd, fd2; + +vector long ll; // expected-error {{cannot use 'long' with '__vector'}} +vector float ff; // expected-error {{cannot use 'float' with '__vector'}} + +signed char sc_scalar; +unsigned char uc_scalar; + +signed short ss_scalar; +unsigned short us_scalar; + +signed int si_scalar; +unsigned int ui_scalar; + +signed long sl_scalar; +unsigned long ul_scalar; + +double fd_scalar; + +void foo(void) +{ + // ------------------------------------------------------------------------- + // Test assignment. 
+ // ------------------------------------------------------------------------- + + sc = sc2; + uc = uc2; + bc = bc2; + + ss = ss2; + us = us2; + bs = bs2; + + si = si2; + ui = ui2; + bi = bi2; + + sl = sl2; + ul = ul2; + bl = bl2; + fd = fd2; + + sc = uc2; // expected-error {{incompatible type}} + sc = bc2; // expected-error {{incompatible type}} + uc = sc2; // expected-error {{incompatible type}} + uc = bc2; // expected-error {{incompatible type}} + bc = sc2; // expected-error {{incompatible type}} + bc = uc2; // expected-error {{incompatible type}} + + sc = sc_scalar; // expected-error {{incompatible type}} + sc = uc_scalar; // expected-error {{incompatible type}} + uc = sc_scalar; // expected-error {{incompatible type}} + uc = uc_scalar; // expected-error {{incompatible type}} + bc = sc_scalar; // expected-error {{incompatible type}} + bc = uc_scalar; // expected-error {{incompatible type}} + + sc = ss2; // expected-error {{incompatible type}} + sc = si2; // expected-error {{incompatible type}} + sc = sl2; // expected-error {{incompatible type}} + sc = fd2; // expected-error {{incompatible type}} + + ss = sc2; // expected-error {{incompatible type}} + si = sc2; // expected-error {{incompatible type}} + sl = sc2; // expected-error {{incompatible type}} + fd = sc2; // expected-error {{incompatible type}} + + uc = us2; // expected-error {{incompatible type}} + uc = ui2; // expected-error {{incompatible type}} + uc = ul2; // expected-error {{incompatible type}} + uc = fd2; // expected-error {{incompatible type}} + + us = uc2; // expected-error {{incompatible type}} + ui = uc2; // expected-error {{incompatible type}} + ul = uc2; // expected-error {{incompatible type}} + fd = uc2; // expected-error {{incompatible type}} + + bc = us2; // expected-error {{incompatible type}} + bc = ui2; // expected-error {{incompatible type}} + bc = ul2; // expected-error {{incompatible type}} + bc = fd2; // expected-error {{incompatible type}} + + bs = bc2; // expected-error 
{{incompatible type}} + bi = bc2; // expected-error {{incompatible type}} + bl = bc2; // expected-error {{incompatible type}} + fd = bc2; // expected-error {{incompatible type}} + + // ------------------------------------------------------------------------- + // Test casts to same element width. + // ------------------------------------------------------------------------- + + sc = (vector signed char)bc2; + bc = (vector bool char)uc2; + uc = (vector unsigned char)sc2; + + ss = (vector signed short)bs2; + bs = (vector bool short)us2; + us = (vector unsigned short)ss2; + + si = (vector signed int)bi2; + bi = (vector bool int)ui2; + ui = (vector unsigned int)si2; + + sl = (vector signed long long)bl2; + bl = (vector bool long long)ul2; + ul = (vector unsigned long long)fd2; + fd = (vector double)sl2; + + // ------------------------------------------------------------------------- + // Test casts to different element width. + // ------------------------------------------------------------------------- + + sc = (vector signed char)bs2; + bc = (vector bool char)us2; + uc = (vector unsigned char)fd2; + + ss = (vector signed short)bi2; + bs = (vector bool short)ui2; + us = (vector unsigned short)fd2; + + si = (vector signed int)bl2; + bi = (vector bool int)ul2; + ui = (vector unsigned int)fd2; + + sl = (vector signed long long)bc2; + bl = (vector bool long long)uc2; + ul = (vector unsigned long long)sc2; + fd = (vector double)sc2; + + // ------------------------------------------------------------------------- + // Test ++. 
+ // ------------------------------------------------------------------------- + + ++sc2; + ++uc2; + ++bc2; // expected-error {{cannot increment}} + + ++ss2; + ++us2; + ++bs2; // expected-error {{cannot increment}} + + ++si2; + ++ui2; + ++bi2; // expected-error {{cannot increment}} + + ++sl2; + ++ul2; + ++bl2; // expected-error {{cannot increment}} + + ++fd2; + + sc++; + uc++; + bc++; // expected-error {{cannot increment}} + + ss++; + us++; + bs++; // expected-error {{cannot increment}} + + si++; + ui++; + bi++; // expected-error {{cannot increment}} + + sl++; + ul++; + bl++; // expected-error {{cannot increment}} + + fd++; + + // ------------------------------------------------------------------------- + // Test --. + // ------------------------------------------------------------------------- + + --sc2; + --uc2; + --bc2; // expected-error {{cannot decrement}} + + --ss2; + --us2; + --bs2; // expected-error {{cannot decrement}} + + --si2; + --ui2; + --bi2; // expected-error {{cannot decrement}} + + --sl2; + --ul2; + --bl2; // expected-error {{cannot decrement}} + + --fd2; + + sc--; + uc--; + bc--; // expected-error {{cannot decrement}} + + ss--; + us--; + bs--; // expected-error {{cannot decrement}} + + si--; + ui--; + bi--; // expected-error {{cannot decrement}} + + sl--; + ul--; + bl--; // expected-error {{cannot decrement}} + + fd--; + + // ------------------------------------------------------------------------- + // Test unary +. 
+ // ------------------------------------------------------------------------- + + sc = +sc2; + uc = +uc2; + bc = +bc2; // expected-error {{invalid argument type}} + + ss = +ss2; + us = +us2; + bs = +bs2; // expected-error {{invalid argument type}} + + si = +si2; + ui = +ui2; + bi = +bi2; // expected-error {{invalid argument type}} + + sl = +sl2; + ul = +ul2; + bl = +bl2; // expected-error {{invalid argument type}} + + fd = +fd2; + + sc = +si2; // expected-error {{assigning to}} + ui = +si2; // expected-error {{assigning to}} + ui = +bi2; // expected-error {{invalid argument type}} + + // ------------------------------------------------------------------------- + // Test unary -. + // ------------------------------------------------------------------------- + + sc = -sc2; + uc = -uc2; + bc = -bc2; // expected-error {{invalid argument type}} + + ss = -ss2; + us = -us2; + bs = -bs2; // expected-error {{invalid argument type}} + + si = -si2; + ui = -ui2; + bi = -bi2; // expected-error {{invalid argument type}} + + sl = -sl2; + ul = -ul2; + bl = -bl2; // expected-error {{invalid argument type}} + + fd = -fd2; + + sc = -si2; // expected-error {{assigning to}} + ui = -si2; // expected-error {{assigning to}} + ui = -bi2; // expected-error {{invalid argument type}} + + // ------------------------------------------------------------------------- + // Test ~. + // ------------------------------------------------------------------------- + + sc = ~sc2; + uc = ~uc2; + bc = ~bc2; + + ss = ~ss2; + us = ~us2; + bs = ~bs2; + + si = ~si2; + ui = ~ui2; + bi = ~bi2; + + sl = ~sl2; + ul = ~ul2; + bl = ~bl2; + + fd = ~fd2; // expected-error {{invalid argument}} + + sc = ~si2; // expected-error {{assigning to}} + ui = ~si2; // expected-error {{assigning to}} + ui = ~bi2; // expected-error {{assigning to}} + + // ------------------------------------------------------------------------- + // Test binary +. 
+ // ------------------------------------------------------------------------- + + sc = sc + sc2; + sc = sc + uc2; // expected-error {{can't convert}} + sc = uc + sc2; // expected-error {{can't convert}} + sc = sc + bc2; + sc = bc + sc2; + + uc = uc + uc2; + uc = sc + uc2; // expected-error {{can't convert}} + uc = uc + sc2; // expected-error {{can't convert}} + uc = bc + uc2; + uc = uc + bc2; + + bc = bc + bc2; // expected-error {{invalid operands}} + bc = bc + uc2; // expected-error {{incompatible type}} + bc = uc + bc2; // expected-error {{incompatible type}} + bc = bc + sc2; // expected-error {{incompatible type}} + bc = sc + bc2; // expected-error {{incompatible type}} + + sc = sc + sc_scalar; // expected-error {{can't convert}} + sc = sc + uc_scalar; // expected-error {{can't convert}} + sc = sc_scalar + sc; // expected-error {{can't convert}} + sc = uc_scalar + sc; // expected-error {{can't convert}} + uc = uc + sc_scalar; // expected-error {{can't convert}} + uc = uc + uc_scalar; // expected-error {{can't convert}} + uc = sc_scalar + uc; // expected-error {{can't convert}} + uc = uc_scalar + uc; // expected-error {{can't convert}} + + ss = ss + ss2; + us = us + us2; + bs = bs + bs2; // expected-error {{invalid operands}} + + si = si + si2; + ui = ui + ui2; + bi = bi + bi2; // expected-error {{invalid operands}} + + sl = sl + sl2; + ul = ul + ul2; + bl = bl + bl2; // expected-error {{invalid operands}} + + fd = fd + fd2; + fd = fd + ul2; // expected-error {{can't convert}} + fd = sl + fd2; // expected-error {{can't convert}} + + sc += sc2; + sc += uc2; // expected-error {{can't convert}} + sc += bc2; + + uc += uc2; + uc += sc2; // expected-error {{can't convert}} + uc += bc2; + + bc += bc2; // expected-error {{invalid operands}} + bc += sc2; // expected-error {{can't convert}} + bc += uc2; // expected-error {{can't convert}} + + sc += ss2; // expected-error {{can't convert}} + sc += si2; // expected-error {{can't convert}} + sc += sl2; // expected-error 
{{can't convert}} + sc += fd2; // expected-error {{can't convert}} + + sc += sc_scalar; // expected-error {{can't convert}} + sc += uc_scalar; // expected-error {{can't convert}} + uc += sc_scalar; // expected-error {{can't convert}} + uc += uc_scalar; // expected-error {{can't convert}} + + ss += ss2; + us += us2; + bs += bs2; // expected-error {{invalid operands}} + + si += si2; + ui += ui2; + bi += bi2; // expected-error {{invalid operands}} + + sl += sl2; + ul += ul2; + bl += bl2; // expected-error {{invalid operands}} + + fd += fd2; + + // ------------------------------------------------------------------------- + // Test that binary + rules apply to binary - too. + // ------------------------------------------------------------------------- + + sc = sc - sc2; + uc = uc - uc2; + bc = bc - bc2; // expected-error {{invalid operands}} + + sc = uc - sc2; // expected-error {{can't convert}} + sc = sc - bc2; + uc = bc - uc2; + + sc -= sc2; + uc -= uc2; + bc -= bc2; // expected-error {{invalid operands}} + + sc -= uc2; // expected-error {{can't convert}} + uc -= bc2; + bc -= sc2; // expected-error {{can't convert}} + + ss -= ss2; + us -= us2; + bs -= bs2; // expected-error {{invalid operands}} + + si -= si2; + ui -= ui2; + bi -= bi2; // expected-error {{invalid operands}} + + sl -= sl2; + ul -= ul2; + bl -= bl2; // expected-error {{invalid operands}} + + fd -= fd2; + + // ------------------------------------------------------------------------- + // Test that binary + rules apply to * too. 64-bit integer multiplication + // is not required by the spec and so isn't tested here. 
+ // ------------------------------------------------------------------------- + + sc = sc * sc2; + uc = uc * uc2; + bc = bc * bc2; // expected-error {{invalid operands}} + + sc = uc * sc2; // expected-error {{can't convert}} + sc = sc * bc2; // expected-error {{can't convert}} + uc = bc * uc2; // expected-error {{can't convert}} + + sc *= sc2; + uc *= uc2; + bc *= bc2; // expected-error {{invalid operands}} + + sc *= uc2; // expected-error {{can't convert}} + uc *= bc2; // expected-error {{can't convert}} + bc *= sc2; // expected-error {{can't convert}} + + ss *= ss2; + us *= us2; + bs *= bs2; // expected-error {{invalid operands}} + + si *= si2; + ui *= ui2; + bi *= bi2; // expected-error {{invalid operands}} + + sl *= sl2; + ul *= ul2; + bl *= bl2; // expected-error {{invalid operands}} + + fd *= fd2; + + // ------------------------------------------------------------------------- + // Test that * rules apply to / too. + // ------------------------------------------------------------------------- + + sc = sc / sc2; + uc = uc / uc2; + bc = bc / bc2; // expected-error {{invalid operands}} + + sc = uc / sc2; // expected-error {{can't convert}} + sc = sc / bc2; // expected-error {{can't convert}} + uc = bc / uc2; // expected-error {{can't convert}} + + sc /= sc2; + uc /= uc2; + bc /= bc2; // expected-error {{invalid operands}} + + sc /= uc2; // expected-error {{can't convert}} + uc /= bc2; // expected-error {{can't convert}} + bc /= sc2; // expected-error {{can't convert}} + + ss /= ss2; + us /= us2; + bs /= bs2; // expected-error {{invalid operands}} + + si /= si2; + ui /= ui2; + bi /= bi2; // expected-error {{invalid operands}} + + sl /= sl2; + ul /= ul2; + bl /= bl2; // expected-error {{invalid operands}} + + fd /= fd2; + + // ------------------------------------------------------------------------- + // Test that / rules apply to % too, except that doubles are not allowed. 
+ // ------------------------------------------------------------------------- + + sc = sc % sc2; + uc = uc % uc2; + bc = bc % bc2; // expected-error {{invalid operands}} + + sc = uc % sc2; // expected-error {{can't convert}} + sc = sc % bc2; // expected-error {{can't convert}} + uc = bc % uc2; // expected-error {{can't convert}} + + sc %= sc2; + uc %= uc2; + bc %= bc2; // expected-error {{invalid operands}} + + sc %= uc2; // expected-error {{can't convert}} + uc %= bc2; // expected-error {{can't convert}} + bc %= sc2; // expected-error {{can't convert}} + + ss %= ss2; + us %= us2; + bs %= bs2; // expected-error {{invalid operands}} + + si %= si2; + ui %= ui2; + bi %= bi2; // expected-error {{invalid operands}} + + sl %= sl2; + ul %= ul2; + bl %= bl2; // expected-error {{invalid operands}} + + fd %= fd2; // expected-error {{invalid operands}} + + // ------------------------------------------------------------------------- + // Test &. + // ------------------------------------------------------------------------- + + sc = sc & sc2; + sc = sc & uc2; // expected-error {{can't convert}} + sc = uc & sc2; // expected-error {{can't convert}} + sc = sc & bc2; + sc = bc & sc2; + + uc = uc & uc2; + uc = sc & uc2; // expected-error {{can't convert}} + uc = uc & sc2; // expected-error {{can't convert}} + uc = bc & uc2; + uc = uc & bc2; + + bc = bc & bc2; + bc = bc & uc2; // expected-error {{incompatible type}} + bc = uc & bc2; // expected-error {{incompatible type}} + bc = bc & sc2; // expected-error {{incompatible type}} + bc = sc & bc2; // expected-error {{incompatible type}} + + fd = fd & fd2; // expected-error {{invalid operands}} + fd = bl & fd2; // expected-error {{invalid operands}} + fd = fd & bl2; // expected-error {{invalid operands}} + fd = fd & sl2; // expected-error {{invalid operands}} + fd = fd & ul2; // expected-error {{invalid operands}} + + sc &= sc2; + sc &= uc2; // expected-error {{can't convert}} + sc &= bc2; + + uc &= uc2; + uc &= sc2; // expected-error 
{{can't convert}} + uc &= bc2; + + bc &= bc2; + bc &= sc2; // expected-error {{can't convert}} + bc &= uc2; // expected-error {{can't convert}} + + sc &= ss2; // expected-error {{can't convert}} + sc &= si2; // expected-error {{can't convert}} + sc &= sl2; // expected-error {{can't convert}} + sc &= fd2; // expected-error {{invalid operands}} + + us &= bc2; // expected-error {{can't convert}} + ui &= bc2; // expected-error {{can't convert}} + ul &= bc2; // expected-error {{can't convert}} + fd &= bc2; // expected-error {{invalid operands}} + + ss &= ss2; + us &= us2; + bs &= bs2; + + si &= si2; + ui &= ui2; + bi &= bi2; + + sl &= sl2; + ul &= ul2; + bl &= bl2; + + // ------------------------------------------------------------------------- + // Test that & rules apply to | too. + // ------------------------------------------------------------------------- + + sc = sc | sc2; + sc = sc | uc2; // expected-error {{can't convert}} + sc = sc | bc2; + + uc = uc | uc2; + uc = sc | uc2; // expected-error {{can't convert}} + uc = bc | uc2; + + bc = bc | bc2; + bc = uc | bc2; // expected-error {{incompatible type}} + bc = bc | sc2; // expected-error {{incompatible type}} + + fd = fd | fd2; // expected-error {{invalid operands}} + fd = bl | fd2; // expected-error {{invalid operands}} + + ss |= ss2; + us |= us2; + bs |= bs2; + + si |= si2; + ui |= ui2; + bi |= bi2; + + sl |= sl2; + ul |= ul2; + bl |= bl2; + + fd |= bl2; // expected-error {{invalid operands}} + fd |= fd2; // expected-error {{invalid operands}} + + // ------------------------------------------------------------------------- + // Test that & rules apply to ^ too. 
+ // ------------------------------------------------------------------------- + + sc = sc ^ sc2; + sc = sc ^ uc2; // expected-error {{can't convert}} + sc = sc ^ bc2; + + uc = uc ^ uc2; + uc = sc ^ uc2; // expected-error {{can't convert}} + uc = bc ^ uc2; + + bc = bc ^ bc2; + bc = uc ^ bc2; // expected-error {{incompatible type}} + bc = bc ^ sc2; // expected-error {{incompatible type}} + + fd = fd ^ fd2; // expected-error {{invalid operands}} + fd = bl ^ fd2; // expected-error {{invalid operands}} + + ss ^= ss2; + us ^= us2; + bs ^= bs2; + + si ^= si2; + ui ^= ui2; + bi ^= bi2; + + sl ^= sl2; + ul ^= ul2; + bl ^= bl2; + + fd ^= bl2; // expected-error {{invalid operands}} + fd ^= fd2; // expected-error {{invalid operands}} + + // ------------------------------------------------------------------------- + // Test <<. + // ------------------------------------------------------------------------- + + sc = sc << sc2; + sc = sc << uc2; + sc = uc << sc2; // expected-error {{incompatible type}} + sc = sc << bc2; // expected-error {{invalid operands}} + sc = bc << sc2; // expected-error {{invalid operands}} + + uc = uc << uc2; + uc = sc << uc2; // expected-error {{assigning to}} + uc = uc << sc2; + uc = bc << uc2; // expected-error {{invalid operands}} + uc = uc << bc2; // expected-error {{invalid operands}} + + bc = bc << bc2; // expected-error {{invalid operands}} + bc = bc << uc2; // expected-error {{invalid operands}} + bc = uc << bc2; // expected-error {{invalid operands}} + bc = bc << sc2; // expected-error {{invalid operands}} + bc = sc << bc2; // expected-error {{invalid operands}} + + sc = sc << 1; + sc = sc << 1.0f; // expected-error {{integer is required}} + sc = sc << sc_scalar; + sc = sc << uc_scalar; + sc = sc << ss_scalar; + sc = sc << us_scalar; + sc = sc << si_scalar; + sc = sc << ui_scalar; + sc = sc << sl_scalar; + sc = sc << ul_scalar; + sc = sc_scalar << sc; // expected-error {{first operand is not a vector}} + sc = uc_scalar << sc; // expected-error 
{{first operand is not a vector}} + uc = uc << sc_scalar; + uc = uc << uc_scalar; + uc = sc_scalar << uc; // expected-error {{first operand is not a vector}} + uc = uc_scalar << uc; // expected-error {{first operand is not a vector}} + + ss = ss << ss2; + ss = ss << ss_scalar; + us = us << us2; + us = us << us_scalar; + bs = bs << bs2; // expected-error {{invalid operands}} + + si = si << si2; + si = si << si_scalar; + ui = ui << ui2; + ui = ui << ui_scalar; + bi = bi << bi2; // expected-error {{invalid operands}} + + sl = sl << sl2; + sl = sl << sl_scalar; + ul = ul << ul2; + ul = ul << ul_scalar; + bl = bl << bl2; // expected-error {{invalid operands}} + + fd = fd << fd2; // expected-error {{integer is required}} + fd = fd << ul2; // expected-error {{integer is required}} + fd = sl << fd2; // expected-error {{integer is required}} + + sc <<= sc2; + sc <<= uc2; + sc <<= bc2; // expected-error {{invalid operands}} + sc <<= sc_scalar; + + uc <<= uc2; + uc <<= sc2; + uc <<= bc2; // expected-error {{invalid operands}} + uc <<= uc_scalar; + + bc <<= bc2; // expected-error {{invalid operands}} + bc <<= sc2; // expected-error {{invalid operands}} + bc <<= uc2; // expected-error {{invalid operands}} + + sc <<= ss2; // expected-error {{vector operands do not have the same number of elements}} + sc <<= si2; // expected-error {{vector operands do not have the same number of elements}} + sc <<= sl2; // expected-error {{vector operands do not have the same number of elements}} + sc <<= fd2; // expected-error {{integer is required}} + + ss <<= ss2; + ss <<= ss_scalar; + us <<= us2; + us <<= us_scalar; + bs <<= bs2; // expected-error {{invalid operands}} + + si <<= si2; + si <<= si_scalar; + ui <<= ui2; + ui <<= ui_scalar; + bi <<= bi2; // expected-error {{invalid operands}} + + sl <<= sl2; + sl <<= sl_scalar; + ul <<= ul2; + ul <<= ul_scalar; + bl <<= bl2; // expected-error {{invalid operands}} + + fd <<= fd2; // expected-error {{integer is required}} + + // 
------------------------------------------------------------------------- + // Test >>. + // ------------------------------------------------------------------------- + + sc = sc >> sc2; + sc = sc >> uc2; + sc = uc >> sc2; // expected-error {{incompatible type}} + sc = sc >> bc2; // expected-error {{invalid operands}} + sc = bc >> sc2; // expected-error {{invalid operands}} + + uc = uc >> uc2; + uc = sc >> uc2; // expected-error {{assigning to}} + uc = uc >> sc2; + uc = bc >> uc2; // expected-error {{invalid operands}} + uc = uc >> bc2; // expected-error {{invalid operands}} + + bc = bc >> bc2; // expected-error {{invalid operands}} + bc = bc >> uc2; // expected-error {{invalid operands}} + bc = uc >> bc2; // expected-error {{invalid operands}} + bc = bc >> sc2; // expected-error {{invalid operands}} + bc = sc >> bc2; // expected-error {{invalid operands}} + + sc = sc >> 1; + sc = sc >> 1.0f; // expected-error {{integer is required}} + sc = sc >> sc_scalar; + sc = sc >> uc_scalar; + sc = sc >> ss_scalar; + sc = sc >> us_scalar; + sc = sc >> si_scalar; + sc = sc >> ui_scalar; + sc = sc >> sl_scalar; + sc = sc >> ul_scalar; + sc = sc_scalar >> sc; // expected-error {{first operand is not a vector}} + sc = uc_scalar >> sc; // expected-error {{first operand is not a vector}} + uc = uc >> sc_scalar; + uc = uc >> uc_scalar; + uc = sc_scalar >> uc; // expected-error {{first operand is not a vector}} + uc = uc_scalar >> uc; // expected-error {{first operand is not a vector}} + + ss = ss >> ss2; + ss = ss >> ss_scalar; + us = us >> us2; + us = us >> us_scalar; + bs = bs >> bs2; // expected-error {{invalid operands}} + + si = si >> si2; + si = si >> si_scalar; + ui = ui >> ui2; + ui = ui >> ui_scalar; + bi = bi >> bi2; // expected-error {{invalid operands}} + + sl = sl >> sl2; + sl = sl >> sl_scalar; + ul = ul >> ul2; + ul = ul >> ul_scalar; + bl = bl >> bl2; // expected-error {{invalid operands}} + + fd = fd >> fd2; // expected-error {{integer is required}} + fd = fd >> 
ul2; // expected-error {{integer is required}} + fd = sl >> fd2; // expected-error {{integer is required}} + + sc >>= sc2; + sc >>= uc2; + sc >>= bc2; // expected-error {{invalid operands}} + sc >>= sc_scalar; + + uc >>= uc2; + uc >>= sc2; + uc >>= bc2; // expected-error {{invalid operands}} + uc >>= uc_scalar; + + bc >>= bc2; // expected-error {{invalid operands}} + bc >>= sc2; // expected-error {{invalid operands}} + bc >>= uc2; // expected-error {{invalid operands}} + + sc >>= ss2; // expected-error {{vector operands do not have the same number of elements}} + sc >>= si2; // expected-error {{vector operands do not have the same number of elements}} + sc >>= sl2; // expected-error {{vector operands do not have the same number of elements}} + sc >>= fd2; // expected-error {{integer is required}} + + ss >>= ss2; + ss >>= ss_scalar; + us >>= us2; + us >>= us_scalar; + bs >>= bs2; // expected-error {{invalid operands}} + + si >>= si2; + si >>= si_scalar; + ui >>= ui2; + ui >>= ui_scalar; + bi >>= bi2; // expected-error {{invalid operands}} + + sl >>= sl2; + sl >>= sl_scalar; + ul >>= ul2; + ul >>= ul_scalar; + bl >>= bl2; // expected-error {{invalid operands}} + + fd >>= fd2; // expected-error {{integer is required}} + + // ------------------------------------------------------------------------- + // Test ==. 
+ // ------------------------------------------------------------------------- + + (void)(sc == sc2); + (void)(uc == uc2); + (void)(bc == bc2); + + (void)(sc == uc); // expected-error {{can't convert}} + (void)(sc == bc); + + (void)(uc == sc); // expected-error {{can't convert}} + (void)(uc == bc); + + (void)(bc == sc); + (void)(bc == uc); + + (void)(ss == ss2); + (void)(us == us2); + (void)(bs == bs2); + + (void)(si == si2); + (void)(ui == ui2); + (void)(bi == bi2); + + (void)(sl == sl2); + (void)(ul == ul2); + (void)(bl == bl2); + (void)(fd == fd2); + + (void)(fd == ul); // expected-error {{can't convert}} + (void)(ul == fd); // expected-error {{can't convert}} + + // ------------------------------------------------------------------------- + // Test that == rules apply to != too. + // ------------------------------------------------------------------------- + + (void)(sc != sc2); + (void)(uc != uc2); + (void)(bc != bc2); + + (void)(sc != uc); // expected-error {{can't convert}} + (void)(sc != bc); + + (void)(ss != ss2); + (void)(us != us2); + (void)(bs != bs2); + + (void)(si != si2); + (void)(ui != ui2); + (void)(bi != bi2); + + (void)(sl != sl2); + (void)(ul != ul2); + (void)(bl != bl2); + (void)(fd != fd2); + + // ------------------------------------------------------------------------- + // Test that == rules apply to <= too. + // ------------------------------------------------------------------------- + + (void)(sc <= sc2); + (void)(uc <= uc2); + (void)(bc <= bc2); + + (void)(sc <= uc); // expected-error {{can't convert}} + (void)(sc <= bc); + + (void)(ss <= ss2); + (void)(us <= us2); + (void)(bs <= bs2); + + (void)(si <= si2); + (void)(ui <= ui2); + (void)(bi <= bi2); + + (void)(sl <= sl2); + (void)(ul <= ul2); + (void)(bl <= bl2); + (void)(fd <= fd2); + + // ------------------------------------------------------------------------- + // Test that == rules apply to >= too. 
+ // ------------------------------------------------------------------------- + + (void)(sc >= sc2); + (void)(uc >= uc2); + (void)(bc >= bc2); + + (void)(sc >= uc); // expected-error {{can't convert}} + (void)(sc >= bc); + + (void)(ss >= ss2); + (void)(us >= us2); + (void)(bs >= bs2); + + (void)(si >= si2); + (void)(ui >= ui2); + (void)(bi >= bi2); + + (void)(sl >= sl2); + (void)(ul >= ul2); + (void)(bl >= bl2); + (void)(fd >= fd2); + + // ------------------------------------------------------------------------- + // Test that == rules apply to < too. + // ------------------------------------------------------------------------- + + (void)(sc < sc2); + (void)(uc < uc2); + (void)(bc < bc2); + + (void)(sc < uc); // expected-error {{can't convert}} + (void)(sc < bc); + + (void)(ss < ss2); + (void)(us < us2); + (void)(bs < bs2); + + (void)(si < si2); + (void)(ui < ui2); + (void)(bi < bi2); + + (void)(sl < sl2); + (void)(ul < ul2); + (void)(bl < bl2); + (void)(fd < fd2); + + // ------------------------------------------------------------------------- + // Test that == rules apply to > too. + // ------------------------------------------------------------------------- + + (void)(sc > sc2); + (void)(uc > uc2); + (void)(bc > bc2); + + (void)(sc > uc); // expected-error {{can't convert}} + (void)(sc > bc); + + (void)(ss > ss2); + (void)(us > us2); + (void)(bs > bs2); + + (void)(si > si2); + (void)(ui > ui2); + (void)(bi > bi2); + + (void)(sl > sl2); + (void)(ul > ul2); + (void)(bl > bl2); + (void)(fd > fd2); +}